
Merge pull request #138 from drasticactions/semantic-kernel

Enable Semantic kernel support
tags/v0.5.1
Rinne committed 2 years ago · commit 1533ee7dbf
15 changed files with 487 additions and 2 deletions
  1. LLama.Examples/LLama.Examples.csproj (+5, -0)
  2. LLama.Examples/NewVersion/SemanticKernelChat.cs (+69, -0)
  3. LLama.Examples/NewVersion/SemanticKernelPrompt.cs (+55, -0)
  4. LLama.Examples/NewVersion/TestRunner.cs (+11, -1)
  5. LLama.SemanticKernel/ChatCompletion/HistoryTransform.cs (+17, -0)
  6. LLama.SemanticKernel/ChatCompletion/LLamaSharpChatCompletion.cs (+74, -0)
  7. LLama.SemanticKernel/ChatCompletion/LLamaSharpChatMessage.cs (+14, -0)
  8. LLama.SemanticKernel/ChatCompletion/LLamaSharpChatResult.cs (+38, -0)
  9. LLama.SemanticKernel/ExtensionMethods.cs (+72, -0)
  10. LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj (+22, -0)
  11. LLama.SemanticKernel/README.md (+26, -0)
  12. LLama.SemanticKernel/TextCompletion/LLamaSharpTextCompletion.cs (+27, -0)
  13. LLama.SemanticKernel/TextCompletion/LLamaTextResult.cs (+37, -0)
  14. LLama/LLamaEmbedder.cs (+5, -0)
  15. LLamaSharp.sln (+15, -1)

LLama.Examples/LLama.Examples.csproj (+5, -0)

@@ -27,6 +27,11 @@
 </PropertyGroup>

 <ItemGroup>
+  <PackageReference Include="Microsoft.SemanticKernel" Version="0.21.230828.2-preview" />
+</ItemGroup>
+
+<ItemGroup>
+  <ProjectReference Include="..\LLama.SemanticKernel\LLamaSharp.SemanticKernel.csproj" />
   <ProjectReference Include="..\LLama\LLamaSharp.csproj" />
 </ItemGroup>




LLama.Examples/NewVersion/SemanticKernelChat.cs (+69, -0)

@@ -0,0 +1,69 @@
using System.Reflection.Metadata;
using System.Security.Cryptography;
using System.Text;
using LLama.Abstractions;
using LLama.Common;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.AI.ChatCompletion;
using Microsoft.SemanticKernel.AI.TextCompletion;
using LLamaSharp.SemanticKernel.ChatCompletion;
using LLamaSharp.SemanticKernel.TextCompletion;

namespace LLama.Examples.NewVersion
{
public class SemanticKernelChat
{
public static async Task Run()
{
Console.WriteLine("Example from: https://github.com/microsoft/semantic-kernel/blob/main/dotnet/README.md");
Console.Write("Please input your model path: ");
var modelPath = Console.ReadLine();

// Load weights into memory
var parameters = new ModelParams(modelPath)
{
Seed = RandomNumberGenerator.GetInt32(int.MaxValue),
};
using var model = LLamaWeights.LoadFromFile(parameters);
using var context = model.CreateContext(parameters);
var ex = new InteractiveExecutor(context);

var chatGPT = new LLamaSharpChatCompletion(ex);

var chatHistory = chatGPT.CreateNewChat("You are a librarian, expert about books");

Console.WriteLine("Chat content:");
Console.WriteLine("------------------------");

chatHistory.AddUserMessage("Hi, I'm looking for book suggestions");
await MessageOutputAsync(chatHistory);

// First bot assistant message
string reply = await chatGPT.GenerateMessageAsync(chatHistory);
chatHistory.AddAssistantMessage(reply);
await MessageOutputAsync(chatHistory);

// Second user message
chatHistory.AddUserMessage("I love history and philosophy, I'd like to learn something new about Greece, any suggestion");
await MessageOutputAsync(chatHistory);

// Second bot assistant message
reply = await chatGPT.GenerateMessageAsync(chatHistory);
chatHistory.AddAssistantMessage(reply);
await MessageOutputAsync(chatHistory);
}

/// <summary>
/// Outputs the last message of the chat history
/// </summary>
private static Task MessageOutputAsync(Microsoft.SemanticKernel.AI.ChatCompletion.ChatHistory chatHistory)
{
var message = chatHistory.Messages.Last();

Console.WriteLine($"{message.Role}: {message.Content}");
Console.WriteLine("------------------------");

return Task.CompletedTask;
}
}
}

LLama.Examples/NewVersion/SemanticKernelPrompt.cs (+55, -0)

@@ -0,0 +1,55 @@
using System.Reflection.Metadata;
using System.Security.Cryptography;
using System.Text;
using LLama.Abstractions;
using LLama.Common;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.AI.ChatCompletion;
using Microsoft.SemanticKernel.AI.TextCompletion;
using LLamaSharp.SemanticKernel.TextCompletion;

namespace LLama.Examples.NewVersion
{
public class SemanticKernelPrompt
{
public static async Task Run()
{
Console.WriteLine("Example from: https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/KernelSyntaxExamples/Example17_ChatGPT.cs");
Console.Write("Please input your model path: ");
var modelPath = Console.ReadLine();

// Load weights into memory
var parameters = new ModelParams(modelPath)
{
Seed = RandomNumberGenerator.GetInt32(int.MaxValue),
};
using var model = LLamaWeights.LoadFromFile(parameters);
var ex = new StatelessExecutor(model, parameters);

var builder = new KernelBuilder();
builder.WithAIService<ITextCompletion>("local-llama", new LLamaSharpTextCompletion(ex), true);

var kernel = builder.Build();

var prompt = @"{{$input}}

One line TLDR with the fewest words.";

var summarize = kernel.CreateSemanticFunction(prompt, maxTokens: 100);

string text1 = @"
1st Law of Thermodynamics - Energy cannot be created or destroyed.
2nd Law of Thermodynamics - For a spontaneous process, the entropy of the universe increases.
3rd Law of Thermodynamics - A perfect crystal at zero Kelvin has zero entropy.";

string text2 = @"
1. An object at rest remains at rest, and an object in motion remains in motion at constant speed and in a straight line unless acted on by an unbalanced force.
2. The acceleration of an object depends on the mass of the object and the amount of force applied.
3. Whenever one object exerts a force on another object, the second object exerts an equal and opposite on the first.";

Console.WriteLine(await summarize.InvokeAsync(text1));

Console.WriteLine(await summarize.InvokeAsync(text2));
}
}
}

LLama.Examples/NewVersion/TestRunner.cs (+11, -1)

@@ -8,7 +8,7 @@

 Console.WriteLine("Please input a number to choose an example to run:");
 Console.WriteLine("0: Run a chat session without stripping the role names.");
-Console.WriteLine("1: Run a chat session with the role names strippped.");
+Console.WriteLine("1: Run a chat session with the role names stripped.");
 Console.WriteLine("2: Interactive mode chat by using executor.");
 Console.WriteLine("3: Instruct mode chat by using executor.");
 Console.WriteLine("4: Stateless mode chat by using executor.");
@@ -18,6 +18,8 @@
 Console.WriteLine("8: Quantize the model.");
 Console.WriteLine("9: Automatic conversation.");
 Console.WriteLine("10: Constrain response to json format using grammar.");
+Console.WriteLine("11: Semantic Kernel Prompt.");
+Console.WriteLine("12: Semantic Kernel Chat.");

 while (true)
 {
@@ -68,6 +70,14 @@
 {
     GrammarJsonResponse.Run();
 }
+else if (choice == 11)
+{
+    await SemanticKernelPrompt.Run();
+}
+else if (choice == 12)
+{
+    await SemanticKernelChat.Run();
+}
 else
 {
     Console.WriteLine("Cannot parse your choice. Please select again.");


LLama.SemanticKernel/ChatCompletion/HistoryTransform.cs (+17, -0)

@@ -0,0 +1,17 @@
using static LLama.LLamaTransforms;

namespace LLamaSharp.SemanticKernel.ChatCompletion;

/// <summary>
/// Patch of the default HistoryTransform that appends an "Assistant:" suffix so the model replies in the assistant role.
/// </summary>
public class HistoryTransform : DefaultHistoryTransform
{
/// <inheritdoc/>
public override string HistoryToText(global::LLama.Common.ChatHistory history)
{
var prompt = base.HistoryToText(history);
return prompt + "\nAssistant:";
}
}
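
As a rough illustration (assuming `DefaultHistoryTransform` renders each message as a `Role: content` line, which is its default behavior), a short history would be serialized into a prompt like the following, with the appended suffix cueing the model to continue as the assistant:

```
System: You are a librarian, expert about books
User: Hi, I'm looking for book suggestions
Assistant:
```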

LLama.SemanticKernel/ChatCompletion/LLamaSharpChatCompletion.cs (+74, -0)

@@ -0,0 +1,74 @@
using LLama;
using Microsoft.SemanticKernel.AI.ChatCompletion;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading;
using System.Threading.Tasks;

namespace LLamaSharp.SemanticKernel.ChatCompletion;

/// <summary>
/// LLamaSharp ChatCompletion
/// </summary>
public sealed class LLamaSharpChatCompletion : IChatCompletion
{
private const string UserRole = "user:";
private const string AssistantRole = "assistant:";
private ChatSession session;

public LLamaSharpChatCompletion(InteractiveExecutor model)
{
this.session = new ChatSession(model)
.WithHistoryTransform(new HistoryTransform())
.WithOutputTransform(new LLamaTransforms.KeywordTextOutputStreamTransform(new string[] { UserRole, AssistantRole }));
}

/// <inheritdoc/>
public ChatHistory CreateNewChat(string? instructions = "")
{
var history = new ChatHistory();

if (instructions != null && !string.IsNullOrEmpty(instructions))
{
history.AddSystemMessage(instructions);
}

return history;
}

/// <inheritdoc/>
public async Task<IReadOnlyList<IChatResult>> GetChatCompletionsAsync(ChatHistory chat, ChatRequestSettings? requestSettings = null, CancellationToken cancellationToken = default)
{
requestSettings ??= new ChatRequestSettings()
{
MaxTokens = 256,
Temperature = 0,
TopP = 0,
StopSequences = new List<string> { }
};

var result = this.session.ChatAsync(chat.ToLLamaSharpChatHistory(), requestSettings.ToLLamaSharpInferenceParams(), cancellationToken);

return new List<IChatResult> { new LLamaSharpChatResult(result) }.AsReadOnly();
}

/// <inheritdoc/>
public async IAsyncEnumerable<IChatStreamingResult> GetStreamingChatCompletionsAsync(ChatHistory chat, ChatRequestSettings? requestSettings = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
{
requestSettings ??= new ChatRequestSettings()
{
MaxTokens = 256,
Temperature = 0,
TopP = 0,
StopSequences = new List<string> { }
};

var result = this.session.ChatAsync(chat.ToLLamaSharpChatHistory(), requestSettings.ToLLamaSharpInferenceParams(), cancellationToken);

yield return new LLamaSharpChatResult(result);
}
}
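
A minimal sketch of consuming this class, reusing the `chatGPT` and `chatHistory` objects from the examples above; the streaming consumption pattern is an illustration, not part of the file:

```csharp
// Non-streaming: collect the full reply, then append it to the history.
string reply = await chatGPT.GenerateMessageAsync(chatHistory);
chatHistory.AddAssistantMessage(reply);

// Streaming: print tokens as the executor produces them.
await foreach (var result in chatGPT.GetStreamingChatCompletionsAsync(chatHistory))
{
    await foreach (var message in result.GetStreamingChatMessageAsync())
    {
        Console.Write(message.Content);
    }
}
```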

LLama.SemanticKernel/ChatCompletion/LLamaSharpChatMessage.cs (+14, -0)

@@ -0,0 +1,14 @@
using Microsoft.SemanticKernel.AI.ChatCompletion;

namespace LLamaSharp.SemanticKernel.ChatCompletion;

/// <summary>
/// LLamaSharp Chat Message
/// </summary>
public class LLamaSharpChatMessage : ChatMessageBase
{
/// <inheritdoc/>
public LLamaSharpChatMessage(AuthorRole role, string content) : base(role, content)
{
}
}

LLama.SemanticKernel/ChatCompletion/LLamaSharpChatResult.cs (+38, -0)

@@ -0,0 +1,38 @@
using Microsoft.SemanticKernel.AI.ChatCompletion;
using System.Runtime.CompilerServices;
using System.Text;

namespace LLamaSharp.SemanticKernel.ChatCompletion;

internal sealed class LLamaSharpChatResult : IChatStreamingResult
{
private readonly IAsyncEnumerable<string> _stream;

/// <summary>
/// Wraps the asynchronous token stream produced by the executor.
/// </summary>
/// <param name="stream">Stream of generated text tokens.</param>
public LLamaSharpChatResult(IAsyncEnumerable<string> stream)
{
_stream = stream;
}
/// <inheritdoc/>
public async Task<ChatMessageBase> GetChatMessageAsync(CancellationToken cancellationToken = default)
{
var sb = new StringBuilder();
await foreach (var token in _stream)
{
sb.Append(token);
}
return await Task.FromResult(new LLamaSharpChatMessage(AuthorRole.Assistant, sb.ToString())).ConfigureAwait(false);
}

/// <inheritdoc/>
public async IAsyncEnumerable<ChatMessageBase> GetStreamingChatMessageAsync([EnumeratorCancellation] CancellationToken cancellationToken = default)
{
await foreach (var token in _stream)
{
yield return new LLamaSharpChatMessage(AuthorRole.Assistant, token);
}
}
}

LLama.SemanticKernel/ExtensionMethods.cs (+72, -0)

@@ -0,0 +1,72 @@
using Microsoft.SemanticKernel.AI.ChatCompletion;
using Microsoft.SemanticKernel.AI.TextCompletion;

namespace LLamaSharp.SemanticKernel;

internal static class ExtensionMethods
{
internal static global::LLama.Common.ChatHistory ToLLamaSharpChatHistory(this ChatHistory chatHistory)
{
if (chatHistory is null)
{
throw new ArgumentNullException(nameof(chatHistory));
}

var history = new global::LLama.Common.ChatHistory();

foreach (var chat in chatHistory)
{
var role = Enum.TryParse<global::LLama.Common.AuthorRole>(chat.Role.Label, out var _role) ? _role : global::LLama.Common.AuthorRole.Unknown;
history.AddMessage(role, chat.Content);
}

return history;
}

/// <summary>
/// Convert ChatRequestSettings to LLamaSharp InferenceParams
/// </summary>
/// <param name="requestSettings"></param>
/// <returns></returns>
internal static global::LLama.Common.InferenceParams ToLLamaSharpInferenceParams(this ChatRequestSettings requestSettings)
{
if (requestSettings is null)
{
throw new ArgumentNullException(nameof(requestSettings));
}

var antiPrompts = new List<string>(requestSettings.StopSequences) { AuthorRole.User.ToString() + ":" };
return new global::LLama.Common.InferenceParams
{
Temperature = (float)requestSettings.Temperature,
TopP = (float)requestSettings.TopP,
PresencePenalty = (float)requestSettings.PresencePenalty,
FrequencyPenalty = (float)requestSettings.FrequencyPenalty,
AntiPrompts = antiPrompts,
MaxTokens = requestSettings.MaxTokens ?? -1
};
}

/// <summary>
/// Convert CompleteRequestSettings to LLamaSharp InferenceParams
/// </summary>
/// <param name="requestSettings"></param>
/// <returns></returns>
internal static global::LLama.Common.InferenceParams ToLLamaSharpInferenceParams(this CompleteRequestSettings requestSettings)
{
if (requestSettings is null)
{
throw new ArgumentNullException(nameof(requestSettings));
}

return new global::LLama.Common.InferenceParams
{
Temperature = (float)requestSettings.Temperature,
TopP = (float)requestSettings.TopP,
PresencePenalty = (float)requestSettings.PresencePenalty,
FrequencyPenalty = (float)requestSettings.FrequencyPenalty,
AntiPrompts = requestSettings.StopSequences,
MaxTokens = requestSettings.MaxTokens ?? -1
};
}
}
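
For reference, a minimal sketch of what the chat conversion yields (the settings values are the defaults used by `LLamaSharpChatCompletion`; the inspection lines are illustrative, and the extension is internal, so this only compiles inside the LLamaSharp.SemanticKernel assembly):

```csharp
var settings = new ChatRequestSettings { MaxTokens = 256, Temperature = 0, TopP = 0 };
var inferenceParams = settings.ToLLamaSharpInferenceParams();

// The user role label plus ":" is appended as an anti-prompt, so decoding
// stops before the model starts writing the user's next message.
Console.WriteLine(string.Join(", ", inferenceParams.AntiPrompts));
Console.WriteLine(inferenceParams.MaxTokens); // 256
```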

LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj (+22, -0)

@@ -0,0 +1,22 @@
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<TargetFrameworks>netstandard2.0;net6.0;net7.0</TargetFrameworks>
<RootNamespace>LLamaSharp.SemanticKernel</RootNamespace>
<Nullable>enable</Nullable>
<LangVersion>10</LangVersion>
<Platforms>AnyCPU;x64;Arm64</Platforms>
<AllowUnsafeBlocks>True</AllowUnsafeBlocks>
<ImplicitUsings>enable</ImplicitUsings>
</PropertyGroup>

<ItemGroup>
<PackageReference Include="Microsoft.SemanticKernel.Abstractions" Version="0.21.230828.2-preview" />
</ItemGroup>

<ItemGroup>
<ProjectReference Include="..\LLama\LLamaSharp.csproj" />
</ItemGroup>

</Project>

LLama.SemanticKernel/README.md (+26, -0)

@@ -0,0 +1,26 @@
# LLamaSharp.SemanticKernel

LLamaSharp.SemanticKernel provides connectors for [SemanticKernel](https://github.com/microsoft/semantic-kernel), an SDK for integrating various LLM interfaces behind a single implementation. With it, you can add local LLaMA models as another connection point alongside your existing connectors.

For reference on how to implement it, view the following examples:

- [SemanticKernelChat](../LLama.Examples/NewVersion/SemanticKernelChat.cs)
- [SemanticKernelPrompt](../LLama.Examples/NewVersion/SemanticKernelPrompt.cs)

## ITextCompletion
```csharp
using var model = LLamaWeights.LoadFromFile(parameters);
// LLamaSharpTextCompletion can accept ILLamaExecutor.
var ex = new StatelessExecutor(model, parameters);
var builder = new KernelBuilder();
builder.WithAIService<ITextCompletion>("local-llama", new LLamaSharpTextCompletion(ex), true);
```
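
Once registered, the service can back a semantic function; the snippet below mirrors the `SemanticKernelPrompt` example (the prompt text and `maxTokens` value are illustrative):

```csharp
var kernel = builder.Build();

var prompt = @"{{$input}}

One line TLDR with the fewest words.";
var summarize = kernel.CreateSemanticFunction(prompt, maxTokens: 100);

Console.WriteLine(await summarize.InvokeAsync("Some long text to summarize."));
```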

## IChatCompletion
```csharp
using var model = LLamaWeights.LoadFromFile(parameters);
using var context = model.CreateContext(parameters);
// LLamaSharpChatCompletion requires an InteractiveExecutor, since multi-turn chat relies on its stateful session.
var ex = new InteractiveExecutor(context);
var chatGPT = new LLamaSharpChatCompletion(ex);
```
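
From there, a conversation proceeds as in the `SemanticKernelChat` example (the messages below are illustrative):

```csharp
var chatHistory = chatGPT.CreateNewChat("You are a librarian, expert about books");
chatHistory.AddUserMessage("Hi, I'm looking for book suggestions");

string reply = await chatGPT.GenerateMessageAsync(chatHistory);
chatHistory.AddAssistantMessage(reply);
```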

LLama.SemanticKernel/TextCompletion/LLamaSharpTextCompletion.cs (+27, -0)

@@ -0,0 +1,27 @@
using LLama;
using LLama.Abstractions;
using Microsoft.SemanticKernel.AI.TextCompletion;

namespace LLamaSharp.SemanticKernel.TextCompletion;

public sealed class LLamaSharpTextCompletion : ITextCompletion
{
public ILLamaExecutor executor;

public LLamaSharpTextCompletion(ILLamaExecutor executor)
{
this.executor = executor;
}

public async Task<IReadOnlyList<ITextResult>> GetCompletionsAsync(string text, CompleteRequestSettings requestSettings, CancellationToken cancellationToken = default)
{
var result = executor.InferAsync(text, requestSettings.ToLLamaSharpInferenceParams(), cancellationToken);
return await Task.FromResult(new List<ITextResult> { new LLamaTextResult(result) }.AsReadOnly()).ConfigureAwait(false);
}

public async IAsyncEnumerable<ITextStreamingResult> GetStreamingCompletionsAsync(string text, CompleteRequestSettings requestSettings, CancellationToken cancellationToken = default)
{
var result = executor.InferAsync(text, requestSettings.ToLLamaSharpInferenceParams(), cancellationToken);
yield return new LLamaTextResult(result);
}
}
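
A minimal sketch of calling this class directly, assuming the `ex` StatelessExecutor from the README; the prompt and `CompleteRequestSettings` values are illustrative:

```csharp
var completion = new LLamaSharpTextCompletion(ex);
var settings = new CompleteRequestSettings { MaxTokens = 100 };

// Stream the completion token by token.
await foreach (var result in completion.GetStreamingCompletionsAsync("Write a haiku about llamas.", settings))
{
    await foreach (var token in result.GetCompletionStreamingAsync())
    {
        Console.Write(token);
    }
}
```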

LLama.SemanticKernel/TextCompletion/LLamaTextResult.cs (+37, -0)

@@ -0,0 +1,37 @@
using Microsoft.SemanticKernel.AI.TextCompletion;
using Microsoft.SemanticKernel.Orchestration;
using System.Runtime.CompilerServices;
using System.Text;

namespace LLamaSharp.SemanticKernel.TextCompletion;

internal sealed class LLamaTextResult : ITextStreamingResult
{
private readonly IAsyncEnumerable<string> _text;

public LLamaTextResult(IAsyncEnumerable<string> text)
{
_text = text;
ModelResult = new(text);
}

public ModelResult ModelResult { get; }

public async Task<string> GetCompletionAsync(CancellationToken cancellationToken = default)
{
var sb = new StringBuilder();
await foreach (var token in _text)
{
sb.Append(token);
}
return await Task.FromResult(sb.ToString()).ConfigureAwait(false);
}

public async IAsyncEnumerable<string> GetCompletionStreamingAsync([EnumeratorCancellation] CancellationToken cancellationToken = default)
{
await foreach (string word in _text)
{
yield return word;
}
}
}

LLama/LLamaEmbedder.cs (+5, -0)

@@ -29,6 +29,11 @@ namespace LLama
     _ctx = weights.CreateContext(@params);
 }

+public LLamaEmbedder(LLamaWeights weights, IModelParams @params)
+{
+    _ctx = weights.CreateContext(@params);
+}
+
 /// <summary>
 /// Get the embeddings of the text.
 /// </summary>


LLamaSharp.sln (+15, -1)

@@ -11,7 +11,9 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LLamaSharp", "LLama\LLamaSh
 EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LLama.WebAPI", "LLama.WebAPI\LLama.WebAPI.csproj", "{D3CEC57A-9027-4DA4-AAAC-612A1EB50ADF}"
 EndProject
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LLama.Web", "LLama.Web\LLama.Web.csproj", "{C3531DB2-1B2B-433C-8DE6-3541E3620DB1}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LLama.Web", "LLama.Web\LLama.Web.csproj", "{C3531DB2-1B2B-433C-8DE6-3541E3620DB1}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LLamaSharp.SemanticKernel", "LLama.SemanticKernel\LLamaSharp.SemanticKernel.csproj", "{D98F93E3-B344-4F9D-86BB-FDBF6768B587}"
 EndProject
 Global
 GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -83,6 +85,18 @@ Global
 {C3531DB2-1B2B-433C-8DE6-3541E3620DB1}.Release|Any CPU.Build.0 = Release|Any CPU
 {C3531DB2-1B2B-433C-8DE6-3541E3620DB1}.Release|x64.ActiveCfg = Release|Any CPU
 {C3531DB2-1B2B-433C-8DE6-3541E3620DB1}.Release|x64.Build.0 = Release|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.Debug|Any CPU.Build.0 = Debug|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.Debug|x64.ActiveCfg = Debug|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.Debug|x64.Build.0 = Debug|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.GPU|Any CPU.Build.0 = Debug|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.GPU|x64.ActiveCfg = Debug|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.GPU|x64.Build.0 = Debug|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.Release|Any CPU.ActiveCfg = Release|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.Release|Any CPU.Build.0 = Release|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.Release|x64.ActiveCfg = Release|Any CPU
+{D98F93E3-B344-4F9D-86BB-FDBF6768B587}.Release|x64.Build.0 = Release|Any CPU
 EndGlobalSection
 GlobalSection(SolutionProperties) = preSolution
 HideSolutionNode = FALSE

