Browse Source

bump sk-1.0.0-rc4

tags/0.9.1
xbotter 1 year ago
parent
commit
213b4be723
No known key found for this signature in database. GPG Key ID: A3F32F44E9F160E1
11 changed files with 30 additions and 37 deletions
  1. +5
    -4
      LLama.Examples/Examples/SemanticKernelChat.cs
  2. +4
    -4
      LLama.Examples/Examples/SemanticKernelPrompt.cs
  3. +1
    -1
      LLama.Examples/LLama.Examples.csproj
  4. +1
    -1
      LLama.SemanticKernel/ChatCompletion/ChatRequestSettings.cs
  5. +4
    -5
      LLama.SemanticKernel/ChatCompletion/ChatRequestSettingsConverter.cs
  6. +3
    -6
      LLama.SemanticKernel/ChatCompletion/LLamaSharpChatCompletion.cs
  7. +1
    -2
      LLama.SemanticKernel/ExtensionMethods.cs
  8. +1
    -1
      LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj
  9. +3
    -6
      LLama.SemanticKernel/TextCompletion/LLamaSharpTextCompletion.cs
  10. +2
    -2
      LLama.SemanticKernel/TextEmbedding/LLamaSharpEmbeddingGeneration.cs
  11. +5
    -5
      LLama.Unittest/SemanticKernel/ChatRequestSettingsTests.cs

+ 5
- 4
LLama.Examples/Examples/SemanticKernelChat.cs View File

@@ -2,6 +2,7 @@
using LLama.Common;
using Microsoft.SemanticKernel.AI.ChatCompletion;
using LLamaSharp.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.ChatCompletion;

namespace LLama.Examples.Examples
{
@@ -29,8 +30,8 @@ namespace LLama.Examples.Examples
await MessageOutputAsync(chatHistory);

// First bot assistant message
string reply = await chatGPT.GetChatMessageContentAsync(chatHistory);
chatHistory.AddAssistantMessage(reply);
var reply = await chatGPT.GetChatMessageContentAsync(chatHistory);
chatHistory.AddAssistantMessage(reply.Content);
await MessageOutputAsync(chatHistory);

// Second user message
@@ -39,14 +40,14 @@ namespace LLama.Examples.Examples

// Second bot assistant message
reply = await chatGPT.GetChatMessageContentAsync(chatHistory);
chatHistory.AddAssistantMessage(reply);
chatHistory.AddAssistantMessage(reply.Content);
await MessageOutputAsync(chatHistory);
}

/// <summary>
/// Outputs the last message of the chat history
/// </summary>
private static Task MessageOutputAsync(Microsoft.SemanticKernel.AI.ChatCompletion.ChatHistory chatHistory)
private static Task MessageOutputAsync(Microsoft.SemanticKernel.ChatCompletion.ChatHistory chatHistory)
{
var message = chatHistory.Last();



+ 4
- 4
LLama.Examples/Examples/SemanticKernelPrompt.cs View File

@@ -3,7 +3,7 @@ using LLama.Common;
using LLamaSharp.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel;
using LLamaSharp.SemanticKernel.TextCompletion;
using Microsoft.SemanticKernel.AI.TextGeneration;
using Microsoft.SemanticKernel.TextGeneration;
using Microsoft.Extensions.DependencyInjection;

namespace LLama.Examples.Examples
@@ -21,7 +21,7 @@ namespace LLama.Examples.Examples
using var model = LLamaWeights.LoadFromFile(parameters);
var ex = new StatelessExecutor(model, parameters);

var builder = new KernelBuilder();
var builder = Kernel.CreateBuilder();
builder.Services.AddKeyedSingleton<ITextGenerationService>("local-llama", new LLamaSharpTextCompletion(ex));

var kernel = builder.Build();
@@ -43,9 +43,9 @@ One line TLDR with the fewest words.";
2. The acceleration of an object depends on the mass of the object and the amount of force applied.
3. Whenever one object exerts a force on another object, the second object exerts an equal and opposite force on the first.";

Console.WriteLine((await kernel.InvokeAsync(summarize,new KernelArguments(text1))).GetValue<string>());
Console.WriteLine((await kernel.InvokeAsync(summarize, new() { ["input"] = text1 })).GetValue<string>());

Console.WriteLine((await kernel.InvokeAsync(summarize, new KernelArguments(text2))).GetValue<string>());
Console.WriteLine((await kernel.InvokeAsync(summarize, new() { ["input"] = text2 })).GetValue<string>());
}
}
}

+ 1
- 1
LLama.Examples/LLama.Examples.csproj View File

@@ -16,7 +16,7 @@
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Logging.Console" Version="8.0.0" />
<PackageReference Include="Microsoft.KernelMemory.Core" Version="0.18.231209.1-preview" />
<PackageReference Include="Microsoft.SemanticKernel" Version="1.0.0-rc3" />
<PackageReference Include="Microsoft.SemanticKernel" Version="1.0.0-rc4" />
<PackageReference Include="Spectre.Console" Version="0.48.0" />
</ItemGroup>



+ 1
- 1
LLama.SemanticKernel/ChatCompletion/ChatRequestSettings.cs View File

@@ -1,4 +1,4 @@
using Microsoft.SemanticKernel.AI;
using Microsoft.SemanticKernel;
using System.Text.Json;
using System.Text.Json.Serialization;



+ 4
- 5
LLama.SemanticKernel/ChatCompletion/ChatRequestSettingsConverter.cs View File

@@ -31,6 +31,10 @@ public class ChatRequestSettingsConverter : JsonConverter<ChatRequestSettings>

switch (propertyName)
{
case "MODELID":
case "MODEL_ID":
requestSettings.ModelId = reader.GetString();
break;
case "TEMPERATURE":
requestSettings.Temperature = reader.GetDouble();
break;
@@ -62,10 +66,6 @@ public class ChatRequestSettingsConverter : JsonConverter<ChatRequestSettings>
case "TOKEN_SELECTION_BIASES":
requestSettings.TokenSelectionBiases = JsonSerializer.Deserialize<IDictionary<int, int>>(ref reader, options) ?? new Dictionary<int, int>();
break;
case "SERVICEID":
case "SERVICE_ID":
requestSettings.ServiceId = reader.GetString();
break;
default:
reader.Skip();
break;
@@ -98,7 +98,6 @@ public class ChatRequestSettingsConverter : JsonConverter<ChatRequestSettings>
writer.WriteNumber("results_per_prompt", value.ResultsPerPrompt);
writer.WritePropertyName("token_selection_biases");
JsonSerializer.Serialize(writer, value.TokenSelectionBiases, options);
writer.WriteString("service_id", value.ServiceId);

writer.WriteEndObject();
}

+ 3
- 6
LLama.SemanticKernel/ChatCompletion/LLamaSharpChatCompletion.cs View File

@@ -1,8 +1,7 @@
using LLama;
using LLama.Abstractions;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.AI;
using Microsoft.SemanticKernel.AI.ChatCompletion;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Services;
using System;
using System.IO;
@@ -22,11 +21,9 @@ public sealed class LLamaSharpChatCompletion : IChatCompletionService
private readonly IHistoryTransform historyTransform;
private readonly ITextStreamTransform outputTransform;

private readonly Dictionary<string, string> _attributes = new();
private readonly Dictionary<string, object?> _attributes = new();

public IReadOnlyDictionary<string, string> Attributes => this._attributes;

IReadOnlyDictionary<string, object?> IAIService.Attributes => throw new NotImplementedException();
public IReadOnlyDictionary<string, object?> Attributes => this._attributes;

static ChatRequestSettings GetDefaultSettings()
{


+ 1
- 2
LLama.SemanticKernel/ExtensionMethods.cs View File

@@ -1,6 +1,5 @@
using LLamaSharp.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.AI.ChatCompletion;

using Microsoft.SemanticKernel.ChatCompletion;
namespace LLamaSharp.SemanticKernel;

public static class ExtensionMethods


+ 1
- 1
LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj View File

@@ -34,7 +34,7 @@
</PropertyGroup>

<ItemGroup>
<PackageReference Include="Microsoft.SemanticKernel.Abstractions" Version="1.0.0-rc3" />
<PackageReference Include="Microsoft.SemanticKernel.Abstractions" Version="1.0.0-rc4" />
</ItemGroup>

<ItemGroup Condition="'$(TargetFramework)' == 'netstandard2.0'">


+ 3
- 6
LLama.SemanticKernel/TextCompletion/LLamaSharpTextCompletion.cs View File

@@ -1,9 +1,8 @@
using LLama.Abstractions;
using LLamaSharp.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.AI;
using Microsoft.SemanticKernel.AI.TextGeneration;
using Microsoft.SemanticKernel.Services;
using Microsoft.SemanticKernel.TextGeneration;
using System.Runtime.CompilerServices;
using System.Text;

@@ -13,11 +12,9 @@ public sealed class LLamaSharpTextCompletion : ITextGenerationService
{
public ILLamaExecutor executor;

private readonly Dictionary<string, string> _attributes = new();
private readonly Dictionary<string, object?> _attributes = new();

public IReadOnlyDictionary<string, string> Attributes => this._attributes;

IReadOnlyDictionary<string, object?> IAIService.Attributes => throw new NotImplementedException();
public IReadOnlyDictionary<string, object?> Attributes => this._attributes;

public LLamaSharpTextCompletion(ILLamaExecutor executor)
{


+ 2
- 2
LLama.SemanticKernel/TextEmbedding/LLamaSharpEmbeddingGeneration.cs View File

@@ -1,10 +1,10 @@
using LLama;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.AI.Embeddings;
using Microsoft.SemanticKernel.Embeddings;

namespace LLamaSharp.SemanticKernel.TextEmbedding;

public sealed class LLamaSharpEmbeddingGeneration : ITextEmbeddingGeneration
public sealed class LLamaSharpEmbeddingGeneration : ITextEmbeddingGenerationService
{
private LLamaEmbedder _embedder;



+ 5
- 5
LLama.Unittest/SemanticKernel/ChatRequestSettingsTests.cs View File

@@ -1,5 +1,5 @@
using LLamaSharp.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.AI;
using Microsoft.SemanticKernel;

namespace LLama.Unittest.SemanticKernel
{
@@ -77,7 +77,7 @@ namespace LLama.Unittest.SemanticKernel
// Arrange
var originalRequestSettings = new PromptExecutionSettings()
{
ServiceId = "test",
ModelId = "test",
};

// Act
@@ -85,7 +85,7 @@ namespace LLama.Unittest.SemanticKernel

// Assert
Assert.NotNull(requestSettings);
Assert.Equal(originalRequestSettings.ServiceId, requestSettings.ServiceId);
Assert.Equal(originalRequestSettings.ModelId, requestSettings.ModelId);
}

[Fact]
@@ -94,7 +94,7 @@ namespace LLama.Unittest.SemanticKernel
// Arrange
var originalRequestSettings = new PromptExecutionSettings()
{
ServiceId = "test",
ModelId = "test",
ExtensionData = new Dictionary<string, object>
{
{ "frequency_penalty", 0.5 },
@@ -133,7 +133,7 @@ namespace LLama.Unittest.SemanticKernel
// Arrange
var originalRequestSettings = new PromptExecutionSettings()
{
ServiceId = "test",
ModelId = "test",
ExtensionData = new Dictionary<string, object>
{
{ "FrequencyPenalty", 0.5 },


Loading…
Cancel
Save