- Updated LLamaSharpChatCompletion class in LLama.SemanticKernel/ChatCompletion/LLamaSharpChatCompletion.cs
- Changed the type of the "_model" field from "StatelessExecutor" to "ILLamaExecutor"
- Updated the constructor to accept an "ILLamaExecutor" parameter instead of a "StatelessExecutor" parameter
- Updated the LLamaSharp.SemanticKernel project in LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj, adding an "InternalsVisibleTo" entry for "Llama.Unittest"
- Updated LLama.Unittest project in LLama.Unittest/LLama.Unittest.csproj
- Added a "PackageReference" for "Moq" version 4.20.70
- Added ExtensionMethodsTests class in LLama.Unittest/SemanticKernel/ExtensionMethodsTests.cs
- Added tests for the "ToLLamaSharpChatHistory" and "ToLLamaSharpInferenceParams" extension methods
- Added LLamaSharpChatCompletionTests class in LLama.Unittest/SemanticKernel/LLamaSharpChatCompletionTests.cs
- Added tests for the LLamaSharpChatCompletion class
- Added LLamaSharpTextCompletionTests class with tests for the LLamaSharpTextCompletion class
ℹ️ The LLamaSharpChatCompletion class in the LLama.SemanticKernel project, which provides chat completion functionality for LLamaSharp, now depends on the ILLamaExecutor interface instead of the concrete StatelessExecutor class. This abstraction makes the implementation more flexible and easier to test, since any executor implementation can back the service (see the sketch below). The LLama.Unittest project has been updated with tests for the LLamaSharpChatCompletion class and for the extension methods it uses.
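Because the constructor now accepts the ILLamaExecutor interface, any executor can back the chat completion service. The following is a minimal sketch of wiring it up with the built-in StatelessExecutor (which implements ILLamaExecutor); the model path and context size are placeholders, and the StatelessExecutor constructor shape is assumed from the LLamaSharp API around this release.

```csharp
using LLama;
using LLama.Common;
using LLamaSharp.SemanticKernel.ChatCompletion;

// Placeholder model path and context size, substitute your own.
var parameters = new ModelParams("path/to/model.gguf")
{
    ContextSize = 2048,
};

// StatelessExecutor implements ILLamaExecutor, so it satisfies the
// relaxed constructor parameter; so would any custom executor.
using var weights = LLamaWeights.LoadFromFile(parameters);
var executor = new StatelessExecutor(weights, parameters);

var chat = new LLamaSharpChatCompletion(executor);
```

The same parameter is satisfied in the new unit tests by a Moq-generated ILLamaExecutor, which is what makes the class testable without loading a real model.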
LLama.SemanticKernel/ChatCompletion/LLamaSharpChatCompletion.cs:

@@ -16,7 +16,7 @@ namespace LLamaSharp.SemanticKernel.ChatCompletion;
 /// </summary>
 public sealed class LLamaSharpChatCompletion : IChatCompletionService
 {
-    private readonly StatelessExecutor _model;
+    private readonly ILLamaExecutor _model;
     private ChatRequestSettings defaultRequestSettings;
     private readonly IHistoryTransform historyTransform;
     private readonly ITextStreamTransform outputTransform;

@@ -36,7 +36,7 @@ public sealed class LLamaSharpChatCompletion : IChatCompletionService
         };
     }

-    public LLamaSharpChatCompletion(StatelessExecutor model,
+    public LLamaSharpChatCompletion(ILLamaExecutor model,
         ChatRequestSettings? defaultRequestSettings = default,
         IHistoryTransform? historyTransform = null,
         ITextStreamTransform? outputTransform = null)
LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj:

@@ -10,39 +10,42 @@
     <ImplicitUsings>enable</ImplicitUsings>
     <Nullable>enable</Nullable>
     <Version>0.8.0</Version>
     <Authors>Tim Miller, Xbotter</Authors>
     <Company>SciSharp STACK</Company>
     <GeneratePackageOnBuild>true</GeneratePackageOnBuild>
     <Copyright>MIT, SciSharp STACK $([System.DateTime]::UtcNow.ToString(yyyy))</Copyright>
     <RepositoryUrl>https://github.com/SciSharp/LLamaSharp</RepositoryUrl>
     <RepositoryType>git</RepositoryType>
     <PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&v=4</PackageIconUrl>
     <PackageTags>LLama, LLM, GPT, ChatGPT, semantic-kernel, SciSharp</PackageTags>
     <Description>
       The integration of LLamaSharp and Microsoft semantic-kernel.
     </Description>
     <PackageReleaseNotes>
       Support integration with semantic-kernel
     </PackageReleaseNotes>
     <PackageLicenseExpression>MIT</PackageLicenseExpression>
     <PackageOutputPath>packages</PackageOutputPath>
     <Platforms>AnyCPU;x64;Arm64</Platforms>
     <PackageId>LLamaSharp.semantic-kernel</PackageId>
     <Configurations>Debug;Release;GPU</Configurations>
     <NoWarn>SKEXP0001,SKEXP0052</NoWarn>
   </PropertyGroup>
   <ItemGroup>
     <PackageReference Include="Microsoft.SemanticKernel.Abstractions" Version="1.6.2" />
   </ItemGroup>
   <ItemGroup Condition="'$(TargetFramework)' == 'netstandard2.0'">
     <PackageReference Include="System.Memory" Version="4.5.5" PrivateAssets="all" />
   </ItemGroup>
   <ItemGroup>
     <ProjectReference Include="..\LLama\LLamaSharp.csproj" />
   </ItemGroup>
+  <ItemGroup>
+    <InternalsVisibleTo Include="Llama.Unittest"/>
+  </ItemGroup>
 </Project>
LLama.Unittest/LLama.Unittest.csproj:

@@ -14,6 +14,7 @@
   <ItemGroup>
     <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.9.0" />
+    <PackageReference Include="Moq" Version="4.20.70" />
     <PackageReference Include="System.Linq.Async" Version="6.0.1" />
     <PackageReference Include="xunit" Version="2.7.0" />
     <PackageReference Include="xunit.runner.visualstudio" Version="2.5.7">
LLama.Unittest/SemanticKernel/ExtensionMethodsTests.cs (new file):

@@ -0,0 +1,50 @@
using Xunit;
using LLama;
using LLama.Abstractions;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Services;
using System;
using System.IO;
using System.Runtime.CompilerServices;
using System.Text;
using static LLama.LLamaTransforms;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Threading;
using LLamaSharp.SemanticKernel.ChatCompletion;

namespace LLamaSharp.SemanticKernel.Tests
{
    public class ExtensionMethodsTests
    {
        [Fact]
        public void ToLLamaSharpChatHistory_StateUnderTest_ExpectedBehavior()
        {
            // Arrange
            var chatHistory = new Microsoft.SemanticKernel.ChatCompletion.ChatHistory();
            bool ignoreCase = true;

            // Act
            var result = ExtensionMethods.ToLLamaSharpChatHistory(
                chatHistory,
                ignoreCase);

            // Assert
            Assert.NotNull(result);
        }

        [Fact]
        public void ToLLamaSharpInferenceParams_StateUnderTest_ExpectedBehavior()
        {
            // Arrange
            var requestSettings = new ChatRequestSettings();

            // Act
            var result = ExtensionMethods.ToLLamaSharpInferenceParams(
                requestSettings);

            // Assert
            Assert.NotNull(result);
        }
    }
}
LLama.Unittest/SemanticKernel/LLamaSharpChatCompletionTests.cs (new file):

@@ -0,0 +1,85 @@
using Xunit;
using Moq;
using LLama;
using LLama.Abstractions;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Services;
using System;
using System.IO;
using System.Runtime.CompilerServices;
using System.Text;
using static LLama.LLamaTransforms;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Threading;

namespace LLamaSharp.SemanticKernel.ChatCompletion.Tests
{
    public class LLamaSharpChatCompletionTests
    {
        private Mock<ILLamaExecutor> mockStatelessExecutor;

        public LLamaSharpChatCompletionTests()
        {
            this.mockStatelessExecutor = new Mock<ILLamaExecutor>();
        }

        private LLamaSharpChatCompletion CreateLLamaSharpChatCompletion()
        {
            return new LLamaSharpChatCompletion(
                this.mockStatelessExecutor.Object,
                null,
                null,
                null);
        }

        [Fact]
        public async Task GetChatMessageContentsAsync_StateUnderTest_ExpectedBehavior()
        {
            // Arrange
            var unitUnderTest = this.CreateLLamaSharpChatCompletion();
            ChatHistory chatHistory = new ChatHistory();
            PromptExecutionSettings? executionSettings = null;
            Kernel? kernel = null;
            CancellationToken cancellationToken = default;
            mockStatelessExecutor.Setup(e => e.InferAsync(It.IsAny<string>(), It.IsAny<IInferenceParams>(), It.IsAny<CancellationToken>()))
                .Returns(new List<string> { "test" }.ToAsyncEnumerable());

            // Act
            var result = await unitUnderTest.GetChatMessageContentsAsync(
                chatHistory,
                executionSettings,
                kernel,
                cancellationToken);

            // Assert
            Assert.True(result.Count > 0);
        }

        [Fact]
        public async Task GetStreamingChatMessageContentsAsync_StateUnderTest_ExpectedBehavior()
        {
            // Arrange
            var unitUnderTest = this.CreateLLamaSharpChatCompletion();
            ChatHistory chatHistory = new ChatHistory();
            PromptExecutionSettings? executionSettings = null;
            Kernel? kernel = null;
            CancellationToken cancellationToken = default;
            mockStatelessExecutor.Setup(e => e.InferAsync(It.IsAny<string>(), It.IsAny<IInferenceParams>(), It.IsAny<CancellationToken>()))
                .Returns(new List<string> { "test" }.ToAsyncEnumerable());

            // Act
            await foreach (var result in unitUnderTest.GetStreamingChatMessageContentsAsync(
                chatHistory,
                executionSettings,
                kernel,
                cancellationToken))
            {
                // Assert
                Assert.NotNull(result);
            }
        }
    }
}
LLamaSharpTextCompletionTests.cs (new file):

@@ -0,0 +1,87 @@
using Xunit;
using Moq;
using LLama;
using LLama.Abstractions;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Services;
using System;
using System.IO;
using System.Runtime.CompilerServices;
using System.Text;
using static LLama.LLamaTransforms;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Threading;

namespace LLamaSharp.SemanticKernel.TextCompletion.Tests
{
    public class LLamaSharpTextCompletionTests : IDisposable
    {
        private MockRepository mockRepository;
        private Mock<ILLamaExecutor> mockExecutor;

        public LLamaSharpTextCompletionTests()
        {
            this.mockRepository = new MockRepository(MockBehavior.Strict);
            this.mockExecutor = this.mockRepository.Create<ILLamaExecutor>();
        }

        public void Dispose()
        {
            this.mockRepository.VerifyAll();
        }

        private LLamaSharpTextCompletion CreateLLamaSharpTextCompletion()
        {
            return new LLamaSharpTextCompletion(
                this.mockExecutor.Object);
        }

        [Fact]
        public async Task GetTextContentsAsync_StateUnderTest_ExpectedBehavior()
        {
            // Arrange
            var unitUnderTest = this.CreateLLamaSharpTextCompletion();
            string prompt = "Test";
            PromptExecutionSettings? executionSettings = null;
            Kernel? kernel = null;
            CancellationToken cancellationToken = default;
            mockExecutor.Setup(e => e.InferAsync(It.IsAny<string>(), It.IsAny<IInferenceParams>(), It.IsAny<CancellationToken>()))
                .Returns(new List<string> { "test" }.ToAsyncEnumerable());

            // Act
            var result = await unitUnderTest.GetTextContentsAsync(
                prompt,
                executionSettings,
                kernel,
                cancellationToken);

            // Assert
            Assert.True(result.Count > 0);
        }

        [Fact]
        public async Task GetStreamingTextContentsAsync_StateUnderTest_ExpectedBehavior()
        {
            // Arrange
            var unitUnderTest = this.CreateLLamaSharpTextCompletion();
            string prompt = "Test";
            PromptExecutionSettings? executionSettings = null;
            Kernel? kernel = null;
            CancellationToken cancellationToken = default;
            mockExecutor.Setup(e => e.InferAsync(It.IsAny<string>(), It.IsAny<IInferenceParams>(), It.IsAny<CancellationToken>()))
                .Returns(new List<string> { "test" }.ToAsyncEnumerable());

            // Act
            await foreach (var result in unitUnderTest.GetStreamingTextContentsAsync(
                prompt,
                executionSettings,
                kernel,
                cancellationToken))
            {
                // Assert
                Assert.NotNull(result);
            }
        }
    }
}