diff --git a/.github/prepare_release.sh b/.github/prepare_release.sh index fd4427e8..e4409997 100755 --- a/.github/prepare_release.sh +++ b/.github/prepare_release.sh @@ -22,13 +22,19 @@ fi mkdir ./temp; mkdir ./temp/runtimes; -cp ./LLama/runtimes/*.* ./temp/runtimes/; +# For sure it could be done better but cp -R did not work on osx +mkdir ./temp/runtimes/osx-arm64 +mkdir ./temp/runtimes/osx-x64 +cp ./LLama/runtimes/*.* ./temp/runtimes/; +cp ./LLama/runtimes/osx-arm64/*.* ./temp/runtimes/osx-arm64/; +cp ./LLama/runtimes/osx-x64/*.* ./temp/runtimes/osx-x64; cp ./LLama/runtimes/build/*.* ./temp/; # get the current version cd temp; dotnet add package LLamaSharp; version=$(dotnet list temp.csproj package | grep LLamaSharp); +# TODO: This didn´t work on osx...we need a solution read -ra arr <<< "$version" version="${arr[-1]}" echo "The latest version: $version"; diff --git a/.github/workflows/compile.yml b/.github/workflows/compile.yml index 42cb98dc..5dda197c 100644 --- a/.github/workflows/compile.yml +++ b/.github/workflows/compile.yml @@ -6,9 +6,9 @@ on: cublas: type: boolean description: Build CUBLAS binaries - macos: + osx: type: boolean - description: Build MacOS binaries + description: Build OSX binaries push: branches: [cron_job] #schedule: @@ -147,7 +147,7 @@ jobs: include: - build: 'arm64' defines: '-DCMAKE_OSX_ARCHITECTURES=arm64' - - build: 'x86_64' + - build: 'x64' defines: '-DLLAMA_METAL=OFF -DCMAKE_OSX_ARCHITECTURES=x86_64' runs-on: macos-latest steps: @@ -169,7 +169,7 @@ jobs: uses: actions/upload-artifact@v3 with: path: ./build/libllama.dylib - name: llama-bin-macos-${{ matrix.build }}.dylib + name: llama-bin-osx-${{ matrix.build }}.dylib - name: Upload Metal uses: actions/upload-artifact@v3 with: @@ -212,12 +212,12 @@ jobs: - name: Rearrange MacOS files if: ${{ github.event.inputs.macos }} run: | - mkdir deps/macos-arm64 - mkdir deps/macos-x86_64 + mkdir deps/osx-arm64 + mkdir deps/osx-x64 - cp artifacts/llama-bin-macos-arm64.dylib/libllama.dylib deps/macos-arm64/libllama.dylib - cp artifacts/ggml-metal.metal/ggml-metal.metal deps/macos-arm64/ggml-metal.metal - cp artifacts/llama-bin-macos-x86_64.dylib/libllama.dylib deps/macos-x86_64/libllama.dylib + cp artifacts/llama-bin-osx-arm64.dylib/libllama.dylib deps/osx-arm64/libllama.dylib + cp artifacts/ggml-metal.metal/ggml-metal.metal deps/osx-arm64/ggml-metal.metal + cp artifacts/llama-bin-osx-x64.dylib/libllama.dylib deps/osx-x64/libllama.dylib - name: Rearrange CUDA files diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index db051001..eb0e936f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -12,12 +12,12 @@ jobs: strategy: fail-fast: false matrix: - build: [linux-release, windows-release, macos-release] + build: [linux-release, windows-release, osx-release] include: - build: linux-release os: ubuntu-latest config: release - - build: macos-release + - build: osx-release os: macos-latest config: release - build: windows-release diff --git a/LLama.Examples/LLama.Examples.csproj b/LLama.Examples/LLama.Examples.csproj index 3ecacdfe..02066fa0 100644 --- a/LLama.Examples/LLama.Examples.csproj +++ b/LLama.Examples/LLama.Examples.csproj @@ -29,7 +29,8 @@ - + + diff --git a/LLama.KernelMemory/LLamaSharp.KernelMemory.csproj b/LLama.KernelMemory/LLamaSharp.KernelMemory.csproj index 54766b02..7fd99e2c 100644 --- a/LLama.KernelMemory/LLamaSharp.KernelMemory.csproj +++ b/LLama.KernelMemory/LLamaSharp.KernelMemory.csproj @@ -4,6 +4,27 @@ net6.0 enable enable + + 0.7.1 + Xbotter + SciSharp 
STACK + true + MIT, SciSharp STACK $([System.DateTime]::UtcNow.ToString(yyyy)) + https://github.com/SciSharp/LLamaSharp + git + https://avatars3.githubusercontent.com/u/44989469?s=200&v=4 + LLama, LLM, GPT, ChatGPT, kernel-memory, vector search, SciSharp + + The integration of LLamaSharp and Microsoft kernel-memory. It could make it easy to support document search for LLamaSharp model inference. + + + Support integration with kernel-memory + + MIT + packages + AnyCPU;x64;Arm64 + LLamaSharp.kernel-memory + Debug;Release;GPU diff --git a/LLama.SemanticKernel/ExtensionMethods.cs b/LLama.SemanticKernel/ExtensionMethods.cs index 6a48eab0..b3ff6a7b 100644 --- a/LLama.SemanticKernel/ExtensionMethods.cs +++ b/LLama.SemanticKernel/ExtensionMethods.cs @@ -3,9 +3,9 @@ using Microsoft.SemanticKernel.AI.ChatCompletion; namespace LLamaSharp.SemanticKernel; -internal static class ExtensionMethods +public static class ExtensionMethods { - internal static global::LLama.Common.ChatHistory ToLLamaSharpChatHistory(this ChatHistory chatHistory) + public static global::LLama.Common.ChatHistory ToLLamaSharpChatHistory(this ChatHistory chatHistory, bool ignoreCase = true) { if (chatHistory is null) { @@ -16,7 +16,7 @@ internal static class ExtensionMethods foreach (var chat in chatHistory) { - var role = Enum.TryParse(chat.Role.Label, out var _role) ? _role : global::LLama.Common.AuthorRole.Unknown; + var role = Enum.TryParse(chat.Role.Label, ignoreCase, out var _role) ? _role : global::LLama.Common.AuthorRole.Unknown; history.AddMessage(role, chat.Content); } diff --git a/LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj b/LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj index ccec8445..3962a075 100644 --- a/LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj +++ b/LLama.SemanticKernel/LLamaSharp.SemanticKernel.csproj @@ -20,7 +20,7 @@ https://avatars3.githubusercontent.com/u/44989469?s=200&v=4 LLama, LLM, GPT, ChatGPT, semantic-kernel, SciSharp - The integration of LLamaSharp ans semantic-kernel. + The integration of LLamaSharp and Microsoft semantic-kernel. Support integration with semantic-kernel diff --git a/LLama.Web/Common/ModelOptions.cs b/LLama.Web/Common/ModelOptions.cs index 6a63ccc3..182ace00 100644 --- a/LLama.Web/Common/ModelOptions.cs +++ b/LLama.Web/Common/ModelOptions.cs @@ -1,5 +1,6 @@ using System.Text; using LLama.Abstractions; +using LLama.Native; namespace LLama.Web.Common { @@ -118,6 +119,24 @@ namespace LLama.Web.Common /// public float? RopeFrequencyScale { get; set; } + /// + public float? YarnExtrapolationFactor { get; set; } + + /// + public float? YarnAttentionFactor { get; set; } + + /// + public float? YarnBetaFast { get; set; } + + /// + public float? YarnBetaSlow { get; set; } + + /// + public uint? YarnOriginalContext { get; set; } + + /// + public RopeScalingType? YarnScalingType { get; set; } + /// /// Use experimental mul_mat_q kernels /// diff --git a/LLama/Abstractions/IContextParams.cs b/LLama/Abstractions/IContextParams.cs index 8ff6d7cc..0f129217 100644 --- a/LLama/Abstractions/IContextParams.cs +++ b/LLama/Abstractions/IContextParams.cs @@ -1,4 +1,5 @@ using System.Text; +using LLama.Native; namespace LLama.Abstractions; @@ -67,4 +68,34 @@ public interface IContextParams /// Number of threads to use for batch processing (null = autodetect) (n_threads) /// uint? BatchThreads { get; set; } + + /// + /// YaRN extrapolation mix factor + /// + float? YarnExtrapolationFactor { get; set; } + + /// + /// YaRN magnitude scaling factor + /// + float? 
YarnAttentionFactor { get; set; } + + /// + /// YaRN low correction dim + /// + float? YarnBetaFast { get; set; } + + /// + /// YaRN high correction dim + /// + float? YarnBetaSlow { get; set; } + + /// + /// YaRN original context length + /// + uint? YarnOriginalContext { get; set; } + + /// + /// YaRN scaling method to use. + /// + RopeScalingType? YarnScalingType { get; set; } } \ No newline at end of file diff --git a/LLama/ChatSession.cs b/LLama/ChatSession.cs index 457e7e48..7ee99590 100644 --- a/LLama/ChatSession.cs +++ b/LLama/ChatSession.cs @@ -1,11 +1,14 @@ using LLama.Abstractions; using LLama.Common; +using System; using System.Collections.Generic; using System.IO; +using System.Linq; using System.Runtime.CompilerServices; using System.Text; using System.Threading; using System.Threading.Tasks; +using static LLama.InteractiveExecutor; namespace LLama { @@ -95,11 +98,11 @@ namespace LLama Directory.CreateDirectory(path); } _executor.Context.SaveState(Path.Combine(path, _modelStateFilename)); - if(Executor is StatelessExecutor) + if (Executor is StatelessExecutor) { } - else if(Executor is StatefulExecutorBase statefulExecutor) + else if (Executor is StatefulExecutorBase statefulExecutor) { statefulExecutor.SaveState(Path.Combine(path, _executorStateFilename)); } @@ -135,46 +138,90 @@ namespace LLama } /// - /// Get the response from the LLama model. Note that prompt could not only be the preset words, - /// but also the question you want to ask. + /// Generates a response for a given user prompt and manages history state for the user. + /// This will always pass the whole history to the model. Don't pass a whole history + /// to this method as the user prompt will be appended to the history of the current session. + /// If more control is needed, use the other overload of this method that accepts a ChatHistory object. /// /// /// /// - /// + /// Returns generated text of the assistant message. public async IAsyncEnumerable ChatAsync(string prompt, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - foreach(var inputTransform in InputTransformPipeline) + foreach (var inputTransform in InputTransformPipeline) prompt = inputTransform.Transform(prompt); - - History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.User, prompt).Messages); + + History.Messages.Add(new ChatHistory.Message(AuthorRole.User, prompt)); + + if (_executor is InteractiveExecutor executor) + { + InteractiveExecutorState state = (InteractiveExecutorState)executor.GetStateData(); + prompt = state.IsPromptRun + ? HistoryTransform.HistoryToText(History) + : prompt; + } + StringBuilder sb = new(); + await foreach (var result in ChatAsyncInternal(prompt, inferenceParams, cancellationToken)) { yield return result; sb.Append(result); } - History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.Assistant, sb.ToString()).Messages); + + string assistantMessage = sb.ToString(); + + // Remove end tokens from the assistant message + // if defined in inferenceParams.AntiPrompts. + // We only want the response that was generated and not tokens + // that are delimiting the beginning or end of the response. + if (inferenceParams?.AntiPrompts != null) + { + foreach (var stopToken in inferenceParams.AntiPrompts) + { + assistantMessage = assistantMessage.Replace(stopToken, ""); + } + } + + History.Messages.Add(new ChatHistory.Message(AuthorRole.Assistant, assistantMessage)); } /// - /// Get the response from the LLama model with chat histories. 
+ /// Generates a response for a given chat history. This method does not manage history state for the user. + /// If you want to e.g. truncate the history of a session to fit into the model's context window, + /// use this method and pass the truncated history to it. If you don't need this control, use the other + /// overload of this method that accepts a user prompt instead. /// /// /// /// - /// + /// Returns generated text of the assistant message. public async IAsyncEnumerable ChatAsync(ChatHistory history, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - var prompt = HistoryTransform.HistoryToText(history); - History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.User, prompt).Messages); - StringBuilder sb = new(); + if (history.Messages.Count == 0) + { + throw new ArgumentException("History must contain at least one message."); + } + + string prompt; + if (_executor is InteractiveExecutor executor) + { + InteractiveExecutorState state = (InteractiveExecutorState)executor.GetStateData(); + + prompt = state.IsPromptRun + ? HistoryTransform.HistoryToText(History) + : history.Messages.Last().Content; + } + else + { + prompt = history.Messages.Last().Content; + } + await foreach (var result in ChatAsyncInternal(prompt, inferenceParams, cancellationToken)) { yield return result; - sb.Append(result); } - History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.Assistant, sb.ToString()).Messages); } private async IAsyncEnumerable ChatAsyncInternal(string prompt, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) diff --git a/LLama/Common/ModelParams.cs b/LLama/Common/ModelParams.cs index ee5bd3e4..8bf59fa5 100644 --- a/LLama/Common/ModelParams.cs +++ b/LLama/Common/ModelParams.cs @@ -3,6 +3,7 @@ using System; using System.Text; using System.Text.Json; using System.Text.Json.Serialization; +using LLama.Native; namespace LLama.Common { @@ -98,10 +99,30 @@ namespace LLama.Common /// public float? RopeFrequencyScale { get; set; } - /// - /// Use experimental mul_mat_q kernels - /// - public bool MulMatQ { get; set; } + + /// + public float? YarnExtrapolationFactor { get; set; } + + /// + public float? YarnAttentionFactor { get; set; } + + /// + public float? YarnBetaFast { get; set; } + + /// + public float? YarnBetaSlow { get; set; } + + /// + public uint? YarnOriginalContext { get; set; } + + /// + public RopeScalingType? YarnScalingType { get; set; } + + /// + /// Use experimental mul_mat_q kernels + /// + public bool MulMatQ { get; set; } + /// /// Load vocab only (no weights) diff --git a/LLama/Extensions/IContextParamsExtensions.cs b/LLama/Extensions/IContextParamsExtensions.cs index fcc9d372..16716b53 100644 --- a/LLama/Extensions/IContextParamsExtensions.cs +++ b/LLama/Extensions/IContextParamsExtensions.cs @@ -29,6 +29,15 @@ namespace LLama.Extensions result.embedding = @params.EmbeddingMode; result.rope_freq_base = @params.RopeFrequencyBase ?? 0; result.rope_freq_scale = @params.RopeFrequencyScale ?? 0; + + // Default YaRN values copied from here: https://github.com/ggerganov/llama.cpp/blob/381efbf480959bb6d1e247a8b0c2328f22e350f8/common/common.h#L67 + result.yarn_ext_factor = @params.YarnExtrapolationFactor ?? -1f; + result.yarn_attn_factor = @params.YarnAttentionFactor ?? 1f; + result.yarn_beta_fast = @params.YarnBetaFast ?? 32f; + result.yarn_beta_slow = @params.YarnBetaSlow ?? 
1f; + result.yarn_orig_ctx = @params.YarnOriginalContext ?? 0; + result.rope_scaling_type = @params.YarnScalingType ?? RopeScalingType.LLAMA_ROPE_SCALING_UNSPECIFIED; + result.mul_mat_q = @params.MulMatQ; result.n_threads = Threads(@params.Threads); diff --git a/LLama/LLamaSharp.Runtime.targets b/LLama/LLamaSharp.Runtime.targets index b98fa2f6..9085e5f7 100644 --- a/LLama/LLamaSharp.Runtime.targets +++ b/LLama/LLamaSharp.Runtime.targets @@ -27,15 +27,15 @@ PreserveNewest runtimes/linux-x64/native/cuda12/libllama.so - + PreserveNewest runtimes/osx-arm64/native/libllama.dylib - + PreserveNewest runtimes/osx-arm64/native/ggml-metal.metal - + PreserveNewest runtimes/osx-x64/native/libllama.dylib diff --git a/LLama/Native/LLamaContextParams.cs b/LLama/Native/LLamaContextParams.cs index 0a397a3d..f1ba569d 100644 --- a/LLama/Native/LLamaContextParams.cs +++ b/LLama/Native/LLamaContextParams.cs @@ -44,13 +44,13 @@ namespace LLama.Native /// /// RoPE scaling type, from `enum llama_rope_scaling_type` /// - public sbyte rope_scaling_type; + public RopeScalingType rope_scaling_type; /// /// RoPE base frequency, 0 = from model /// - public float rope_freq_base; + public float rope_freq_base; /// /// RoPE frequency scaling factor, 0 = from model /// diff --git a/LLama/Native/RopeScalingType.cs b/LLama/Native/RopeScalingType.cs new file mode 100644 index 00000000..435932e8 --- /dev/null +++ b/LLama/Native/RopeScalingType.cs @@ -0,0 +1,17 @@ +namespace LLama.Native +{ + /// + /// RoPE scaling type. C# equivalent of llama_rope_scaling_type + /// + public enum RopeScalingType + : sbyte + { + LLAMA_ROPE_SCALING_UNSPECIFIED = -1, + + LLAMA_ROPE_SCALING_NONE = 0, + + LLAMA_ROPE_SCALING_LINEAR = 1, + + LLAMA_ROPE_SCALING_YARN = 2, + } +} diff --git a/LLama/runtimes/build/LLamaSharp.Backend.Cpu.nuspec b/LLama/runtimes/build/LLamaSharp.Backend.Cpu.nuspec index d7d1fd6b..29466a1f 100644 --- a/LLama/runtimes/build/LLamaSharp.Backend.Cpu.nuspec +++ b/LLama/runtimes/build/LLamaSharp.Backend.Cpu.nuspec @@ -19,9 +19,9 @@ - - - + + + diff --git a/LLama/runtimes/macos-arm64/ggml-metal.metal b/LLama/runtimes/osx-arm64/ggml-metal.metal similarity index 100% rename from LLama/runtimes/macos-arm64/ggml-metal.metal rename to LLama/runtimes/osx-arm64/ggml-metal.metal diff --git a/LLama/runtimes/macos-arm64/libllama.dylib b/LLama/runtimes/osx-arm64/libllama.dylib similarity index 100% rename from LLama/runtimes/macos-arm64/libllama.dylib rename to LLama/runtimes/osx-arm64/libllama.dylib diff --git a/LLama/runtimes/macos-x86_64/libllama.dylib b/LLama/runtimes/osx-x64/libllama.dylib similarity index 100% rename from LLama/runtimes/macos-x86_64/libllama.dylib rename to LLama/runtimes/osx-x64/libllama.dylib diff --git a/README.md b/README.md index ea72e8a4..216db124 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,12 @@ For [microsoft semantic-kernel](https://github.com/microsoft/semantic-kernel) in LLamaSharp.semantic-kernel ``` +For [microsoft kernel-memory](https://github.com/microsoft/kernel-memory) integration, please search and install the following package (currently kernel-memory only supports net6.0): + +``` +LLamaSharp.kernel-memory +``` + ### Tips for choosing a version In general, there may be some break changes between two minor releases, for example 0.5.1 and 0.6.0. On the contrary, we don't introduce API break changes in patch release. Therefore it's recommended to keep the highest patch version of a minor release. For example, keep 0.5.6 instead of 0.5.3. 
@@ -196,7 +202,7 @@ Another choice is generate gguf format file yourself with a pytorch weight (or a
 🔳 Fine-tune
-⚠️ Local document search (enabled by kernel-memory now)
+✅ Local document search (enabled by kernel-memory now)
 🔳 MAUI Integration
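
A minimal usage sketch of the YaRN RoPE-scaling options this patch adds to `ModelParams` / `IContextParams`. The `Yarn*` property names, the `RopeScalingType` enum, and the fallback defaults are taken from the diff above; the model path and the `LLamaWeights.LoadFromFile` / `CreateContext` loading calls are assumptions based on the existing LLamaSharp API, not part of the patch.

```csharp
using LLama;
using LLama.Common;
using LLama.Native;

// Sketch: enable YaRN scaling via the new nullable properties on ModelParams.
// Any value left null falls back to the llama.cpp defaults wired up in
// IContextParamsExtensions (-1, 1, 32, 1, 0, UNSPECIFIED).
var parameters = new ModelParams("path/to/model.gguf") // hypothetical path
{
    ContextSize = 8192,
    YarnScalingType = RopeScalingType.LLAMA_ROPE_SCALING_YARN,
    YarnOriginalContext = 4096,      // original training context length
    YarnExtrapolationFactor = 1f,    // extrapolation mix factor
    YarnAttentionFactor = 1f,        // magnitude scaling factor
    YarnBetaFast = 32f,              // low correction dim
    YarnBetaSlow = 1f,               // high correction dim
};

using var weights = LLamaWeights.LoadFromFile(parameters);
using var context = weights.CreateContext(parameters);
```

Leaving all `Yarn*` properties null keeps the previous behaviour, since the extension in `IContextParamsExtensions` only overrides llama.cpp's own defaults when a value is set.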
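
A sketch of the two `ChatSession.ChatAsync` overloads as they behave after this patch: the string overload appends both the user prompt and the cleaned assistant reply (anti-prompt tokens stripped) to `session.History`, while the `ChatHistory` overload leaves history management entirely to the caller. The executor/session setup and the model path are assumptions based on the existing LLamaSharp API, not part of the patch.

```csharp
using System;
using System.Collections.Generic;
using LLama;
using LLama.Common;

var parameters = new ModelParams("path/to/model.gguf"); // hypothetical path
using var weights = LLamaWeights.LoadFromFile(parameters);
using var context = weights.CreateContext(parameters);
var session = new ChatSession(new InteractiveExecutor(context));

var inferenceParams = new InferenceParams { AntiPrompts = new List<string> { "User:" } };

// String overload: the session tracks history itself and strips the
// anti-prompt tokens from the stored assistant message.
await foreach (var token in session.ChatAsync("Hello, what can you do?", inferenceParams))
    Console.Write(token);

// ChatHistory overload: the caller owns the history (e.g. truncated to fit the
// context window); the session does not modify it or its own History. Passing
// an empty history throws an ArgumentException.
var history = new ChatHistory();
history.AddMessage(AuthorRole.User, "Summarize what we discussed.");
await foreach (var token in session.ChatAsync(history, inferenceParams))
    Console.Write(token);
```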