
LLamaSharpTextEmbeddingGenerator.cs 3.9 kB

April 2024 Binary Update (#662), 1 year ago

* Updated binaries, using [this build](https://github.com/SciSharp/LLamaSharp/actions/runs/8654672719/job/23733195669) for llama.cpp commit `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
  - Added all new functions.
  - Moved some functions (e.g. `SafeLlamaModelHandle`-specific functions) into `SafeLlamaModelHandle.cs`.
  - Exposed tokens on `SafeLlamaModelHandle` and `LLamaWeights` through a `Tokens` property. As new special tokens are added in the future they can be added here.
  - Changed all token properties to return nullable tokens, to handle some models not having some tokens.
  - Fixed `DefaultSamplingPipeline` to handle no newline token in some models.
* Moved native methods to more specific locations.
  - Context-specific things have been moved into `SafeLLamaContextHandle.cs` and made private; they're exposed through C# properties and methods already.
  - Checking that GPU layer count is zero if GPU offload is not supported.
  - Moved methods for creating default structs (`llama_model_quantize_default_params` and `llama_context_default_params`) into the relevant structs.
* Removed exception if `GpuLayerCount > 0` when GPU is not supported.
* Added low-level wrapper methods for the new per-sequence state load/save in `SafeLLamaContextHandle`.
  - Added high-level wrapper methods (save/load with a `State` object or memory-mapped file) in `LLamaContext`.
  - Moved native methods for per-sequence state load/save into `SafeLLamaContextHandle`.
* Added update and defrag methods for the KV cache in `SafeLLamaContextHandle`.
* Updated submodule to `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
* Passing the sequence ID when saving a single sequence state.
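The per-sequence state save/load described in this commit is exposed through high-level wrappers on `LLamaContext`. The following is a minimal sketch, not taken from the PR itself: it assumes the per-sequence variants follow the existing `SaveState`/`LoadState` naming and take an `LLamaSeqId` parameter, and the model path is hypothetical.

    using LLama;
    using LLama.Common;
    using LLama.Native;

    var @params = new ModelParams("model.gguf"); // hypothetical model path
    using var weights = LLamaWeights.LoadFromFile(@params);
    using var context = weights.CreateContext(@params);

    // Save the full context state to a file (the commit mentions a
    // memory-mapped file variant alongside the State object variant).
    context.SaveState("all.state");

    // Save only sequence 0's state; per the commit, the sequence ID is
    // passed when saving a single sequence. (Assumed overload shape.)
    context.SaveState("seq0.state", (LLamaSeqId)0);

    // Restore that single sequence's state later. (Assumed overload shape.)
    context.LoadState("seq0.state", (LLamaSeqId)0);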
using LLama;
using LLama.Common;
using Microsoft.KernelMemory;
using Microsoft.KernelMemory.AI;

namespace LLamaSharp.KernelMemory
{
    /// <summary>
    /// Provides text embedding generation for LLamaSharp.
    /// </summary>
    public class LLamaSharpTextEmbeddingGenerator
        : ITextEmbeddingGenerator, IDisposable
    {
        private readonly LLamaSharpConfig? _config;
        private readonly LLamaWeights? _weights;
        private readonly LLamaEmbedder _embedder;
        private bool _ownsEmbedder = false;
        private bool _ownsWeights = false;

        /// <inheritdoc/>
        public int MaxTokens => (int?)_config?.ContextSize ?? 2048;

        /// <summary>
        /// Initializes a new instance of the <see cref="LLamaSharpTextEmbeddingGenerator"/> class.
        /// </summary>
        /// <param name="config">The configuration for LLamaSharp.</param>
        public LLamaSharpTextEmbeddingGenerator(LLamaSharpConfig config)
        {
            this._config = config;
            var @params = new ModelParams(_config.ModelPath)
            {
                Embeddings = true,
                MainGpu = _config.MainGpu,
                SplitMode = _config.SplitMode
            };
            _weights = LLamaWeights.LoadFromFile(@params);
            _embedder = new LLamaEmbedder(_weights, @params);
            _ownsWeights = true;
            _ownsEmbedder = true;
        }

        /// <summary>
        /// Initializes a new instance of the <see cref="LLamaSharpTextEmbeddingGenerator"/> class from reused weights.
        /// </summary>
        /// <param name="config">The configuration for LLamaSharp.</param>
        /// <param name="weights">A LLamaWeights object.</param>
        public LLamaSharpTextEmbeddingGenerator(LLamaSharpConfig config, LLamaWeights weights)
        {
            this._config = config;
            var @params = new ModelParams(_config.ModelPath)
            {
                Embeddings = true,
                MainGpu = _config.MainGpu,
                SplitMode = _config.SplitMode
            };
            _weights = weights;
            _embedder = new LLamaEmbedder(_weights, @params);
            _ownsEmbedder = true;
        }

        /// <summary>
        /// Initializes a new instance of the <see cref="LLamaSharpTextEmbeddingGenerator"/> class from a reused embedder.
        /// </summary>
        /// <param name="embedder">A LLamaEmbedder object.</param>
        public LLamaSharpTextEmbeddingGenerator(LLamaEmbedder embedder)
        {
            this._config = null;
            this._weights = null;
            _embedder = embedder;
        }

        /// <inheritdoc/>
        public void Dispose()
        {
            if (_ownsWeights)
            {
                _weights?.Dispose();
            }
            if (_ownsEmbedder)
            {
                _embedder.Dispose();
            }
        }

        /// <inheritdoc/>
        public async Task<IList<ReadOnlyMemory<float>>> GenerateEmbeddingsAsync(IList<string> data, CancellationToken cancellationToken = default)
        {
            IList<ReadOnlyMemory<float>> results = new List<ReadOnlyMemory<float>>();
            foreach (var d in data)
            {
                var embeddings = await _embedder.GetEmbeddings(d, cancellationToken);
                results.Add(new ReadOnlyMemory<float>(embeddings));
            }
            return results;
        }

        /// <inheritdoc/>
        public async Task<Embedding> GenerateEmbeddingAsync(string text, CancellationToken cancellationToken = default)
        {
            var embeddings = await _embedder.GetEmbeddings(text, cancellationToken);
            return new Embedding(embeddings);
        }

        /// <inheritdoc/>
        public int CountTokens(string text) => _embedder.Context.Tokenize(text, special: true).Length;
    }
}
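For reference, typical standalone usage of the generator above looks roughly like the following. This is a minimal sketch, assuming `LLamaSharpConfig` takes the model path as its constructor argument and that a GGUF model exists at the (hypothetical) path.

    using LLamaSharp.KernelMemory;
    using Microsoft.KernelMemory;

    var config = new LLamaSharpConfig("models/example.gguf"); // hypothetical path
    using var generator = new LLamaSharpTextEmbeddingGenerator(config);

    // One embedding for one string.
    Embedding embedding = await generator.GenerateEmbeddingAsync("Hello, world!");

    // Batch: one ReadOnlyMemory<float> per input string.
    var batch = await generator.GenerateEmbeddingsAsync(new[] { "first", "second" });

    // Token count via the embedder's context (special tokens included).
    int count = generator.CountTokens("Hello, world!");

The weights-reusing constructor exists so a single `LLamaWeights` instance can back both this generator and a text-generation service without loading the model twice; the `_ownsWeights`/`_ownsEmbedder` flags ensure `Dispose` only releases resources this instance created itself.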