
DefaultSamplingPipeline.cs 7.0 kB

April 2024 Binary Update (#662)

* Updated binaries, using [this build](https://github.com/SciSharp/LLamaSharp/actions/runs/8654672719/job/23733195669) for llama.cpp commit `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
  - Added all new functions.
  - Moved some functions (e.g. `SafeLlamaModelHandle` specific functions) into `SafeLlamaModelHandle.cs`.
  - Exposed tokens on `SafeLlamaModelHandle` and `LLamaWeights` through a `Tokens` property. As new special tokens are added in the future they can be added here.
  - Changed all token properties to return nullable tokens, to handle some models not having some tokens.
  - Fixed `DefaultSamplingPipeline` to handle no newline token in some models.
* Moved native methods to more specific locations.
  - Context specific things have been moved into `SafeLLamaContextHandle.cs` and made private - they're exposed through C# properties and methods already.
  - Checking that GPU layer count is zero if GPU offload is not supported.
  - Moved methods for creating default structs (`llama_model_quantize_default_params` and `llama_context_default_params`) into relevant structs.
* Removed exception if `GpuLayerCount > 0` when GPU is not supported.
* Added low level wrapper methods for new per-sequence state load/save in `SafeLLamaContextHandle`.
  - Added high level wrapper methods (save/load with `State` object or memory mapped file) in `LLamaContext`.
  - Moved native methods for per-sequence state load/save into `SafeLLamaContextHandle`.
* Added update and defrag methods for KV cache in `SafeLLamaContextHandle`.
* Updated submodule to `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
* Passing the sequence ID when saving a single sequence state.
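
One practical consequence of the nullable token properties is that callers must check whether a model actually defines a given special token before using it; `DefaultSamplingPipeline` below does exactly this for the newline token. Here is a minimal sketch of that pattern from the caller's side, assuming a loaded `LLamaWeights` instance; the model path is a placeholder, and the `Tokens.Newline` property name is taken from the file below.

```csharp
using System;
using LLama;
using LLama.Common;

// Load model weights; the path here is only a placeholder for illustration.
var parameters = new ModelParams("path/to/model.gguf");
using var weights = LLamaWeights.LoadFromFile(parameters);

// Token properties are nullable - some models simply do not define a newline token.
var newline = weights.Tokens.Newline;
if (newline.HasValue)
    Console.WriteLine($"Newline token id: {(int)newline.Value}");
else
    Console.WriteLine("No newline token; samplers should skip newline-specific handling.");
```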
using System;
using System.Collections.Generic;
using LLama.Extensions;
using LLama.Native;

namespace LLama.Sampling;

/// <summary>
/// An implementation of ISamplingPipeline which mimics the default llama.cpp sampling
/// </summary>
public sealed class DefaultSamplingPipeline
    : BaseSamplingPipeline
{
    /// <summary>
    /// Bias values to add to certain logits
    /// </summary>
    public Dictionary<int, float> LogitBias { get; } = new();

    /// <summary>
    /// Repetition penalty, as described in https://arxiv.org/abs/1909.05858
    /// </summary>
    public float RepeatPenalty { get; set; }

    /// <summary>
    /// Frequency penalty as described by OpenAI: https://platform.openai.com/docs/api-reference/chat/create<br />
    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text
    /// so far, decreasing the model's likelihood to repeat the same line verbatim.
    /// </summary>
    public float AlphaFrequency
    {
        get => _alphaFreq;
        set
        {
            if (value < -2)
                throw new ArgumentOutOfRangeException(nameof(value), "AlphaFrequency must be greater than -2");
            if (value > 2)
                throw new ArgumentOutOfRangeException(nameof(value), "AlphaFrequency must be less than 2");
            _alphaFreq = value;
        }
    }
    private float _alphaFreq;

    /// <summary>
    /// Presence penalty as described by OpenAI: https://platform.openai.com/docs/api-reference/chat/create<br />
    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the
    /// text so far, increasing the model's likelihood to talk about new topics.
    /// </summary>
    public float AlphaPresence
    {
        get => _alphaPresence;
        set
        {
            if (value < -2)
                throw new ArgumentOutOfRangeException(nameof(value), "AlphaPresence must be greater than -2");
            if (value > 2)
                throw new ArgumentOutOfRangeException(nameof(value), "AlphaPresence must be less than 2");
            _alphaPresence = value;
        }
    }
    private float _alphaPresence;

    /// <summary>
    /// Temperature to apply (higher temperature is more "creative")
    /// </summary>
    public float Temperature { get; set; } = 0.75f;

    /// <summary>
    /// Number of tokens to keep in TopK sampling
    /// </summary>
    public int TopK { get; set; }

    /// <summary>
    /// Z value for tail free sampling
    /// </summary>
    public float TailFreeZ { get; set; }

    /// <summary>
    /// P value for locally typical sampling
    /// </summary>
    public float TypicalP { get; set; }

    /// <summary>
    /// P value for TopP sampling
    /// </summary>
    public float TopP { get; set; } = 1f;

    /// <summary>
    /// P value for MinP sampling
    /// </summary>
    public float MinP { get; set; }

    /// <summary>
    /// Whether the newline token should be penalized; when false its logit is protected from being modified by the repeat penalty
    /// </summary>
    public bool PenalizeNewline { get; set; } = false;

    /// <inheritdoc />
    protected override void ProcessLogits(SafeLLamaContextHandle ctx, Span<float> logits, ReadOnlySpan<LLamaToken> lastTokens)
    {
        // Apply logit bias
        foreach (var (key, value) in LogitBias)
            logits[key] += value;
    }

    /// <inheritdoc />
    protected override LLamaToken ProcessTokenDataArray(SafeLLamaContextHandle ctx, LLamaTokenDataArray candidates, ReadOnlySpan<LLamaToken> lastTokens)
    {
        // Only apply repetition penalty if we really must. Otherwise avoid all this work
        if (lastTokens.Length > 0 && (RepeatPenalty != 0 || AlphaFrequency != 0 || AlphaPresence != 0))
        {
            // Save the logit value for the newline token, but only when it must be protected (i.e. not penalized)
            var (nlIndex, nlLogit) = PenalizeNewline ? (-1, 0f) : GetNewlineLogit(ctx, candidates);

            // Apply penalties to candidates
            candidates.RepetitionPenalty(ctx, lastTokens, RepeatPenalty, AlphaFrequency, AlphaPresence);

            // Restore newline token
            if (!PenalizeNewline)
                SetNewlineLogit(ctx, candidates, nlIndex, nlLogit);
        }

        // Apply the normal llama.cpp pipeline
        candidates.ApplyGrammar(ctx, Grammar);
        candidates.TopK(ctx, TopK);
        candidates.TailFree(ctx, TailFreeZ);
        candidates.LocallyTypical(ctx, TypicalP);
        candidates.TopP(ctx, TopP);
        candidates.MinP(ctx, MinP);
        candidates.Temperature(ctx, Temperature);
        return candidates.SampleToken(ctx);
    }

    private static (int, float) GetNewlineLogit(SafeLLamaContextHandle ctx, LLamaTokenDataArray candidates)
    {
        var nlToken = ctx.ModelHandle.Tokens.Newline;
        if (nlToken.HasValue)
        {
            // Try using the ID as an index
            if (candidates.data.Span[(int)nlToken].id == nlToken)
                return ((int)nlToken, candidates.data.Span[(int)nlToken].logit);

            // Exhaustive search
            var span = candidates.data.Span;
            for (var i = 0; i < span.Length; i++)
            {
                if (span[i].id == nlToken)
                    return (i, span[i].logit);
            }
        }

        return (-1, 0);
    }

    private static void SetNewlineLogit(SafeLLamaContextHandle ctx, LLamaTokenDataArray candidates, int indexHint, float logit)
    {
        var nlToken = ctx.ModelHandle.Tokens.Newline;
        if (!nlToken.HasValue)
            return;

        // Try checking the index where we found it last time. It might not be there if `RepetitionPenalty` changed order
        if (indexHint >= 0 && candidates.data.Span[indexHint].id == nlToken)
        {
            candidates.data.Span[indexHint].logit = logit;
            return;
        }

        // Didn't find it, do an exhaustive search for it
        var span = candidates.data.Span;
        for (var i = 0; i < candidates.data.Length; i++)
        {
            if (span[i].id == nlToken)
            {
                span[i].logit = logit;
                return;
            }
        }
    }

    /// <inheritdoc />
    public override void Accept(SafeLLamaContextHandle ctx, LLamaToken token)
    {
        Grammar?.AcceptToken(ctx, token);
    }

    /// <inheritdoc />
    public override ISamplingPipeline Clone()
    {
        var clone = new DefaultSamplingPipeline();

        foreach (var (k, v) in LogitBias)
            clone.LogitBias.Add(k, v);

        clone.Grammar = Grammar?.Clone();
        clone.RepeatPenalty = RepeatPenalty;
        clone.AlphaFrequency = AlphaFrequency;
        clone.AlphaPresence = AlphaPresence;
        clone.Temperature = Temperature;
        clone.TopK = TopK;
        clone.TailFreeZ = TailFreeZ;
        clone.TypicalP = TypicalP;
        clone.TopP = TopP;
        clone.MinP = MinP;
        clone.PenalizeNewline = PenalizeNewline;

        return clone;
    }
}
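
For context, here is a minimal sketch of how a caller might configure this pipeline. The property names and valid ranges come from the class above; the `InferenceParams.SamplingPipeline` wiring is an assumption about the surrounding library and is not part of this file.

```csharp
using LLama.Common;
using LLama.Sampling;

// Configure a pipeline roughly matching common llama.cpp defaults.
var pipeline = new DefaultSamplingPipeline
{
    Temperature = 0.8f,
    TopK = 40,
    TopP = 0.95f,
    MinP = 0.05f,
    RepeatPenalty = 1.1f,
    AlphaFrequency = 0f,     // must be within [-2, 2]
    AlphaPresence = 0f,      // must be within [-2, 2]
    PenalizeNewline = false, // protect the newline logit from the repeat penalty
};

// Bias values are added directly to the raw logits in ProcessLogits.
pipeline.LogitBias[13] = -5f;

// Hypothetical wiring: hand the pipeline to the inference parameters used by an executor.
var inferenceParams = new InferenceParams
{
    SamplingPipeline = pipeline,
};
```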