
DefaultSamplingPipeline.cs

using System;
using System.Collections.Generic;
using LLama.Extensions;
using LLama.Native;

namespace LLama.Sampling;

/// <summary>
/// An implementation of ISamplingPipeline which mimics the default llama.cpp sampling
/// </summary>
public sealed class DefaultSamplingPipeline
    : BaseSamplingPipeline
{
    /// <summary>
    /// Bias values to add to certain logits
    /// </summary>
    public Dictionary<int, float> LogitBias { get; } = new();

    /// <summary>
    /// Repetition penalty, as described in https://arxiv.org/abs/1909.05858
    /// </summary>
    public float RepeatPenalty { get; set; }

    /// <summary>
    /// Frequency penalty as described by OpenAI: https://platform.openai.com/docs/api-reference/chat/create<br />
    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text
    /// so far, decreasing the model's likelihood to repeat the same line verbatim.
    /// </summary>
    public float AlphaFrequency
    {
        get => _alphaFreq;
        set
        {
            if (value < -2)
                throw new ArgumentOutOfRangeException(nameof(value), "AlphaFrequency must be greater than -2");
            if (value > 2)
                throw new ArgumentOutOfRangeException(nameof(value), "AlphaFrequency must be less than 2");
            _alphaFreq = value;
        }
    }
    private float _alphaFreq;

    /// <summary>
    /// Presence penalty as described by OpenAI: https://platform.openai.com/docs/api-reference/chat/create<br />
    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the
    /// text so far, increasing the model's likelihood to talk about new topics.
    /// </summary>
    public float AlphaPresence
    {
        get => _alphaPresence;
        set
        {
            if (value < -2)
                throw new ArgumentOutOfRangeException(nameof(value), "AlphaPresence must be greater than -2");
            if (value > 2)
                throw new ArgumentOutOfRangeException(nameof(value), "AlphaPresence must be less than 2");
            _alphaPresence = value;
        }
    }
    private float _alphaPresence;
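
    // Note: a rough sketch of how these values are applied (this follows llama.cpp's repetition
    // penalty sampler and is not spelled out in this file): for every candidate token that appears
    // in the recent token history, the logit is scaled by RepeatPenalty and then reduced by
    // approximately `count * AlphaFrequency + (count > 0 ? AlphaPresence : 0)`.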

    /// <summary>
    /// Temperature to apply (higher temperature is more "creative")
    /// </summary>
    public float Temperature { get; set; } = 0.75f;

    /// <summary>
    /// Number of tokens to keep in TopK sampling
    /// </summary>
    public int TopK { get; set; }

    /// <summary>
    /// Z value for tail free sampling
    /// </summary>
    public float TailFreeZ { get; set; }

    /// <summary>
    /// P value for locally typical sampling
    /// </summary>
    public float TypicalP { get; set; }

    /// <summary>
    /// P value for TopP sampling
    /// </summary>
    public float TopP { get; set; } = 1f;

    /// <summary>
    /// P value for MinP sampling
    /// </summary>
    public float MinP { get; set; }

    /// <summary>
    /// Whether the newline token should be penalized like any other token. When false, its logit is
    /// saved before the repeat/frequency/presence penalties are applied and restored afterwards.
    /// </summary>
    public bool PenalizeNewline { get; set; } = false;

    private float[]? _logits;

    /// <inheritdoc />
    protected override ReadOnlySpan<float> ProcessLogits(SafeLLamaContextHandle ctx, ReadOnlySpan<float> logits, ReadOnlySpan<LLamaToken> lastTokens)
    {
        // Skip work if possible
        if (LogitBias.Count == 0)
            return logits;

        // Create a temporary array to hold logits
        if (_logits == null || _logits.Length < logits.Length)
            _logits = new float[logits.Length];

        // Copy logits into the temporary array
        logits.CopyTo(_logits);
        var mutable = _logits.AsSpan(0, logits.Length);

        // Apply logit bias
        foreach (var (key, value) in LogitBias)
            mutable[key] += value;

        return mutable;
    }

    /// <inheritdoc />
    protected override LLamaToken ProcessTokenDataArray(SafeLLamaContextHandle ctx, LLamaTokenDataArray candidates, ReadOnlySpan<LLamaToken> lastTokens)
    {
        // Only apply repetition penalty if we really must. Otherwise avoid all this work
        if (lastTokens.Length > 0 && (RepeatPenalty != 0 || AlphaFrequency != 0 || AlphaPresence != 0))
        {
            // Save the logit value for the newline token, but only if it is meant to be protected from the penalties
            var (nlIndex, nlLogit) = !PenalizeNewline ? GetNewlineLogit(ctx, candidates) : (-1, 0f);

            // Apply penalties to candidates
            candidates.RepetitionPenalty(ctx, lastTokens, RepeatPenalty, AlphaFrequency, AlphaPresence);

            // Restore the newline token logit
            if (!PenalizeNewline)
                SetNewlineLogit(ctx, candidates, nlIndex, nlLogit);
        }

        // Apply the normal llama.cpp pipeline
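        // The stages below run in the same order as llama.cpp's default sampler chain:
        // grammar -> top-k -> tail-free -> locally typical -> top-p -> min-p -> temperature -> sample.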
        candidates.ApplyGrammar(ctx, Grammar);
        candidates.TopK(ctx, TopK);
        candidates.TailFree(ctx, TailFreeZ);
        candidates.LocallyTypical(ctx, TypicalP);
        candidates.TopP(ctx, TopP);
        candidates.MinP(ctx, MinP);
        candidates.Temperature(ctx, Temperature);
        return candidates.SampleToken(ctx);
    }

    private static (int, float) GetNewlineLogit(SafeLLamaContextHandle ctx, LLamaTokenDataArray candidates)
    {
        var nlToken = NativeApi.llama_token_nl(ctx.ModelHandle);

        // Try using the token ID as an index into the candidates array
        if (candidates.data.Span[(int)nlToken].id == nlToken)
            return ((int)nlToken, candidates.data.Span[(int)nlToken].logit);

        // Fall back to an exhaustive search
        var span = candidates.data.Span;
        for (var i = 0; i < span.Length; i++)
        {
            if (span[i].id == nlToken)
                return (i, span[i].logit);
        }

        return (-1, 0);
    }

    private static void SetNewlineLogit(SafeLLamaContextHandle ctx, LLamaTokenDataArray candidates, int indexHint, float logit)
    {
        var nlToken = NativeApi.llama_token_nl(ctx.ModelHandle);

        // Try checking the index where we found it last time. It might not be there if `RepetitionPenalty` changed the order
        if (indexHint >= 0 && candidates.data.Span[indexHint].id == nlToken)
        {
            candidates.data.Span[indexHint].logit = logit;
            return;
        }

        // Didn't find it there, do an exhaustive search for it
        var span = candidates.data.Span;
        for (var i = 0; i < span.Length; i++)
        {
            if (span[i].id == nlToken)
            {
                span[i].logit = logit;
                return;
            }
        }
    }

    /// <inheritdoc />
    public override void Accept(SafeLLamaContextHandle ctx, LLamaToken token)
    {
        Grammar?.AcceptToken(ctx, token);
    }

    /// <inheritdoc />
    public override ISamplingPipeline Clone()
    {
        var clone = new DefaultSamplingPipeline();

        foreach (var (k, v) in LogitBias)
            clone.LogitBias.Add(k, v);

        clone.Grammar = Grammar?.Clone();
        clone.RepeatPenalty = RepeatPenalty;
        clone.AlphaFrequency = AlphaFrequency;
        clone.AlphaPresence = AlphaPresence;
        clone.Temperature = Temperature;
        clone.TopK = TopK;
        clone.TailFreeZ = TailFreeZ;
        clone.TypicalP = TypicalP;
        clone.TopP = TopP;
        clone.MinP = MinP;
        clone.PenalizeNewline = PenalizeNewline;

        return clone;
    }
}
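
// --- Usage sketch (not part of the original file) ---
// A minimal example of how this pipeline might be configured and handed to the library for
// inference. The `InferenceParams.SamplingPipeline` property and the token id used for the
// logit bias are assumptions for illustration, not taken from this file.
//
//     var pipeline = new DefaultSamplingPipeline
//     {
//         Temperature = 0.6f,
//         TopK = 40,
//         TopP = 0.95f,
//         RepeatPenalty = 1.1f,
//         AlphaFrequency = 0.1f,
//         AlphaPresence = 0.1f,
//     };
//     pipeline.LogitBias[15043] = -2f;   // hypothetical token id to discourage
//
//     var inferenceParams = new InferenceParams
//     {
//         SamplingPipeline = pipeline,
//     };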