
LLamaStatelessExecutor.cs 5.6 kB

using LLama.Abstractions;
using LLama.Common;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using LLama.Extensions;

namespace LLama
{
    using llama_token = Int32;
    /// <summary>
    /// This executor infers the input as a one-time job. Previous inputs won't impact the
    /// response to the current input.
    /// </summary>
    public class StatelessExecutor
        : ILLamaExecutor
    {
        private readonly LLamaWeights _weights;
        private readonly IModelParams _params;

        /// <summary>
        /// The context used by the executor when running the inference.
        /// </summary>
        public LLamaContext Context { get; private set; }
        /// <summary>
        /// Create a new stateless executor which will use the given model
        /// </summary>
        /// <param name="weights"></param>
        /// <param name="params"></param>
        public StatelessExecutor(LLamaWeights weights, IModelParams @params)
        {
            _weights = weights;
            _params = @params;

            // Create and immediately dispose a context, so the Context property is never
            // null; a fresh context is created for each call to InferAsync
            Context = _weights.CreateContext(_params);
            Context.Dispose();
        }
        /// <summary>
        /// Create a new stateless executor which will use the model used to create the given context
        /// </summary>
        /// <param name="context"></param>
        [Obsolete("Use the constructor which automatically creates contexts using the LLamaWeights")]
        public StatelessExecutor(LLamaContext context)
        {
            _weights = new LLamaWeights(context.NativeHandle.ModelHandle, context.Params.Encoding);
            _params = context.Params;

            Context = _weights.CreateContext(_params);
            Context.Dispose();
        }
        /// <inheritdoc />
        public async IAsyncEnumerable<string> InferAsync(string text, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
        {
            // Dispose the context left over from the previous call (it should always already
            // be disposed), then create a fresh one for this inference so that nothing
            // carries over between calls
            if (!Context.NativeHandle.IsClosed)
                Context.Dispose();
            Context = _weights.CreateContext(_params);
            using var context = Context;

            if (inferenceParams != null)
            {
                if (inferenceParams.TokensKeep > Context.ContextSize)
                    throw new ArgumentOutOfRangeException(nameof(inferenceParams), $"TokensKeep ({inferenceParams.TokensKeep}) cannot be larger than ContextSize ({Context.ContextSize})");
            }
            cancellationToken.ThrowIfCancellationRequested();

            var antiprompts = inferenceParams?.AntiPrompts.ToArray() ?? Array.Empty<string>();
            inferenceParams ??= new InferenceParams();

            // Pre-fill the repetition-penalty window with padding tokens
            var lastTokens = new List<llama_token>(inferenceParams.RepeatLastTokensCount);
            for (var i = 0; i < inferenceParams.RepeatLastTokensCount; i++)
                lastTokens.Add(0);

            // Evaluate the prompt
            var tokens = Context.Tokenize(text).ToList();
            await Task.Run(() => { Context.Eval(tokens, 1); }, cancellationToken)
                      .ConfigureAwait(false);

            lastTokens.AddRange(tokens);
            var n_past = 1 + tokens.Count;

            var mu = (float?)null;
            var max_tokens = inferenceParams.MaxTokens < 0 ? int.MaxValue : inferenceParams.MaxTokens;
            for (var i = 0; i < max_tokens; i++)
            {
                if (cancellationToken.IsCancellationRequested)
                    break;

                // Apply the repetition penalties to the candidate tokens, then sample the next token
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? Context.ContextSize : inferenceParams.RepeatLastTokensCount;
                var tokenDataArray = Context.ApplyPenalty(lastTokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                var id = Context.Sample(tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP, inferenceParams.Grammar);

                lastTokens.Add(id);

                var response = Context.TokenToString(id);
                yield return response;

                tokens.Clear();
                tokens.Add(id);

                // Check if any of the antiprompts have been generated
                if (tokens.TokensEndsWithAnyString(antiprompts, Context))
                    break;
                // When the context runs out of room, keep the first TokensKeep tokens and
                // re-feed the second half of the remaining history, based on this logic:
                // https://github.com/ggerganov/llama.cpp/blob/master/examples/main/main.cpp#L433
                // (a worked example follows this block)
                if (n_past + tokens.Count > Context.ContextSize)
                {
                    var n_left = n_past - inferenceParams.TokensKeep;

                    n_past = Math.Max(1, inferenceParams.TokensKeep);

                    tokens.Clear();
                    tokens.AddRange(lastTokens.Skip(lastTokens.Count - n_left / 2).Take(n_left / 2));
                }
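                // Worked example (illustrative numbers, not from the original source): with
                // ContextSize = 512 and TokensKeep = 4, the branch above first fires when
                // n_past reaches 512: n_left = 508, n_past resets to 4, and the last
                // n_left / 2 = 254 tokens of history are queued in `tokens`, so the Eval call
                // below re-feeds them and generation continues with a roughly halved window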
                // ReSharper disable once AccessToModifiedClosure (Justification: n_past is modified inside and outside the capture, but not concurrently)
                n_past = await Task.Run(() => Context.Eval(tokens, n_past), cancellationToken)
                                   .ConfigureAwait(false);
            }
        }
    }
}
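
For context, here is a minimal sketch of how this stateless executor is typically driven. It assumes the surrounding LLamaSharp API of the same era (ModelParams, LLamaWeights.LoadFromFile, and InferenceParams from LLama.Common); the model path and anti-prompt are placeholders, not values from this file.

using System;
using System.Threading.Tasks;
using LLama;
using LLama.Common;

public static class StatelessExecutorExample
{
    public static async Task Main()
    {
        // Placeholder path; point this at a real model file
        var parameters = new ModelParams("path/to/model.bin") { ContextSize = 512 };
        using var weights = LLamaWeights.LoadFromFile(parameters);

        var executor = new StatelessExecutor(weights, parameters);
        var inferenceParams = new InferenceParams
        {
            MaxTokens = 64,
            AntiPrompts = new[] { "User:" },
        };

        // Each InferAsync call runs in a fresh context: no state carries over between prompts
        await foreach (var token in executor.InferAsync("Question: what is 1 + 1? Answer:", inferenceParams))
            Console.Write(token);
    }
}

Because the executor recreates its context on every call, repeated prompts pay the full prompt-evaluation cost each time; that is the intended trade-off of the "stateless" design described in the class summary above.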