
LLamaStatelessExecutor.cs 5.4 kB

using LLama.Abstractions;
using LLama.Common;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using LLama.Extensions;
using LLama.Native;
using Microsoft.Extensions.Logging;

namespace LLama
{
    using llama_token = Int32;

    /// <summary>
    /// This executor runs each inference as a one-off job: previous inputs have no
    /// effect on the response to the current input.
    /// </summary>
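    /// <example>
    /// A minimal usage sketch (the model path below is a placeholder):
    /// <code>
    /// var parameters = new ModelParams("path/to/model.gguf");
    /// using var weights = LLamaWeights.LoadFromFile(parameters);
    /// var executor = new StatelessExecutor(weights, parameters);
    ///
    /// await foreach (var token in executor.InferAsync("Question: what is 1 + 1? Answer:"))
    ///     Console.Write(token);
    /// </code>
    /// </example>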
    public class StatelessExecutor
        : ILLamaExecutor
    {
        private readonly LLamaWeights _weights;
        private readonly IContextParams _params;
        private readonly ILogger? _logger;

        /// <summary>
        /// The context used by the executor when running the inference.
        /// </summary>
        public LLamaContext Context { get; private set; }

        /// <summary>
        /// Create a new stateless executor which will use the given model
        /// </summary>
        /// <param name="weights"></param>
        /// <param name="params"></param>
        /// <param name="logger"></param>
        public StatelessExecutor(LLamaWeights weights, IContextParams @params, ILogger? logger = null)
        {
            _weights = weights;
            _params = @params;
            _logger = logger;
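
            // Create a context once and dispose it immediately: this checks the given
            // parameters can actually build a valid context (a fresh context is created
            // for each InferAsync call).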
            Context = _weights.CreateContext(_params, logger);
            Context.Dispose();
        }

        /// <inheritdoc />
        public async IAsyncEnumerable<string> InferAsync(string text, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
        {
            // Ensure the context from the previous call is disposed (it always should be)
            if (!Context.NativeHandle.IsClosed)
                Context.Dispose();

            // Create a fresh inference context which is disposed when this method exits,
            // so no state carries over between calls
            using var context = _weights.CreateContext(_params, _logger);
            Context = context;

            if (inferenceParams != null)
            {
                if (inferenceParams.TokensKeep > Context.ContextSize)
                    throw new ArgumentOutOfRangeException(nameof(inferenceParams), $"TokensKeep ({inferenceParams.TokensKeep}) cannot be larger than ContextSize ({Context.ContextSize})");
            }

            cancellationToken.ThrowIfCancellationRequested();

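            // Antiprompts are matched against the tail of the generated tokens;
            // generation stops early if one appears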
            var antiprompts = inferenceParams?.AntiPrompts.ToArray() ?? Array.Empty<string>();
            inferenceParams ??= new InferenceParams();

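            // Pre-fill the repetition-penalty window with padding (id 0) tokens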
            var lastTokens = new List<llama_token>(inferenceParams.RepeatLastTokensCount);
            for (var i = 0; i < inferenceParams.RepeatLastTokensCount; i++)
                lastTokens.Add(0);

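            // Tokenize the prompt and evaluate it through the model on a worker thread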
            var tokens = Context.Tokenize(text).ToList();
            await Task.Run(() => { Context.Eval(tokens, 1); }, cancellationToken)
                      .ConfigureAwait(false);

            lastTokens.AddRange(tokens);
            var n_past = 1 + tokens.Count;

            var mu = (float?)null;
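            // A negative MaxTokens means there is no fixed generation limit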
            var max_tokens = inferenceParams.MaxTokens < 0 ? int.MaxValue : inferenceParams.MaxTokens;
            for (var i = 0; i < max_tokens; i++)
            {
                if (cancellationToken.IsCancellationRequested)
                    break;

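                // A negative RepeatLastTokensCount means penalize over the whole context window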
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? Context.ContextSize : inferenceParams.RepeatLastTokensCount;

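                // Apply repetition/frequency/presence penalties and logit bias,
                // then sample the next token from the adjusted distribution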
                var tokenDataArray = Context.ApplyPenalty(lastTokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                var id = Context.Sample(tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP, inferenceParams.Grammar);

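                // Record the sampled token and stream its text to the caller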
                lastTokens.Add(id);
                yield return Context.TokenToString(id);

                tokens.Clear();
                tokens.Add(id);

                // Check if any of the antiprompts have been generated
                if (lastTokens.TokensEndsWithAnyString(antiprompts, Context))
                    break;

                // When the context window runs out, keep the first TokensKeep tokens and
                // discard half of what remains, based on this logic:
                // https://github.com/ggerganov/llama.cpp/blob/master/examples/main/main.cpp#L497
                if (n_past + tokens.Count >= Context.ContextSize)
                {
                    var n_left = n_past - inferenceParams.TokensKeep - 1;
                    var n_discard = n_left / 2;

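                    // Remove the discarded span from the KV cache, then shift the remaining
                    // entries down so their positions stay contiguous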
                    NativeApi.llama_kv_cache_seq_rm(Context.NativeHandle, (LLamaSeqId)0, inferenceParams.TokensKeep + 1, inferenceParams.TokensKeep + n_discard + 1);
                    NativeApi.llama_kv_cache_seq_shift(Context.NativeHandle, (LLamaSeqId)0, inferenceParams.TokensKeep + 1 + n_discard, n_past, -n_discard);

                    n_past -= n_discard;
                }

                // ReSharper disable once AccessToModifiedClosure (Justification: n_past is modified inside and outside the capture, but not concurrently)
                n_past = await Task.Run(() => Context.Eval(tokens, n_past), cancellationToken)
                                   .ConfigureAwait(false);
            }
        }
    }
}