
LLamaStatelessExecutor.cs

using LLama.Abstractions;
using LLama.Common;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
using LLama.Exceptions;
using LLama.Native;
using LLama.Sampling;
using Microsoft.Extensions.Logging;

namespace LLama
{
    /// <summary>
    /// This executor infers the input as a one-time job. Previous inputs won't impact
    /// the response to the current input.
    /// </summary>
    public class StatelessExecutor
        : ILLamaExecutor
    {
        private readonly LLamaWeights _weights;
        private readonly IContextParams _params;
        private readonly ILogger? _logger;
        private readonly LLamaBatch _batch;

        // LLava Section
        public bool IsMultiModal => false;
        public bool MultiModalProject { get; }
        public LLavaWeights ClipModel { get; }
        public string ImagePath { get; set; }

        /// <summary>
        /// The context used by the executor when running the inference.
        /// </summary>
        public LLamaContext Context { get; private set; }

        /// <summary>
        /// Create a new stateless executor which will use the given model
        /// </summary>
        /// <param name="weights">The model weights to run inference with</param>
        /// <param name="params">Parameters used to create the context</param>
        /// <param name="logger">Optional logger</param>
        public StatelessExecutor(LLamaWeights weights, IContextParams @params, ILogger? logger = null)
        {
            _weights = weights;
            _params = @params;
            _logger = logger;
            _batch = new LLamaBatch();
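
            // Create (and immediately dispose) a context here so the Context property is
            // initialised and never null; InferAsync recreates a fresh context per call.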
            Context = _weights.CreateContext(_params, logger);
            Context.Dispose();
        }

        /// <inheritdoc />
        public async IAsyncEnumerable<string> InferAsync(string prompt, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
        {
            // Ensure the context from last time is disposed (it always should be)
            if (!Context.NativeHandle.IsClosed)
                Context.Dispose();

            // Create an inference context which will be disposed when this method exits
            using var context = _weights.CreateContext(_params, _logger);
            Context = context;

            // Reset the sampling pipeline (if there is one)
            inferenceParams?.SamplingPipeline?.Reset();

            // Sanity check inference params
            inferenceParams ??= new InferenceParams();
            if (inferenceParams.TokensKeep > Context.ContextSize)
                throw new ArgumentOutOfRangeException(nameof(inferenceParams), $"TokensKeep ({inferenceParams.TokensKeep}) cannot be larger than ContextSize ({Context.ContextSize})");

            // Create decoders for the token stream
            var decoder = new StreamingTokenDecoder(Context);
            var antiprocessor = new AntipromptProcessor(inferenceParams.AntiPrompts);

            // Keep track of the last N tokens emitted
            var repeat_last_n = Math.Max(0, inferenceParams.RepeatLastTokensCount < 0 ? _weights.ContextSize : inferenceParams.RepeatLastTokensCount);
            var lastTokens = new List<LLamaToken>(repeat_last_n);
            for (var i = 0; i < repeat_last_n; i++)
                lastTokens.Add(0);
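            // (pre-filled with placeholder token 0 so the repetition-penalty window
            // always contains repeat_last_n entries)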

            // Tokenize the prompt
            var tokens = Context.Tokenize(prompt).ToList();
            lastTokens.AddRange(tokens);

            // Evaluate the prompt, in chunks smaller than the max batch size
            var n_past = 0;
            var (r, _) = Context.NativeHandle.Decode(tokens, LLamaSeqId.Zero, _batch, ref n_past);
            if (r != DecodeResult.Ok)
                throw new LLamaDecodeError(r);

            // Begin loop, evaluating one token at a time
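            // (mu carries Mirostat sampling state between iterations; Context.Sample
            // updates it by reference)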
            var mu = (float?)null;
            var max_tokens = inferenceParams.MaxTokens < 0 ? int.MaxValue : inferenceParams.MaxTokens;
            for (var i = 0; i < max_tokens && !cancellationToken.IsCancellationRequested; i++)
            {
                LLamaToken id;
                if (inferenceParams.SamplingPipeline is not null)
                {
                    id = inferenceParams.SamplingPipeline.Sample(Context.NativeHandle, Context.NativeHandle.GetLogitsIth(_batch.TokenCount - 1), lastTokens);
                }
                else
                {
                    // Penalize the generated tokens by various penalties
                    var tokenDataArray = Context.ApplyPenalty(_batch.TokenCount - 1, lastTokens, inferenceParams.LogitBias, repeat_last_n,
                        inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                    // Sample a single token
                    id = Context.Sample(
                        tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                        inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP, inferenceParams.Grammar,
                        inferenceParams.MinP
                    );
                }

                // Check if this is the EOS token
                if (id == _weights.EndOfSentenceToken)
                    break;

                // Decode this token into text
                decoder.Add(id);
                var decoded = decoder.Read();
                yield return decoded;

                // Check if any of the antiprompts have been generated
                if (antiprocessor.Add(decoded))
                    break;

                lastTokens.Add(id);
                tokens.Clear();
                tokens.Add(id);

                // When we run out of context space, shift the KV cache
                // (based on this logic: https://github.com/ggerganov/llama.cpp/blob/master/examples/main/main.cpp#L497)
                if (n_past + tokens.Count >= Context.ContextSize)
                {
                    var n_left = n_past - inferenceParams.TokensKeep - 1;
                    var n_discard = n_left / 2;
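
                    // Remove n_discard tokens after the TokensKeep prefix from the KV
                    // cache, then shift the remaining tokens back to fill the gap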
                    NativeApi.llama_kv_cache_seq_rm(Context.NativeHandle, (LLamaSeqId)0, inferenceParams.TokensKeep + 1, inferenceParams.TokensKeep + n_discard + 1);
                    NativeApi.llama_kv_cache_seq_add(Context.NativeHandle, (LLamaSeqId)0, inferenceParams.TokensKeep + 1 + n_discard, n_past, -n_discard);

                    n_past -= n_discard;
                }

                // Evaluate with this new token
                _batch.Clear();
                _batch.Add(id, n_past++, LLamaSeqId.Zero, true);

                var returnCode = await context.DecodeAsync(_batch, cancellationToken);
                if (returnCode != 0)
                    throw new LLamaDecodeError(returnCode);
            }
        }
    }
}
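
For reference, a minimal sketch of how this executor is typically driven. It assumes the surrounding LLamaSharp API (ModelParams, LLamaWeights.LoadFromFile, InferenceParams); the model path is a placeholder.

using System;
using System.Collections.Generic;
using LLama;
using LLama.Common;

// Load the model weights once; ModelParams implements IContextParams, so the
// same object can be passed straight to the StatelessExecutor constructor.
var parameters = new ModelParams("path/to/model.gguf") { ContextSize = 1024 };
using var weights = LLamaWeights.LoadFromFile(parameters);
var executor = new StatelessExecutor(weights, parameters);

var inferenceParams = new InferenceParams
{
    MaxTokens = 64,
    AntiPrompts = new List<string> { "User:" },
};

// Each InferAsync call is independent: no state carries over between prompts.
await foreach (var text in executor.InferAsync("Question: what is an apple? Answer:", inferenceParams))
    Console.Write(text);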