
LLamaStatelessExecutor.cs

using LLama.Abstractions;
using LLama.Common;
using LLama.Native;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;

namespace LLama
{
    using llama_token = Int32;

    /// <summary>
    /// This executor treats each input as a one-off job: previous inputs have no
    /// effect on the response to the current input.
    /// </summary>
    public class StatelessExecutor : ILLamaExecutor
    {
        private LLamaModel _model;
        private LLamaModel.State _originalState;

        /// <summary>
        /// The model used by the executor when running the inference.
        /// </summary>
        public LLamaModel Model => _model;

        /// <summary>
        /// Create a stateless executor for the given model.
        /// </summary>
        /// <param name="model">The LLama model.</param>
        public StatelessExecutor(LLamaModel model)
        {
            _model = model;

            // Evaluate a single space so the context has a valid initial state,
            // then snapshot that state so it can be restored after every inference.
            var tokens = model.Tokenize(" ", true).ToArray();
            Utils.Eval(_model.NativeHandle, tokens, 0, tokens.Length, 0, _model.Params.Threads);
            _originalState = model.GetState();
        }

        /// <inheritdoc />
        public IEnumerable<string> Infer(string text, InferenceParams? inferenceParams = null, CancellationToken cancellationToken = default)
        {
            cancellationToken.ThrowIfCancellationRequested();

            // One token (the space evaluated in the constructor) is already in the context.
            int n_past = 1;

            if (inferenceParams is null)
            {
                inferenceParams = new InferenceParams();
            }

            // Fill the repetition-penalty window with zeros, mirroring last_n_tokens in llama.cpp.
            List<llama_token> lastTokens = new(inferenceParams.RepeatLastTokensCount);
            for (int i = 0; i < inferenceParams.RepeatLastTokensCount; i++)
            {
                lastTokens.Add(0);
            }

            List<llama_token> tokens = _model.Tokenize(text, true).ToList();
            int n_prompt_tokens = tokens.Count;

            Utils.Eval(_model.NativeHandle, tokens.ToArray(), 0, n_prompt_tokens, n_past, _model.Params.Threads);
            lastTokens.AddRange(tokens);
            n_past += n_prompt_tokens;
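
            // Generation loop: sample one token per iteration until MaxTokens is
            // reached, an antiprompt is produced, or the caller cancels.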
            int max_tokens = inferenceParams.MaxTokens < 0 ? int.MaxValue : inferenceParams.MaxTokens;
            for (int i = 0; i < max_tokens; i++)
            {
                if (cancellationToken.IsCancellationRequested)
                {
                    _model.LoadState(_originalState);
                    break;
                }

                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? _model.ContextSize : inferenceParams.RepeatLastTokensCount;
                var tokenDataArray = _model.ApplyPenalty(lastTokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);
                var id = _model.Sample(tokenDataArray, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP);
                lastTokens.Add(id);

                string response = Utils.TokenToString(id, _model.NativeHandle, _model.Encoding);
                yield return response;

                tokens.Clear();
                tokens.Add(id);

                if (inferenceParams.AntiPrompts is not null && inferenceParams.AntiPrompts.Count() > 0)
                {
                    // Decode the recent tokens and stop when the output ends with any antiprompt.
                    string last_output = "";
                    foreach (var token in lastTokens)
                    {
                        last_output += Utils.PtrToString(NativeApi.llama_token_to_str(_model.NativeHandle, token), _model.Encoding);
                    }

                    bool should_break = false;
                    foreach (var antiprompt in inferenceParams.AntiPrompts)
                    {
                        if (last_output.EndsWith(antiprompt))
                        {
                            should_break = true;
                            break;
                        }
                    }

                    if (should_break)
                    {
                        break;
                    }
                }

                // When the context window is full, keep the first TokensKeep tokens and
                // re-insert half of the remaining history so generation can continue.
                if (n_past + tokens.Count > _model.ContextSize)
                {
                    int n_left = n_past - inferenceParams.TokensKeep;
                    n_past = Math.Max(1, inferenceParams.TokensKeep);

                    // Insert n_left/2 tokens from lastTokens at the start of the batch to re-evaluate.
                    tokens.InsertRange(0, lastTokens.Take(lastTokens.Count - tokens.Count).Skip(_model.ContextSize - n_left / 2 - tokens.Count));
                }

                n_past = _model.Eval(tokens.ToArray(), n_past);
            }

            // Restore the snapshot taken in the constructor so this call leaves no state behind.
            _model.LoadState(_originalState);
        }

        /// <inheritdoc />
        public async IAsyncEnumerable<string> InferAsync(string text, InferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
        {
            foreach (var result in Infer(text, inferenceParams, cancellationToken))
            {
                yield return result;
            }
        }
    }
}
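
Because the saved state is restored after every call, each Infer call starts from a clean context. A minimal usage sketch follows; the model path, context size, and sampling values are placeholder assumptions, and the ModelParams/InferenceParams members are assumed from the same LLamaSharp version this file belongs to:

using System;
using System.Collections.Generic;
using LLama;
using LLama.Common;

// Sketch only: "path/to/model.bin" and all parameter values are placeholders.
var model = new LLamaModel(new ModelParams("path/to/model.bin", contextSize: 1024));
var executor = new StatelessExecutor(model);

var inferenceParams = new InferenceParams
{
    Temperature = 0.6f,
    MaxTokens = 128,
    AntiPrompts = new List<string> { "User:" }
};

// Each call is independent: the second question is answered
// with no memory of the first.
foreach (var token in executor.Infer("Question: what is 1 + 1? Answer:", inferenceParams))
    Console.Write(token);

foreach (var token in executor.Infer("Question: what did I just ask? Answer:", inferenceParams))
    Console.Write(token);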