
LLamaInteractExecutor.cs

using LLama.Common;
using LLama.Native;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
using System.Threading;
using System.Threading.Tasks;

namespace LLama
{
    using llama_token = Int32;

    public class InteractiveExecutor : StatefulExecutorBase
    {
        // Whether the next inference call still has to process the initial prompt.
        bool _is_prompt_run = true;
        // The token sequence that encodes "\n"; used to replace a sampled EOS token.
        llama_token[] _llama_token_newline;

        public InteractiveExecutor(LLamaModel model) : base(model)
        {
            _llama_token_newline = Utils.Tokenize(_model.NativeHandle, "\n", false, _model.Encoding).ToArray();
        }
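
        /// <summary>
        /// Serialize the executor's state (consumed tokens, pending tokens, session info)
        /// to a JSON file.
        /// </summary>
        /// <param name="filename">The path of the file to save the state to.</param>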
        public override void SaveState(string filename)
        {
            InteractiveExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps,
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds,
                LastTokens = _last_n_tokens.ToArray(),
                LLamaNewlineTokens = _llama_token_newline,
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens,
                LastTokensCapacity = _last_n_tokens.Capacity
            };
            using (FileStream fs = new FileStream(filename, FileMode.OpenOrCreate, FileAccess.Write))
            {
                JsonSerializer.Serialize<InteractiveExecutorState>(fs, state);
            }
        }
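
        /// <summary>
        /// Restore the executor's state from a JSON file previously written by <see cref="SaveState(string)"/>.
        /// </summary>
        /// <param name="filename">The path of the file to load the state from.</param>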
        public override void LoadState(string filename)
        {
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = JsonSerializer.Deserialize<InteractiveExecutorState>(fs);
                if (state is null)
                {
                    throw new InvalidDataException($"Failed to deserialize executor state from \"{filename}\".");
                }
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps;
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds;
                _last_n_tokens = new FixedSizeQueue<llama_token>(state.LastTokensCapacity, state.LastTokens);
                _llama_token_newline = state.LLamaNewlineTokens;
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens;
            }
        }
        /// <summary>
        /// Decide whether to continue the generation loop: generation continues while there
        /// are remaining tokens and no user input is awaited, or while the initial prompt
        /// is still being processed.
        /// </summary>
        /// <param name="args">The inference state.</param>
        /// <returns><c>true</c> to continue the loop; otherwise <c>false</c>.</returns>
        protected override bool GetLoopCondition(InferStateArgs args)
        {
            return args.RemainedTokens != 0 && !args.WaitForInput || _is_prompt_run;
        }
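
        /// <summary>
        /// Tokenize the incoming text and append the tokens to the pending inputs.
        /// </summary>
        /// <param name="text">The initial prompt or a follow-up user input.</param>
        /// <param name="args">The inference state.</param>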
        protected override void PreprocessInputs(string text, InferStateArgs args)
        {
            if (_is_prompt_run)
            {
                // The first input (the prompt) in interactive mode needs special handling:
                // prepend a space and tokenize with the BOS token.
                text = " " + text;
                _embed_inps = _model.Tokenize(text, true).ToList();
            }
            else
            {
                // Follow-up inputs are appended to the pending tokens, terminated by a newline.
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }
                var line_inp = _model.Tokenize(text, false);
                _embed_inps.AddRange(line_inp);
                args.RemainedTokens -= line_inp.Count();
            }
        }
        /// <summary>
        /// Decide whether to break out of generation, e.g. when an antiprompt or the
        /// end-of-sequence token is encountered, or when the token budget is exhausted.
        /// </summary>
        /// <param name="inferenceParams">The inference parameters.</param>
        /// <param name="args">The inference state.</param>
        /// <param name="extraOutputs">Extra text to emit, such as the end-of-text marker.</param>
        /// <returns><c>true</c> to stop generating; otherwise <c>false</c>.</returns>
        protected override bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, out IEnumerable<string>? extraOutputs)
        {
            extraOutputs = null;
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                {
                    // Detokenize the recent output and wait for user input
                    // as soon as it ends with an antiprompt.
                    var builder = new StringBuilder();
                    foreach (var id in _last_n_tokens)
                    {
                        builder.Append(Utils.PtrToString(NativeApi.llama_token_to_str(_model.NativeHandle, id), _model.Encoding));
                    }
                    string last_output = builder.ToString();
                    foreach (var antiprompt in args.Antiprompts)
                    {
                        if (last_output.EndsWith(antiprompt))
                        {
                            args.WaitForInput = true;
                            break;
                        }
                    }
                }
                if (_pastTokensCount > 0 && args.WaitForInput)
                {
                    return true;
                }
            }
            if (_embeds.Count > 0 && _embeds.Last() == NativeApi.llama_token_eos())
            {
                extraOutputs = new string[] { " [end of text]\n" };
                return true;
            }
            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                // The token budget is used up: reset it and pause for user input.
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }
            return false;
        }
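
        /// <summary>
        /// Evaluate pending tokens with the model and sample the next token.
        /// </summary>
        /// <param name="inferenceParams">The inference parameters.</param>
        /// <param name="args">The inference state.</param>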
        protected override void InferInternal(InferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                // If the context window is full, discard part of it (keeping TokensKeep tokens).
                if (_pastTokensCount + _embeds.Count > _model.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }
                // Reuse a matching prefix from the loaded session file, if any.
                TryReuseMathingPrefix();
                _pastTokensCount = _model.Eval(_embeds.ToArray(), _pastTokensCount);
                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }
            _embeds.Clear();
            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? _model.ContextSize : inferenceParams.RepeatLastTokensCount;
                // Optionally save the session on the first sample (for faster prompt loading next time).
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }
                var tokenDataArray = _model.ApplyPenalty(_last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);
                var id = _model.Sample(tokenDataArray, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP);
                _last_n_tokens.Enqueue(id);
                // Replace a sampled end-of-sequence token with a newline and re-inject the
                // first antiprompt so that the conversation continues.
                if (id == NativeApi.llama_token_eos())
                {
                    id = _llama_token_newline.First();
                    if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                    {
                        var first_antiprompt = _model.Tokenize(args.Antiprompts[0], false);
                        _embed_inps.AddRange(first_antiprompt);
                    }
                }
                _embeds.Add(id);
                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                // Consume pending input tokens in batches of at most BatchSize.
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= _model.Params.BatchSize)
                    {
                        break;
                    }
                }
            }
        }
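
        /// <summary>
        /// The serializable state of an <see cref="InteractiveExecutor"/>, as persisted
        /// by <see cref="SaveState(string)"/> and restored by <see cref="LoadState(string)"/>.
        /// </summary>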
        public class InteractiveExecutorState : ExecutorBaseState
        {
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }

            [JsonPropertyName("llama_token_newline")]
            public llama_token[] LLamaNewlineTokens { get; set; }
        }
    }
}

An easy-to-use, high-performance LLM inference framework for C#/.NET, supporting the LLaMA and LLaVA model families.
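
A minimal usage sketch of this executor follows. It sticks to members visible in this file (the `InteractiveExecutor(LLamaModel)` constructor, `InferenceParams.Temperature`, `InferenceParams.MaxTokens`); the `ModelParams` constructor and the `Infer` method inherited from `StatefulExecutorBase` are not shown here, so their exact signatures are assumptions, and the model path is a placeholder.

    using System;
    using LLama;
    using LLama.Common;

    // Placeholder model path; the ModelParams constructor and the Infer method
    // are assumed from this version of the library and may differ in detail.
    var model = new LLamaModel(new ModelParams("path/to/model.bin"));
    var executor = new InteractiveExecutor(model);
    var inferenceParams = new InferenceParams { Temperature = 0.6f, MaxTokens = 128 };
    foreach (var text in executor.Infer("User: Hello!\nBot:", inferenceParams))
    {
        Console.Write(text);
    }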