
LLamaInteractExecutor.cs

using LLama.Common;
using LLama.Native;
using LLama.Abstractions;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace LLama
{
    using llama_token = Int32;

    /// <summary>
    /// The LLama executor for interactive mode.
    /// </summary>
    public class InteractiveExecutor : StatefulExecutorBase
    {
        bool _is_prompt_run = true;
        llama_token[] _llama_token_newline;

        /// <summary>
        /// Create an executor for interactive mode with the given model.
        /// </summary>
        /// <param name="model">The model to run inference with.</param>
        public InteractiveExecutor(LLamaModel model) : base(model)
        {
            _llama_token_newline = _model.NativeHandle.Tokenize("\n", false, _model.Encoding);
        }
        /// <inheritdoc />
        public override ExecutorBaseState GetStateData()
        {
            InteractiveExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps,
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds,
                LastTokens = _last_n_tokens.ToArray(),
                LLamaNewlineTokens = _llama_token_newline,
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens,
                LastTokensCapacity = _last_n_tokens.Capacity,
                MirostatMu = MirostatMu
            };
            return state;
        }
        /// <inheritdoc />
        public override void LoadState(ExecutorBaseState data)
        {
            if (data is InteractiveExecutorState state)
            {
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps;
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds;
                _last_n_tokens = new FixedSizeQueue<llama_token>(state.LastTokensCapacity, state.LastTokens);
                _llama_token_newline = state.LLamaNewlineTokens;
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens;
            }
            else
                throw new ArgumentException("Invalid state data type.");
        }
        /// <inheritdoc />
        public override void SaveState(string filename)
        {
            InteractiveExecutorState state = GetStateData() as InteractiveExecutorState;
            using (FileStream fs = new FileStream(filename, FileMode.OpenOrCreate, FileAccess.Write))
            {
                JsonSerializer.Serialize<InteractiveExecutorState>(fs, state);
            }
        }

        /// <inheritdoc />
        public override void LoadState(string filename)
        {
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = JsonSerializer.Deserialize<InteractiveExecutorState>(fs);
                LoadState(state);
            }
        }
        /// <summary>
        /// Define whether to continue the loop to generate responses.
        /// </summary>
        /// <returns></returns>
        protected override bool GetLoopCondition(InferStateArgs args)
        {
            return args.RemainedTokens != 0 && !args.WaitForInput || _is_prompt_run;
        }
        /// <inheritdoc />
        protected override void PreprocessInputs(string text, InferStateArgs args)
        {
            if (_is_prompt_run)
            {
                // When running the first input (prompt) in interactive mode, we should process it specially.
                text = " " + text;
                _embed_inps = _model.Tokenize(text, true).ToList();
            }
            else
            {
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }
                var line_inp = _model.Tokenize(text, false);
                _embed_inps.AddRange(line_inp);
                args.RemainedTokens -= line_inp.Length;
            }
        }
        /// <summary>
        /// Return whether to break the generation.
        /// </summary>
        /// <param name="inferenceParams"></param>
        /// <param name="args"></param>
        /// <param name="extraOutputs"></param>
        /// <returns></returns>
        protected override bool PostProcess(IInferenceParams inferenceParams, InferStateArgs args, out IEnumerable<string>? extraOutputs)
        {
            extraOutputs = null;
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                {
                    string last_output = "";
                    foreach (var id in _last_n_tokens)
                    {
                        last_output += _model.NativeHandle.TokenToString(id, _model.Encoding);
                    }
                    foreach (var antiprompt in args.Antiprompts)
                    {
                        if (last_output.EndsWith(antiprompt))
                        {
                            args.WaitForInput = true;
                            break;
                        }
                    }
                }

                if (_pastTokensCount > 0 && args.WaitForInput)
                {
                    return true;
                }
            }

            if (_embeds.Count > 0 && _embeds.Last() == NativeApi.llama_token_eos())
            {
                extraOutputs = new string[] { " [end of text]\n" };
                return true;
            }

            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }
            return false;
        }
        /// <inheritdoc />
        protected override void InferInternal(IInferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                if (_pastTokensCount + _embeds.Count > _model.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }

                TryReuseMathingPrefix();
                _pastTokensCount = _model.Eval(_embeds.ToArray(), _pastTokensCount);

                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }

            _embeds.Clear();

            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? _model.ContextSize : inferenceParams.RepeatLastTokensCount;

                // optionally save the session on first sample (for faster prompt loading next time)
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }

                var tokenDataArray = _model.ApplyPenalty(_last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                var mu = MirostatMu;
                var id = _model.Sample(
                    tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP
                );
                MirostatMu = mu;

                _last_n_tokens.Enqueue(id);

                if (id == NativeApi.llama_token_eos())
                {
                    id = _llama_token_newline.First();
                    if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                    {
                        var first_antiprompt = _model.Tokenize(args.Antiprompts[0], false);
                        _embed_inps.AddRange(first_antiprompt);
                    }
                }

                _embeds.Add(id);

                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= _model.Params.BatchSize)
                    {
                        break;
                    }
                }
            }
        }
        /// <summary>
        /// The descriptor of the state of the interactive executor.
        /// </summary>
        public class InteractiveExecutorState : ExecutorBaseState
        {
            /// <summary>
            /// Whether the executor is running for the first time (running the prompt).
            /// </summary>
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }

            /// <summary>
            /// Tokens that represent a new line with the current model.
            /// </summary>
            [JsonPropertyName("llama_token_newline")]
            public llama_token[] LLamaNewlineTokens { get; set; }
        }
    }
}
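
A minimal usage sketch follows, assuming the surrounding LLamaSharp-style API: a LLamaModel constructed from ModelParams, an InferenceParams type with Temperature, MaxTokens, and AntiPrompts, and a synchronous Infer(text, inferenceParams) method inherited from StatefulExecutorBase. The model path, prompt, and antiprompt are placeholder values, not anything defined in this file.

// Hypothetical usage sketch; the path, prompt, and antiprompt below are placeholders.
using System;
using System.Collections.Generic;
using LLama;
using LLama.Common;

class Program
{
    static void Main()
    {
        // Load a model (hypothetical path) and wrap it in the interactive executor.
        var model = new LLamaModel(new ModelParams("path/to/model.bin"));
        var executor = new InteractiveExecutor(model);

        var inferenceParams = new InferenceParams
        {
            Temperature = 0.8f,
            MaxTokens = 128,
            AntiPrompts = new List<string> { "User:" } // pause generation and wait for the next input
        };

        // The first call processes the prompt; later calls append the new input and continue the dialogue.
        foreach (var token in executor.Infer("User: Hello, who are you?\nBot:", inferenceParams))
        {
            Console.Write(token);
        }
    }
}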