
LLamaInteractExecutor.cs

using LLama.Common;
using LLama.Native;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace LLama
{
    using llama_token = Int32;

    /// <summary>
    /// The LLama executor for interactive mode.
    /// </summary>
    public class InteractiveExecutor : StatefulExecutorBase
    {
        bool _is_prompt_run = true;
        llama_token[] _llama_token_newline;

        /// <summary>
        /// Create an interactive executor that keeps conversation state across calls.
        /// </summary>
        /// <param name="model">The model to run inference with.</param>
        public InteractiveExecutor(LLamaModel model) : base(model)
        {
            _llama_token_newline = Utils.Tokenize(_model.NativeHandle, "\n", false, _model.Encoding).ToArray();
        }

        /// <inheritdoc />
        public override ExecutorBaseState GetStateData()
        {
            InteractiveExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps,
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds,
                LastTokens = _last_n_tokens.ToArray(),
                LLamaNewlineTokens = _llama_token_newline,
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens,
                LastTokensCapacity = _last_n_tokens.Capacity,
                MirostateMu = MirostateMu
            };
            return state;
        }

        /// <inheritdoc />
        public override void LoadState(ExecutorBaseState data)
        {
            if (data is InteractiveExecutorState state)
            {
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps;
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds;
                _last_n_tokens = new FixedSizeQueue<llama_token>(state.LastTokensCapacity, state.LastTokens);
                _llama_token_newline = state.LLamaNewlineTokens;
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens;
            }
            else
            {
                throw new ArgumentException("Invalid state data type.");
            }
        }

        /// <inheritdoc />
        public override void SaveState(string filename)
        {
            InteractiveExecutorState state = GetStateData() as InteractiveExecutorState;
            using (FileStream fs = new FileStream(filename, FileMode.OpenOrCreate, FileAccess.Write))
            {
                JsonSerializer.Serialize<InteractiveExecutorState>(fs, state);
            }
        }

        /// <inheritdoc />
        public override void LoadState(string filename)
        {
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = JsonSerializer.Deserialize<InteractiveExecutorState>(fs);
                LoadState(state);
            }
        }
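
        // Persistence sketch (hypothetical file name; assumes an InteractiveExecutor
        // instance named `executor` already exists):
        //     executor.SaveState("executor_state.json");  // serialize the full conversation state
        //     executor.LoadState("executor_state.json");  // restore it later and continue the dialog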

        /// <summary>
        /// Decide whether to continue the loop that generates responses.
        /// </summary>
        /// <returns><c>true</c> to keep generating; <c>false</c> to stop and wait for input.</returns>
        protected override bool GetLoopCondition(InferStateArgs args)
        {
            return args.RemainedTokens != 0 && !args.WaitForInput || _is_prompt_run;
        }

        /// <inheritdoc />
        protected override void PreprocessInputs(string text, InferStateArgs args)
        {
            if (_is_prompt_run)
            {
                // The first input (the prompt) in interactive mode needs special handling:
                // prepend a space and tokenize with the BOS token.
                text = " " + text;
                _embed_inps = _model.Tokenize(text, true).ToList();
            }
            else
            {
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }
                var line_inp = _model.Tokenize(text, false);
                _embed_inps.AddRange(line_inp);
                args.RemainedTokens -= line_inp.Count();
            }
        }

        /// <summary>
        /// Return whether to break the generation.
        /// </summary>
        /// <param name="inferenceParams">The parameters used for this inference run.</param>
        /// <param name="args">The inference state arguments.</param>
        /// <param name="extraOutputs">Extra text to emit to the caller, if any.</param>
        /// <returns><c>true</c> to break the generation; otherwise <c>false</c>.</returns>
        protected override bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, out IEnumerable<string>? extraOutputs)
        {
            extraOutputs = null;
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                {
                    // Detokenize the recent history and check whether it ends with an
                    // antiprompt; if so, stop and hand control back to the user.
                    string last_output = "";
                    foreach (var id in _last_n_tokens)
                    {
                        last_output += Utils.PtrToString(NativeApi.llama_token_to_str(_model.NativeHandle, id), _model.Encoding);
                    }

                    foreach (var antiprompt in args.Antiprompts)
                    {
                        if (last_output.EndsWith(antiprompt))
                        {
                            args.WaitForInput = true;
                            break;
                        }
                    }
                }

                if (_pastTokensCount > 0 && args.WaitForInput)
                {
                    return true;
                }
            }

            // An end-of-sequence token terminates the current response.
            if (_embeds.Count > 0 && _embeds.Last() == NativeApi.llama_token_eos())
            {
                extraOutputs = new string[] { " [end of text]\n" };
                return true;
            }

            // Token budget exhausted: refill it and pause for the next user input.
            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }
            return false;
        }

        /// <inheritdoc />
        protected override void InferInternal(InferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                if (_pastTokensCount + _embeds.Count > _model.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }

                TryReuseMathingPrefix();
                _pastTokensCount = _model.Eval(_embeds.ToArray(), _pastTokensCount);

                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }

            _embeds.Clear();

            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? _model.ContextSize : inferenceParams.RepeatLastTokensCount;

                // Optionally save the session on the first sample (for faster prompt loading next time).
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }

                var tokenDataArray = _model.ApplyPenalty(_last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                var mu = MirostateMu;
                var id = _model.Sample(
                    tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP
                );
                MirostateMu = mu;

                _last_n_tokens.Enqueue(id);

                if (id == NativeApi.llama_token_eos())
                {
                    // Replace EOS with a newline and inject the first antiprompt so the
                    // conversation continues in the expected turn format.
                    id = _llama_token_newline.First();
                    if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                    {
                        var first_antiprompt = _model.Tokenize(args.Antiprompts[0], false);
                        _embed_inps.AddRange(first_antiprompt);
                    }
                }

                _embeds.Add(id);
                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                // Still consuming user/prompt tokens: feed them through in batches.
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= _model.Params.BatchSize)
                    {
                        break;
                    }
                }
            }
        }

        /// <summary>
        /// The descriptor of the state of the interactive executor.
        /// </summary>
        public class InteractiveExecutorState : ExecutorBaseState
        {
            /// <summary>
            /// Whether the executor is running for the first time (running the prompt).
            /// </summary>
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }

            /// <summary>
            /// Tokens that represent a new line with the current model.
            /// </summary>
            [JsonPropertyName("llama_token_newline")]
            public llama_token[] LLamaNewlineTokens { get; set; }
        }
    }
}
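
A minimal usage sketch, separate from the file above. It assumes the API surface implied by this source: LLamaModel constructed from a ModelParams, and the StatefulExecutorBase base class exposing an Infer(text, inferenceParams) method that streams generated text pieces. The model path and prompt are placeholders.

using System;
using System.Collections.Generic;
using LLama;
using LLama.Common;

// Sketch only: "model.bin" is a placeholder path, and Infer(...) is assumed
// to be inherited from StatefulExecutorBase and to yield text pieces.
var model = new LLamaModel(new ModelParams("model.bin"));
var executor = new InteractiveExecutor(model);

var inferenceParams = new InferenceParams
{
    Temperature = 0.8f,
    // Stop at each "User:" turn so control returns to the caller for new input.
    AntiPrompts = new List<string> { "User:" }
};

var prompt = "Transcript of a dialog between a User and an Assistant.\nUser: Hello!\nAssistant:";
foreach (var piece in executor.Infer(prompt, inferenceParams))
{
    Console.Write(piece);
}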