
LLamaInteractExecutor.cs

using LLama.Common;
using LLama.Native;
using LLama.Abstractions;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;
using System.Text;

namespace LLama
{
    using llama_token = Int32;

    /// <summary>
    /// The LLama executor for interactive mode.
    /// </summary>
    public class InteractiveExecutor : StatefulExecutorBase
    {
        bool _is_prompt_run = true;
        llama_token[] _llama_token_newline;

        /// <summary>
        /// Construct an interactive executor for the given context.
        /// </summary>
        /// <param name="context"></param>
        public InteractiveExecutor(LLamaContext context) : base(context)
        {
            _llama_token_newline = new[] { NativeApi.llama_token_nl(Context.NativeHandle) };
        }

        /// <inheritdoc />
        public override ExecutorBaseState GetStateData()
        {
            InteractiveExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps,
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds,
                LastTokens = _last_n_tokens.ToArray(),
                LLamaNewlineTokens = _llama_token_newline,
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens,
                LastTokensCapacity = _last_n_tokens.Capacity,
                MirostatMu = MirostatMu
            };
            return state;
        }

        /// <inheritdoc />
        public override void LoadState(ExecutorBaseState data)
        {
            if (data is InteractiveExecutorState state)
            {
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps;
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds;
                _last_n_tokens = new FixedSizeQueue<llama_token>(state.LastTokensCapacity, state.LastTokens);
                _llama_token_newline = state.LLamaNewlineTokens;
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens;
            }
            else
                throw new ArgumentException("Invalid state data type.");
        }

        /// <inheritdoc />
        public override void SaveState(string filename)
        {
            InteractiveExecutorState state = (InteractiveExecutorState)GetStateData();
            using (FileStream fs = new FileStream(filename, FileMode.OpenOrCreate, FileAccess.Write))
            {
                JsonSerializer.Serialize(fs, state);
            }
        }

        /// <inheritdoc />
        public override void LoadState(string filename)
        {
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = JsonSerializer.Deserialize<InteractiveExecutorState>(fs);
                LoadState(state);
            }
        }
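
        // Usage sketch for the persistence pair above (hypothetical caller code;
        // the file name is illustrative). SaveState snapshots the executor's
        // bookkeeping (consumed tokens, session info, Mirostat mu) as JSON, and
        // LoadState restores it into a fresh executor over the same context:
        //
        //   executor.SaveState("executor_state.json");
        //   var resumed = new InteractiveExecutor(context);
        //   resumed.LoadState("executor_state.json");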

        /// <summary>
        /// Define whether to continue the loop to generate responses.
        /// </summary>
        /// <returns></returns>
        protected override bool GetLoopCondition(InferStateArgs args)
        {
            return (args.RemainedTokens != 0 && !args.WaitForInput) || _is_prompt_run;
        }

        /// <inheritdoc />
        protected override void PreprocessInputs(string text, InferStateArgs args)
        {
            if (_is_prompt_run)
            {
                // When running the first input (prompt) in interactive mode, we should process it specially.
                _embed_inps = Context.Tokenize(text, true).ToList();
            }
            else
            {
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }
                var line_inp = Context.Tokenize(text, false);
                _embed_inps.AddRange(line_inp);
                args.RemainedTokens -= line_inp.Length;
            }
        }
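
        // Example (hypothetical input): after the prompt run, PreprocessInputs("Hi")
        // tokenizes "Hi\n" without a BOS token, appends the tokens to _embed_inps,
        // and deducts their count from args.RemainedTokens.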

        /// <summary>
        /// Return whether to break the generation.
        /// </summary>
        /// <param name="inferenceParams"></param>
        /// <param name="args"></param>
        /// <param name="extraOutputs"></param>
        /// <returns></returns>
        protected override bool PostProcess(IInferenceParams inferenceParams, InferStateArgs args, out IEnumerable<string>? extraOutputs)
        {
            extraOutputs = null;
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                {
                    // Decode the recent tokens and pause generation if the text ends with any antiprompt.
                    var last_output_builder = new StringBuilder();
                    foreach (var token in _last_n_tokens)
                        Context.NativeHandle.TokenToString(token, Context.Encoding, last_output_builder);
                    var last_output = last_output_builder.ToString();

                    foreach (var antiprompt in args.Antiprompts)
                    {
                        if (last_output.EndsWith(antiprompt))
                        {
                            args.WaitForInput = true;
                            break;
                        }
                    }
                }

                if (_pastTokensCount > 0 && args.WaitForInput)
                {
                    return true;
                }
            }

            if (_embeds.Count > 0 && _embeds.Last() == NativeApi.llama_token_eos(Context.NativeHandle))
            {
                extraOutputs = new[] { " [end of text]\n" };
                return true;
            }

            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }
            return false;
        }
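
        // Example of the antiprompt check above (hypothetical value): with
        // args.Antiprompts = { "User:" }, generation pauses as soon as the decoded
        // tail of _last_n_tokens ends with "User:", handing control back to the
        // caller for the next user message.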

        /// <inheritdoc />
        protected override void InferInternal(IInferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                if (_pastTokensCount + _embeds.Count > Context.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }

                TryReuseMathingPrefix();
                _pastTokensCount = Context.Eval(_embeds, _pastTokensCount);

                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }

            _embeds.Clear();

            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? Context.ContextSize : inferenceParams.RepeatLastTokensCount;

                // optionally save the session on first sample (for faster prompt loading next time)
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }

                var tokenDataArray = Context.ApplyPenalty(_last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                var mu = MirostatMu;
                var id = Context.Sample(
                    tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP,
                    inferenceParams.Grammar
                );
                MirostatMu = mu;

                _last_n_tokens.Enqueue(id);

                if (id == NativeApi.llama_token_eos(Context.NativeHandle))
                {
                    // Replace EOS with a newline and queue the first antiprompt so the dialogue can continue.
                    id = _llama_token_newline.First();
                    if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                    {
                        var first_antiprompt = Context.Tokenize(args.Antiprompts[0], false);
                        _embed_inps.AddRange(first_antiprompt);
                    }
                }

                _embeds.Add(id);

                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                // Feed pending prompt tokens to the model, at most BatchSize at a time.
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= Context.Params.BatchSize)
                    {
                        break;
                    }
                }
            }
        }

        /// <summary>
        /// The descriptor of the state of the interactive executor.
        /// </summary>
        public class InteractiveExecutorState : ExecutorBaseState
        {
            /// <summary>
            /// Whether the executor is running for the first time (running the prompt).
            /// </summary>
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }

            /// <summary>
            /// Tokens that represent a new line with the current model.
            /// </summary>
            [JsonPropertyName("llama_token_newline")]
            public llama_token[] LLamaNewlineTokens { get; set; }
        }
    }
}
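
For reference, a minimal sketch of driving this executor from caller code, modeled on the LLamaSharp chat examples of the same era. The model path, prompt, and parameter values are placeholders, and the surrounding APIs (ModelParams, LLamaWeights, CreateContext, and the streaming InferAsync inherited from StatefulExecutorBase) are assumed to match this version of the library.

using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using LLama;
using LLama.Common;

public class Program
{
    public static async Task Main()
    {
        // Hypothetical model path; any GGUF model supported by this LLamaSharp version.
        var parameters = new ModelParams("models/model.gguf");
        using var model = LLamaWeights.LoadFromFile(parameters);
        using var context = model.CreateContext(parameters);
        var executor = new InteractiveExecutor(context);

        var inferenceParams = new InferenceParams
        {
            Temperature = 0.6f,
            MaxTokens = 256,
            // Generation pauses when the output ends with this marker (see PostProcess above).
            AntiPrompts = new List<string> { "User:" }
        };

        var prompt = "Transcript of a dialog between a User and Bob.\nUser: Hello, Bob.\nBob:";
        while (true)
        {
            // InferAsync streams generated text until PostProcess signals a break,
            // then the loop waits for the next user input.
            await foreach (var text in executor.InferAsync(prompt, inferenceParams))
                Console.Write(text);
            prompt = Console.ReadLine() ?? "";
        }
    }
}

Because the executor is stateful, each call to InferAsync continues the same conversation: the prompt run happens once, and later inputs are appended via PreprocessInputs above.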