
LLamaInstructExecutor.cs

using LLama.Abstractions;
using LLama.Common;
using LLama.Native;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace LLama
{
    using llama_token = Int32;

    /// <summary>
    /// The LLama executor for instruct mode.
    /// </summary>
    public class InstructExecutor : StatefulExecutorBase
    {
        bool _is_prompt_run = true;     // true until the first (prompt) run has been processed
        string _instructionPrefix;
        llama_token[] _inp_pfx;         // tokenized instruction prefix
        llama_token[] _inp_sfx;         // tokenized instruction suffix

        /// <summary>
        /// Creates an instruct-mode executor that wraps each input with the given
        /// instruction prefix and suffix.
        /// </summary>
        /// <param name="model">The model to run inference on.</param>
        /// <param name="instructionPrefix">Text placed before each instruction (the default follows the Alpaca template).</param>
        /// <param name="instructionSuffix">Text appended after each instruction.</param>
        public InstructExecutor(LLamaModel model, string instructionPrefix = "\n\n### Instruction:\n\n",
            string instructionSuffix = "\n\n### Response:\n\n") : base(model)
        {
            _inp_pfx = _model.Tokenize(instructionPrefix, true).ToArray();
            _inp_sfx = _model.Tokenize(instructionSuffix, false).ToArray();
            _instructionPrefix = instructionPrefix;
        }
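
        // Note (editorial): the prefix is tokenized with the second argument set to
        // true (which appears to prepend a BOS token) and the suffix with false;
        // both token arrays are then reused verbatim for every follow-up input in
        // PreprocessInputs below.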

        /// <inheritdoc />
        public override ExecutorBaseState GetStateData()
        {
            InstructExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps,
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds,
                LastTokens = _last_n_tokens.ToArray(),
                InputPrefixTokens = _inp_pfx,
                InputSuffixTokens = _inp_sfx,
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens,
                LastTokensCapacity = _last_n_tokens.Capacity,
                MirostatMu = MirostatMu
            };
            return state;
        }

        /// <inheritdoc />
        public override void LoadState(ExecutorBaseState data)
        {
            if (data is InstructExecutorState state)
            {
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps;
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds;
                _last_n_tokens = new FixedSizeQueue<llama_token>(state.LastTokensCapacity, state.LastTokens);
                _inp_pfx = state.InputPrefixTokens;
                _inp_sfx = state.InputSuffixTokens;
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens;
            }
            else
            {
                throw new ArgumentException("Invalid state data type.");
            }
        }

        /// <inheritdoc />
        public override void SaveState(string filename)
        {
            InstructExecutorState state = GetStateData() as InstructExecutorState;
            using (FileStream fs = new FileStream(filename, FileMode.OpenOrCreate, FileAccess.Write))
            {
                JsonSerializer.Serialize<InstructExecutorState>(fs, state);
            }
        }

        /// <inheritdoc />
        public override void LoadState(string filename)
        {
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = JsonSerializer.Deserialize<InstructExecutorState>(fs);
                LoadState(state);
            }
        }

        /// <inheritdoc />
        protected override bool GetLoopCondition(InferStateArgs args)
        {
            return args.RemainedTokens != 0 || _is_prompt_run;
        }

        /// <inheritdoc />
        protected override void PreprocessInputs(string text, InferStateArgs args)
        {
            if (args.Antiprompts is null)
            {
                args.Antiprompts = new List<string>();
            }
            args.Antiprompts.Add(_instructionPrefix);

            if (_is_prompt_run)
            {
                // The first input (the prompt) is processed specially: prepend a space
                // and tokenize it with a BOS token.
                text = " " + text;
                _embed_inps = _model.Tokenize(text, true).ToList();
            }
            else
            {
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }
                _consumedTokensCount = _embed_inps.Count;
                _embed_inps.AddRange(_inp_pfx);
                var line_inp = _model.Tokenize(text, false);
                _embed_inps.AddRange(line_inp);
                _embed_inps.AddRange(_inp_sfx);
                args.RemainedTokens -= line_inp.Count();
            }
        }
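
        // Illustrative note (not in the original source): with the default
        // prefix/suffix, a follow-up input such as "Explain BOS tokens" is
        // appended to _embed_inps as the tokens of:
        //
        //   \n\n### Instruction:\n\nExplain BOS tokens\n\n### Response:\n\n
        //
        // so every turn reaches the model in the same instruct template.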

        /// <inheritdoc />
        protected override bool PostProcess(IInferenceParams inferenceParams, InferStateArgs args, out IEnumerable<string>? extraOutputs)
        {
            extraOutputs = null;
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                {
                    string last_output = "";
                    foreach (var id in _last_n_tokens)
                    {
                        last_output += Utils.PtrToString(NativeApi.llama_token_to_str(_model.NativeHandle, id), _model.Encoding);
                    }

                    foreach (var antiprompt in args.Antiprompts)
                    {
                        if (last_output.EndsWith(antiprompt))
                        {
                            args.WaitForInput = true;
                            return true;
                        }
                    }
                }

                if (_pastTokensCount > 0 && args.WaitForInput)
                {
                    extraOutputs = new string[] { "\n> " };
                    return true;
                }
            }

            if (_embeds.Count > 0 && _embeds.Last() == NativeApi.llama_token_eos())
            {
                args.WaitForInput = true;
            }

            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }
            return false;
        }

        /// <inheritdoc />
        protected override void InferInternal(IInferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                if (_pastTokensCount + _embeds.Count > _model.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }

                TryReuseMathingPrefix();
                _pastTokensCount = _model.Eval(_embeds.ToArray(), _pastTokensCount);

                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }

            _embeds.Clear();

            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? _model.ContextSize : inferenceParams.RepeatLastTokensCount;

                // optionally save the session on first sample (for faster prompt loading next time)
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }

                var tokenDataArray = _model.ApplyPenalty(_last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                var mu = MirostatMu;
                var id = _model.Sample(
                    tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP
                );
                MirostatMu = mu;

                _last_n_tokens.Enqueue(id);
                _embeds.Add(id);

                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= _model.Params.BatchSize)
                    {
                        break;
                    }
                }
            }
        }
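
        // In short: each InferInternal call either feeds pending prompt/template
        // tokens to the model in batches of up to Params.BatchSize, or, once the
        // input is fully consumed, applies repetition penalties and samples a
        // single new token.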

        /// <summary>
        /// The descriptor of the state of the instruct executor.
        /// </summary>
        public class InstructExecutorState : ExecutorBaseState
        {
            /// <summary>
            /// Whether the executor is running for the first time (running the prompt).
            /// </summary>
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }

            /// <summary>
            /// Instruction prefix tokens.
            /// </summary>
            [JsonPropertyName("inp_pfx")]
            public llama_token[] InputPrefixTokens { get; set; }

            /// <summary>
            /// Instruction suffix tokens.
            /// </summary>
            [JsonPropertyName("inp_sfx")]
            public llama_token[] InputSuffixTokens { get; set; }
        }
    }
}
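
For context, a minimal usage sketch follows. It is not part of the file above: the model path and prompt are placeholders, and it assumes the LLamaModel/ModelParams/InferenceParams API from LLama.Common that this executor targets.

using System;
using LLama;
using LLama.Common;

// Hypothetical example: load a model, wrap it in an InstructExecutor, and
// stream a response. "<path-to-model.bin>" is a placeholder path.
var model = new LLamaModel(new ModelParams("<path-to-model.bin>", contextSize: 1024));
var executor = new InstructExecutor(model);

// Infer wraps the text in the "### Instruction:" / "### Response:" template
// and yields generated text piece by piece.
foreach (var piece in executor.Infer("Write a haiku about autumn.",
             new InferenceParams { Temperature = 0.8f, MaxTokens = 128 }))
{
    Console.Write(piece);
}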