
LLamaInstructExecutor.cs

using LLama.Common;
using LLama.Native;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace LLama
{
    using llama_token = Int32;

    /// <summary>
    /// The executor for instruct-tuned models. After the first prompt run,
    /// every user input is wrapped with instruction prefix/suffix markers
    /// before being fed to the model.
    /// </summary>
    public class InstructExecutor : StatefulExecutorBase
    {
        bool _is_prompt_run = true;
        // Tokenized instruction prefix and suffix, computed once in the constructor.
        llama_token[] _inp_pfx;
        llama_token[] _inp_sfx;

        public InstructExecutor(LLamaModel model, string inputPrefix = "\n\n### Instruction:\n\n",
            string inputSuffix = "\n\n### Response:\n\n") : base(model)
        {
            _inp_pfx = _model.Tokenize(inputPrefix, true).ToArray();
            _inp_sfx = _model.Tokenize(inputSuffix, false).ToArray();
        }
        public override void SaveState(string filename)
        {
            // Snapshot every piece of mutable executor state into a serializable object.
            InstructExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps,
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds,
                LastTokens = _last_n_tokens.ToArray(),
                InputPrefixTokens = _inp_pfx,
                InputSuffixTokens = _inp_sfx,
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens,
                LastTokensCapacity = _last_n_tokens.Capacity
            };
            using (FileStream fs = new FileStream(filename, FileMode.OpenOrCreate, FileAccess.Write))
            {
                JsonSerializer.Serialize<InstructExecutorState>(fs, state);
            }
        }

        public override void LoadState(string filename)
        {
            using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = JsonSerializer.Deserialize<InstructExecutorState>(fs);
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps;
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds;
                _last_n_tokens = new FixedSizeQueue<llama_token>(state.LastTokensCapacity, state.LastTokens);
                _inp_pfx = state.InputPrefixTokens;
                _inp_sfx = state.InputSuffixTokens;
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens;
            }
        }
        protected override bool GetLoopCondition(InferStateArgs args)
        {
            // Keep generating while tokens remain in the budget, or when the initial prompt has not run yet.
            return args.RemainedTokens != 0 || _is_prompt_run;
        }

        protected override void PreprocessInputs(string text, InferStateArgs args)
        {
            if (_is_prompt_run)
            {
                // When running the first input (the prompt), it needs special handling:
                // prepend a space and tokenize it with the BOS token.
                text = " " + text;
                _embed_inps = _model.Tokenize(text, true).ToList();
            }
            else
            {
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }
                // Wrap the user input with the instruction prefix and suffix tokens.
                _consumedTokensCount = _embed_inps.Count;
                _embed_inps.AddRange(_inp_pfx);
                var line_inp = _model.Tokenize(text, false);
                _embed_inps.AddRange(line_inp);
                _embed_inps.AddRange(_inp_sfx);
                args.RemainedTokens -= line_inp.Count();
            }
        }
        protected override bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, out IEnumerable<string>? extraOutputs)
        {
            extraOutputs = null;
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                // If the recent output ends with an antiprompt, stop and wait for user input.
                if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                {
                    string last_output = "";
                    foreach (var id in _last_n_tokens)
                    {
                        last_output += Utils.PtrToString(NativeApi.llama_token_to_str(_model.NativeHandle, id), _model.Encoding);
                    }
                    foreach (var antiprompt in args.Antiprompts)
                    {
                        if (last_output.EndsWith(antiprompt))
                        {
                            args.WaitForInput = true;
                            return true;
                        }
                    }
                }
                if (_pastTokensCount > 0 && args.WaitForInput)
                {
                    extraOutputs = new string[] { "\n> " };
                    return true;
                }
            }
            // An end-of-sequence token also hands control back to the user.
            if (_embeds.Count > 0 && _embeds.Last() == NativeApi.llama_token_eos())
            {
                args.WaitForInput = true;
            }
            // When the token budget runs out, reset it and pause for the next input.
            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }
            return false;
        }
        protected override void InferInternal(InferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                // If the context window is full, drop part of the history to make room.
                if (_pastTokensCount + _embeds.Count > _model.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }
                TryReuseMathingPrefix();
                _pastTokensCount = _model.Eval(_embeds.ToArray(), _pastTokensCount);
                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }
            _embeds.Clear();
            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? _model.ContextSize : inferenceParams.RepeatLastTokensCount;
                // Optionally save the session on the first sample (for faster prompt loading next time).
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }
                // Apply repetition/frequency/presence penalties, then sample the next token.
                var tokenDataArray = _model.ApplyPenalty(_last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                    inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);
                var id = _model.Sample(tokenDataArray, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                    inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP);
                _last_n_tokens.Enqueue(id);
                _embeds.Add(id);
                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                // Feed pending prompt tokens to the model, at most one batch at a time.
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= _model.Params.BatchSize)
                    {
                        break;
                    }
                }
            }
        }
        public class InstructExecutorState : ExecutorBaseState
        {
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }
            [JsonPropertyName("inp_pfx")]
            public llama_token[] InputPrefixTokens { get; set; }
            [JsonPropertyName("inp_sfx")]
            public llama_token[] InputSuffixTokens { get; set; }
        }
    }
}
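For context, here is a minimal usage sketch of this executor. It assumes the LLamaSharp API this file is written against (ModelParams, LLamaModel, InferenceParams, and the Infer method inherited from StatefulExecutorBase); the model path and parameter values are placeholders, not part of the source above.

using LLama;
using LLama.Common;
using System;

// Hypothetical model path; point this at a real ggml model file.
var model = new LLamaModel(new ModelParams("models/your-model.ggml.bin", contextSize: 1024));
var executor = new InstructExecutor(model);
var inferenceParams = new InferenceParams() { Temperature = 0.8f, MaxTokens = 256 };

// After the first prompt run, each input is wrapped with the
// "### Instruction:" / "### Response:" markers before inference.
foreach (var output in executor.Infer("Write a haiku about the sea.", inferenceParams))
{
    Console.Write(output);
}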

An easy-to-use, high-performance LLM inference framework for C#/.NET, supporting the LLaMA and LLaVA families of models.
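Because the executor is stateful, a session can also be checkpointed to disk and restored later through the SaveState/LoadState pair defined above. A short sketch, reusing the executor and model from the previous example (the file name is arbitrary):

// Persist the full executor state (pending tokens, session info, prefix/suffix tokens).
executor.SaveState("instruct-executor-state.json");

// Later: rebuild an executor over the same model and restore the checkpoint.
var resumed = new InstructExecutor(model);
resumed.LoadState("instruct-executor-state.json");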