
LLamaInstructExecutor.cs 11 kB

April 2024 Binary Update (#662)

* Updated binaries, using [this build](https://github.com/SciSharp/LLamaSharp/actions/runs/8654672719/job/23733195669) for llama.cpp commit `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
  - Added all new functions.
  - Moved some functions (e.g. `SafeLlamaModelHandle` specific functions) into `SafeLlamaModelHandle.cs`.
  - Exposed tokens on `SafeLlamaModelHandle` and `LLamaWeights` through a `Tokens` property. As new special tokens are added in the future they can be added here.
  - Changed all token properties to return nullable tokens, to handle some models not having some tokens.
  - Fixed `DefaultSamplingPipeline` to handle no newline token in some models.
* Moved native methods to more specific locations.
  - Context specific things have been moved into `SafeLLamaContextHandle.cs` and made private - they're exposed through C# properties and methods already.
  - Checking that GPU layer count is zero if GPU offload is not supported.
  - Moved methods for creating default structs (`llama_model_quantize_default_params` and `llama_context_default_params`) into relevant structs.
* Removed exception if `GpuLayerCount > 0` when GPU is not supported.
* Added low level wrapper methods for new per-sequence state load/save in `SafeLLamaContextHandle`.
  - Added high level wrapper methods (save/load with `State` object or memory mapped file) in `LLamaContext`.
  - Moved native methods for per-sequence state load/save into `SafeLLamaContextHandle`.
* Added update and defrag methods for KV cache in `SafeLLamaContextHandle`.
* Updated submodule to `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
* Passing the sequence ID when saving a single sequence state.
1 year ago
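
The commit above mentions new high-level wrappers on `LLamaContext` for saving and loading context state. As a rough sketch of how whole-context state persistence is driven from C# (the model path is a placeholder, and the per-sequence overloads added in this commit may take different arguments than shown here):

using LLama;
using LLama.Common;

// Placeholder model path - any GGUF model file works here.
var parameters = new ModelParams("models/model.gguf") { ContextSize = 1024 };
using var weights = LLamaWeights.LoadFromFile(parameters);
using var context = weights.CreateContext(parameters);

// Save the entire context state to disk and restore it later.
// The per-sequence save/load described in the commit is a separate, lower-level API.
context.SaveState("context_state.bin");
context.LoadState("context_state.bin");
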
using LLama.Abstractions;
using LLama.Common;
using LLama.Native;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;
using System.Threading.Tasks;
using LLama.Exceptions;
using LLama.Extensions;
using Microsoft.Extensions.Logging;

namespace LLama
{
    /// <summary>
    /// The LLama executor for instruct mode.
    /// </summary>
    public class InstructExecutor
        : StatefulExecutorBase
    {
        private bool _is_prompt_run = true;
        private readonly string _instructionPrefix;
        private LLamaToken[] _inp_pfx;
        private LLamaToken[] _inp_sfx;

        /// <summary>
        /// Create an executor which runs in instruct mode, wrapping each input in the instruction prefix and suffix.
        /// </summary>
        /// <param name="context">The context to run inference with.</param>
        /// <param name="instructionPrefix">Text prepended to each instruction (also used as an antiprompt).</param>
        /// <param name="instructionSuffix">Text appended after each instruction.</param>
        /// <param name="logger">Optional logger.</param>
        public InstructExecutor(LLamaContext context,
                                string instructionPrefix = "\n\n### Instruction:\n\n",
                                string instructionSuffix = "\n\n### Response:\n\n",
                                ILogger? logger = null)
            : base(context, logger)
        {
            _inp_pfx = Context.Tokenize(instructionPrefix, true, true);
            _inp_sfx = Context.Tokenize(instructionSuffix, false, true);
            _instructionPrefix = instructionPrefix;
        }

        /// <inheritdoc />
        public override ExecutorBaseState GetStateData()
        {
            InstructExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps.ToArray(),
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds.ToArray(),
                LastTokens = _last_n_tokens.ToArray(),
                InputPrefixTokens = _inp_pfx,
                InputSuffixTokens = _inp_sfx,
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens.ToArray(),
                LastTokensCapacity = _last_n_tokens.Capacity,
                MirostatMu = MirostatMu
            };
            return state;
        }

        /// <inheritdoc />
        public override Task LoadState(ExecutorBaseState data)
        {
            if (data is InstructExecutorState state)
            {
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps.ToList();
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds.ToList();
                _last_n_tokens = new FixedSizeQueue<LLamaToken>(state.LastTokensCapacity, state.LastTokens);
                _inp_pfx = state.InputPrefixTokens;
                _inp_sfx = state.InputSuffixTokens;
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens.ToList();
            }
            else
            {
                throw new ArgumentException("Invalid state data type.");
            }
            return Task.CompletedTask;
        }

        /// <inheritdoc />
        public override async Task SaveState(string filename)
        {
            var state = (InstructExecutorState)GetStateData();
            using (var fs = new FileStream(filename, FileMode.Create, FileAccess.Write))
            {
                await JsonSerializer.SerializeAsync(fs, state);
            }
        }

        /// <inheritdoc />
        public override async Task LoadState(string filename)
        {
            using (var fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = await JsonSerializer.DeserializeAsync<InstructExecutorState>(fs);
                await LoadState(state);
            }
        }

        /// <inheritdoc />
        protected override Task<bool> GetLoopCondition(InferStateArgs args)
        {
            return Task.FromResult(args.RemainedTokens != 0 || _is_prompt_run);
        }

        /// <inheritdoc />
        protected override Task PreprocessInputs(string text, InferStateArgs args)
        {
            args.Antiprompts ??= new List<string>();
            args.Antiprompts.Add(_instructionPrefix);
            if (_is_prompt_run)
            {
                // When running the first input (the prompt), it needs to be processed specially.
                _embed_inps = Context.Tokenize(text, true, true).ToList();
            }
            else
            {
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }
                _consumedTokensCount = _embed_inps.Count;
                _embed_inps.AddRange(_inp_pfx);

                var line_inp = Context.Tokenize(text, false, true);
                _embed_inps.AddRange(line_inp);

                _embed_inps.AddRange(_inp_sfx);

                args.RemainedTokens -= line_inp.Length;
            }
            return Task.CompletedTask;
        }

        /// <inheritdoc />
        protected override async Task<(bool, IReadOnlyList<string>)> PostProcess(IInferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                if (_last_n_tokens.TokensEndsWithAnyString(args.Antiprompts, Context.NativeHandle.ModelHandle, Context.Encoding))
                {
                    args.WaitForInput = true;
                    return (true, Array.Empty<string>());
                }

                if (_pastTokensCount > 0 && args.WaitForInput)
                {
                    return (true, new[] { "\n> " });
                }
            }

            if (_embeds.Count > 0 && _embeds.Last() == Context.NativeHandle.ModelHandle.Tokens.EOS)
            {
                args.WaitForInput = true;
            }

            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }
            return (false, Array.Empty<string>());
        }

        /// <inheritdoc />
        protected override Task InferInternal(IInferenceParams inferenceParams, InferStateArgs args)
        {
            var batch = new LLamaBatch();

            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                if (_pastTokensCount + _embeds.Count > Context.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }

                TryReuseMathingPrefix();

                var (result, _) = Context.NativeHandle.Decode(_embeds, LLamaSeqId.Zero, batch, ref _pastTokensCount);
                if (result != DecodeResult.Ok)
                    throw new LLamaDecodeError(result);

                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }

            _embeds.Clear();

            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? (int)Context.ContextSize : inferenceParams.RepeatLastTokensCount;

                // optionally save the session on first sample (for faster prompt loading next time)
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }

                LLamaToken id;
                if (inferenceParams.SamplingPipeline is not null)
                {
                    id = inferenceParams.SamplingPipeline.Sample(Context.NativeHandle, Context.NativeHandle.GetLogitsIth(batch.TokenCount - 1), _last_n_tokens.ToArray());
                    inferenceParams.SamplingPipeline.Accept(Context.NativeHandle, id);
                }
                else
                {
                    var tokenDataArray = Context.ApplyPenalty(batch.TokenCount - 1, _last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                        inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                    var mu = MirostatMu;
                    id = Context.Sample(
                        tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                        inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP, inferenceParams.Grammar,
                        inferenceParams.MinP
                    );
                    MirostatMu = mu;
                }

                _last_n_tokens.Enqueue(id);
                _embeds.Add(id);

                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= Context.Params.BatchSize)
                    {
                        break;
                    }
                }
            }

            return Task.CompletedTask;
        }

        /// <summary>
        /// The descriptor of the state of the instruct executor.
        /// </summary>
        public class InstructExecutorState : ExecutorBaseState
        {
            /// <summary>
            /// Whether the executor is running for the first time (running the prompt).
            /// </summary>
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }

            /// <summary>
            /// Instruction prefix tokens.
            /// </summary>
            [JsonPropertyName("inp_pfx")]
            public LLamaToken[] InputPrefixTokens { get; set; }

            /// <summary>
            /// Instruction suffix tokens.
            /// </summary>
            [JsonPropertyName("inp_sfx")]
            public LLamaToken[] InputSuffixTokens { get; set; }
        }
    }
}
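
For orientation, here is a minimal usage sketch of the executor defined above (not part of the file itself); the model path and prompt are placeholders, and the surrounding high-level API (`ModelParams`, `LLamaWeights`, `InferAsync`) follows the usual LLamaSharp pattern from this period:

using System;
using LLama;
using LLama.Common;

// Placeholder model path.
var parameters = new ModelParams("models/model.gguf") { ContextSize = 2048 };
using var weights = LLamaWeights.LoadFromFile(parameters);
using var context = weights.CreateContext(parameters);

var executor = new InstructExecutor(context);

// The first call processes the prompt; later calls are wrapped in the
// instruction prefix/suffix configured in the constructor.
var inferenceParams = new InferenceParams { MaxTokens = 64 };
await foreach (var piece in executor.InferAsync("Write one sentence about llamas.", inferenceParams))
    Console.Write(piece);

// Executor state (including the prefix/suffix tokens) can be saved and
// restored with the SaveState/LoadState overloads shown above.
await executor.SaveState("instruct-executor-state.json");
await executor.LoadState("instruct-executor-state.json");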