LLamaInteractExecutor.cs

using LLama.Common;
using LLama.Native;
using LLama.Abstractions;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;
using System.Threading.Tasks;
using LLama.Exceptions;
using LLama.Extensions;
using Microsoft.Extensions.Logging;

namespace LLama
{
    /// <summary>
    /// The LLama executor for interactive mode.
    /// </summary>
    public class InteractiveExecutor : StatefulExecutorBase
    {
        private bool _is_prompt_run = true;
        private readonly LLamaToken _llama_token_newline;

        // LLaVA (multi-modal) support
        private int _EmbedImagePosition = -1;
        private SafeLlavaImageEmbedHandle _imageEmbedHandle = null;
        private bool _imageInPrompt = false;

        /// <summary>
        /// Create an interactive executor for a text-only model.
        /// </summary>
        /// <param name="context">The context to run inference with.</param>
        /// <param name="logger">An optional logger.</param>
        public InteractiveExecutor(LLamaContext context, ILogger? logger = null)
            : base(context, logger)
        {
            _llama_token_newline = NativeApi.llama_token_nl(Context.NativeHandle.ModelHandle);
        }

        /// <summary>
        /// Create an interactive executor for a multi-modal (LLaVA) model.
        /// </summary>
        /// <param name="context">The context to run inference with.</param>
        /// <param name="clipModel">The LLaVA weights used to embed images.</param>
        /// <param name="logger">An optional logger.</param>
        public InteractiveExecutor(LLamaContext context, LLavaWeights clipModel, ILogger? logger = null)
            : base(context, clipModel, logger)
        {
            _llama_token_newline = NativeApi.llama_token_nl(Context.NativeHandle.ModelHandle);
        }
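
        // Construction sketch (illustrative, not part of this class). The second
        // constructor wires in LLaVA weights so <image> tags in prompts can be
        // embedded; file paths below are hypothetical placeholders.
        //
        //   var parameters = new ModelParams("model.gguf");
        //   using var weights = LLamaWeights.LoadFromFile(parameters);
        //   using var context = weights.CreateContext(parameters);
        //   using var clip = LLavaWeights.LoadFromFile("mmproj.gguf");
        //   var executor = new InteractiveExecutor(context, clip);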

        /// <inheritdoc />
        public override ExecutorBaseState GetStateData()
        {
            InteractiveExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps.ToArray(),
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds.ToArray(),
                LastTokens = _last_n_tokens.ToArray(),
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens.ToArray(),
                LastTokensCapacity = _last_n_tokens.Capacity,
                MirostatMu = MirostatMu
            };
            return state;
        }

        /// <inheritdoc />
        public override Task LoadState(ExecutorBaseState data)
        {
            if (data is InteractiveExecutorState state)
            {
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps.ToList();
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds.ToList();
                _last_n_tokens = new FixedSizeQueue<LLamaToken>(state.LastTokensCapacity, state.LastTokens);
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens.ToList();
            }
            else
                throw new ArgumentException("Invalid state data type.");

            return Task.CompletedTask;
        }

        /// <inheritdoc />
        public override async Task SaveState(string filename)
        {
            var state = (InteractiveExecutorState)GetStateData();
            using (var fs = new FileStream(filename, FileMode.Create, FileAccess.Write))
            {
                await JsonSerializer.SerializeAsync(fs, state);
            }
        }

        /// <inheritdoc />
        public override async Task LoadState(string filename)
        {
            using (var fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = await JsonSerializer.DeserializeAsync<InteractiveExecutorState>(fs);
                await LoadState(state);
            }
        }
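
        // Persistence sketch (illustrative): the state above round-trips through
        // System.Text.Json, so a conversation can be resumed across process
        // restarts. The file name is a hypothetical placeholder.
        //
        //   await executor.SaveState("executor_state.json");
        //   // ... later, on an executor created over the same model:
        //   await executor.LoadState("executor_state.json");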

        /// <summary>
        /// Decide whether the generation loop should continue.
        /// </summary>
        /// <returns>Whether to keep generating.</returns>
        protected override Task<bool> GetLoopCondition(InferStateArgs args)
        {
            return Task.FromResult((args.RemainedTokens != 0 && !args.WaitForInput) || _is_prompt_run);
        }

        /// <inheritdoc />
        protected override Task PreprocessInputs(string text, InferStateArgs args)
        {
            if (_is_prompt_run)
            {
                // The first input (the prompt) in interactive mode needs special processing.
                if (!this.IsMultiModal)
                {
                    _embed_inps = Context.Tokenize(text, true).ToList();
                }
                else
                {
                    // If the prompt contains an <image> tag, split the text around it.
                    _imageInPrompt = text.Contains("<image>");
                    if (_imageInPrompt)
                    {
                        if (!string.IsNullOrEmpty(ImagePath))
                        {
                            _imageEmbedHandle = SafeLlavaImageEmbedHandle.CreateFromFileName(ClipModel.NativeHandle, Context, ImagePath);
                        }

                        int imageIndex = text.IndexOf("<image>");
                        // Tokenize segment 1 (before the <image> tag)
                        string preImagePrompt = text.Substring(0, imageIndex);
                        var segment1 = Context.Tokenize(preImagePrompt, true);
                        // Remember the position to insert the image embeddings at
                        _EmbedImagePosition = segment1.Length;
                        // Tokenize segment 2 (after the tag; 7 == "<image>".Length)
                        string postImagePrompt = text.Substring(imageIndex + 7);
                        var segment2 = Context.Tokenize(postImagePrompt, false);
                        _embed_inps.AddRange(segment1);
                        _embed_inps.AddRange(segment2);
                    }
                    else
                    {
                        _embed_inps = Context.Tokenize(text, true).ToList();
                    }
                }
            }
            else
            {
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }
                var line_inp = Context.Tokenize(text, false);
                _embed_inps.AddRange(line_inp);
                args.RemainedTokens -= line_inp.Length;
            }
            return Task.CompletedTask;
        }
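
        // Multi-modal prompt sketch (illustrative): with the LLaVA constructor
        // and ImagePath set, an <image> tag marks where the image embeddings are
        // inserted. The image path is a hypothetical placeholder.
        //
        //   executor.ImagePath = "photo.jpg";
        //   var prompt = "USER: <image>\nDescribe the image.\nASSISTANT:";
        //   // PreprocessInputs tokenizes the text on either side of <image> and
        //   // records the split point in _EmbedImagePosition.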

        /// <summary>
        /// Decide whether to stop the generation.
        /// </summary>
        /// <param name="inferenceParams">The inference parameters in use.</param>
        /// <param name="args">The current inference state.</param>
        /// <returns></returns>
        protected override async Task<(bool, IReadOnlyList<string>)> PostProcess(IInferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                if (_last_n_tokens.TokensEndsWithAnyString(args.Antiprompts, Context.NativeHandle.ModelHandle, Context.Encoding))
                    args.WaitForInput = true;

                if (_pastTokensCount > 0 && args.WaitForInput)
                    return (true, Array.Empty<string>());
            }

            if (_embeds.Count > 0 && _embeds.Last() == NativeApi.llama_token_eos(Context.NativeHandle.ModelHandle))
            {
                return (true, new[] { " [end of text]\n" });
            }

            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }
            return (false, Array.Empty<string>());
        }
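
        // Stop-condition sketch (illustrative): the checks above end generation
        // at an antiprompt, at end-of-sequence, or when the token budget runs
        // out. Only parameters referenced in this file are shown.
        //
        //   var inferenceParams = new InferenceParams
        //   {
        //       MaxTokens = 256,                             // -1 means unlimited
        //       AntiPrompts = new List<string> { "User:" },  // pause for input here
        //   };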

        /// <inheritdoc />
        protected override Task InferInternal(IInferenceParams inferenceParams, InferStateArgs args)
        {
            var batch = new LLamaBatch();

            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                if (_pastTokensCount + _embeds.Count > Context.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }

                TryReuseMathingPrefix();

                // Multi-modal decode: split the pending tokens around the image embeddings.
                (DecodeResult, int) header, end, result;
                if (IsMultiModal && _EmbedImagePosition > 0)
                {
                    // Tokens before the image
                    header = Context.NativeHandle.Decode(_embeds.GetRange(0, _EmbedImagePosition), LLamaSeqId.Zero, batch, ref _pastTokensCount);
                    if (header.Item1 != DecodeResult.Ok) throw new LLamaDecodeError(header.Item1);

                    // The image itself
                    ClipModel.EvalImageEmbed(Context, _imageEmbedHandle, ref _pastTokensCount);

                    // Tokens after the image
                    end = Context.NativeHandle.Decode(_embeds.GetRange(_EmbedImagePosition, _embeds.Count - _EmbedImagePosition), LLamaSeqId.Zero, batch, ref _pastTokensCount);
                    if (end.Item1 != DecodeResult.Ok) throw new LLamaDecodeError(end.Item1);
                    _EmbedImagePosition = -1;
                }
                else
                {
                    result = Context.NativeHandle.Decode(_embeds, LLamaSeqId.Zero, batch, ref _pastTokensCount);
                    if (result.Item1 != DecodeResult.Ok) throw new LLamaDecodeError(result.Item1);
                }

                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }

            _embeds.Clear();

            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? (int)Context.ContextSize : inferenceParams.RepeatLastTokensCount;

                // Optionally save the session on the first sample (for faster prompt loading next time)
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }

                LLamaToken id;
                if (inferenceParams.SamplingPipeline is not null)
                {
                    id = inferenceParams.SamplingPipeline.Sample(Context.NativeHandle, Context.NativeHandle.GetLogitsIth(batch.TokenCount - 1), _last_n_tokens.ToArray());
                    inferenceParams.SamplingPipeline.Accept(Context.NativeHandle, id);
                }
                else
                {
                    var tokenDataArray = Context.ApplyPenalty(batch.TokenCount - 1, _last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                        inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                    var mu = MirostatMu;
                    id = Context.Sample(
                        tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                        inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP, inferenceParams.Grammar,
                        inferenceParams.MinP
                    );
                    MirostatMu = mu;
                }

                _last_n_tokens.Enqueue(id);

                if (id == NativeApi.llama_token_eos(Context.NativeHandle.ModelHandle))
                {
                    // Replace end-of-sequence with a newline and queue the first
                    // antiprompt so the conversation can continue.
                    id = _llama_token_newline;
                    if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                    {
                        var first_antiprompt = Context.Tokenize(args.Antiprompts[0], false);
                        _embed_inps.AddRange(first_antiprompt);
                    }
                }

                _embeds.Add(id);

                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= Context.Params.BatchSize)
                    {
                        break;
                    }
                }
            }

            return Task.CompletedTask;
        }
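
        // Sampling sketch (illustrative): when no SamplingPipeline is supplied,
        // the fallback path above applies repetition penalties and the configured
        // sampling parameters. A minimal configuration using only parameters
        // referenced in this method:
        //
        //   var inferenceParams = new InferenceParams
        //   {
        //       Temperature = 0.7f,
        //       TopK = 40,
        //       TopP = 0.95f,
        //       RepeatPenalty = 1.1f,
        //   };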

        /// <summary>
        /// The descriptor of the state of the interactive executor.
        /// </summary>
        public class InteractiveExecutorState
            : ExecutorBaseState
        {
            /// <summary>
            /// Whether the executor is running for the first time (running the prompt).
            /// </summary>
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }
        }
    }
}
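
// Usage sketch (illustrative, not part of the original file): a minimal
// interactive loop driving this executor. The model path is a hypothetical
// placeholder; ModelParams, LLamaWeights, InferenceParams and InferAsync are
// the standard LLamaSharp entry points this executor is designed around.
namespace LLama.Examples
{
    using System;
    using System.Collections.Generic;
    using System.Threading.Tasks;
    using LLama.Common;

    internal static class InteractiveExecutorExample
    {
        public static async Task RunAsync()
        {
            var parameters = new ModelParams("model.gguf") { ContextSize = 2048 };
            using var weights = LLamaWeights.LoadFromFile(parameters);
            using var context = weights.CreateContext(parameters);
            var executor = new InteractiveExecutor(context);

            var inferenceParams = new InferenceParams
            {
                MaxTokens = 256,
                AntiPrompts = new List<string> { "User:" }
            };

            // The first call processes the prompt; subsequent calls append input.
            await foreach (var piece in executor.InferAsync("User: Hello!\nAssistant:", inferenceParams))
                Console.Write(piece);
        }
    }
}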