LLamaInteractExecutor.cs

April 2024 Binary Update (#662)

* Updated binaries, using [this build](https://github.com/SciSharp/LLamaSharp/actions/runs/8654672719/job/23733195669) for llama.cpp commit `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
  - Added all new functions.
  - Moved some functions (e.g. `SafeLlamaModelHandle` specific functions) into `SafeLlamaModelHandle.cs`.
  - Exposed tokens on `SafeLlamaModelHandle` and `LLamaWeights` through a `Tokens` property. As new special tokens are added in the future they can be added here.
  - Changed all token properties to return nullable tokens, to handle some models not having some tokens.
  - Fixed `DefaultSamplingPipeline` to handle no newline token in some models.
* Moved native methods to more specific locations.
  - Context specific things have been moved into `SafeLLamaContextHandle.cs` and made private - they're exposed through C# properties and methods already.
  - Checking that GPU layer count is zero if GPU offload is not supported.
  - Moved methods for creating default structs (`llama_model_quantize_default_params` and `llama_context_default_params`) into relevant structs.
* Removed exception if `GpuLayerCount > 0` when GPU is not supported.
* Added low level wrapper methods for new per-sequence state load/save in `SafeLLamaContextHandle`.
* Added high level wrapper methods (save/load with `State` object or memory mapped file) in `LLamaContext`.
* Moved native methods for per-sequence state load/save into `SafeLLamaContextHandle`.
* Added update and defrag methods for the KV cache in `SafeLLamaContextHandle`.
* Updated submodule to `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
* Passing the sequence ID when saving a single sequence state.
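The executor below exposes its conversation state through `GetStateData`/`LoadState` and the file-based `SaveState`/`LoadState` overloads shown in the source. A minimal usage sketch follows; the model path, prompt, anti-prompt, and file names are placeholder assumptions, not values from this repository:

    using System;
    using LLama;
    using LLama.Common;

    // Load a model and create an interactive executor (paths/prompts are illustrative).
    var parameters = new ModelParams("models/llama-2-7b-chat.Q4_K_M.gguf") { ContextSize = 2048 };
    using var weights = LLamaWeights.LoadFromFile(parameters);
    using var context = weights.CreateContext(parameters);
    var executor = new InteractiveExecutor(context);

    var inferenceParams = new InferenceParams { MaxTokens = 256, AntiPrompts = new[] { "User:" } };

    // Stream a response, then persist the executor state to disk.
    await foreach (var token in executor.InferAsync("User: Hello!\nAssistant:", inferenceParams))
        Console.Write(token);
    await executor.SaveState("./session.json");

    // Later (or in another process) the state can be restored and the chat continued.
    await executor.LoadState("./session.json");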
using LLama.Common;
using LLama.Native;
using LLama.Abstractions;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.Json;
using System.Text.Json.Serialization;
using System.Threading.Tasks;
using LLama.Exceptions;
using LLama.Extensions;
using Microsoft.Extensions.Logging;

namespace LLama
{
    /// <summary>
    /// The LLama executor for interactive mode.
    /// </summary>
    public class InteractiveExecutor : StatefulExecutorBase
    {
        private bool _is_prompt_run = true;

        // LLava
        private int _EmbedImagePosition = -1;
        private List<SafeLlavaImageEmbedHandle> _imageEmbedHandles = new List<SafeLlavaImageEmbedHandle>();
        private bool _imageInPrompt = false;

        /// <summary>
        /// Create a new interactive executor for a text-only model.
        /// </summary>
        /// <param name="context"></param>
        /// <param name="logger"></param>
        public InteractiveExecutor(LLamaContext context, ILogger? logger = null)
            : base(context, logger)
        {
        }

        /// <summary>
        /// Create a new interactive executor with multi-modal (LLava) support.
        /// </summary>
        /// <param name="context"></param>
        /// <param name="clipModel"></param>
        /// <param name="logger"></param>
        public InteractiveExecutor(LLamaContext context, LLavaWeights clipModel, ILogger? logger = null)
            : base(context, clipModel, logger)
        {
        }
        /// <inheritdoc />
        public override ExecutorBaseState GetStateData()
        {
            InteractiveExecutorState state = new()
            {
                ConsumedSessionCount = _n_session_consumed,
                EmbedInps = _embed_inps.ToArray(),
                IsPromptRun = _is_prompt_run,
                ConsumedTokensCount = _consumedTokensCount,
                Embeds = _embeds.ToArray(),
                LastTokens = _last_n_tokens.ToArray(),
                MatchingSessionTokensCount = _n_matching_session_tokens,
                PastTokensCount = _pastTokensCount,
                SessionFilePath = _pathSession,
                SessionTokens = _session_tokens.ToArray(),
                LastTokensCapacity = _last_n_tokens.Capacity,
                MirostatMu = MirostatMu
            };
            return state;
        }

        /// <inheritdoc />
        public override Task LoadState(ExecutorBaseState data)
        {
            if (data is InteractiveExecutorState state)
            {
                _n_session_consumed = state.ConsumedSessionCount;
                _embed_inps = state.EmbedInps.ToList();
                _is_prompt_run = state.IsPromptRun;
                _consumedTokensCount = state.ConsumedTokensCount;
                _embeds = state.Embeds.ToList();
                _last_n_tokens = new FixedSizeQueue<LLamaToken>(state.LastTokensCapacity, state.LastTokens);
                _n_matching_session_tokens = state.MatchingSessionTokensCount;
                _pastTokensCount = state.PastTokensCount;
                _pathSession = state.SessionFilePath;
                _session_tokens = state.SessionTokens.ToList();
            }
            else
                throw new ArgumentException("Invalid state data type.");

            return Task.CompletedTask;
        }

        /// <inheritdoc />
        public override async Task SaveState(string filename)
        {
            var state = (InteractiveExecutorState)GetStateData();
            using (var fs = new FileStream(filename, FileMode.Create, FileAccess.Write))
            {
                await JsonSerializer.SerializeAsync(fs, state);
            }
        }

        /// <inheritdoc />
        public override async Task LoadState(string filename)
        {
            using (var fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
            {
                var state = await JsonSerializer.DeserializeAsync<InteractiveExecutorState>(fs);
                await LoadState(state);
            }
        }
        /// <summary>
        /// Define whether to continue the loop to generate responses.
        /// </summary>
        /// <returns></returns>
        protected override Task<bool> GetLoopCondition(InferStateArgs args)
        {
            return Task.FromResult(args.RemainedTokens != 0 && !args.WaitForInput || _is_prompt_run);
        }

        /// <inheritdoc />
        protected override Task PreprocessInputs(string text, InferStateArgs args)
        {
            if (_is_prompt_run)
            {
                // The first input (prompt) in interactive mode needs special processing.
                if (!this.IsMultiModal)
                {
                    _embed_inps = Context.Tokenize(text, true, true).ToList();
                }
                else
                {
                    PreprocessLlava(text, args, true);
                }
            }
            else
            {
                // Ensure follow-up user input ends with a newline before appending it.
                if (!text.EndsWith("\n"))
                {
                    text += "\n";
                }

                if (!this.IsMultiModal)
                {
                    var line_inp = Context.Tokenize(text, false, true);
                    _embed_inps.AddRange(line_inp);
                    args.RemainedTokens -= line_inp.Length;
                }
                else
                {
                    PreprocessLlava(text, args, false);
                }
            }

            return Task.CompletedTask;
        }
        /// <summary>
        /// Tokenize multi-modal input, remembering where the image embeddings should be inserted.
        /// </summary>
        private Task PreprocessLlava(string text, InferStateArgs args, bool addBos = true)
        {
            int usedTokens = 0;

            // If the prompt contains the <image> tag, extract it.
            _imageInPrompt = text.Contains("<image>");
            if (_imageInPrompt && IsMultiModal)
            {
                foreach (var image in Images)
                {
                    _imageEmbedHandles.Add(SafeLlavaImageEmbedHandle.CreateFromMemory(ClipModel.NativeHandle, Context, image));
                }

                int imageIndex = text.IndexOf("<image>");

                // Tokenize segment 1 (before <image> tag)
                string preImagePrompt = text.Substring(0, imageIndex);
                var segment1 = Context.Tokenize(preImagePrompt, addBos, true);

                // Remember the position to add the image embeddings
                _EmbedImagePosition = segment1.Length;

                // Tokenize segment 2 (after the tag); 7 is the length of "<image>"
                string postImagePrompt = text.Substring(imageIndex + 7);
                var segment2 = Context.Tokenize(postImagePrompt, false, true);

                _embed_inps.AddRange(segment1);
                _embed_inps.AddRange(segment2);
                usedTokens += (segment1.Length + segment2.Length);
            }
            else
            {
                if (addBos)
                {
                    _embed_inps = Context.Tokenize(text, true, true).ToList();
                }
                else
                {
                    var line_inp = Context.Tokenize(text, false, true);
                    _embed_inps.AddRange(line_inp);
                    args.RemainedTokens -= line_inp.Length;
                }
            }

            return Task.CompletedTask;
        }
        /// <summary>
        /// Return whether to break the generation.
        /// </summary>
        /// <param name="inferenceParams"></param>
        /// <param name="args"></param>
        /// <returns></returns>
        protected override async Task<(bool, IReadOnlyList<string>)> PostProcess(IInferenceParams inferenceParams, InferStateArgs args)
        {
            if (_embed_inps.Count <= _consumedTokensCount)
            {
                if (_last_n_tokens.TokensEndsWithAnyString(args.Antiprompts, Context.NativeHandle.ModelHandle, Context.Encoding))
                    args.WaitForInput = true;

                if (_pastTokensCount > 0 && args.WaitForInput)
                    return (true, Array.Empty<string>());
            }

            if (_embeds.Count > 0 && _embeds.Last() == Context.NativeHandle.ModelHandle.Tokens.EOS)
            {
                return (true, new[] { " [end of text]\n" });
            }

            if (args.RemainedTokens <= 0 && inferenceParams.MaxTokens != -1)
            {
                args.RemainedTokens = inferenceParams.MaxTokens;
                args.WaitForInput = true;
            }

            return (false, Array.Empty<string>());
        }
        /// <inheritdoc />
        protected override Task InferInternal(IInferenceParams inferenceParams, InferStateArgs args)
        {
            var batch = new LLamaBatch();

            if (_embeds.Count > 0)
            {
                _is_prompt_run = false;
                if (_pastTokensCount + _embeds.Count > Context.ContextSize)
                {
                    HandleRunOutOfContext(inferenceParams.TokensKeep);
                }

                TryReuseMathingPrefix();

                // Changes to support Multi-Modal LLMs.
                (DecodeResult, int) header, end, result;
                if (IsMultiModal && _EmbedImagePosition > 0)
                {
                    // Tokens previous to the images
                    header = Context.NativeHandle.Decode(_embeds.GetRange(0, _EmbedImagePosition), LLamaSeqId.Zero, batch, ref _pastTokensCount);
                    if (header.Item1 != DecodeResult.Ok) throw new LLamaDecodeError(header.Item1);

                    // Images
                    foreach (var image in _imageEmbedHandles)
                        ClipModel.EvalImageEmbed(Context, image, ref _pastTokensCount);

                    // Post-image tokens
                    end = Context.NativeHandle.Decode(_embeds.GetRange(_EmbedImagePosition, _embeds.Count - _EmbedImagePosition), LLamaSeqId.Zero, batch, ref _pastTokensCount);
                    if (end.Item1 != DecodeResult.Ok) throw new LLamaDecodeError(end.Item1);

                    _EmbedImagePosition = -1;
                    _imageEmbedHandles.Clear();
                    Images.Clear();
                }
                else
                {
                    result = Context.NativeHandle.Decode(_embeds, LLamaSeqId.Zero, batch, ref _pastTokensCount);
                    if (result.Item1 != DecodeResult.Ok) throw new LLamaDecodeError(result.Item1);
                }

                if (_embeds.Count > 0 && !string.IsNullOrEmpty(_pathSession))
                {
                    _session_tokens.AddRange(_embeds);
                    _n_session_consumed = _session_tokens.Count;
                }
            }

            _embeds.Clear();

            if (_embed_inps.Count <= _consumedTokensCount && !args.WaitForInput)
            {
                var repeat_last_n = inferenceParams.RepeatLastTokensCount < 0 ? (int)Context.ContextSize : inferenceParams.RepeatLastTokensCount;

                // Optionally save the session on first sample (for faster prompt loading next time)
                if (!string.IsNullOrEmpty(_pathSession) && args.NeedToSaveSession)
                {
                    args.NeedToSaveSession = false;
                    SaveSessionFile(_pathSession);
                }

                LLamaToken id;
                if (inferenceParams.SamplingPipeline is not null)
                {
                    id = inferenceParams.SamplingPipeline.Sample(Context.NativeHandle, Context.NativeHandle.GetLogitsIth(batch.TokenCount - 1), _last_n_tokens.ToArray());
                    inferenceParams.SamplingPipeline.Accept(Context.NativeHandle, id);
                }
                else
                {
                    var tokenDataArray = Context.ApplyPenalty(batch.TokenCount - 1, _last_n_tokens, inferenceParams.LogitBias, repeat_last_n,
                        inferenceParams.RepeatPenalty, inferenceParams.FrequencyPenalty, inferenceParams.PresencePenalty, inferenceParams.PenalizeNL);

                    var mu = MirostatMu;
                    id = Context.Sample(
                        tokenDataArray, ref mu, inferenceParams.Temperature, inferenceParams.Mirostat, inferenceParams.MirostatTau,
                        inferenceParams.MirostatEta, inferenceParams.TopK, inferenceParams.TopP, inferenceParams.TfsZ, inferenceParams.TypicalP, inferenceParams.Grammar,
                        inferenceParams.MinP
                    );
                    MirostatMu = mu;
                }

                _last_n_tokens.Enqueue(id);

                if (id == Context.NativeHandle.ModelHandle.Tokens.EOS)
                {
                    // Replace EOS with a newline and queue the first antiprompt so the conversation can continue.
                    id = Context.NativeHandle.ModelHandle.Tokens.Newline!.Value;
                    if (args.Antiprompts is not null && args.Antiprompts.Count > 0)
                    {
                        var first_antiprompt = Context.Tokenize(args.Antiprompts[0], false);
                        _embed_inps.AddRange(first_antiprompt);
                    }
                }

                _embeds.Add(id);

                args.RemainedTokens--;
                args.ReturnValue = true;
            }
            else
            {
                // Feed pending prompt tokens into _embeds, at most one batch at a time.
                while (_embed_inps.Count > _consumedTokensCount)
                {
                    _embeds.Add(_embed_inps[_consumedTokensCount]);
                    _last_n_tokens.Enqueue(_embed_inps[_consumedTokensCount]);
                    _consumedTokensCount++;
                    if (_embeds.Count >= Context.Params.BatchSize)
                    {
                        break;
                    }
                }
            }

            return Task.CompletedTask;
        }
        /// <summary>
        /// The descriptor of the state of the interactive executor.
        /// </summary>
        public class InteractiveExecutorState
            : ExecutorBaseState
        {
            /// <summary>
            /// Whether the executor is running for the first time (running the prompt).
            /// </summary>
            [JsonPropertyName("is_prompt_run")]
            public bool IsPromptRun { get; set; }
        }
    }
}
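For the multi-modal path above (`PreprocessLlava` and the image handling in `InferInternal`), a rough usage sketch follows. The model and image file names are placeholders, and the exact shape of the `Images` collection is assumed from its use with `SafeLlavaImageEmbedHandle.CreateFromMemory` in the source, so treat this as illustrative rather than definitive:

    using System;
    using System.IO;
    using LLama;
    using LLama.Common;

    // Load the language model and the LLava clip model (paths are illustrative).
    var parameters = new ModelParams("models/llava-v1.6-mistral-7b.Q4_K_M.gguf");
    using var weights = LLamaWeights.LoadFromFile(parameters);
    using var clip = LLavaWeights.LoadFromFile("models/mmproj-model-f16.gguf");
    using var context = weights.CreateContext(parameters);

    var executor = new InteractiveExecutor(context, clip);

    // Raw image bytes are queued on the executor; PreprocessLlava splits the prompt
    // around the <image> tag and InferInternal evaluates the embedding at that position.
    executor.Images.Add(File.ReadAllBytes("picture.jpg"));

    var prompt = "USER: <image>\nDescribe the image.\nASSISTANT:";
    await foreach (var token in executor.InferAsync(prompt, new InferenceParams { MaxTokens = 256 }))
        Console.Write(token);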