
LLamaExecutorBase.cs

April 2024 Binary Update (#662)

* Updated binaries, using [this build](https://github.com/SciSharp/LLamaSharp/actions/runs/8654672719/job/23733195669) for llama.cpp commit `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
  - Added all new functions.
  - Moved some functions (e.g. `SafeLlamaModelHandle` specific functions) into `SafeLlamaModelHandle.cs`
  - Exposed tokens on `SafeLlamaModelHandle` and `LLamaWeights` through a `Tokens` property. As new special tokens are added in the future they can be added here.
  - Changed all token properties to return nullable tokens, to handle some models not having some tokens.
  - Fixed `DefaultSamplingPipeline` to handle no newline token in some models.
* Moved native methods to more specific locations.
  - Context specific things have been moved into `SafeLLamaContextHandle.cs` and made private - they're exposed through C# properties and methods already.
  - Checking that GPU layer count is zero if GPU offload is not supported.
  - Moved methods for creating default structs (`llama_model_quantize_default_params` and `llama_context_default_params`) into relevant structs.
* Removed exception if `GpuLayerCount > 0` when GPU is not supported.
* Added low level wrapper methods for new per-sequence state load/save in `SafeLLamaContextHandle`.
  - Added high level wrapper methods (save/load with `State` object or memory mapped file) in `LLamaContext`.
  - Moved native methods for per-sequence state load/save into `SafeLLamaContextHandle`.
* Added update and defrag methods for KV cache in `SafeLLamaContextHandle`.
* Updated submodule to `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`.
* Passing the sequence ID when saving a single sequence state.
using LLama.Abstractions;
using LLama.Common;
using LLama.Exceptions;
using LLama.Native;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Text.Json.Serialization;
using System.Threading;
using System.Threading.Tasks;
namespace LLama
{
    /// <summary>
    /// The base class for stateful LLama executors.
    /// </summary>
    public abstract class StatefulExecutorBase : ILLamaExecutor
    {
        /// <summary>
        /// The logger used by this executor.
        /// </summary>
        protected ILogger? _logger;

        /// <summary>
        /// The number of tokens that have already been processed by the model.
        /// </summary>
        protected int _pastTokensCount; // n_past

        /// <summary>
        /// The number of tokens consumed by the model during the current inference.
        /// </summary>
        protected int _consumedTokensCount; // n_consume

        /// <summary>
        /// The number of tokens from the loaded session that have been consumed so far.
        /// </summary>
        protected int _n_session_consumed;

        /// <summary>
        /// The number of tokens in the loaded session that match the current input.
        /// </summary>
        protected int _n_matching_session_tokens;

        /// <summary>
        /// The path of the session file.
        /// </summary>
        protected string? _pathSession;

        /// <summary>
        /// The tokens queued up to be processed by the model in the current batch.
        /// </summary>
        protected List<LLamaToken> _embeds = new(); // embd

        /// <summary>
        /// The tokens of the current input. (embd_inp)
        /// </summary>
        protected List<LLamaToken> _embed_inps = new();

        /// <summary>
        /// The tokens loaded from, and to be saved back to, the session file.
        /// </summary>
        protected List<LLamaToken> _session_tokens = new();

        /// <summary>
        /// The last tokens generated by the model.
        /// </summary>
        protected FixedSizeQueue<LLamaToken> _last_n_tokens;

        /// <summary>
        /// The context used by the executor.
        /// </summary>
        public LLamaContext Context { get; }

        // LLava Section

        /// <inheritdoc />
        public bool IsMultiModal
        {
            get
            {
                return ClipModel != null;
            }
        }

        /// <inheritdoc />
        public LLavaWeights? ClipModel { get; }

        /// <inheritdoc />
        public List<byte[]> Images { get; }

        /// <summary>
        /// Current "mu" value for mirostat sampling
        /// </summary>
        protected float? MirostatMu { get; set; }

        private readonly StreamingTokenDecoder _decoder;

        /// <summary>
        /// Create a new stateful executor that uses the given context.
        /// </summary>
        /// <param name="context">The context to use for inference.</param>
        /// <param name="logger">An optional logger.</param>
        protected StatefulExecutorBase(LLamaContext context, ILogger? logger = null)
        {
            Images = new List<byte[]>();
            _logger = logger;
            Context = context;
            _pastTokensCount = 0;
            _consumedTokensCount = 0;
            _n_session_consumed = 0;
            _last_n_tokens = new FixedSizeQueue<LLamaToken>((int)Context.ContextSize);
            _decoder = new StreamingTokenDecoder(context);
        }

        /// <summary>
        /// Create a new stateful executor with multi-modal (LLava) support.
        /// </summary>
        /// <param name="context">The context to use for inference.</param>
        /// <param name="lLavaWeights">The LLava weights to use for image input.</param>
        /// <param name="logger">An optional logger.</param>
        public StatefulExecutorBase(LLamaContext context, LLavaWeights lLavaWeights, ILogger? logger = null) :
            this(context, logger)
        {
            ClipModel = lLavaWeights;
        }

        /// <summary>
        /// This API is currently not verified.
        /// </summary>
        /// <param name="filename">The path of the session file.</param>
        /// <returns>This executor, to allow call chaining.</returns>
        /// <exception cref="ArgumentNullException"></exception>
        /// <exception cref="RuntimeError"></exception>
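        /// <example>
        /// A usage sketch; <c>InteractiveExecutor</c> stands in here for any concrete stateful executor:
        /// <code>
        /// var executor = new InteractiveExecutor(context)
        ///     .WithSessionFile("chat.session"); // reuses the evaluated prompt tokens if the file exists
        /// </code>
        /// </example>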
        public StatefulExecutorBase WithSessionFile(string filename)
        {
            if (string.IsNullOrEmpty(filename))
            {
                throw new ArgumentNullException(nameof(filename), "File name cannot be empty.");
            }
            _pathSession = filename;

            if (File.Exists(filename))
            {
                _logger?.LogInformation($"[LLamaExecutor] Attempting to load saved session from {filename}");
                var session_tokens = new LLamaToken[Context.ContextSize];
                if (!NativeApi.llama_state_load_file(Context.NativeHandle, _pathSession, session_tokens, (ulong)Context.ContextSize, out var n_token_count_out))
                {
                    _logger?.LogError($"[LLamaExecutor] Failed to load session file {filename}");
                    throw new RuntimeError($"Failed to load session file {_pathSession}");
                }
                // Keep only the tokens that were actually loaded from the file.
                _session_tokens = session_tokens.Take((int)n_token_count_out).ToList();
                _logger?.LogInformation($"[LLamaExecutor] Loaded a session with prompt size of {_session_tokens.Count} tokens");
            }
            else
            {
                _logger?.LogWarning("[LLamaExecutor] Session file does not exist, will create");
            }

            // Count how many leading tokens of the loaded session match the current input.
            _n_matching_session_tokens = 0;
            if (_session_tokens.Count > 0)
            {
                foreach (var id in _session_tokens)
                {
                    if (_n_matching_session_tokens >= _embed_inps.Count || id != _embed_inps[_n_matching_session_tokens])
                    {
                        break;
                    }
                    _n_matching_session_tokens++;
                }
                if (_n_matching_session_tokens >= _embed_inps.Count)
                {
                    _logger?.LogInformation("[LLamaExecutor] Session file has exact match for prompt!");
                }
                else if (_n_matching_session_tokens < _embed_inps.Count / 2)
                {
                    _logger?.LogWarning($"[LLamaExecutor] Session file has low similarity to prompt ({_n_matching_session_tokens} / {_embed_inps.Count} tokens); will mostly be re-evaluated");
                }
                else
                {
                    _logger?.LogInformation($"[LLamaExecutor] Session file matches {_n_matching_session_tokens} / {_embed_inps.Count} tokens of prompt");
                }
            }
            return this;
        }

        /// <summary>
        /// This API is currently not verified.
        /// </summary>
        /// <param name="filename">The path to save the session file to.</param>
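        /// <example>
        /// <code>
        /// // Persist the current session tokens so a later run can skip re-evaluating the prompt:
        /// executor.SaveSessionFile("chat.session");
        /// </code>
        /// </example>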
        public void SaveSessionFile(string filename)
        {
            var session_token_array = _session_tokens.ToArray();
            NativeApi.llama_state_save_file(Context.NativeHandle, filename, session_token_array, (ulong)session_token_array.Length);
        }

        /// <summary>
        /// When the context runs out, keep the first <paramref name="tokensToKeep"/> tokens of the
        /// original prompt and recompute the logits for the remaining tokens in batches.
        /// </summary>
        /// <param name="tokensToKeep">The number of leading prompt tokens to keep in the KV cache.</param>
        protected virtual void HandleRunOutOfContext(int tokensToKeep)
        {
            // if we run out of context:
            // - take the tokensToKeep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - tokensToKeep) tokens and recompute the logits in batches
            var n_left = _pastTokensCount - tokensToKeep;
            var n_discard = n_left / 2;
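
            // Worked example with illustrative numbers: in a 4096-token context with
            // tokensToKeep = 4 and _pastTokensCount = 4096, n_left = 4092 and n_discard = 2046.
            // Cache positions [4, 2050) are removed below, positions [2050, 4096) shift left by
            // 2046, and _pastTokensCount drops to 2050, freeing ~2046 positions for new tokens.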
            NativeApi.llama_kv_cache_seq_rm(Context.NativeHandle, (LLamaSeqId)0, tokensToKeep, tokensToKeep + n_discard);
            NativeApi.llama_kv_cache_seq_add(Context.NativeHandle, (LLamaSeqId)0, tokensToKeep + n_discard, _pastTokensCount, -n_discard);
            _pastTokensCount -= n_discard;

            // stop saving session if we run out of context
            _pathSession = string.Empty;
        }

        /// <summary>
        /// Try to reuse the matching prefix from the session file.
        /// </summary>
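        /// <remarks>
        /// While the queued tokens in <c>_embeds</c> match the next unconsumed session tokens they are
        /// treated as already evaluated: <c>_pastTokensCount</c> and <c>_n_session_consumed</c> advance
        /// and the matched tokens are removed from the queue. At the first mismatch the remainder of
        /// the saved session is discarded, since it no longer applies to this input.
        /// </remarks>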
        protected virtual void TryReuseMatchingPrefix()
        {
            if (_n_session_consumed < _session_tokens.Count)
            {
                int i = 0;
                for (; i < _embeds.Count; i++)
                {
                    if (_embeds[i] != _session_tokens[_n_session_consumed])
                    {
                        _session_tokens = _session_tokens.Take(_n_session_consumed).ToList();
                        break;
                    }
                    _pastTokensCount++;
                    _n_session_consumed++;
                    if (_n_session_consumed >= _session_tokens.Count)
                    {
                        i++;
                        break;
                    }
                }
                if (i > 0)
                {
                    _embeds.RemoveRange(0, i);
                }
            }
        }

        /// <summary>
        /// Decide whether to continue the loop.
        /// </summary>
        /// <param name="args"></param>
        /// <returns></returns>
        protected abstract Task<bool> GetLoopCondition(InferStateArgs args);

        /// <summary>
        /// Preprocess the inputs before the inference.
        /// </summary>
        /// <param name="text"></param>
        /// <param name="args"></param>
        protected abstract Task PreprocessInputs(string text, InferStateArgs args);

        /// <summary>
        /// Do some post processing after the inference.
        /// </summary>
        /// <param name="inferenceParams"></param>
        /// <param name="args"></param>
        /// <returns></returns>
        protected abstract Task<(bool, IReadOnlyList<string>)> PostProcess(IInferenceParams inferenceParams, InferStateArgs args);

        /// <summary>
        /// The core inference logic.
        /// </summary>
        /// <param name="inferenceParams"></param>
        /// <param name="args"></param>
        protected abstract Task InferInternal(IInferenceParams inferenceParams, InferStateArgs args);

        /// <summary>
        /// Save the current state to a file.
        /// </summary>
        /// <param name="filename"></param>
        public abstract Task SaveState(string filename);

        /// <summary>
        /// Get the current state data.
        /// </summary>
        /// <returns></returns>
        public abstract ExecutorBaseState GetStateData();

        /// <summary>
        /// Load the state from data.
        /// </summary>
        /// <param name="data"></param>
        public abstract Task LoadState(ExecutorBaseState data);

        /// <summary>
        /// Load the state from a file.
        /// </summary>
        /// <param name="filename"></param>
        public abstract Task LoadState(string filename);

        /// <summary>
        /// Execute the inference.
        /// </summary>
        /// <param name="text"></param>
        /// <param name="inferenceParams"></param>
        /// <param name="cancellationToken"></param>
        /// <returns></returns>
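        /// <example>
        /// A minimal consumption sketch (the <c>executor</c> instance and parameter values are illustrative):
        /// <code>
        /// await foreach (var token in executor.InferAsync("Hello", new InferenceParams { MaxTokens = 64 }))
        /// {
        ///     Console.Write(token);
        /// }
        /// </code>
        /// </example>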
        public virtual async IAsyncEnumerable<string> InferAsync(string text, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
        {
            cancellationToken.ThrowIfCancellationRequested();
            inferenceParams ??= new InferenceParams();
            var args = new InferStateArgs
            {
                Antiprompts = inferenceParams.AntiPrompts.ToList(),
                RemainedTokens = inferenceParams.MaxTokens,
                ReturnValue = false,
                WaitForInput = false,
                NeedToSaveSession = !string.IsNullOrEmpty(_pathSession) && _n_matching_session_tokens < _embed_inps.Count
            };

            await PreprocessInputs(text, args);
            while (await GetLoopCondition(args))
            {
                if (cancellationToken.IsCancellationRequested)
                {
                    break;
                }
                await InferInternal(inferenceParams, args);
                if (args.ReturnValue)
                {
                    _decoder.AddRange(_embeds);
                    yield return _decoder.Read();
                }
                var (breakGeneration, extraOutputs) = await PostProcess(inferenceParams, args);
                if (extraOutputs is { Count: > 0 })
                {
                    foreach (var item in extraOutputs)
                    {
                        yield return item;
                    }
                }
                if (breakGeneration)
                {
                    break;
                }
            }
        }

        /// <summary>
        /// Asynchronously runs a prompt through the model to compute the KV cache, without generating any new tokens.
        /// This can reduce the latency of the first response when the user's first input does not arrive immediately.
        /// </summary>
        /// <param name="prompt">Prompt to process</param>
        /// <returns></returns>
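        /// <example>
        /// <code>
        /// // Warm the KV cache with a system prompt while waiting for the user's first message:
        /// await executor.PrefillPromptAsync(systemPrompt);
        /// </code>
        /// </example>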
        public virtual async Task PrefillPromptAsync(string prompt)
        {
            var inferenceParams = new InferenceParams
            {
                MaxTokens = 0
            };
            var args = new InferStateArgs
            {
                Antiprompts = new List<string>(),
                RemainedTokens = 0,
                ReturnValue = false,
                WaitForInput = true,
                NeedToSaveSession = false
            };

            await PreprocessInputs(prompt, args);
            // First run adds the prompt to _embeds
            await InferInternal(inferenceParams, args);
            // Second run puts it through decode
            await InferInternal(inferenceParams, args);
        }

        /// <summary>
        /// State arguments that are used in a single inference.
        /// </summary>
        protected class InferStateArgs
        {
            /// <summary>
            /// Strings which, when generated, cause generation to stop and wait for input. (antiprompts)
            /// </summary>
            public IList<string>? Antiprompts { get; set; }

            /// <summary>
            /// The number of tokens that remain to be generated. (n_remain)
            /// </summary>
            public int RemainedTokens { get; set; }

            /// <summary>
            /// Whether the tokens generated in this iteration should be returned to the caller.
            /// </summary>
            public bool ReturnValue { get; set; }

            /// <summary>
            /// Whether the executor should pause and wait for further input.
            /// </summary>
            public bool WaitForInput { get; set; }

            /// <summary>
            /// Whether the session needs to be saved to the session file.
            /// </summary>
            public bool NeedToSaveSession { get; set; }
        }
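
        /// <summary>
        /// The serializable state of a <see cref="StatefulExecutorBase"/>, returned by
        /// <see cref="GetStateData"/> and restored via <see cref="LoadState(ExecutorBaseState)"/>.
        /// </summary>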
        [JsonConverter(typeof(PolymorphicJSONConverter<ExecutorBaseState>))]
        public class ExecutorBaseState
        {
            [JsonPropertyName("n_past")]
            public int PastTokensCount { get; set; }

            [JsonPropertyName("n_consumed")]
            public int ConsumedTokensCount { get; set; }

            [JsonPropertyName("n_session_consumed")]
            public int ConsumedSessionCount { get; set; }

            [JsonPropertyName("n_matching_session_tokens")]
            public int MatchingSessionTokensCount { get; set; }

            [JsonPropertyName("path_session")]
            public string? SessionFilePath { get; set; }

            [JsonPropertyName("embd")]
            public LLamaToken[] Embeds { get; set; }

            [JsonPropertyName("embd_inps")]
            public LLamaToken[] EmbedInps { get; set; }

            [JsonPropertyName("session_tokens")]
            public LLamaToken[] SessionTokens { get; set; }

            [JsonPropertyName("last_n_tokens")]
            public LLamaToken[] LastTokens { get; set; }

            [JsonPropertyName("last_tokens_maximum_count")]
            public int LastTokensCapacity { get; set; }

            [JsonPropertyName("mirostat_mu")]
            public float? MirostatMu { get; set; }
        }
    }
}