
LLamaContext.cs

using LLama.Exceptions;
using LLama.Native;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
using System.IO.MemoryMappedFiles;
using LLama.Common;
using System.Runtime.InteropServices;
using System.Threading.Tasks;
using LLama.Extensions;
using LLama.Abstractions;
using LLama.Sampling;
using Microsoft.Extensions.Logging;
using System.Threading;

namespace LLama
{
    /// <summary>
    /// A llama_context, which holds all the context required to interact with a model
    /// </summary>
    public sealed class LLamaContext
        : IDisposable
    {
        private readonly ILogger? _logger;

        /// <summary>
        /// Total number of tokens in the vocabulary of this model
        /// </summary>
        public int VocabCount => NativeHandle.VocabCount;

        /// <summary>
        /// Total number of tokens in the context
        /// </summary>
        public uint ContextSize => NativeHandle.ContextSize;

        /// <summary>
        /// Dimension of embedding vectors
        /// </summary>
        public int EmbeddingSize => NativeHandle.EmbeddingSize;

        /// <summary>
        /// The context params set for this context
        /// </summary>
        public IContextParams Params { get; }

        /// <summary>
        /// The native handle, which is passed to the native APIs
        /// </summary>
        /// <remarks>Be careful how you use this!</remarks>
        public SafeLLamaContextHandle NativeHandle { get; }

        /// <summary>
        /// The encoding set for this model, used to handle text input.
        /// </summary>
        public Encoding Encoding { get; }

        private uint _generationThreads;
        private uint _batchThreads;

        /// <summary>
        /// Get or set the number of threads to use for generation
        /// </summary>
        public uint GenerationThreads
        {
            get => _generationThreads;
            set
            {
                _generationThreads = value;
                NativeHandle.SetThreads(_generationThreads, _batchThreads);
            }
        }

        /// <summary>
        /// Get or set the number of threads to use for batch processing
        /// </summary>
        public uint BatchThreads
        {
            get => _batchThreads;
            set
            {
                _batchThreads = value;
                NativeHandle.SetThreads(_generationThreads, _batchThreads);
            }
        }

        /// <summary>
        /// Create a new LLamaContext for the given LLamaWeights
        /// </summary>
        /// <param name="model"></param>
        /// <param name="params"></param>
        /// <param name="logger"></param>
        /// <exception cref="ObjectDisposedException"></exception>
        public LLamaContext(LLamaWeights model, IContextParams @params, ILogger? logger = null)
        {
            if (model.NativeHandle.IsClosed)
                throw new ObjectDisposedException("Cannot create context, model weights have been disposed");

            Params = @params;
            _logger = logger;
            Encoding = @params.Encoding;

            @params.ToLlamaContextParams(out var lparams);
            NativeHandle = SafeLLamaContextHandle.Create(model.NativeHandle, lparams);

            // These values cannot be queried back from llama.cpp, so store a copy of them here.
            _generationThreads = lparams.n_threads;
            _batchThreads = lparams.n_threads_batch;
        }
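
        // Usage sketch. Assumes LLamaWeights.LoadFromFile and the ModelParams class
        // from LLama.Common (which implements IContextParams); the model path is
        // hypothetical:
        //
        //   var parameters = new ModelParams("path/to/model.gguf") { ContextSize = 2048 };
        //   using var weights = LLamaWeights.LoadFromFile(parameters);
        //   using var context = new LLamaContext(weights, parameters);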

        /// <summary>
        /// Set the seed for the RNG
        /// </summary>
        /// <param name="seed"></param>
        public void SetSeed(uint seed)
        {
            NativeHandle.SetSeed(seed);
        }

        /// <summary>
        /// Tokenize a string.
        /// </summary>
        /// <param name="text"></param>
        /// <param name="addBos">Whether to prepend a BOS (beginning-of-sequence) token to the text.</param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext.</param>
        /// <returns></returns>
        public LLamaToken[] Tokenize(string text, bool addBos = true, bool special = false)
        {
            return NativeHandle.Tokenize(text, addBos, special, Encoding);
        }
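
        // For example, tokenizing a prompt with the context's configured encoding
        // (the resulting ids depend entirely on the loaded model's vocabulary):
        //
        //   LLamaToken[] tokens = context.Tokenize("Hello, world!");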

        /// <summary>
        /// Detokenize the tokens to text.
        /// </summary>
        /// <param name="tokens"></param>
        /// <returns></returns>
        [Obsolete("Use a `StreamingTokenDecoder` instead")]
        public string DeTokenize(IReadOnlyList<LLamaToken> tokens)
        {
            // Do **not** use this method as an example of how to correctly use the StreamingTokenDecoder!
            // A decoder should be kept alive for the entire time you are decoding one stream of tokens.
            var decoder = new StreamingTokenDecoder(this);
            decoder.AddRange(tokens);
            return decoder.Read();
        }
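
        // The correct pattern, per the warning above, is one decoder kept alive for the
        // whole token stream (a sketch; `tokenStream` is a hypothetical token source,
        // and this assumes StreamingTokenDecoder.Add accepts a single LLamaToken):
        //
        //   var decoder = new StreamingTokenDecoder(context);
        //   foreach (var token in tokenStream)
        //   {
        //       decoder.Add(token);
        //       Console.Write(decoder.Read()); // Read() returns the newly decoded text and clears the buffer
        //   }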

        /// <summary>
        /// Save the state to a specified path.
        /// </summary>
        /// <param name="filename"></param>
        public void SaveState(string filename)
        {
            // Delete the file before overwriting it
            if (File.Exists(filename))
                File.Delete(filename);

            // Estimate the size of the state to write to disk; this is always equal to or greater than the actual size
            var estimatedStateSize = (long)NativeApi.llama_get_state_size(NativeHandle);

            // Map the file and write the bytes directly to it. This avoids copying the bytes into a C# array
            long writtenBytes;
            using (var file = MemoryMappedFile.CreateFromFile(filename, FileMode.Create, null, estimatedStateSize))
            using (var view = file.CreateViewAccessor(0, estimatedStateSize))
            {
                unsafe
                {
                    byte* ptr = null;
                    view.SafeMemoryMappedViewHandle.AcquirePointer(ref ptr);
                    writtenBytes = (long)NativeApi.llama_copy_state_data(NativeHandle, ptr);
                    view.SafeMemoryMappedViewHandle.ReleasePointer();
                }
            }

            // Truncate the file to the actual size of data that was written
            using (var fileStream = new FileStream(filename, FileMode.Open))
                fileStream.SetLength(writtenBytes);
        }

        /// <summary>
        /// Get the state data as an opaque handle, which can be loaded later using <see cref="LoadState(State)"/>
        /// </summary>
        /// <remarks>Use <see cref="SaveState"/> if you intend to save this state to disk.</remarks>
        /// <returns></returns>
        public State GetState()
        {
            var stateSize = NativeHandle.GetStateSize();

            // Allocate a chunk of memory large enough to hold the entire state
            var memory = Marshal.AllocHGlobal((nint)stateSize);
            try
            {
                // Copy the state data into memory, discover the actual size required
                var actualSize = NativeHandle.GetState(memory, stateSize);

                // Shrink to size
                memory = Marshal.ReAllocHGlobal(memory, (nint)actualSize);

                // Wrap memory in a "state"
                var state = new State(memory);

                // Set memory to zero, to prevent it being freed in the finally block
                memory = IntPtr.Zero;

                return state;
            }
            finally
            {
                if (memory != IntPtr.Zero)
                    Marshal.FreeHGlobal(memory);
            }
        }

        /// <summary>
        /// Load the state from a specified path.
        /// </summary>
        /// <param name="filename"></param>
        /// <exception cref="RuntimeError"></exception>
        public void LoadState(string filename)
        {
            // Map the state file into memory and pass the pointer directly to `llama_set_state_data` to load from it
            using (var file = MemoryMappedFile.CreateFromFile(filename, FileMode.Open, null))
            using (var view = file.CreateViewAccessor())
            {
                unsafe
                {
                    byte* ptr = null;
                    view.SafeMemoryMappedViewHandle.AcquirePointer(ref ptr);
                    NativeApi.llama_set_state_data(NativeHandle, ptr);
                    view.SafeMemoryMappedViewHandle.ReleasePointer();
                }
            }
        }
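
        // Round-tripping state through disk (a sketch; "state.bin" is a hypothetical path):
        //
        //   context.SaveState("state.bin");   // snapshot the KV cache and other context state
        //   context.LoadState("state.bin");   // restore it later, on a context with the same model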

        /// <summary>
        /// Load the state from memory.
        /// </summary>
        /// <param name="state"></param>
        /// <exception cref="RuntimeError"></exception>
        public void LoadState(State state)
        {
            unsafe
            {
                NativeHandle.SetState((byte*)state.DangerousGetHandle().ToPointer());
            }
        }
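
        // In-memory snapshot and rollback, using only members defined in this class
        // (a sketch). State is a SafeHandle, so dispose it when done:
        //
        //   using (var snapshot = context.GetState())
        //   {
        //       // ... run some inference, then roll the context back:
        //       context.LoadState(snapshot);
        //   }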

        /// <summary>
        /// Sample a single token from this context, using the given sampling pipeline
        /// </summary>
        /// <param name="pipeline">The pipeline to use to process the logits and to select a token</param>
        /// <param name="lastTokens">The tokens recently returned from the model</param>
        /// <returns>The selected token</returns>
        public LLamaToken Sample(ISamplingPipeline pipeline, ReadOnlySpan<LLamaToken> lastTokens)
        {
            var token = pipeline.Sample(NativeHandle, NativeHandle.GetLogits(), lastTokens);
            pipeline.Accept(NativeHandle, token);
            return token;
        }
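
        // Sketch of driving this overload with a concrete pipeline. Assumes the
        // DefaultSamplingPipeline from the LLama.Sampling namespace; any
        // ISamplingPipeline implementation works the same way:
        //
        //   var pipeline = new DefaultSamplingPipeline { Temperature = 0.8f };
        //   LLamaToken token = context.Sample(pipeline, lastTokens);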

        /// <summary>
        /// Perform sampling. Don't use this unless you fully understand what it does.
        /// </summary>
        /// <param name="candidates"></param>
        /// <param name="mirostat_mu"></param>
        /// <param name="temperature"></param>
        /// <param name="mirostat"></param>
        /// <param name="mirostatTau"></param>
        /// <param name="mirostatEta"></param>
        /// <param name="topK"></param>
        /// <param name="topP"></param>
        /// <param name="tfsZ"></param>
        /// <param name="typicalP"></param>
        /// <param name="grammar"></param>
        /// <param name="minP"></param>
        /// <returns></returns>
        public LLamaToken Sample(LLamaTokenDataArray candidates, ref float? mirostat_mu, float temperature, MirostatType mirostat,
                                 float mirostatTau, float mirostatEta, int topK, float topP, float tfsZ, float typicalP,
                                 SafeLLamaGrammarHandle? grammar, float minP)
        {
            LLamaToken id;

            if (grammar != null)
            {
                candidates.ApplyGrammar(NativeHandle, grammar);
            }

            if (temperature <= 0)
            {
                // Greedy sampling
                id = candidates.SampleTokenGreedy(NativeHandle);
            }
            else
            {
                var mu = mirostat_mu ?? (2 * mirostatTau);

                if (mirostat == MirostatType.Mirostat)
                {
                    const int mirostat_m = 100;
                    candidates.Temperature(NativeHandle, temperature);
                    id = candidates.SampleTokenMirostat(NativeHandle, mirostatTau, mirostatEta, mirostat_m, ref mu);
                }
                else if (mirostat == MirostatType.Mirostat2)
                {
                    candidates.Temperature(NativeHandle, temperature);
                    id = candidates.SampleTokenMirostat2(NativeHandle, mirostatTau, mirostatEta, ref mu);
                }
                else
                {
                    candidates.TopK(NativeHandle, topK);
                    candidates.TailFree(NativeHandle, tfsZ);
                    candidates.LocallyTypical(NativeHandle, typicalP);
                    candidates.TopP(NativeHandle, topP);
                    candidates.MinP(NativeHandle, minP);
                    candidates.Temperature(NativeHandle, temperature);
                    id = candidates.SampleToken(NativeHandle);
                }

                mirostat_mu = mu;
            }

            grammar?.AcceptToken(NativeHandle, id);
            return id;
        }

        /// <summary>
        /// Apply penalties to the given tokens. Don't use this unless you fully understand what it does.
        /// </summary>
        /// <param name="logits_i"></param>
        /// <param name="lastTokens"></param>
        /// <param name="logitBias"></param>
        /// <param name="repeatLastTokensCount"></param>
        /// <param name="repeatPenalty"></param>
        /// <param name="alphaFrequency"></param>
        /// <param name="alphaPresence"></param>
        /// <param name="penalizeNL"></param>
        /// <returns></returns>
        public LLamaTokenDataArray ApplyPenalty(int logits_i, IEnumerable<LLamaToken> lastTokens, Dictionary<LLamaToken, float>? logitBias = null,
                                                int repeatLastTokensCount = 64, float repeatPenalty = 1.1f, float alphaFrequency = .0f, float alphaPresence = .0f,
                                                bool penalizeNL = true)
        {
            var logits = NativeHandle.GetLogitsIth(logits_i);

            // Apply params.logit_bias map
            if (logitBias is not null)
            {
                foreach (var (key, value) in logitBias)
                    logits[(int)key] += value;
            }

            // Save the newline logit value
            var nl_token = NativeApi.llama_token_nl(NativeHandle.ModelHandle);
            var nl_logit = logits[(int)nl_token];

            // Convert logits into token candidates
            var candidates_p = LLamaTokenDataArray.Create(logits);

            // Extract the most recently returned tokens
            var last_n_repeat = Math.Min((int)ContextSize, repeatLastTokensCount);
            var last_n_array = lastTokens.TakeLast(last_n_repeat).ToArray();

            // Apply penalties to candidates
            candidates_p.RepetitionPenalty(NativeHandle, last_n_array, repeatPenalty, alphaFrequency, alphaPresence);

            // Restore the newline token logit value if necessary
            if (!penalizeNL)
            {
                var candidatesSpan = candidates_p.data.Span;
                for (var i = 0; i < candidates_p.data.Length; i++)
                {
                    ref var item = ref candidatesSpan[i];
                    if (item.id == nl_token)
                        item.logit = nl_logit;
                }
                candidates_p.sorted = false;
            }

            return candidates_p;
        }
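
        // Sketch of the legacy sampling flow these two methods support, with mirostat
        // disabled (the MirostatType.Disable member name is assumed; the numeric values
        // are illustrative defaults):
        //
        //   float? mu = null;
        //   var candidates = context.ApplyPenalty(0, lastTokens);
        //   var id = context.Sample(candidates, ref mu, 0.8f, MirostatType.Disable,
        //       mirostatTau: 5f, mirostatEta: 0.1f, topK: 40, topP: 0.95f,
        //       tfsZ: 1f, typicalP: 1f, grammar: null, minP: 0.05f);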

        #region eval overloads
        /// <summary>
        /// Process a batch of tokens with the model.
        /// </summary>
        /// <param name="batch"></param>
        public DecodeResult Decode(LLamaBatch batch)
        {
            if (batch.TokenCount == 0)
                return 0;
            if (batch.TokenCount > Params.BatchSize)
                throw new ArgumentException("Input contains more tokens than configured batch size", nameof(batch));

            return (DecodeResult)NativeHandle.Decode(batch);
        }
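
        // Feeding a whole prompt in one batch (a sketch; assumes LLamaBatch.Add(token,
        // position, sequence, logits) and LLamaSeqId.Zero from LLama.Native, requesting
        // logits only for the final token):
        //
        //   var batch = new LLamaBatch();
        //   var tokens = context.Tokenize(prompt);
        //   for (var i = 0; i < tokens.Length; i++)
        //       batch.Add(tokens[i], i, LLamaSeqId.Zero, logits: i == tokens.Length - 1);
        //   var result = context.Decode(batch);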

        /// <summary>
        /// Process a batch of tokens with the model, asynchronously.
        /// </summary>
        /// <param name="batch"></param>
        /// <param name="cancellationToken"></param>
        public Task<DecodeResult> DecodeAsync(LLamaBatch batch, CancellationToken cancellationToken = default)
        {
            return Task.Run(() => Decode(batch), cancellationToken);
        }
        #endregion

        /// <inheritdoc />
        public void Dispose()
        {
            NativeHandle.Dispose();
        }

        /// <summary>
        /// The state of this model, which can be reloaded later
        /// </summary>
        public class State
            : SafeLLamaHandleBase
        {
            internal State(IntPtr memory)
                : base(memory, true)
            {
            }

            /// <inheritdoc />
            protected override bool ReleaseHandle()
            {
                Marshal.FreeHGlobal(handle);
                return true;
            }
        }
    }
}