
NativeApi.cs 25 kB

using System;
using System.Runtime.InteropServices;

#pragma warning disable IDE1006 // Naming Styles

namespace LLama.Native
{
    /// <summary>
    /// Callback from llama.cpp with log messages
    /// </summary>
    /// <param name="level"></param>
    /// <param name="message"></param>
    public delegate void LLamaLogCallback(LLamaLogLevel level, string message);

    /// <summary>
    /// Direct translation of the llama.cpp API
    /// </summary>
    public static partial class NativeApi
    {
        /// <summary>
        /// A method that does nothing. Calling it will force the llama native dependencies to be loaded.
        /// </summary>
        public static void llama_empty_call()
        {
            llama_max_devices();
        }

        [DllImport("kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
        private static extern int AddDllDirectory(string NewDirectory);
        /// <summary>
        /// Get the maximum number of devices supported by llama.cpp
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern long llama_max_devices();

        /// <summary>
        /// Create a LLamaModelParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelParams llama_model_default_params();

        /// <summary>
        /// Create a LLamaContextParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaContextParams llama_context_default_params();

        /// <summary>
        /// Create a LLamaModelQuantizeParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();

        /// <summary>
        /// Check if memory mapping is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_supports_mmap();

        /// <summary>
        /// Check if memory locking is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_supports_mlock();

        /// <summary>
        /// Check if GPU offload is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_supports_gpu_offload();

        /// <summary>
        /// Initialize the llama + ggml backend.
        /// Call once at the start of the program.
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        private static extern void llama_backend_init();

        // Note: this is not implemented because we don't have a definition for `ggml_numa_strategy` in C#. That definition doesn't
        // exist because it's not in llama.h, it's in ggml.h which we don't currently build a wrapper for. If there's demand
        // for better NUMA support that will need adding.
        ///// <summary>
        ///// Optional, enable NUMA optimisations
        ///// </summary>
        //[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        //public static extern void llama_numa_init(ggml_numa_strategy numa);

        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, uint seed);

        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);

        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);
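
        // Usage sketch (illustrative only, not part of this file): round-tripping
        // context state through a managed buffer, using the size query to allocate
        // before copying. Assumes `ctx` is a valid SafeLLamaContextHandle obtained
        // elsewhere.
        //
        //     var size = NativeApi.llama_get_state_size(ctx);
        //     var buffer = new byte[size];
        //     unsafe
        //     {
        //         fixed (byte* ptr = buffer)
        //         {
        //             var written = NativeApi.llama_copy_state_data(ctx, ptr); // save state out
        //             var read = NativeApi.llama_set_state_data(ctx, ptr);     // restore it later
        //         }
        //     }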
        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens_out, ulong n_token_capacity, out ulong n_token_count_out);

        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens, ulong n_token_count);
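
        // Usage sketch (illustrative only): restoring a prompt cache from disk and
        // writing it back. `ctx` and `path` are assumed to be valid; the token
        // capacity here is simply sized from the context window.
        //
        //     var tokens = new LLamaToken[NativeApi.llama_n_ctx(ctx)];
        //     if (NativeApi.llama_load_session_file(ctx, path, tokens, (ulong)tokens.Length, out var count))
        //     {
        //         // `count` tokens were restored into `tokens`
        //     }
        //     NativeApi.llama_save_session_file(ctx, path, tokens, count);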
        /// <summary>
        /// Get the text for a single token, as a pointer to a null terminated UTF-8 string
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe byte* llama_token_get_text(SafeLlamaModelHandle model, LLamaToken token);

        /// <summary>
        /// Get the score for a single token
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float llama_token_get_score(SafeLlamaModelHandle model, LLamaToken token);

        /// <summary>
        /// Get the type of a single token
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaTokenType llama_token_get_type(SafeLlamaModelHandle model, LLamaToken token);

        /// <summary>
        /// Get the size of the context window for the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern uint llama_n_ctx(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the batch size for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern uint llama_n_batch(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Token logits obtained from the last call to llama_decode.
        /// The logits for the last token are stored in the last row.
        /// Can be mutated in order to change the probabilities of the next token.<br />
        /// Rows: n_tokens<br />
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe float* llama_get_logits(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Logits for the ith token. Equivalent to: llama_get_logits(ctx) + i*n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="i"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe float* llama_get_logits_ith(SafeLLamaContextHandle ctx, int i);
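
        // Usage sketch (illustrative only): wrapping the raw logits pointer in a
        // Span to read or bias next-token probabilities. Assumes `ctx` is valid,
        // `i` is the index of the token of interest within the last batch and
        // `n_vocab` is the model's vocabulary size.
        //
        //     unsafe
        //     {
        //         float* logits = NativeApi.llama_get_logits_ith(ctx, i);
        //         var span = new Span<float>(logits, n_vocab);
        //         span[someTokenId] += 1.0f; // logits may be mutated in place
        //     }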
        /// <summary>
        /// Get the embeddings for the ith sequence. Equivalent to: llama_get_embeddings(ctx) + i*n_embd
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe float* llama_get_embeddings_ith(SafeLLamaContextHandle ctx, int i);

        /// <summary>
        /// Get the embeddings for the input
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        public static Span<float> llama_get_embeddings(SafeLLamaContextHandle ctx)
        {
            unsafe
            {
                var ptr = llama_get_embeddings_native(ctx);
                return new Span<float>(ptr, ctx.EmbeddingSize);
            }

            [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_get_embeddings")]
            static extern unsafe float* llama_get_embeddings_native(SafeLLamaContextHandle ctx);
        }
        /// <summary>
        /// Apply chat template. Inspired by hf apply_chat_template() in Python.
        /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model".
        /// NOTE: This function does not use a jinja parser. It only supports a pre-defined list of templates. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
        /// </summary>
        /// <param name="model"></param>
        /// <param name="tmpl">A Jinja template to use for this chat. If this is nullptr, the model's default chat template will be used instead.</param>
        /// <param name="chat">Pointer to a list of multiple llama_chat_message</param>
        /// <param name="n_msg">Number of llama_chat_message in this chat</param>
        /// <param name="add_ass">Whether to end the prompt with the token(s) that indicate the start of an assistant message.</param>
        /// <param name="buf">A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)</param>
        /// <param name="length">The size of the allocated buffer</param>
        /// <returns>The total number of bytes of the formatted prompt. If it is larger than the size of buffer, you may need to re-alloc it and then re-apply the template.</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe int llama_chat_apply_template(SafeLlamaModelHandle model, char* tmpl, LLamaChatMessage* chat, nint n_msg, bool add_ass, char* buf, int length);
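
        // Usage sketch (illustrative only): the return value is the required size,
        // so a result larger than the buffer means the call must be retried with a
        // bigger allocation. `model`, `messages` (a LLamaChatMessage[]) and
        // `charCount` are assumed to be set up by the caller.
        //
        //     unsafe
        //     {
        //         var buf = new char[2 * charCount]; // recommended starting size
        //         fixed (char* bufPtr = buf)
        //         fixed (LLamaChatMessage* chatPtr = messages)
        //         {
        //             var needed = NativeApi.llama_chat_apply_template(model, null, chatPtr, messages.Length, true, bufPtr, buf.Length);
        //             if (needed > buf.Length)
        //             {
        //                 // re-alloc `buf` to `needed` and apply the template again
        //             }
        //         }
        //     }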
        /// <summary>
        /// Get the "Beginning of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaToken llama_token_bos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "End of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaToken llama_token_eos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "new line" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaToken llama_token_nl(SafeLlamaModelHandle model);

        /// <summary>
        /// Returns -1 if unknown, 1 for true or 0 for false.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_add_bos_token(SafeLlamaModelHandle model);

        /// <summary>
        /// Returns -1 if unknown, 1 for true or 0 for false.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_add_eos_token(SafeLlamaModelHandle model);

        /// <summary>
        /// codellama infill tokens, Beginning of infill prefix
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_prefix(SafeLlamaModelHandle model);

        /// <summary>
        /// codellama infill tokens, Beginning of infill middle
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_middle(SafeLlamaModelHandle model);

        /// <summary>
        /// codellama infill tokens, Beginning of infill suffix
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_suffix(SafeLlamaModelHandle model);

        /// <summary>
        /// codellama infill tokens, End of infill middle
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_eot(SafeLlamaModelHandle model);
        /// <summary>
        /// Print out timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Reset all collected timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_print_system_info();
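
        // Usage sketch (illustrative only): the native call returns a pointer to a
        // null terminated string, so the IntPtr must be marshalled into a managed
        // string by the caller.
        //
        //     var info = Marshal.PtrToStringAnsi(NativeApi.llama_print_system_info());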
        /// <summary>
        /// Convert a single token into text
        /// </summary>
        /// <param name="model"></param>
        /// <param name="llamaToken"></param>
        /// <param name="buffer">buffer to write string into</param>
        /// <returns>The length written, or, if the buffer is too small, a negative number indicating the length required</returns>
        public static int llama_token_to_piece(SafeLlamaModelHandle model, LLamaToken llamaToken, Span<byte> buffer)
        {
            unsafe
            {
                fixed (byte* bufferPtr = buffer)
                {
                    return llama_token_to_piece_native(model, llamaToken, bufferPtr, buffer.Length);
                }
            }

            [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_token_to_piece")]
            static extern unsafe int llama_token_to_piece_native(SafeLlamaModelHandle model, LLamaToken llamaToken, byte* buffer, int length);
        }
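
        // Usage sketch (illustrative only): a negative return value reports the
        // required length, so the call can be retried with an exactly-sized buffer.
        // `model` and `token` are assumed to be valid.
        //
        //     Span<byte> buffer = stackalloc byte[16];
        //     var length = NativeApi.llama_token_to_piece(model, token, buffer);
        //     if (length < 0)
        //     {
        //         buffer = new byte[-length];
        //         length = NativeApi.llama_token_to_piece(model, token, buffer);
        //     }
        //     var text = System.Text.Encoding.UTF8.GetString(buffer.Slice(0, length));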
        /// <summary>
        /// Convert text into tokens
        /// </summary>
        /// <param name="model"></param>
        /// <param name="text"></param>
        /// <param name="text_len"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe int llama_tokenize(SafeLlamaModelHandle model, byte* text, int text_len, LLamaToken* tokens, int n_max_tokens, bool add_bos, bool special);
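
        // Usage sketch (illustrative only): two-pass tokenization, sizing the token
        // buffer from the negative return value of a deliberately undersized first
        // call. `model` and `textBytes` (UTF-8 encoded input) are assumed to be
        // valid; the zero-capacity probe call is an assumption of this sketch.
        //
        //     unsafe
        //     {
        //         fixed (byte* textPtr = textBytes)
        //         {
        //             var count = NativeApi.llama_tokenize(model, textPtr, textBytes.Length, null, 0, true, false);
        //             var tokens = new LLamaToken[-count];
        //             fixed (LLamaToken* tokenPtr = tokens)
        //             {
        //                 count = NativeApi.llama_tokenize(model, textPtr, textBytes.Length, tokenPtr, tokens.Length, true, false);
        //             }
        //         }
        //     }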
        /// <summary>
        /// Register a callback to receive llama log messages
        /// </summary>
        /// <param name="logCallback"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_log_set(LLamaLogCallback logCallback);

        /// <summary>
        /// Clear the KV cache
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_clear(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Copy all tokens that belong to the specified sequence to another sequence.
        /// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <param name="dest"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_cp(SafeLLamaContextHandle ctx, LLamaSeqId src, LLamaSeqId dest, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Removes all tokens that do not belong to the specified sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_keep(SafeLLamaContextHandle ctx, LLamaSeqId seq);

        /// <summary>
        /// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1).
        /// If the KV cache is RoPEd, the KV data is updated accordingly:
        /// - lazily on next llama_decode()
        /// - explicitly with llama_kv_cache_update()
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        /// <param name="delta"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_add(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, int delta);

        /// <summary>
        /// Integer division of the positions by factor of `d > 1`.
        /// If the KV cache is RoPEd, the KV data is updated accordingly:
        /// - lazily on next llama_decode()
        /// - explicitly with llama_kv_cache_update()
        /// <br />
        /// p0 &lt; 0 : [0, p1]
        /// <br />
        /// p1 &lt; 0 : [p0, inf)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        /// <param name="d"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_div(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, int d);

        /// <summary>
        /// Returns the largest position present in the KV cache for the specified sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaPos llama_kv_cache_seq_pos_max(SafeLLamaContextHandle ctx, LLamaSeqId seq);
        /// <summary>
        /// Defragment the KV cache. This will be applied:
        /// - lazily on next llama_decode()
        /// - explicitly with llama_kv_cache_update()
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_defrag(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_update(SafeLLamaContextHandle ctx);
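
        // Usage sketch (illustrative only): a common "context shift" that discards
        // the oldest tokens of a sequence and shifts the remainder back, then
        // applies the pending K-shift. Assumes `ctx` and `seq` are valid,
        // `n_discard` > 0, and that LLamaPos converts from int; passing p1 = -1
        // to mean [p0, inf) follows the convention documented on seq_div above.
        //
        //     NativeApi.llama_kv_cache_seq_rm(ctx, seq, 0, n_discard);              // drop [0, n_discard)
        //     NativeApi.llama_kv_cache_seq_add(ctx, seq, n_discard, -1, -n_discard); // shift the rest back
        //     NativeApi.llama_kv_cache_update(ctx); // or let the next llama_decode apply it lazily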
        /// <summary>
        /// Allocates a batch of tokens on the heap.
        /// Each token can be assigned up to n_seq_max sequence ids.
        /// The batch has to be freed with llama_batch_free().
        /// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float).
        /// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token.
        /// The rest of the llama_batch members are allocated with size n_tokens.
        /// All members are left uninitialized.
        /// </summary>
        /// <param name="n_tokens"></param>
        /// <param name="embd"></param>
        /// <param name="n_seq_max">Each token can be assigned up to n_seq_max sequence ids</param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaNativeBatch llama_batch_init(int n_tokens, int embd, int n_seq_max);

        /// <summary>
        /// Frees a batch of tokens allocated with llama_batch_init()
        /// </summary>
        /// <param name="batch"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_batch_free(LLamaNativeBatch batch);
        /// <summary>
        /// Process a batch of tokens with the model
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="batch"></param>
        /// <returns>Positive return values do not mean a fatal error, but rather a warning:<br />
        /// - 0: success<br />
        /// - 1: could not find a KV slot for the batch (try reducing the size of the batch or increase the context)<br />
        /// - &lt; 0: error<br />
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_decode(SafeLLamaContextHandle ctx, LLamaNativeBatch batch);
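
        // Usage sketch (illustrative only): pairing llama_batch_init with
        // llama_batch_free and checking the llama_decode return codes documented
        // above. The batch-filling step is elided; field names follow llama.cpp's
        // llama_batch layout.
        //
        //     var batch = NativeApi.llama_batch_init(n_tokens: 512, embd: 0, n_seq_max: 1);
        //     try
        //     {
        //         // ... fill batch.token / batch.pos / batch.seq_id / batch.logits ...
        //         var rc = NativeApi.llama_decode(ctx, batch);
        //         if (rc == 1)
        //         {
        //             // no KV slot: reduce the batch size or increase the context
        //         }
        //         else if (rc < 0)
        //         {
        //             throw new InvalidOperationException($"llama_decode failed: {rc}");
        //         }
        //     }
        //     finally
        //     {
        //         NativeApi.llama_batch_free(batch);
        //     }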
        /// <summary>
        /// Set the number of threads used for decoding
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="n_threads">n_threads is the number of threads used for generation (single token)</param>
        /// <param name="n_threads_batch">n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)</param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_n_threads(SafeLLamaContextHandle ctx, uint n_threads, uint n_threads_batch);

        /// <summary>
        /// Get the vocabulary type used by the model
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaVocabType llama_vocab_type(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the RoPE type used by the model
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaRopeType llama_rope_type(SafeLlamaModelHandle model);
    }
}