NativeApi.cs
using System;
using System.Runtime.InteropServices;

#pragma warning disable IDE1006 // Naming Styles

namespace LLama.Native
{
    /// <summary>
    /// Callback from llama.cpp with log messages
    /// </summary>
    /// <param name="level"></param>
    /// <param name="message"></param>
    public delegate void LLamaLogCallback(LLamaLogLevel level, string message);

    /// <summary>
    /// Direct translation of the llama.cpp API
    /// </summary>
    public static partial class NativeApi
    {
        /// <summary>
        /// A method that does nothing. Calling it will force the llama native dependencies to be loaded.
        /// </summary>
        /// <returns></returns>
        public static void llama_empty_call()
        {
            llama_max_devices();
        }
        /// <summary>
        /// Get the maximum number of devices supported by llama.cpp
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern long llama_max_devices();

        /// <summary>
        /// Create a LLamaModelParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelParams llama_model_default_params();

        /// <summary>
        /// Create a LLamaContextParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaContextParams llama_context_default_params();

        /// <summary>
        /// Create a LLamaModelQuantizeParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();

        /// <summary>
        /// Check if memory mapping is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_supports_mmap();

        /// <summary>
        /// Check if memory locking is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_supports_mlock();

        /// <summary>
        /// Check if GPU offload is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_supports_gpu_offload();

        /// <summary>
        /// Initialize the llama + ggml backend
        /// Call once at the start of the program
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        private static extern void llama_backend_init();

        // Note: this is not implemented because we don't have a definition for `ggml_numa_strategy` in C#. That definition doesn't
        // exist because it's not in llama.h, it's in ggml.h which we don't currently build a wrapper for. If there's demand
        // for better NUMA support that will need adding.
        ///// <summary>
        ///// Optional, enable NUMA optimisations
        ///// </summary>
        //[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        //public static extern void llama_numa_init(ggml_numa_strategy numa);

        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, uint seed);

        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);

        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);
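
        // Illustrative usage sketch (hypothetical helper, not part of the llama.cpp API): snapshot and
        // restore the full context state (rng, logits, embedding and kv_cache) through a managed byte
        // array, following the size/copy/set contract documented on the three state functions above.
        public static unsafe byte[] llama_example_save_state_sketch(SafeLLamaContextHandle ctx)
        {
            // llama_get_state_size() is an upper bound; llama_copy_state_data() reports the bytes actually written
            var buffer = new byte[llama_get_state_size(ctx)];
            fixed (byte* ptr = buffer)
            {
                var written = llama_copy_state_data(ctx, ptr);
                return buffer.AsSpan(0, (int)written).ToArray();
            }
        }

        // Illustrative usage sketch (hypothetical helper): restore a state previously captured above.
        public static unsafe void llama_example_load_state_sketch(SafeLLamaContextHandle ctx, byte[] state)
        {
            fixed (byte* ptr = state)
                llama_set_state_data(ctx, ptr);
        }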
        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens_out, ulong n_token_capacity, out ulong n_token_count_out);

        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens, ulong n_token_count);

        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe byte* llama_token_get_text(SafeLlamaModelHandle model, LLamaToken token);

        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float llama_token_get_score(SafeLlamaModelHandle model, LLamaToken token);

        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaTokenType llama_token_get_type(SafeLlamaModelHandle model, LLamaToken token);

        /// <summary>
        /// Get the size of the context window for the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern uint llama_n_ctx(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the batch size for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern uint llama_n_batch(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Token logits obtained from the last call to llama_decode
        /// The logits for the last token are stored in the last row
        /// Can be mutated in order to change the probabilities of the next token.<br />
        /// Rows: n_tokens<br />
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe float* llama_get_logits(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Logits for the ith token. Equivalent to: llama_get_logits(ctx) + i*n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="i"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe float* llama_get_logits_ith(SafeLLamaContextHandle ctx, int i);
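
        // Illustrative usage sketch (hypothetical helper, not part of the llama.cpp API): view the logits
        // for token `i` of the last llama_decode() call as a managed span. `vocabCount` (n_vocab) is
        // assumed to be obtained elsewhere, e.g. from the model metadata.
        public static unsafe Span<float> llama_example_logits_ith_sketch(SafeLLamaContextHandle ctx, int i, int vocabCount)
        {
            float* logits = llama_get_logits_ith(ctx, i);

            // One row of n_vocab values; mutating it changes the probabilities of the next sampled token
            return new Span<float>(logits, vocabCount);
        }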
        /// <summary>
        /// Get the embeddings for the ith sequence. Equivalent to: llama_get_embeddings(ctx) + i*n_embd
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe float* llama_get_embeddings_ith(SafeLLamaContextHandle ctx, int i);

        /// <summary>
        /// Get the embeddings for the input
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        public static Span<float> llama_get_embeddings(SafeLLamaContextHandle ctx)
        {
            unsafe
            {
                var ptr = llama_get_embeddings_native(ctx);
                return new Span<float>(ptr, ctx.EmbeddingSize);
            }

            [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_get_embeddings")]
            static extern unsafe float* llama_get_embeddings_native(SafeLLamaContextHandle ctx);
        }
        /// <summary>
        /// Apply chat template. Inspired by hf apply_chat_template() on python.
        /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
        /// NOTE: This function does not use a jinja parser. It only supports a pre-defined list of templates. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
        /// </summary>
        /// <param name="model"></param>
        /// <param name="tmpl">A Jinja template to use for this chat. If this is nullptr, the model's default chat template will be used instead.</param>
        /// <param name="chat">Pointer to a list of multiple llama_chat_message</param>
        /// <param name="n_msg">Number of llama_chat_message in this chat</param>
        /// <param name="add_ass">Whether to end the prompt with the token(s) that indicate the start of an assistant message.</param>
        /// <param name="buf">A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)</param>
        /// <param name="length">The size of the allocated buffer</param>
        /// <returns>The total number of bytes of the formatted prompt. If it is larger than the size of buffer, you may need to re-alloc it and then re-apply the template.</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe int llama_chat_apply_template(SafeLlamaModelHandle model, char* tmpl, LLamaChatMessage* chat, nint n_msg, bool add_ass, char* buf, int length);
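
        // Illustrative usage sketch (hypothetical helper, not part of the llama.cpp API): the
        // "re-alloc and re-apply" pattern described in the <returns> note above. The template and
        // message array are assumed to have been marshalled to native memory by the caller, and
        // `output` should be pre-sized (e.g. 2x the total message characters, as recommended).
        public static unsafe int llama_example_chat_template_sketch(SafeLlamaModelHandle model, char* tmpl, LLamaChatMessage* chat, nint n_msg, bool add_ass, ref char[] output)
        {
            int needed;
            fixed (char* buf = output)
                needed = llama_chat_apply_template(model, tmpl, chat, n_msg, add_ass, buf, output.Length);

            if (needed <= output.Length)
                return needed;

            // The buffer was too small: grow it to the reported size and apply the template again
            output = new char[needed];
            fixed (char* buf = output)
                return llama_chat_apply_template(model, tmpl, chat, n_msg, add_ass, buf, output.Length);
        }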
        /// <summary>
        /// Get the "Beginning of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaToken llama_token_bos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "End of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaToken llama_token_eos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "new line" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaToken llama_token_nl(SafeLlamaModelHandle model);

        /// <summary>
        /// Returns -1 if unknown, 1 for true or 0 for false.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_add_bos_token(SafeLlamaModelHandle model);

        /// <summary>
        /// Returns -1 if unknown, 1 for true or 0 for false.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_add_eos_token(SafeLlamaModelHandle model);

        /// <summary>
        /// codellama infill tokens, Beginning of infill prefix
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_prefix(SafeLlamaModelHandle model);

        /// <summary>
        /// codellama infill tokens, Beginning of infill middle
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_middle(SafeLlamaModelHandle model);

        /// <summary>
        /// codellama infill tokens, Beginning of infill suffix
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_suffix(SafeLlamaModelHandle model);

        /// <summary>
        /// codellama infill tokens, End of infill middle
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_eot(SafeLlamaModelHandle model);

        /// <summary>
        /// Print out timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Reset all collected timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_print_system_info();

        /// <summary>
        /// Convert a single token into text
        /// </summary>
        /// <param name="model"></param>
        /// <param name="llamaToken"></param>
        /// <param name="buffer">buffer to write string into</param>
        /// <returns>The length written, or if the buffer is too small a negative that indicates the length required</returns>
        public static int llama_token_to_piece(SafeLlamaModelHandle model, LLamaToken llamaToken, Span<byte> buffer)
        {
            unsafe
            {
                fixed (byte* bufferPtr = buffer)
                {
                    return llama_token_to_piece_native(model, llamaToken, bufferPtr, buffer.Length);
                }
            }

            [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_token_to_piece")]
            static extern unsafe int llama_token_to_piece_native(SafeLlamaModelHandle model, LLamaToken llamaToken, byte* buffer, int length);
        }
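
        // Illustrative usage sketch (hypothetical helper, not part of the llama.cpp API): convert a token
        // to its raw UTF-8 bytes, using the negative-return convention of llama_token_to_piece() above to
        // size the buffer on demand.
        public static byte[] llama_example_token_bytes_sketch(SafeLlamaModelHandle model, LLamaToken token)
        {
            var buffer = new byte[16];
            var written = llama_token_to_piece(model, token, buffer);
            if (written < 0)
            {
                // A negative result is the required length: resize and convert again
                buffer = new byte[-written];
                written = llama_token_to_piece(model, token, buffer);
            }

            Array.Resize(ref buffer, written);
            return buffer;
        }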
        /// <summary>
        /// Convert text into tokens
        /// </summary>
        /// <param name="model"></param>
        /// <param name="text"></param>
        /// <param name="text_len"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern unsafe int llama_tokenize(SafeLlamaModelHandle model, byte* text, int text_len, LLamaToken* tokens, int n_max_tokens, bool add_bos, bool special);
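
        // Illustrative usage sketch (hypothetical helper, not part of the llama.cpp API): tokenize a
        // managed string by encoding it to UTF-8 and using the negative-return convention of
        // llama_tokenize() above to size the token buffer.
        public static unsafe LLamaToken[] llama_example_tokenize_sketch(SafeLlamaModelHandle model, string text, bool add_bos, bool special)
        {
            var bytes = System.Text.Encoding.UTF8.GetBytes(text);

            // One token per input byte (plus room for BOS) is a reasonable first guess at capacity
            var tokens = new LLamaToken[bytes.Length + (add_bos ? 1 : 0) + 1];
            fixed (byte* textPtr = bytes)
            fixed (LLamaToken* tokenPtr = tokens)
            {
                var count = llama_tokenize(model, textPtr, bytes.Length, tokenPtr, tokens.Length, add_bos, special);
                if (count < 0)
                {
                    // A negative result is the number of tokens that would have been produced: retry with that capacity
                    tokens = new LLamaToken[-count];
                    fixed (LLamaToken* retryPtr = tokens)
                        count = llama_tokenize(model, textPtr, bytes.Length, retryPtr, tokens.Length, add_bos, special);
                }

                return tokens.AsSpan(0, count).ToArray();
            }
        }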
        /// <summary>
        /// Register a callback to receive llama log messages
        /// </summary>
        /// <param name="logCallback"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_log_set(LLamaLogCallback logCallback);

        /// <summary>
        /// Clear the KV cache
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_clear(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Copy all tokens that belong to the specified sequence to another sequence
        /// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <param name="dest"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_cp(SafeLLamaContextHandle ctx, LLamaSeqId src, LLamaSeqId dest, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Removes all tokens that do not belong to the specified sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_keep(SafeLLamaContextHandle ctx, LLamaSeqId seq);

        /// <summary>
        /// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// If the KV cache is RoPEd, the KV data is updated accordingly:
        /// - lazily on next llama_decode()
        /// - explicitly with llama_kv_cache_update()
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        /// <param name="delta"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_add(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, int delta);

        /// <summary>
        /// Integer division of the positions by factor of `d > 1`
        /// If the KV cache is RoPEd, the KV data is updated accordingly:
        /// - lazily on next llama_decode()
        /// - explicitly with llama_kv_cache_update()
        /// <br />
        /// p0 &lt; 0 : [0, p1]
        /// <br />
        /// p1 &lt; 0 : [p0, inf)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        /// <param name="d"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_div(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, int d);

        /// <summary>
        /// Returns the largest position present in the KV cache for the specified sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaPos llama_kv_cache_seq_pos_max(SafeLLamaContextHandle ctx, LLamaSeqId seq);

        /// <summary>
        /// Defragment the KV cache. This will be applied:
        /// - lazily on next llama_decode()
        /// - explicitly with llama_kv_cache_update()
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaPos llama_kv_cache_defrag(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_update(SafeLLamaContextHandle ctx);
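
        // Illustrative usage sketch (hypothetical helper, not part of the llama.cpp API): shift a
        // sequence's positions and force the pending RoPE update to be applied immediately rather than
        // lazily on the next llama_decode(), as described on llama_kv_cache_seq_add() and llama_kv_cache_update().
        public static void llama_example_kv_shift_sketch(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, int delta)
        {
            // Add `delta` to every position of `seq` in [p0, p1)
            llama_kv_cache_seq_add(ctx, seq, p0, p1, delta);

            // Apply the K-shift now instead of waiting for the next decode
            llama_kv_cache_update(ctx);
        }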
        /// <summary>
        /// Allocates a batch of tokens on the heap
        /// Each token can be assigned up to n_seq_max sequence ids
        /// The batch has to be freed with llama_batch_free()
        /// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
        /// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
        /// The rest of the llama_batch members are allocated with size n_tokens
        /// All members are left uninitialized
        /// </summary>
        /// <param name="n_tokens"></param>
        /// <param name="embd"></param>
        /// <param name="n_seq_max">Each token can be assigned up to n_seq_max sequence ids</param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaNativeBatch llama_batch_init(int n_tokens, int embd, int n_seq_max);

        /// <summary>
        /// Frees a batch of tokens allocated with llama_batch_init()
        /// </summary>
        /// <param name="batch"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_batch_free(LLamaNativeBatch batch);
        /// <summary>
        /// Process a batch of tokens with the model
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="batch"></param>
        /// <returns>Positive return values do not mean a fatal error, but rather a warning:<br />
        /// - 0: success<br />
        /// - 1: could not find a KV slot for the batch (try reducing the size of the batch or increase the context)<br />
        /// - &lt; 0: error<br />
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_decode(SafeLLamaContextHandle ctx, LLamaNativeBatch batch);
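
        // Illustrative usage sketch (hypothetical helper, not part of the llama.cpp API): run a decode and
        // translate its return codes as documented above. The batch is assumed to have been allocated with
        // llama_batch_init() and filled by the caller.
        public static void llama_example_decode_sketch(SafeLLamaContextHandle ctx, LLamaNativeBatch batch)
        {
            var result = llama_decode(ctx, batch);
            if (result < 0)
                throw new InvalidOperationException($"llama_decode failed with code {result}");
            if (result > 0)
                throw new InvalidOperationException("Could not find a KV slot for the batch - try reducing the batch size or increasing the context size");

            // result == 0: success; logits for the batch are now available via llama_get_logits()
        }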
        /// <summary>
        /// Set the number of threads used for decoding
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="n_threads">n_threads is the number of threads used for generation (single token)</param>
        /// <param name="n_threads_batch">n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)</param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_n_threads(SafeLLamaContextHandle ctx, uint n_threads, uint n_threads_batch);

        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaVocabType llama_vocab_type(SafeLlamaModelHandle model);

        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaRopeType llama_rope_type(SafeLlamaModelHandle model);
    }
}