
NativeApi.cs

using System;
using System.Buffers;
using System.Runtime.InteropServices;
using System.Text;
using LLama.Exceptions;

#pragma warning disable IDE1006 // Naming Styles

namespace LLama.Native
{
    using llama_token = Int32;

    /// <summary>
    /// Callback from llama.cpp with log messages
    /// </summary>
    /// <param name="level"></param>
    /// <param name="message"></param>
    public delegate void LLamaLogCallback(LLamaLogLevel level, string message);

    /// <summary>
    /// Direct translation of the llama.cpp API
    /// </summary>
    public unsafe partial class NativeApi
    {
        static NativeApi()
        {
            // Try to load a preferred library, based on CPU feature detection
            TryLoadLibrary();

            try
            {
                llama_empty_call();
            }
            catch (DllNotFoundException)
            {
                throw new RuntimeError("The native library cannot be found. This could be for one of the following reasons: \n" +
                                       "1. No LLamaSharp backend was installed. Please search for LLamaSharp.Backend and install one of them. \n" +
                                       "2. You are using a CPU-only device but installed the CUDA backend. Please install the CPU backend instead. \n" +
                                       "3. The backend is not compatible with your system's CUDA environment. Please check and fix it. If the environment is " +
                                       "not expected to change, consider building llama.cpp from source or submitting an issue to LLamaSharp.\n" +
                                       "4. One of the dependencies of the native library is missing.\n");
            }

            llama_backend_init(false);
        }
        /// <summary>
        /// Try to load libllama, using CPU feature detection to try and load a more specialised DLL if possible
        /// </summary>
        /// <returns>The library handle to unload later, or IntPtr.Zero if no library was loaded</returns>
        private static IntPtr TryLoadLibrary()
        {
#if NET6_0_OR_GREATER
            if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
            {
                // All of the Windows libraries, in order of preference
                return TryLoad("cu12.1.0/libllama.dll")
                    ?? TryLoad("cu11.7.1/libllama.dll")
#if NET8_0_OR_GREATER
                    ?? TryLoad("avx512/libllama.dll", System.Runtime.Intrinsics.X86.Avx512F.IsSupported)
#endif
                    ?? TryLoad("avx2/libllama.dll", System.Runtime.Intrinsics.X86.Avx2.IsSupported)
                    ?? TryLoad("avx/libllama.dll", System.Runtime.Intrinsics.X86.Avx.IsSupported)
                    ?? IntPtr.Zero;
            }

            if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
            {
                // All of the Linux libraries, in order of preference
                return TryLoad("cu12.1.0/libllama.so")
                    ?? TryLoad("cu11.7.1/libllama.so")
#if NET8_0_OR_GREATER
                    ?? TryLoad("avx512/libllama.so", System.Runtime.Intrinsics.X86.Avx512F.IsSupported)
#endif
                    ?? TryLoad("avx2/libllama.so", System.Runtime.Intrinsics.X86.Avx2.IsSupported)
                    ?? TryLoad("avx/libllama.so", System.Runtime.Intrinsics.X86.Avx.IsSupported)
                    ?? IntPtr.Zero;
            }

            if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
            {
                return IntPtr.Zero;
            }
#endif

            return IntPtr.Zero;

#if NET6_0_OR_GREATER
            // Try to load a DLL from the path if supported. Returns null if nothing is loaded.
            static IntPtr? TryLoad(string path, bool supported = true)
            {
                if (!supported)
                    return null;

                if (NativeLibrary.TryLoad(path, out var handle))
                    return handle;

                return null;
            }
#endif
        }
        private const string libraryName = "libllama";

        /// <summary>
        /// A method that does nothing. This is a native method, calling it will force the llama native dependencies to be loaded.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, EntryPoint = "llama_mmap_supported", CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_empty_call();

        /// <summary>
        /// Create a LLamaModelParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelParams llama_model_default_params();

        /// <summary>
        /// Create a LLamaContextParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaContextParams llama_context_default_params();

        /// <summary>
        /// Create a LLamaModelQuantizeParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();

        /// <summary>
        /// Check if memory mapping is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mmap_supported();

        /// <summary>
        /// Check if memory locking is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mlock_supported();

        /// <summary>
        /// Various functions for loading a ggml llama model.
        /// Allocate (almost) all memory needed for the model.
        /// Return NULL on failure
        /// </summary>
        /// <param name="path_model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
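        /// <example>
        /// A minimal illustrative sketch (not part of the original file) of the raw load/free
        /// lifecycle; the "model.gguf" path is hypothetical:
        /// <code>
        /// var mparams = NativeApi.llama_model_default_params();
        /// var model = NativeApi.llama_load_model_from_file("model.gguf", mparams);
        /// if (model == IntPtr.Zero)
        ///     throw new RuntimeError("Failed to load model");
        /// try
        /// {
        ///     // ... wrap the raw pointer in a SafeLlamaModelHandle and use it ...
        /// }
        /// finally
        /// {
        ///     NativeApi.llama_free_model(model);
        /// }
        /// </code>
        /// </example>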
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_load_model_from_file(string path_model, LLamaModelParams @params);

        /// <summary>
        /// Create a new llama_context with the given model.
        /// Return value should always be wrapped in SafeLLamaContextHandle!
        /// </summary>
        /// <param name="model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_new_context_with_model(SafeLlamaModelHandle model, LLamaContextParams @params);

        /// <summary>
        /// not great API - very likely to change.
        /// Initialize the llama + ggml backend
        /// Call once at the start of the program
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_backend_init(bool numa);

        /// <summary>
        /// Frees all allocated memory in the given llama_context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free(IntPtr ctx);

        /// <summary>
        /// Frees all allocated memory associated with a model
        /// </summary>
        /// <param name="model"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free_model(IntPtr model);

        /// <summary>
        /// Apply a LoRA adapter to a loaded model
        /// path_base_model is the path to a higher quality model to use as a base for
        /// the layers modified by the adapter. Can be NULL to use the current loaded model.
        /// The model needs to be reloaded before applying a new adapter, otherwise the adapter
        /// will be applied on top of the previous one
        /// </summary>
        /// <param name="model_ptr"></param>
        /// <param name="path_lora"></param>
        /// <param name="scale"></param>
        /// <param name="path_base_model"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_apply_lora_from_file(SafeLlamaModelHandle model_ptr, string path_lora, float scale, string? path_base_model, int n_threads);

        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, uint seed);

        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);

        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
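        /// <example>
        /// An illustrative sketch (not part of the original file) of a save/restore round trip,
        /// assuming "ctx" is an existing SafeLLamaContextHandle:
        /// <code>
        /// var size = NativeApi.llama_get_state_size(ctx);
        /// var buffer = new byte[size];
        /// fixed (byte* ptr = buffer)
        /// {
        ///     // Snapshot the context state (rng, logits, embedding, kv_cache)
        ///     var written = NativeApi.llama_copy_state_data(ctx, ptr);
        ///     // ... later, restore the snapshot ...
        ///     var read = NativeApi.llama_set_state_data(ctx, ptr);
        /// }
        /// </code>
        /// </example>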
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);

        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens_out, ulong n_token_capacity, ulong* n_token_count_out);

        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
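        /// <example>
        /// An illustrative sketch (not part of the original file), assuming "ctx" is an existing
        /// SafeLLamaContextHandle and "tokens" holds the evaluated tokens; the "session.bin" path
        /// is hypothetical:
        /// <code>
        /// NativeApi.llama_save_session_file(ctx, "session.bin", tokens, (ulong)tokens.Length);
        ///
        /// // Reload later:
        /// var loaded = new llama_token[tokens.Length];
        /// ulong count = 0;
        /// if (NativeApi.llama_load_session_file(ctx, "session.bin", loaded, (ulong)loaded.Length, &amp;count))
        ///     Console.WriteLine($"Loaded {count} tokens");
        /// </code>
        /// </example>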
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens, ulong n_token_count);

        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process
        /// n_past is the number of tokens to use from previous eval calls
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <returns>Returns 0 on success</returns>
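        /// <example>
        /// An illustrative sketch (not part of the original file) of feeding a prompt, assuming
        /// "ctx" is an existing SafeLLamaContextHandle and "promptTokens" was produced by
        /// llama_tokenize:
        /// <code>
        /// var n_past = 0;
        /// fixed (llama_token* ptr = promptTokens)
        /// {
        ///     if (NativeApi.llama_eval(ctx, ptr, promptTokens.Length, n_past) != 0)
        ///         throw new RuntimeError("llama_eval failed");
        ///     n_past += promptTokens.Length;
        /// }
        /// </code>
        /// </example>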
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_eval(SafeLLamaContextHandle ctx, llama_token* tokens, int n_tokens, int n_past);

        /// <summary>
        /// Convert the provided text into tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="encoding"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
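        /// <example>
        /// An illustrative sketch (not part of the original file), assuming "ctx" is an existing
        /// SafeLLamaContextHandle:
        /// <code>
        /// var tokens = new llama_token[64];
        /// var n = NativeApi.llama_tokenize(ctx, "Hello world", Encoding.UTF8, tokens, tokens.Length, add_bos: true, special: false);
        /// if (n &lt; 0)
        /// {
        ///     // Buffer too small: -n is the required token count, so retry with a larger array
        ///     tokens = new llama_token[-n];
        ///     n = NativeApi.llama_tokenize(ctx, "Hello world", Encoding.UTF8, tokens, tokens.Length, add_bos: true, special: false);
        /// }
        /// </code>
        /// </example>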
        public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, llama_token[] tokens, int n_max_tokens, bool add_bos, bool special)
        {
            // Calculate number of bytes in text and borrow an array that large (+1 for nul byte)
            var byteCount = encoding.GetByteCount(text);
            var array = ArrayPool<byte>.Shared.Rent(byteCount + 1);
            try
            {
                // Convert to bytes
                fixed (char* textPtr = text)
                fixed (byte* arrayPtr = array)
                {
                    encoding.GetBytes(textPtr, text.Length, arrayPtr, array.Length);
                }

                // Add a zero byte to the end to terminate the string
                array[byteCount] = 0;

                // Do the actual tokenization
                fixed (byte* arrayPtr = array)
                fixed (llama_token* tokensPtr = tokens)
                    return llama_tokenize(ctx.ModelHandle, arrayPtr, byteCount, tokensPtr, n_max_tokens, add_bos, special);
            }
            finally
            {
                ArrayPool<byte>.Shared.Return(array);
            }
        }

        /// <summary>
        /// Get the size of the context window for the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Token logits obtained from the last call to llama_eval()
        /// The logits for the last token are stored in the last row
        /// Can be mutated in order to change the probabilities of the next token.<br />
        /// Rows: n_tokens<br />
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
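        /// <example>
        /// An illustrative sketch (not part of the original file) of reading one row of logits
        /// (for a single-token eval this is the row for the last token), assuming "ctx" is an
        /// existing SafeLLamaContextHandle:
        /// <code>
        /// var n_vocab = NativeApi.llama_n_vocab(ctx.ModelHandle);
        /// var logits = new Span&lt;float&gt;(NativeApi.llama_get_logits(ctx), n_vocab);
        /// // The span can be mutated in place to bias the next-token probabilities
        /// </code>
        /// </example>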
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Logits for the ith token. Equivalent to: llama_get_logits(ctx) + i*n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="i"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits_ith(SafeLLamaContextHandle ctx, int i);
        /// <summary>
        /// Get the embeddings for the input
        /// shape: [n_embd] (1-dimensional)
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_embeddings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the "Beginning of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_bos(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the "End of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_eos(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the "new line" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_nl(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print out timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Reset all collected timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_print_system_info();

        /// <summary>
        /// Get the number of tokens in the model vocabulary
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_vocab(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the context window for the model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx_train(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the dimension of embedding vectors from this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_embd(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the model in bytes
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_model_size(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the number of parameters in this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_model_n_params(SafeLlamaModelHandle model);

        /// <summary>
        /// Convert a single token into text
        /// </summary>
        /// <param name="model"></param>
        /// <param name="llamaToken"></param>
        /// <param name="buffer">buffer to write string into</param>
        /// <param name="length">size of the buffer</param>
        /// <returns>The length written, or if the buffer is too small a negative number that indicates the length required</returns>
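        /// <example>
        /// An illustrative sketch (not part of the original file), assuming "model" is an existing
        /// SafeLlamaModelHandle and "token" is a valid token id:
        /// <code>
        /// var buffer = new byte[8];
        /// int written;
        /// fixed (byte* ptr = buffer)
        ///     written = NativeApi.llama_token_to_piece(model, token, ptr, buffer.Length);
        /// if (written &lt; 0)
        /// {
        ///     // Buffer too small: grow to the indicated size and retry
        ///     buffer = new byte[-written];
        ///     fixed (byte* ptr = buffer)
        ///         written = NativeApi.llama_token_to_piece(model, token, ptr, buffer.Length);
        /// }
        /// var piece = Encoding.UTF8.GetString(buffer, 0, written);
        /// </code>
        /// </example>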
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_to_piece(SafeLlamaModelHandle model, int llamaToken, byte* buffer, int length);

        /// <summary>
        /// Convert text into tokens
        /// </summary>
        /// <param name="model"></param>
        /// <param name="text"></param>
        /// <param name="text_len"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_tokenize(SafeLlamaModelHandle model, byte* text, int text_len, int* tokens, int n_max_tokens, bool add_bos, bool special);

        /// <summary>
        /// Register a callback to receive llama log messages
        /// </summary>
        /// <param name="logCallback"></param>
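        /// <example>
        /// An illustrative sketch (not part of the original file). Keep a reference to the
        /// delegate so the garbage collector cannot collect it while native code may still
        /// invoke it:
        /// <code>
        /// private static readonly LLamaLogCallback _logCallback =
        ///     (level, message) => Console.Write($"[{level}] {message}");
        ///
        /// public static void EnableLogging()
        /// {
        ///     NativeApi.llama_log_set(_logCallback);
        /// }
        /// </code>
        /// </example>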
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_log_set(LLamaLogCallback logCallback);

        /// <summary>
        /// Removes all token data of cells in [c0, c1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="c0"></param>
        /// <param name="c1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_tokens_rm(SafeLLamaContextHandle ctx, int c0, int c1);

        /// <summary>
        /// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Copy all tokens that belong to the specified sequence to another sequence
        /// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <param name="dest"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_cp(SafeLLamaContextHandle ctx, LLamaSeqId src, LLamaSeqId dest, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Removes all tokens that do not belong to the specified sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_keep(SafeLLamaContextHandle ctx, LLamaSeqId seq);

        /// <summary>
        /// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// If the KV cache is RoPEd, the KV data is updated accordingly
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        /// <param name="delta"></param>
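        /// <example>
        /// An illustrative sketch (not part of the original file) of a "context shift": drop the
        /// oldest n tokens of a sequence and slide the rest left. It assumes "ctx" is an existing
        /// SafeLLamaContextHandle, "seq" is an LLamaSeqId, and that LLamaPos converts from int
        /// (an assumption about those structs, not confirmed by this file):
        /// <code>
        /// NativeApi.llama_kv_cache_seq_rm(ctx, seq, (LLamaPos)0, (LLamaPos)n);
        /// NativeApi.llama_kv_cache_seq_shift(ctx, seq, (LLamaPos)n, (LLamaPos)n_past, (LLamaPos)(-n));
        /// </code>
        /// </example>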
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_shift(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, LLamaPos delta);

        /// <summary>
        /// Allocates a batch of tokens on the heap
        /// The batch has to be freed with llama_batch_free()
        /// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
        /// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
        /// The rest of the llama_batch members are allocated with size n_tokens
        /// All members are left uninitialized
        /// </summary>
        /// <param name="n_tokens"></param>
        /// <param name="embd"></param>
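        /// <example>
        /// An illustrative sketch (not part of the original file) of the allocate/free pattern.
        /// Passing embd = 0 allocates token storage rather than embedding storage; the members
        /// must still be populated before use, since llama_batch_init leaves them uninitialized:
        /// <code>
        /// var batch = NativeApi.llama_batch_init(n_tokens: 512, embd: 0);
        /// try
        /// {
        ///     // ... fill in the batch members, then decode ...
        /// }
        /// finally
        /// {
        ///     NativeApi.llama_batch_free(batch);
        /// }
        /// </code>
        /// </example>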
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaNativeBatch llama_batch_init(int n_tokens, int embd);

        /// <summary>
        /// Frees a batch of tokens allocated with llama_batch_init()
        /// </summary>
        /// <param name="batch"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_batch_free(LLamaNativeBatch batch);

        /// <summary>
        /// Process a batch of tokens with the model
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="batch"></param>
        /// <returns>Positive return values do not mean a fatal error, but rather a warning:<br />
        /// - 0: success<br />
        /// - 1: could not find a KV slot for the batch (try reducing the size of the batch or increase the context)<br />
        /// - &lt; 0: error<br />
        /// </returns>
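        /// <example>
        /// An illustrative sketch (not part of the original file) of handling the return codes,
        /// assuming "ctx" and a populated "batch" already exist:
        /// <code>
        /// var result = NativeApi.llama_decode(ctx, batch);
        /// if (result == 1)
        /// {
        ///     // No KV slot available: retry with a smaller batch or a larger context
        /// }
        /// else if (result &lt; 0)
        /// {
        ///     throw new RuntimeError($"llama_decode failed: {result}");
        /// }
        /// </code>
        /// </example>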
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_decode(SafeLLamaContextHandle ctx, LLamaNativeBatch batch);

        /// <summary>
        /// Set the number of threads used for decoding
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="n_threads">n_threads is the number of threads used for generation (single token)</param>
        /// <param name="n_threads_batch">n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)</param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_set_n_threads(SafeLLamaContextHandle ctx, uint n_threads, uint n_threads_batch);
    }
}