
NativeApi.cs

using System;
using System.Buffers;
using System.Runtime.InteropServices;
using System.Text;
using LLama.Common;
using LLama.Exceptions;

#pragma warning disable IDE1006 // Naming Styles

namespace LLama.Native
{
    using llama_token = Int32;

    /// <summary>
    /// Callback from llama.cpp with log messages
    /// </summary>
    /// <param name="level"></param>
    /// <param name="message"></param>
    public delegate void LLamaLogCallback(LLamaLogLevel level, string message);

    /// <summary>
    /// Direct translation of the llama.cpp API
    /// </summary>
    public unsafe partial class NativeApi
    {
        static NativeApi()
        {
            // Try to load a preferred library, based on CPU feature detection
            TryLoadLibrary();

            try
            {
                llama_empty_call();
            }
            catch (DllNotFoundException)
            {
                throw new RuntimeError("The native library cannot be found. This could be due to one of the following reasons: \n" +
                    "1. No LLamaSharp backend was installed. Please search for LLamaSharp.Backend and install one of them. \n" +
                    "2. You are using a CPU-only device but installed the CUDA backend. Please install the CPU backend instead. \n" +
                    "3. The backend is not compatible with your system's CUDA environment. Please check and fix it. If the environment is " +
                    "not expected to change, consider building llama.cpp from source or submitting an issue to LLamaSharp.\n" +
                    "4. One of the dependencies of the native library is missing.\n");
            }

            llama_backend_init(false);
        }
        /// <summary>
        /// Try to load libllama, using CPU feature detection to try and load a more specialised DLL if possible
        /// </summary>
        /// <returns>The library handle to unload later, or IntPtr.Zero if no library was loaded</returns>
        private static IntPtr TryLoadLibrary()
        {
#if NET6_0_OR_GREATER
            if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
            {
                // All of the Windows libraries, in order of preference
                return TryLoad("cu12.1.0/libllama.dll")
                    ?? TryLoad("cu11.7.1/libllama.dll")
#if NET8_0_OR_GREATER
                    ?? TryLoad("avx512/libllama.dll", System.Runtime.Intrinsics.X86.Avx512F.IsSupported)
#endif
                    ?? TryLoad("avx2/libllama.dll", System.Runtime.Intrinsics.X86.Avx2.IsSupported)
                    ?? TryLoad("avx/libllama.dll", System.Runtime.Intrinsics.X86.Avx.IsSupported)
                    ?? IntPtr.Zero;
            }

            if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
            {
                // All of the Linux libraries, in order of preference
                return TryLoad("cu12.1.0/libllama.so")
                    ?? TryLoad("cu11.7.1/libllama.so")
#if NET8_0_OR_GREATER
                    ?? TryLoad("avx512/libllama.so", System.Runtime.Intrinsics.X86.Avx512F.IsSupported)
#endif
                    ?? TryLoad("avx2/libllama.so", System.Runtime.Intrinsics.X86.Avx2.IsSupported)
                    ?? TryLoad("avx/libllama.so", System.Runtime.Intrinsics.X86.Avx.IsSupported)
                    ?? IntPtr.Zero;
            }

            if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
            {
                return IntPtr.Zero;
            }
#endif

            return IntPtr.Zero;

#if NET6_0_OR_GREATER
            // Try to load a DLL from the path if supported. Returns null if nothing is loaded.
            static IntPtr? TryLoad(string path, bool supported = true)
            {
                if (!supported)
                    return null;

                if (NativeLibrary.TryLoad(path, out var handle))
                    return handle;

                return null;
            }
#endif
        }
        private const string libraryName = "libllama";

        /// <summary>
        /// A method that does nothing. This is a native method, calling it will force the llama native dependencies to be loaded.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, EntryPoint = "llama_mmap_supported", CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_empty_call();

        /// <summary>
        /// Create a LLamaContextParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaContextParams llama_context_default_params();

        /// <summary>
        /// Create a LLamaModelQuantizeParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();

        /// <summary>
        /// Check if memory mapping is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mmap_supported();

        /// <summary>
        /// Check if memory locking is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mlock_supported();

        /// <summary>
        /// Export a static computation graph for context of 511 and batch size of 1
        /// NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
        /// parameters here to keep things simple
        /// IMPORTANT: do not use for anything else other than debugging and testing!
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="fname"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_eval_export(SafeLLamaContextHandle ctx, string fname);

        /// <summary>
        /// Various functions for loading a ggml llama model.
        /// Allocate (almost) all memory needed for the model.
        /// Return NULL on failure
        /// </summary>
        /// <param name="path_model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_load_model_from_file(string path_model, LLamaContextParams @params);

        /// <summary>
        /// Create a new llama_context with the given model.
        /// Return value should always be wrapped in SafeLLamaContextHandle!
        /// </summary>
        /// <param name="model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_new_context_with_model(SafeLlamaModelHandle model, LLamaContextParams @params);
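
        // Example (illustrative sketch, not part of the API): the typical load sequence using the two
        // functions above. The model path is hypothetical, and it is assumed that SafeLlamaModelHandle /
        // SafeLLamaContextHandle provide a way to wrap the raw pointers; those wrappers are defined elsewhere.
        //
        //   var p = NativeApi.llama_context_default_params();
        //   var modelPtr = NativeApi.llama_load_model_from_file("path/to/model", p);
        //   if (modelPtr == IntPtr.Zero)
        //       throw new RuntimeError("Failed to load model");
        //   // ... wrap modelPtr in a SafeLlamaModelHandle (here called `modelHandle`), then:
        //   var ctxPtr = NativeApi.llama_new_context_with_model(modelHandle, p);
        //   // ... wrap ctxPtr in a SafeLLamaContextHandle before use; llama_free / llama_free_model release them.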

        /// <summary>
        /// not great API - very likely to change.
        /// Initialize the llama + ggml backend
        /// Call once at the start of the program
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_backend_init(bool numa);

        /// <summary>
        /// Frees all allocated memory in the given llama_context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free(IntPtr ctx);

        /// <summary>
        /// Frees all allocated memory associated with a model
        /// </summary>
        /// <param name="model"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free_model(IntPtr model);

        /// <summary>
        /// Apply a LoRA adapter to a loaded model
        /// path_base_model is the path to a higher quality model to use as a base for
        /// the layers modified by the adapter. Can be NULL to use the current loaded model.
        /// The model needs to be reloaded before applying a new adapter, otherwise the adapter
        /// will be applied on top of the previous one
        /// </summary>
        /// <param name="model_ptr"></param>
        /// <param name="path_lora"></param>
        /// <param name="path_base_model"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_apply_lora_from_file(SafeLlamaModelHandle model_ptr, string path_lora, string? path_base_model, int n_threads);

        /// <summary>
        /// Returns the number of tokens in the KV cache
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_get_kv_cache_token_count(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, int seed);

        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);

        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory (see llama_get_state_size)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        public static ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte[] dest)
        {
            fixed (byte* dstPtr = &dest[0])
            {
                return llama_copy_state_data(ctx, dstPtr);
            }
        }

        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);

        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        public static ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte[] src)
        {
            fixed (byte* srcPtr = &src[0])
            {
                return llama_set_state_data(ctx, srcPtr);
            }
        }
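
        // Example (illustrative sketch, not part of the API): saving and restoring context state with the
        // managed overloads above. Assumes `ctx` is a valid SafeLLamaContextHandle.
        //
        //   var stateSize = NativeApi.llama_get_state_size(ctx);
        //   var state = new byte[stateSize];
        //   var written = NativeApi.llama_copy_state_data(ctx, state);   // save
        //   // ... later, on a context created from the same model:
        //   var read = NativeApi.llama_set_state_data(ctx, state);       // restore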

        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens_out, ulong n_token_capacity, ulong* n_token_count_out);

        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens, ulong n_token_count);
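
        // Example (illustrative sketch, not part of the API): persisting an evaluated prompt to disk and
        // loading it back. The session path and buffer sizes below are hypothetical.
        //
        //   llama_token[] tokens = ...;                       // tokens already evaluated in `ctx`
        //   NativeApi.llama_save_session_file(ctx, "session.bin", tokens, (ulong)tokens.Length);
        //
        //   var loaded = new llama_token[2048];
        //   ulong count;
        //   if (NativeApi.llama_load_session_file(ctx, "session.bin", loaded, (ulong)loaded.Length, &count))
        //   {
        //       // the first `count` entries of `loaded` are now reflected in the KV cache
        //   }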

        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process
        /// n_past is the number of tokens to use from previous eval calls
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_eval(SafeLLamaContextHandle ctx, llama_token[] tokens, int n_tokens, int n_past, int n_threads);

        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process
        /// n_past is the number of tokens to use from previous eval calls
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, EntryPoint = "llama_eval", CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_eval_with_pointer(SafeLLamaContextHandle ctx, llama_token* tokens, int n_tokens, int n_past, int n_threads);
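
        // Example (illustrative sketch, not part of the API): feeding a batch of tokens through the model.
        // `tokens` would normally come from llama_tokenize; `n_past` tracks how many tokens the context
        // has already evaluated. The thread count here is arbitrary.
        //
        //   var n_past = 0;
        //   if (NativeApi.llama_eval(ctx, tokens, tokens.Length, n_past, n_threads: 4) != 0)
        //       throw new RuntimeError("Eval failed");
        //   n_past += tokens.Length;
        //   // logits for the last token are now available via llama_get_logits(ctx)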

        /// <summary>
        /// Convert the provided text into tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="encoding"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
        public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, llama_token[] tokens, int n_max_tokens, bool add_bos)
        {
            // Calculate number of bytes in text and borrow an array that large (+1 for nul byte)
            var byteCount = encoding.GetByteCount(text);
            var array = ArrayPool<byte>.Shared.Rent(byteCount + 1);
            try
            {
                // Convert to bytes
                fixed (char* textPtr = text)
                fixed (byte* arrayPtr = array)
                {
                    encoding.GetBytes(textPtr, text.Length, arrayPtr, array.Length);
                }

                // Add a zero byte to the end to terminate the string
                array[byteCount] = 0;

                // Do the actual tokenization
                fixed (byte* arrayPtr = array)
                fixed (llama_token* tokensPtr = tokens)
                    return llama_tokenize_native(ctx, arrayPtr, tokensPtr, n_max_tokens, add_bos);
            }
            finally
            {
                ArrayPool<byte>.Shared.Return(array);
            }
        }
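
        // Example (illustrative sketch, not part of the API): tokenizing a prompt with the wrapper above.
        // A negative return value reports how many tokens were actually needed, so the common pattern is
        // to resize and retry.
        //
        //   var tokens = new llama_token[64];
        //   var count = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, tokens, tokens.Length, add_bos: true);
        //   if (count < 0)
        //   {
        //       tokens = new llama_token[-count];
        //       count = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, tokens, tokens.Length, add_bos: true);
        //   }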

        /// <summary>
        /// Convert the provided text into tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
        [DllImport(libraryName, EntryPoint = "llama_tokenize", CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_tokenize_native(SafeLLamaContextHandle ctx, byte* text, llama_token* tokens, int n_max_tokens, bool add_bos);

        /// <summary>
        /// Get the number of tokens in the model vocabulary for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_vocab(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the size of the context window for the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the dimension of embedding vectors from the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_embd(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Token logits obtained from the last call to llama_eval()
        /// The logits for the last token are stored in the last row
        /// Can be mutated in order to change the probabilities of the next token.<br />
        /// Rows: n_tokens<br />
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits(SafeLLamaContextHandle ctx);
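
        // Example (illustrative sketch, not part of the API): wrapping one row of logits in a Span<float>
        // for managed access. A row has llama_n_vocab(ctx) entries; see the doc comment above for the
        // row layout relative to the last eval call.
        //
        //   var vocab = NativeApi.llama_n_vocab(ctx);
        //   var row = new Span<float>(NativeApi.llama_get_logits(ctx), vocab);
        //   // row[i] is the (mutable) logit for token id i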

        /// <summary>
        /// Get the embeddings for the input
        /// shape: [n_embd] (1-dimensional)
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_embeddings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Token Id -> String. Uses the vocabulary in the provided context
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="token"></param>
        /// <returns>Pointer to a string.</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_token_to_str(SafeLLamaContextHandle ctx, llama_token token);
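
        // Example (illustrative sketch, not part of the API): converting the returned pointer to a managed
        // string. llama.cpp vocabulary text is UTF-8, so PtrToStringUTF8 (available on .NET Core 3.0+ / .NET 5+)
        // is used here; tokens that are only partial UTF-8 sequences will not round-trip cleanly this way.
        //
        //   var ptr = NativeApi.llama_token_to_str(ctx, token);
        //   var text = Marshal.PtrToStringUTF8(ptr);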

        /// <summary>
        /// Get the "Beginning of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_bos(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the "End of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_eos(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the "new line" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_nl(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print out timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Reset all collected timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_print_system_info();

        /// <summary>
        /// Get the number of tokens in the model vocabulary
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_n_vocab(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the context window for the model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_n_ctx(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the dimension of embedding vectors from this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_n_embd(SafeLlamaModelHandle model);

        /// <summary>
        /// Convert a single token into text
        /// </summary>
        /// <param name="model"></param>
        /// <param name="llamaToken"></param>
        /// <param name="buffer">buffer to write string into</param>
        /// <param name="length">size of the buffer</param>
        /// <returns>The length written, or if the buffer is too small a negative number that indicates the length required</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_to_piece_with_model(SafeLlamaModelHandle model, int llamaToken, byte* buffer, int length);
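
        // Example (illustrative sketch, not part of the API): the negative-return convention means a too-small
        // buffer reports the required size, so a retry with a larger buffer always succeeds.
        //
        //   var buffer = new byte[8];
        //   int written;
        //   fixed (byte* ptr = buffer)
        //       written = NativeApi.llama_token_to_piece_with_model(model, token, ptr, buffer.Length);
        //   if (written < 0)
        //   {
        //       buffer = new byte[-written];
        //       fixed (byte* ptr = buffer)
        //           written = NativeApi.llama_token_to_piece_with_model(model, token, ptr, buffer.Length);
        //   }
        //   var piece = Encoding.UTF8.GetString(buffer, 0, written);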

        /// <summary>
        /// Convert text into tokens
        /// </summary>
        /// <param name="model"></param>
        /// <param name="text"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_tokenize_with_model(SafeLlamaModelHandle model, byte* text, int* tokens, int n_max_tokens, bool add_bos);

        /// <summary>
        /// Register a callback to receive llama log messages
        /// </summary>
        /// <param name="logCallback"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_log_set(LLamaLogCallback logCallback);
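
        // Example (illustrative sketch, not part of the API): routing native log messages into the console.
        // Keep a reference to the delegate alive (e.g. in a static field) for as long as logging may occur;
        // otherwise the GC can collect it while native code still holds the callback pointer.
        //
        //   private static readonly LLamaLogCallback _logCallback = (level, message) => Console.Write($"[{level}] {message}");
        //   ...
        //   NativeApi.llama_log_set(_logCallback);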
    }
}