
NativeApi.cs

using System;
using System.Buffers;
using System.Reflection;
using System.Runtime.InteropServices;
using System.Text;
using LLama.Exceptions;
using ManagedCuda;
#if NET6_0_OR_GREATER
using System.Runtime.Intrinsics.X86;
#endif

#pragma warning disable IDE1006 // Naming Styles

namespace LLama.Native
{
    using llama_token = Int32;

    /// <summary>
    /// Callback from llama.cpp with log messages
    /// </summary>
    /// <param name="level"></param>
    /// <param name="message"></param>
    public delegate void LLamaLogCallback(LLamaLogLevel level, string message);

    /// <summary>
    /// Direct translation of the llama.cpp API
    /// </summary>
    public unsafe partial class NativeApi
    {
        static NativeApi()
        {
#if NET6_0_OR_GREATER
            NativeLibrary.SetDllImportResolver(typeof(NativeApi).Assembly, LLamaImportResolver);
#endif
            try
            {
                llama_empty_call();
            }
            catch (DllNotFoundException)
            {
                throw new RuntimeError("The native library cannot be found. Possible reasons include:\n" +
                    "1. No LLamaSharp backend is installed. Please search for LLamaSharp.Backend and install one of them.\n" +
                    "2. You are using a CPU-only device but installed a CUDA backend. Please install the CPU backend instead.\n" +
                    "3. The backend is not compatible with your system's CUDA environment. Please check and fix it. If the environment " +
                    "is not expected to change, consider building llama.cpp from source or submitting an issue to LLamaSharp.\n" +
                    "4. One of the native library's dependencies is missing.\n");
            }
            llama_backend_init(false);
        }

        /// <summary>
        /// Get the CUDA major version if possible.
        /// </summary>
        /// <returns>-1 if CUDA is not available</returns>
        private static int GetCudaVersion()
        {
            int deviceCount = CudaContext.GetDeviceCount();
            // Report the API version of the first CUDA device found, if any.
            for (int deviceIndex = 0; deviceIndex < deviceCount; deviceIndex++)
            {
                using (CudaContext ctx = new CudaContext(deviceIndex))
                {
                    var version = ctx.GetAPIVersionOfCurrentContext();
                    return version.Major;
                }
            }
            return -1;
        }

        /// <summary>
        /// Get the AVX suffix for the native library name.
        /// </summary>
        /// <returns></returns>
        private static string GetAvxFlag()
        {
            AvxLevel level = AvxLevel.None;
#if NET6_0_OR_GREATER
            if (Avx.IsSupported) level = AvxLevel.Avx;
            if (Avx2.IsSupported) level = AvxLevel.Avx2;
#if NET8_0_OR_GREATER
            if (Avx512F.IsSupported) level = AvxLevel.Avx512;
#endif
            return level switch
            {
                AvxLevel.None => "",
                AvxLevel.Avx => "-avx",
                AvxLevel.Avx2 => "-avx2",
                AvxLevel.Avx512 => "-avx512",
                _ => string.Empty,
            };
#else
            return string.Empty;
#endif
        }

#if NET6_0_OR_GREATER
        private static IntPtr LLamaImportResolver(string name, Assembly assembly, DllImportSearchPath? searchPath)
        {
            IntPtr handle = IntPtr.Zero;
            if (!name.Equals(libraryName))
            {
                return NativeLibrary.Load(name, assembly, searchPath);
            }
            string libraryPath = string.Empty;
            if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
            {
                var avxFlag = GetAvxFlag();
                // Check CUDA
                var cudaVersion = GetCudaVersion();
                if (cudaVersion == 11)
                {
                    libraryPath = $"runtimes/win-x64/libllama-cuda11{avxFlag}.dll";
                }
                else if (cudaVersion == 12)
                {
                    libraryPath = $"runtimes/win-x64/libllama-cuda12{avxFlag}.dll";
                }
                else if (cudaVersion == -1) // CPU version
                {
                    libraryPath = $"runtimes/win-x64/libllama{avxFlag}.dll";
                }
                else
                {
                    throw new NotImplementedException($"CUDA version {cudaVersion} is not supported. Please compile the library yourself or open an issue in LLamaSharp.");
                }
            }
            else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
            {
                var avxFlag = GetAvxFlag();
                // Check CUDA
                var cudaVersion = GetCudaVersion();
                if (cudaVersion == 11)
                {
                    libraryPath = $"runtimes/linux-x64/libllama-cuda11{avxFlag}.so";
                }
                else if (cudaVersion == 12)
                {
                    libraryPath = $"runtimes/linux-x64/libllama-cuda12{avxFlag}.so";
                }
                else if (cudaVersion == -1) // CPU version
                {
                    libraryPath = $"runtimes/linux-x64/libllama{avxFlag}.so";
                }
                else
                {
                    throw new NotImplementedException($"CUDA version {cudaVersion} is not supported. Please compile the library yourself or open an issue in LLamaSharp.");
                }
            }
            else if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
            {
                if (System.Runtime.Intrinsics.Arm.ArmBase.Arm64.IsSupported)
                {
                    libraryPath = $"runtimes/osx-arm64/libllama.dylib";
                }
                else
                {
                    libraryPath = $"runtimes/osx-x86_64/libllama.dylib";
                }
            }
            NativeLibrary.TryLoad(libraryPath, assembly, searchPath, out handle);
            return handle;
        }
#endif

        private const string libraryName = "libllama";

        /// <summary>
        /// A method that does nothing. This is a native method; calling it forces the llama native dependencies to be loaded.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, EntryPoint = "llama_mmap_supported", CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_empty_call();

        /// <summary>
        /// Get the maximum number of devices supported by llama.cpp
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_max_devices();

        /// <summary>
        /// Create a LLamaModelParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelParams llama_model_default_params();

        /// <summary>
        /// Create a LLamaContextParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaContextParams llama_context_default_params();

        /// <summary>
        /// Create a LLamaModelQuantizeParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();

        /// <summary>
        /// Check if memory mapping is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mmap_supported();

        /// <summary>
        /// Check if memory locking is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mlock_supported();

        /// <summary>
        /// Load a ggml llama model from file.
        /// Allocates (almost) all memory needed for the model.
        /// Returns NULL on failure.
        /// </summary>
        /// <param name="path_model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_load_model_from_file(string path_model, LLamaModelParams @params);

        /// <summary>
        /// Create a new llama_context with the given model.
        /// Return value should always be wrapped in SafeLLamaContextHandle!
        /// </summary>
        /// <param name="model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_new_context_with_model(SafeLlamaModelHandle model, LLamaContextParams @params);
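
        // Illustrative usage sketch (not part of the upstream API): the load/free lifecycle for a
        // model. The model path is a hypothetical placeholder. llama_load_model_from_file returns
        // a raw pointer which LLamaSharp normally wraps in a SafeLlamaModelHandle - the type that
        // llama_new_context_with_model expects.
        private static void ExampleModelLifecycle()
        {
            var modelParams = llama_model_default_params();
            var model = llama_load_model_from_file("path/to/model.gguf", modelParams);
            if (model == IntPtr.Zero)
                throw new RuntimeError("Failed to load the model (llama_load_model_from_file returned NULL).");

            // ... wrap `model` in a SafeLlamaModelHandle and create a context with
            // llama_new_context_with_model(handle, llama_context_default_params()) ...

            llama_free_model(model);
        }
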
        /// <summary>
        /// Not a great API - very likely to change.
        /// Initialize the llama + ggml backend.
        /// Call once at the start of the program.
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_backend_init(bool numa);

        /// <summary>
        /// Frees all allocated memory in the given llama_context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free(IntPtr ctx);

        /// <summary>
        /// Frees all allocated memory associated with a model
        /// </summary>
        /// <param name="model"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free_model(IntPtr model);

        /// <summary>
        /// Apply a LoRA adapter to a loaded model.
        /// path_base_model is the path to a higher quality model to use as a base for
        /// the layers modified by the adapter. Can be NULL to use the currently loaded model.
        /// The model needs to be reloaded before applying a new adapter, otherwise the adapter
        /// will be applied on top of the previous one.
        /// </summary>
        /// <param name="model_ptr"></param>
        /// <param name="path_lora"></param>
        /// <param name="scale"></param>
        /// <param name="path_base_model"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_apply_lora_from_file(SafeLlamaModelHandle model_ptr, string path_lora, float scale, string? path_base_model, int n_threads);

        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, uint seed);

        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Copies the state to the specified destination address.
        /// The destination needs to have enough memory allocated.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);

        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);
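
        // Illustrative usage sketch (not part of the upstream API): capturing and restoring the
        // full context state (rng, logits, embedding and kv_cache) through a managed byte array.
        private static byte[] ExampleSaveState(SafeLLamaContextHandle ctx)
        {
            // llama_get_state_size is an upper bound; trim to the number of bytes actually written.
            var buffer = new byte[llama_get_state_size(ctx)];
            ulong written;
            fixed (byte* ptr = buffer)
                written = llama_copy_state_data(ctx, ptr);
            Array.Resize(ref buffer, checked((int)written));
            return buffer;
        }

        // Restore a state previously captured by ExampleSaveState.
        private static void ExampleRestoreState(SafeLLamaContextHandle ctx, byte[] state)
        {
            fixed (byte* ptr = state)
                llama_set_state_data(ctx, ptr);
        }
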
        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens_out, ulong n_token_capacity, ulong* n_token_count_out);

        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens, ulong n_token_count);
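
        // Illustrative usage sketch (not part of the upstream API): loading the token history from
        // a session file. The initial capacity of 1024 tokens is a hypothetical choice.
        private static llama_token[] ExampleLoadSession(SafeLLamaContextHandle ctx, string path)
        {
            var tokens = new llama_token[1024];
            ulong count;
            if (!llama_load_session_file(ctx, path, tokens, (ulong)tokens.Length, &count))
                throw new RuntimeError($"Failed to load session file: {path}");
            Array.Resize(ref tokens, checked((int)count));
            return tokens;
        }
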
        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process.
        /// n_past is the number of tokens to use from previous eval calls.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        [Obsolete("use llama_decode() instead")]
        public static extern int llama_eval(SafeLLamaContextHandle ctx, llama_token* tokens, int n_tokens, int n_past);

        /// <summary>
        /// Convert the provided text into tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="encoding"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned.
        /// </returns>
        public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, llama_token[] tokens, int n_max_tokens, bool add_bos, bool special)
        {
            // Calculate the number of bytes in the text and borrow an array at least that large (+1 for the null terminator)
            var byteCount = encoding.GetByteCount(text);
            var array = ArrayPool<byte>.Shared.Rent(byteCount + 1);
            try
            {
                // Convert to bytes
                fixed (char* textPtr = text)
                fixed (byte* arrayPtr = array)
                {
                    encoding.GetBytes(textPtr, text.Length, arrayPtr, array.Length);
                }

                // Add a zero byte to the end to terminate the string
                array[byteCount] = 0;

                // Do the actual tokenization
                fixed (byte* arrayPtr = array)
                fixed (llama_token* tokensPtr = tokens)
                    return llama_tokenize(ctx.ModelHandle, arrayPtr, byteCount, tokensPtr, n_max_tokens, add_bos, special);
            }
            finally
            {
                ArrayPool<byte>.Shared.Return(array);
            }
        }
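
        // Illustrative usage sketch (not part of the upstream API): the two-pass sizing pattern
        // this return convention enables. A first call with an empty buffer yields a negative
        // number whose magnitude is the required token count; a second call fills a buffer of
        // exactly that size. UTF-8 is assumed here, as that is what llama.cpp works with.
        private static llama_token[] ExampleTokenize(SafeLLamaContextHandle ctx, string text)
        {
            var required = -llama_tokenize(ctx, text, Encoding.UTF8, Array.Empty<llama_token>(), 0, true, false);
            var tokens = new llama_token[required];
            llama_tokenize(ctx, text, Encoding.UTF8, tokens, tokens.Length, true, false);
            return tokens;
        }
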
        /// <summary>
        /// Get the size of the context window for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Token logits obtained from the last call to llama_eval().
        /// The logits for the last token are stored in the last row.
        /// Can be mutated in order to change the probabilities of the next token.<br />
        /// Rows: n_tokens<br />
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Logits for the ith token. Equivalent to: llama_get_logits(ctx) + i*n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="i"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits_ith(SafeLLamaContextHandle ctx, int i);

        /// <summary>
        /// Get the embeddings for the input
        /// shape: [n_embd] (1-dimensional)
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_embeddings(SafeLLamaContextHandle ctx);
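
        // Illustrative usage sketch (not part of the upstream API): bounding the raw logits and
        // embedding pointers with the model's vocabulary size and embedding dimension so they can
        // be read (or mutated) safely as spans.
        private static Span<float> ExampleLogitsSpan(SafeLLamaContextHandle ctx, SafeLlamaModelHandle model)
        {
            return new Span<float>(llama_get_logits(ctx), llama_n_vocab(model));
        }

        private static Span<float> ExampleEmbeddingsSpan(SafeLLamaContextHandle ctx, SafeLlamaModelHandle model)
        {
            return new Span<float>(llama_get_embeddings(ctx), llama_n_embd(model));
        }
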
        /// <summary>
        /// Get the "Beginning of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_bos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "End of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_eos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "new line" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_nl(SafeLlamaModelHandle model);

        /// <summary>
        /// Print out timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Reset all collected timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_print_system_info();

        /// <summary>
        /// Get the number of tokens in the model vocabulary
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_vocab(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the context window the model was trained with
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx_train(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the dimension of embedding vectors from this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_embd(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the model in bytes
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_model_size(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the number of parameters in this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_model_n_params(SafeLlamaModelHandle model);

        /// <summary>
        /// Convert a single token into text
        /// </summary>
        /// <param name="model"></param>
        /// <param name="llamaToken"></param>
        /// <param name="buffer">buffer to write string into</param>
        /// <param name="length">size of the buffer</param>
        /// <returns>The length written, or, if the buffer is too small, a negative value indicating the required length</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_to_piece(SafeLlamaModelHandle model, int llamaToken, byte* buffer, int length);
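
        // Illustrative usage sketch (not part of the upstream API): the retry pattern implied by
        // the negative return value. UTF-8 decoding is an assumption, and token pieces may split
        // multi-byte characters, which a real implementation would need to handle.
        private static string ExampleTokenToText(SafeLlamaModelHandle model, llama_token token)
        {
            var buffer = new byte[64];
            int written;
            fixed (byte* ptr = buffer)
                written = llama_token_to_piece(model, token, ptr, buffer.Length);
            if (written < 0)
            {
                // The buffer was too small; the magnitude of the result is the required length.
                buffer = new byte[-written];
                fixed (byte* ptr = buffer)
                    written = llama_token_to_piece(model, token, ptr, buffer.Length);
            }
            return Encoding.UTF8.GetString(buffer, 0, written);
        }
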
        /// <summary>
        /// Convert text into tokens
        /// </summary>
        /// <param name="model"></param>
        /// <param name="text"></param>
        /// <param name="text_len"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned.
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_tokenize(SafeLlamaModelHandle model, byte* text, int text_len, int* tokens, int n_max_tokens, bool add_bos, bool special);

        /// <summary>
        /// Register a callback to receive llama log messages
        /// </summary>
        /// <param name="logCallback"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_log_set(LLamaLogCallback logCallback);

        /// <summary>
        /// Remove all token data from the cells in [c0, c1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="c0"></param>
        /// <param name="c1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_tokens_rm(SafeLLamaContextHandle ctx, int c0, int c1);

        /// <summary>
        /// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Copy all tokens that belong to the specified sequence to another sequence.
        /// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <param name="dest"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_cp(SafeLLamaContextHandle ctx, LLamaSeqId src, LLamaSeqId dest, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Removes all tokens that do not belong to the specified sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_keep(SafeLLamaContextHandle ctx, LLamaSeqId seq);

        /// <summary>
        /// Adds the relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1).
        /// If the KV cache is RoPEd, the KV data is updated accordingly.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        /// <param name="delta"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_shift(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, LLamaPos delta);
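
        // Illustrative usage sketch (not part of the upstream API): dropping the oldest tokens of
        // a sequence and shifting the remainder back, a common way to keep generating once the
        // context fills up. Assumes LLamaPos converts implicitly from int; keep/discard/total are
        // hypothetical caller-tracked token counts.
        private static void ExampleShiftContext(SafeLLamaContextHandle ctx, LLamaSeqId seq, int keep, int discard, int total)
        {
            // Remove tokens in [keep, keep + discard)...
            llama_kv_cache_seq_rm(ctx, seq, keep, keep + discard);
            // ...then move [keep + discard, total) back by `discard` positions.
            llama_kv_cache_seq_shift(ctx, seq, keep + discard, total, -discard);
        }
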
        /// <summary>
        /// Allocates a batch of tokens on the heap.
        /// Each token can be assigned up to n_seq_max sequence ids.
        /// The batch has to be freed with llama_batch_free().
        /// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float).
        /// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token.
        /// The rest of the llama_batch members are allocated with size n_tokens.
        /// All members are left uninitialized.
        /// </summary>
        /// <param name="n_tokens"></param>
        /// <param name="embd"></param>
        /// <param name="n_seq_max">Each token can be assigned up to n_seq_max sequence ids</param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaNativeBatch llama_batch_init(int n_tokens, int embd, int n_seq_max);

        /// <summary>
        /// Frees a batch of tokens allocated with llama_batch_init()
        /// </summary>
        /// <param name="batch"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_batch_free(LLamaNativeBatch batch);

        /// <summary>
        /// Process a batch of tokens with the given context.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="batch"></param>
        /// <returns>A positive return value does not indicate a fatal error, but rather a warning:<br />
        /// - 0: success<br />
        /// - 1: could not find a KV slot for the batch (try reducing the size of the batch or increasing the context)<br />
        /// - &lt; 0: error<br />
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_decode(SafeLLamaContextHandle ctx, LLamaNativeBatch batch);
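
        // Illustrative usage sketch (not part of the upstream API): the batch lifecycle around
        // llama_decode, mapping the documented return codes to errors. Populating the batch is
        // omitted because LLamaNativeBatch's layout is defined elsewhere.
        private static void ExampleDecode(SafeLLamaContextHandle ctx, int n_tokens)
        {
            var batch = llama_batch_init(n_tokens, 0, 1);
            try
            {
                // ... fill in the batch's tokens, positions and sequence ids here ...
                var result = llama_decode(ctx, batch);
                if (result == 1)
                    throw new RuntimeError("Could not find a KV slot for the batch: reduce the batch size or increase the context size.");
                if (result < 0)
                    throw new RuntimeError($"llama_decode failed with code {result}.");
            }
            finally
            {
                llama_batch_free(batch);
            }
        }
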
        /// <summary>
        /// Set the number of threads used for decoding
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="n_threads">the number of threads used for generation (single token)</param>
        /// <param name="n_threads_batch">the number of threads used for prompt and batch processing (multiple tokens)</param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_set_n_threads(SafeLLamaContextHandle ctx, uint n_threads, uint n_threads_batch);

        private enum AvxLevel
        {
            None = 0,
            Avx = 1,
            Avx2 = 2,
            Avx512 = 3
        }
    }
}