NativeApi.cs
using System;
using System.Buffers;
using System.Collections.Generic;
using System.IO;
using System.Runtime.InteropServices;
using System.Text;
using System.Text.Json;
using LLama.Exceptions;

#pragma warning disable IDE1006 // Naming Styles

namespace LLama.Native
{
    using llama_token = Int32;

    /// <summary>
    /// Callback from llama.cpp with log messages
    /// </summary>
    /// <param name="level"></param>
    /// <param name="message"></param>
    public delegate void LLamaLogCallback(LLamaLogLevel level, string message);

    /// <summary>
    /// Direct translation of the llama.cpp API
    /// </summary>
    public unsafe partial class NativeApi
    {
        static NativeApi()
        {
            // Try to load a preferred library, based on CPU feature detection
            TryLoadLibrary();

            try
            {
                llama_empty_call();
            }
            catch (DllNotFoundException)
            {
                throw new RuntimeError("The native library cannot be found. Possible reasons:\n" +
                    "1. No LLamaSharp backend was installed. Please search for LLamaSharp.Backend and install one of them.\n" +
                    "2. You are using a CPU-only device but installed the CUDA backend. Please install the CPU backend instead.\n" +
                    "3. The backend is not compatible with your system's CUDA environment. Please check and fix it. If the environment " +
                    "is not expected to change, consider building llama.cpp from source or submitting an issue to LLamaSharp.\n" +
                    "4. One of the dependencies of the native library is missing.\n");
            }

            llama_backend_init(false);
        }

        private static int GetCudaMajorVersion()
        {
            string? cudaPath;
            string version = "";
            if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
            {
                cudaPath = Environment.GetEnvironmentVariable("CUDA_PATH");
                if (cudaPath is null)
                {
                    return -1;
                }
                version = GetCudaVersionFromPath(cudaPath);
            }
            else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
            {
                // Try the default installation location first
                cudaPath = "/usr/local/cuda";
                version = GetCudaVersionFromPath(cudaPath);
                if (string.IsNullOrEmpty(version))
                {
                    cudaPath = Environment.GetEnvironmentVariable("LD_LIBRARY_PATH");
                    if (cudaPath is null)
                    {
                        return -1;
                    }
                    // Scan each library directory; its parent should contain version.json
                    foreach (var path in cudaPath.Split(':'))
                    {
                        version = GetCudaVersionFromPath(Path.Combine(path, ".."));
                        if (!string.IsNullOrEmpty(version))
                        {
                            break;
                        }
                    }
                }
            }

            if (string.IsNullOrEmpty(version))
            {
                return -1;
            }

            version = version.Split('.')[0];
            return int.TryParse(version, out var majorVersion) ? majorVersion : -1;
        }

        private static string GetCudaVersionFromPath(string cudaPath)
        {
            try
            {
                string json = File.ReadAllText(Path.Combine(cudaPath, cudaVersionFile));
                using (JsonDocument document = JsonDocument.Parse(json))
                {
                    JsonElement root = document.RootElement;
                    JsonElement cublasNode = root.GetProperty("libcublas");
                    JsonElement versionNode = cublasNode.GetProperty("version");
                    if (versionNode.ValueKind == JsonValueKind.Undefined)
                    {
                        return string.Empty;
                    }
                    return versionNode.GetString() ?? string.Empty;
                }
            }
            catch (Exception)
            {
                // A missing or malformed version.json means no usable CUDA at this path
                return string.Empty;
            }
        }
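
        // For reference, GetCudaVersionFromPath expects a "version.json" file of the shape
        // produced by recent CUDA toolkits (a sketch; only the fields read above are shown):
        //
        //   {
        //     "libcublas": {
        //       "version": "12.2.5.6"
        //     }
        //   }
        //
        // Only the major component ("12" here) is used by GetCudaMajorVersion.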

#if NET6_0_OR_GREATER
        private static string GetAvxLibraryPath(NativeLibraryConfig.AvxLevel avxLevel, string prefix, string suffix)
        {
            var avxStr = NativeLibraryConfig.AvxLevelToString(avxLevel);
            if (!string.IsNullOrEmpty(avxStr))
            {
                avxStr += "/";
            }
            return $"{prefix}{avxStr}{libraryName}{suffix}";
        }

        private static List<string> GetLibraryTryOrder(NativeLibraryConfig.Description configuration)
        {
            OSPlatform platform;
            string prefix, suffix;
            if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
            {
                platform = OSPlatform.Windows;
                prefix = "runtimes/win-x64/native/";
                suffix = ".dll";
            }
            else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
            {
                platform = OSPlatform.Linux;
                prefix = "runtimes/linux-x64/native/";
                suffix = ".so";
            }
            else if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
            {
                platform = OSPlatform.OSX;
                suffix = ".dylib";
                if (System.Runtime.Intrinsics.Arm.ArmBase.Arm64.IsSupported)
                {
                    prefix = "runtimes/osx-arm64/native/";
                }
                else
                {
                    prefix = "runtimes/osx-x64/native/";
                }
            }
            else
            {
                throw new RuntimeError("Your system platform is not supported, please open an issue in LLamaSharp.");
            }

            List<string> result = new();
            if (configuration.UseCuda && (platform == OSPlatform.Windows || platform == OSPlatform.Linux)) // no CUDA on macOS
            {
                int cudaVersion = GetCudaMajorVersion();

                // TODO: load CUDA library with AVX
                if (cudaVersion == -1 && !configuration.AllowFallback)
                {
                    // If the check is skipped, just try to load the CUDA libraries one by one.
                    if (configuration.SkipCheck)
                    {
                        result.Add($"{prefix}cuda12/{libraryName}{suffix}");
                        result.Add($"{prefix}cuda11/{libraryName}{suffix}");
                    }
                    else
                    {
                        throw new RuntimeError("Configured to load a CUDA library but no CUDA was detected on your device.");
                    }
                }
                else if (cudaVersion == 11)
                {
                    result.Add($"{prefix}cuda11/{libraryName}{suffix}");
                }
                else if (cudaVersion == 12)
                {
                    result.Add($"{prefix}cuda12/{libraryName}{suffix}");
                }
                else if (cudaVersion > 0)
                {
                    throw new RuntimeError($"CUDA version {cudaVersion} is not yet supported by LLamaSharp, please open an issue for it.");
                }
                // otherwise no CUDA detected, but fallback is allowed
            }

            // Use CPU (or macOS, possibly with Metal)
            if (!configuration.AllowFallback && platform != OSPlatform.OSX)
            {
                result.Add(GetAvxLibraryPath(configuration.AvxLevel, prefix, suffix));
            }
            else if (platform != OSPlatform.OSX) // on macOS there's no AVX at all
            {
#if NET8_0_OR_GREATER
                if (configuration.AvxLevel == NativeLibraryConfig.AvxLevel.Avx512)
                {
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx512, prefix, suffix));
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx2, prefix, suffix));
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx, prefix, suffix));
                }
                else
#endif
                if (configuration.AvxLevel == NativeLibraryConfig.AvxLevel.Avx2)
                {
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx2, prefix, suffix));
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx, prefix, suffix));
                }
                else if (configuration.AvxLevel == NativeLibraryConfig.AvxLevel.Avx)
                {
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx, prefix, suffix));
                }
                result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.None, prefix, suffix));
            }

            if (platform == OSPlatform.OSX)
            {
                result.Add($"{prefix}{libraryName}{suffix}");
            }
            return result;
        }
#endif
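
        // As an illustration, on a Windows x64 machine with CUDA 12 detected, AllowFallback
        // enabled, and AvxLevel.Avx2 configured, GetLibraryTryOrder would produce:
        //
        //   runtimes/win-x64/native/cuda12/libllama.dll
        //   runtimes/win-x64/native/avx2/libllama.dll
        //   runtimes/win-x64/native/avx/libllama.dll
        //   runtimes/win-x64/native/libllama.dll
        //
        // (This assumes AvxLevelToString returns "avx2"/"avx" and an empty string for None;
        // the paths then follow directly from GetAvxLibraryPath above.)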

        /// <summary>
        /// Try to load libllama, using CPU feature detection to try and load a more specialised DLL if possible
        /// </summary>
        /// <returns>The library handle to unload later, or IntPtr.Zero if no library was loaded</returns>
        private static IntPtr TryLoadLibrary()
        {
#if NET6_0_OR_GREATER
            var configuration = NativeLibraryConfig.GetInstance().Desc;

            if (!string.IsNullOrEmpty(configuration.Path))
            {
                // When loading the user-specified library, there's no fallback.
                var result = TryLoad(configuration.Path, true);
                if (result is null || result == IntPtr.Zero)
                {
                    throw new RuntimeError($"Failed to load the native library [{configuration.Path}] you specified.");
                }
                return result ?? IntPtr.Zero;
            }

            var libraryTryLoadOrder = GetLibraryTryOrder(configuration);

            string[] possiblePathPrefix = new string[] {
                System.AppDomain.CurrentDomain.BaseDirectory,
                Path.GetDirectoryName(System.Reflection.Assembly.GetExecutingAssembly().Location) ?? ""
            };

            var tryFindPath = (string filename) =>
            {
                int i = 0;
                while (!File.Exists(filename))
                {
                    if (i < possiblePathPrefix.Length)
                    {
                        filename = Path.Combine(possiblePathPrefix[i], filename);
                        i++;
                    }
                    else
                    {
                        break;
                    }
                }
                return filename;
            };

            foreach (var libraryPath in libraryTryLoadOrder)
            {
                var fullPath = tryFindPath(libraryPath);
                var result = TryLoad(fullPath, true);
                if (result is not null && result != IntPtr.Zero)
                {
                    Console.ForegroundColor = ConsoleColor.Red;
                    Console.WriteLine($"[Native Library] {fullPath} is loaded.");
                    Console.ResetColor();
                    return result ?? IntPtr.Zero;
                }
                else
                {
                    Console.WriteLine($"Tried to load {fullPath}");
                }
            }

            if (!configuration.AllowFallback)
            {
                throw new RuntimeError("Failed to load a library that matches your rules. Please" +
                    " 1) check your rules," +
                    " 2) try allowing fallback, or" +
                    " 3) open an issue if loading was expected to succeed.");
            }
#endif
            return IntPtr.Zero;

#if NET6_0_OR_GREATER
            // Try to load a DLL from the path if supported. Returns null if nothing is loaded.
            static IntPtr? TryLoad(string path, bool supported = true)
            {
                if (!supported)
                    return null;

                if (NativeLibrary.TryLoad(path, out var handle))
                    return handle;

                return null;
            }
#endif
        }

        private const string libraryName = "libllama";
        private const string cudaVersionFile = "version.json";

        /// <summary>
        /// A method that does nothing. This is a native method, calling it will force the llama native dependencies to be loaded.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, EntryPoint = "llama_mmap_supported", CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_empty_call();
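
        // A minimal way to force the native dependencies to load eagerly (and surface the
        // RuntimeError from the static constructor early) is to make this no-op call at startup:
        //
        //   NativeApi.llama_empty_call();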

        /// <summary>
        /// Get the maximum number of devices supported by llama.cpp
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_max_devices();

        /// <summary>
        /// Create a LLamaModelParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelParams llama_model_default_params();

        /// <summary>
        /// Create a LLamaContextParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaContextParams llama_context_default_params();

        /// <summary>
        /// Create a LLamaModelQuantizeParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();

        /// <summary>
        /// Check if memory mapping is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mmap_supported();

        /// <summary>
        /// Check if memory locking is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mlock_supported();

        /// <summary>
        /// Various functions for loading a ggml llama model.
        /// Allocate (almost) all memory needed for the model.
        /// Return NULL on failure
        /// </summary>
        /// <param name="path_model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_load_model_from_file(string path_model, LLamaModelParams @params);

        /// <summary>
        /// Create a new llama_context with the given model.
        /// Return value should always be wrapped in SafeLLamaContextHandle!
        /// </summary>
        /// <param name="model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_new_context_with_model(SafeLlamaModelHandle model, LLamaContextParams @params);
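
        // A minimal loading sketch (hypothetical model path). As the comments above say, the
        // raw pointers should be wrapped in SafeLlamaModelHandle / SafeLLamaContextHandle
        // rather than used directly:
        //
        //   var modelParams = NativeApi.llama_model_default_params();
        //   var model = NativeApi.llama_load_model_from_file("/path/to/model.gguf", modelParams);
        //   if (model == IntPtr.Zero)
        //       throw new RuntimeError("Failed to load model.");
        //   // ... wrap 'model' in a SafeLlamaModelHandle, then:
        //   // var ctxParams = NativeApi.llama_context_default_params();
        //   // var ctx = NativeApi.llama_new_context_with_model(modelHandle, ctxParams);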

        /// <summary>
        /// not great API - very likely to change.
        /// Initialize the llama + ggml backend.
        /// Call once at the start of the program.
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_backend_init(bool numa);

        /// <summary>
        /// Frees all allocated memory in the given llama_context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free(IntPtr ctx);

        /// <summary>
        /// Frees all allocated memory associated with a model
        /// </summary>
        /// <param name="model"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free_model(IntPtr model);

        /// <summary>
        /// Apply a LoRA adapter to a loaded model.
        /// path_base_model is the path to a higher quality model to use as a base for
        /// the layers modified by the adapter. Can be NULL to use the currently loaded model.
        /// The model needs to be reloaded before applying a new adapter, otherwise the adapter
        /// will be applied on top of the previous one.
        /// </summary>
        /// <param name="model_ptr"></param>
        /// <param name="path_lora"></param>
        /// <param name="scale"></param>
        /// <param name="path_base_model"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_apply_lora_from_file(SafeLlamaModelHandle model_ptr, string path_lora, float scale, string? path_base_model, int n_threads);

        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, uint seed);

        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);

        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);
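
        // A sketch of saving and restoring context state with the three functions above
        // (assumes 'ctx' is a valid SafeLLamaContextHandle and an unsafe context):
        //
        //   var size = NativeApi.llama_get_state_size(ctx);
        //   var state = new byte[checked((int)size)];
        //   fixed (byte* statePtr = state)
        //   {
        //       var written = NativeApi.llama_copy_state_data(ctx, statePtr);
        //       // ... later, restore the same context from the saved bytes:
        //       NativeApi.llama_set_state_data(ctx, statePtr);
        //   }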

        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens_out, ulong n_token_capacity, ulong* n_token_count_out);

        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens, ulong n_token_count);

        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process.
        /// n_past is the number of tokens to use from previous eval calls.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        [Obsolete("use llama_decode() instead")]
        public static extern int llama_eval(SafeLLamaContextHandle ctx, llama_token* tokens, int n_tokens, int n_past);

        /// <summary>
        /// Convert the provided text into tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="encoding"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned.
        /// </returns>
        public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, llama_token[] tokens, int n_max_tokens, bool add_bos, bool special)
        {
            // Calculate the number of bytes in the text and borrow an array that large (+1 for the nul byte)
            var byteCount = encoding.GetByteCount(text);
            var array = ArrayPool<byte>.Shared.Rent(byteCount + 1);
            try
            {
                // Convert to bytes
                fixed (char* textPtr = text)
                fixed (byte* arrayPtr = array)
                {
                    encoding.GetBytes(textPtr, text.Length, arrayPtr, array.Length);
                }

                // Add a zero byte to the end to terminate the string
                array[byteCount] = 0;

                // Do the actual tokenization
                fixed (byte* arrayPtr = array)
                fixed (llama_token* tokensPtr = tokens)
                    return llama_tokenize(ctx.ModelHandle, arrayPtr, byteCount, tokensPtr, n_max_tokens, add_bos, special);
            }
            finally
            {
                ArrayPool<byte>.Shared.Return(array);
            }
        }
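
        // Usage sketch for the managed llama_tokenize overload above. A negative return value
        // is the negated number of tokens required, so the buffer can be resized and the call
        // retried (assumes 'ctx' is a valid SafeLLamaContextHandle):
        //
        //   var tokens = new llama_token[64];
        //   var n = NativeApi.llama_tokenize(ctx, "Hello world", Encoding.UTF8, tokens, tokens.Length, add_bos: true, special: false);
        //   if (n < 0)
        //   {
        //       tokens = new llama_token[-n];
        //       n = NativeApi.llama_tokenize(ctx, "Hello world", Encoding.UTF8, tokens, tokens.Length, add_bos: true, special: false);
        //   }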

        /// <summary>
        /// Get the size of the context window for the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Token logits obtained from the last call to llama_eval().
        /// The logits for the last token are stored in the last row.
        /// Can be mutated in order to change the probabilities of the next token.<br />
        /// Rows: n_tokens<br />
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Logits for the ith token. Equivalent to: llama_get_logits(ctx) + i*n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="i"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits_ith(SafeLLamaContextHandle ctx, int i);
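
        // A sketch of reading (or biasing) the logits for the last evaluated token. There are
        // n_vocab floats per row, as documented above; this assumes 'ctx' exposes its model
        // handle as ctx.ModelHandle, which the managed llama_tokenize above also relies on:
        //
        //   var nVocab = NativeApi.llama_n_vocab(ctx.ModelHandle);
        //   float* logits = NativeApi.llama_get_logits(ctx);
        //   for (var i = 0; i < nVocab; i++)
        //   {
        //       // logits[i] is the score of token i; mutate it to bias sampling.
        //   }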

        /// <summary>
        /// Get the embeddings for the input
        /// shape: [n_embd] (1-dimensional)
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_embeddings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Get the "Beginning of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_bos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "End of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_eos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "new line" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_nl(SafeLlamaModelHandle model);

        /// <summary>
        /// Print out timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Reset all collected timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_print_system_info();

        /// <summary>
        /// Get the number of tokens in the model vocabulary
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_vocab(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the context window the model was trained with
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx_train(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the dimension of embedding vectors from this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_embd(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the model in bytes
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_model_size(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the number of parameters in this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_model_n_params(SafeLlamaModelHandle model);

        /// <summary>
        /// Convert a single token into text
        /// </summary>
        /// <param name="model"></param>
        /// <param name="llamaToken"></param>
        /// <param name="buffer">buffer to write string into</param>
        /// <param name="length">size of the buffer</param>
        /// <returns>The length written, or if the buffer is too small a negative value that indicates the length required</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_to_piece(SafeLlamaModelHandle model, int llamaToken, byte* buffer, int length);
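
        // Detokenization sketch for llama_token_to_piece. A negative return value means the
        // buffer was too small and its magnitude is the required length (assumes 'model' is a
        // valid SafeLlamaModelHandle and 'token' a token id, both hypothetical here):
        //
        //   var buffer = new byte[8];
        //   fixed (byte* bufferPtr = buffer)
        //   {
        //       var n = NativeApi.llama_token_to_piece(model, token, bufferPtr, buffer.Length);
        //       if (n < 0)
        //       {
        //           // resize the buffer to -n and retry, then decode with e.g. Encoding.UTF8.GetString
        //       }
        //   }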

        /// <summary>
        /// Convert text into tokens
        /// </summary>
        /// <param name="model"></param>
        /// <param name="text"></param>
        /// <param name="text_len"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned.
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_tokenize(SafeLlamaModelHandle model, byte* text, int text_len, int* tokens, int n_max_tokens, bool add_bos, bool special);

        /// <summary>
        /// Register a callback to receive llama log messages
        /// </summary>
        /// <param name="logCallback"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_log_set(LLamaLogCallback logCallback);

        /// <summary>
        /// Remove all token data of cells in [c0, c1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="c0"></param>
        /// <param name="c1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_tokens_rm(SafeLLamaContextHandle ctx, int c0, int c1);

        /// <summary>
        /// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Copy all tokens that belong to the specified sequence to another sequence.
        /// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <param name="dest"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_cp(SafeLLamaContextHandle ctx, LLamaSeqId src, LLamaSeqId dest, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Removes all tokens that do not belong to the specified sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_keep(SafeLLamaContextHandle ctx, LLamaSeqId seq);

        /// <summary>
        /// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1).
        /// If the KV cache is RoPEd, the KV data is updated accordingly.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        /// <param name="delta"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_shift(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, LLamaPos delta);
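
        // A small sketch tying the KV cache operations above together. Treat as illustrative
        // only: it assumes LLamaSeqId/LLamaPos values (seq0, pos10, pos20, posEnd, minus10)
        // have been constructed from the corresponding ints, which is not shown in this file:
        //
        //   // Drop positions [10, 20) of sequence 0, then shift the remainder back by 10 so
        //   // the cache stays contiguous (RoPE data is updated as documented above):
        //   NativeApi.llama_kv_cache_seq_rm(ctx, seq0, pos10, pos20);
        //   NativeApi.llama_kv_cache_seq_shift(ctx, seq0, pos20, posEnd, minus10);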

        /// <summary>
        /// Allocates a batch of tokens on the heap.
        /// Each token can be assigned up to n_seq_max sequence ids.
        /// The batch has to be freed with llama_batch_free().
        /// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float).
        /// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token.
        /// The rest of the llama_batch members are allocated with size n_tokens.
        /// All members are left uninitialized.
        /// </summary>
        /// <param name="n_tokens"></param>
        /// <param name="embd"></param>
        /// <param name="n_seq_max">Each token can be assigned up to n_seq_max sequence ids</param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaNativeBatch llama_batch_init(int n_tokens, int embd, int n_seq_max);

        /// <summary>
        /// Frees a batch of tokens allocated with llama_batch_init()
        /// </summary>
        /// <param name="batch"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_batch_free(LLamaNativeBatch batch);

        /// <summary>
        /// Process a batch of tokens with the given context.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="batch"></param>
        /// <returns>A positive return value does not mean a fatal error, but rather a warning:<br />
        /// - 0: success<br />
        /// - 1: could not find a KV slot for the batch (try reducing the size of the batch or increase the context)<br />
        /// - &lt; 0: error<br />
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_decode(SafeLLamaContextHandle ctx, LLamaNativeBatch batch);
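
        // A decode-loop sketch using the batch API above. Treat as illustrative: the exact
        // layout of LLamaNativeBatch is not shown in this file; the token/position/sequence-id
        // members follow the llama_batch description in the llama_batch_init docs and are an
        // assumption here:
        //
        //   var batch = NativeApi.llama_batch_init(n_tokens: 512, embd: 0, n_seq_max: 1);
        //   try
        //   {
        //       // ... fill the batch's tokens, positions and sequence ids for the prompt ...
        //       var rc = NativeApi.llama_decode(ctx, batch);
        //       if (rc == 1) { /* no KV slot: shrink the batch or grow the context */ }
        //       else if (rc < 0) { /* fatal error */ }
        //   }
        //   finally
        //   {
        //       NativeApi.llama_batch_free(batch);
        //   }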

        /// <summary>
        /// Set the number of threads used for decoding
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="n_threads">n_threads is the number of threads used for generation (single token)</param>
        /// <param name="n_threads_batch">n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)</param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_set_n_threads(SafeLLamaContextHandle ctx, uint n_threads, uint n_threads_batch);
    }
}