
NativeApi.cs

using System;
using System.Collections.Generic;
using System.IO;
using System.Runtime.InteropServices;
using System.Text;
using LLama.Exceptions;

namespace LLama.Native
{
    using llama_token = Int32;

    public unsafe partial class NativeApi
    {
        static NativeApi()
        {
            try
            {
                llama_empty_call();
            }
            catch (DllNotFoundException)
            {
                throw new RuntimeError("The native library cannot be found. This could be for one of the following reasons: \n" +
                    "1. No LLamaSharp backend was installed. Please search for LLamaSharp.Backend and install one of them. \n" +
                    "2. You are using a CPU-only device but installed the CUDA backend. Please install the CPU backend instead. \n" +
                    "3. The backend is not compatible with the CUDA environment on your system. Please check and fix it. If the environment is " +
                    "not expected to change, consider building llama.cpp from source or submitting an issue to LLamaSharp.");
            }
            NativeApi.llama_init_backend();
        }

        private const string libraryName = "libllama";

        /// <summary>
        /// A cheap call used only to check whether the native library can be loaded.
        /// It is mapped to llama_mmap_supported because that export takes no arguments and has no side effects.
        /// </summary>
        [DllImport(libraryName, EntryPoint = "llama_mmap_supported")]
        public static extern bool llama_empty_call();
        [DllImport(libraryName)]
        public static extern LLamaContextParams llama_context_default_params();

        [DllImport(libraryName)]
        public static extern bool llama_mmap_supported();

        [DllImport(libraryName)]
        public static extern bool llama_mlock_supported();

        /// <summary>
        /// Loads a ggml llama model from file.
        /// Allocates (almost) all memory needed for the model.
        /// Returns NULL on failure.
        /// </summary>
        /// <param name="path_model"></param>
        /// <param name="params_"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern IntPtr llama_init_from_file(string path_model, LLamaContextParams params_);

        /// <summary>
        /// Not a great API - very likely to change.
        /// Initializes the llama + ggml backend.
        /// Call once at the start of the program.
        /// </summary>
        [DllImport(libraryName)]
        public static extern void llama_init_backend();

        /// <summary>
        /// Frees all allocated memory.
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName)]
        public static extern void llama_free(IntPtr ctx);

        /// <summary>
        /// Applies a LoRA adapter to a loaded model.
        /// path_base_model is the path to a higher quality model to use as a base for
        /// the layers modified by the adapter. Can be NULL to use the currently loaded model.
        /// The model needs to be reloaded before applying a new adapter, otherwise the adapter
        /// will be applied on top of the previous one.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_lora"></param>
        /// <param name="path_base_model"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName)]
        public static extern int llama_apply_lora_from_file(SafeLLamaContextHandle ctx, string path_lora, string path_base_model, int n_threads);

        /// <summary>
        /// Returns the number of tokens in the KV cache.
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern int llama_get_kv_cache_token_count(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, int seed);

        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Copies the state to the specified destination buffer.
        /// The destination needs to have enough memory allocated.
        /// Returns the number of bytes copied.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte[] dest);

        /// <summary>
        /// Sets the state reading from the specified buffer.
        /// Returns the number of bytes read.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte[] src);
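
        // Illustrative only (not part of the original file): the three state functions above
        // pair up for snapshot/restore of a context. Assumes `ctx` is a valid
        // SafeLLamaContextHandle obtained elsewhere:
        //
        //   ulong size = NativeApi.llama_get_state_size(ctx);
        //   byte[] state = new byte[size];
        //   NativeApi.llama_copy_state_data(ctx, state);   // snapshot
        //   // ... later, on the same or a freshly loaded context:
        //   NativeApi.llama_set_state_data(ctx, state);    // restore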
        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens_out, ulong n_token_capacity, ulong* n_token_count_out);

        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens, ulong n_token_count);

        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process
        /// n_past is the number of tokens to use from previous eval calls
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName)]
        public static extern int llama_eval(SafeLLamaContextHandle ctx, llama_token[] tokens, int n_tokens, int n_past, int n_threads);
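
        // Illustrative only: a typical generation loop evaluates the prompt once, then one
        // sampled token at a time, advancing n_past by the number of tokens already seen.
        // `ctx`, `promptTokens`, `count` and `nextToken` are assumed to come from the caller:
        //
        //   int n_past = 0;
        //   NativeApi.llama_eval(ctx, promptTokens, count, n_past, 4);
        //   n_past += count;
        //   // ...sample `nextToken` from the logits, then:
        //   NativeApi.llama_eval(ctx, new[] { nextToken }, 1, n_past, 4);
        //   n_past += 1;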
        /// <summary>
        /// Convert the provided text into tokens.
        /// The tokens array must be large enough to hold the resulting tokens.
        /// Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="encoding"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <returns></returns>
        public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, llama_token[] tokens, int n_max_tokens, bool add_bos)
        {
            // Encode the text and reinterpret each byte as an sbyte; the cast preserves the
            // bit pattern, which is what the native (char*) parameter expects.
            var bytes = encoding.GetBytes(text);
            var data = new sbyte[bytes.Length];
            for (int i = 0; i < bytes.Length; i++)
            {
                data[i] = (sbyte)bytes[i];
            }
            return llama_tokenize_native(ctx, data, tokens, n_max_tokens, add_bos);
        }

        [DllImport(libraryName, EntryPoint = "llama_tokenize")]
        public static extern int llama_tokenize_native(SafeLLamaContextHandle ctx, sbyte[] text, llama_token[] tokens, int n_max_tokens, bool add_bos);
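
        // Illustrative only: per the doc comment above, a negative return value is the
        // token count that would have been produced, so a caller can retry with a larger
        // buffer. Assumes `ctx` is a valid SafeLLamaContextHandle:
        //
        //   var tokens = new llama_token[64];
        //   int n = NativeApi.llama_tokenize(ctx, "Hello world", Encoding.UTF8, tokens, tokens.Length, true);
        //   if (n < 0)
        //   {
        //       tokens = new llama_token[-n];
        //       n = NativeApi.llama_tokenize(ctx, "Hello world", Encoding.UTF8, tokens, tokens.Length, true);
        //   }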
        [DllImport(libraryName)]
        public static extern int llama_n_vocab(SafeLLamaContextHandle ctx);

        [DllImport(libraryName)]
        public static extern int llama_n_ctx(SafeLLamaContextHandle ctx);

        [DllImport(libraryName)]
        public static extern int llama_n_embd(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Token logits obtained from the last call to llama_eval()
        /// The logits for the last token are stored in the last row
        /// Can be mutated in order to change the probabilities of the next token
        /// Rows: n_tokens
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern float* llama_get_logits(SafeLLamaContextHandle ctx);
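
        // Illustrative only: given the [n_tokens, n_vocab] row-major layout described above,
        // the logits for the next token sit in the last row. Assumes `count` tokens were
        // passed to the last llama_eval call and that all rows were computed:
        //
        //   float* logits = NativeApi.llama_get_logits(ctx);
        //   int nVocab = NativeApi.llama_n_vocab(ctx);
        //   float* lastRow = logits + (count - 1) * nVocab;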
        /// <summary>
        /// Get the embeddings for the input
        /// shape: [n_embd] (1-dimensional)
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern float* llama_get_embeddings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Token Id -> String. Uses the vocabulary in the provided context
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="token"></param>
        /// <returns>Pointer to a string.</returns>
        [DllImport(libraryName)]
        public static extern IntPtr llama_token_to_str(SafeLLamaContextHandle ctx, llama_token token);
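
        // Illustrative only: the returned IntPtr points at a native null-terminated string
        // (assumed here to be owned by the native side, so it must not be freed by the caller).
        // It can be marshalled to a managed string like this:
        //
        //   IntPtr ptr = NativeApi.llama_token_to_str(ctx, token);
        //   string piece = Marshal.PtrToStringAnsi(ptr);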
        [DllImport(libraryName)]
        public static extern llama_token llama_token_bos();

        [DllImport(libraryName)]
        public static extern llama_token llama_token_eos();

        [DllImport(libraryName)]
        public static extern llama_token llama_token_nl();

        [DllImport(libraryName)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);

        [DllImport(libraryName)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName)]
        public static extern IntPtr llama_print_system_info();
    }
}
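
Taken together, the bindings above span the whole lifecycle of a context: create, tokenize, evaluate, read logits, free. The following is a minimal, hypothetical sketch of how a caller might wire them up; the model path "model.bin", the buffer size, the thread count, and the SafeLLamaContextHandle constructor wrapping the raw pointer are all assumptions for illustration, not part of this file.

using System;
using System.Text;
using LLama.Native;

class NativeApiUsageSketch
{
    static unsafe void Main()
    {
        // Load the model with default parameters; a null pointer signals failure.
        var ctxParams = NativeApi.llama_context_default_params();
        IntPtr raw = NativeApi.llama_init_from_file("model.bin", ctxParams);
        if (raw == IntPtr.Zero)
            throw new InvalidOperationException("Failed to load the model.");
        var ctx = new SafeLLamaContextHandle(raw); // hypothetical wrapper constructor

        // Tokenize the prompt and evaluate it in one batch.
        var tokens = new int[256];
        int count = NativeApi.llama_tokenize(ctx, "Hello", Encoding.UTF8, tokens, tokens.Length, true);
        if (NativeApi.llama_eval(ctx, tokens, count, 0, 4) != 0)
            throw new InvalidOperationException("llama_eval failed.");

        // The logits for the next token are now available.
        float* logits = NativeApi.llama_get_logits(ctx);

        NativeApi.llama_free(raw);
    }
}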

An easy-to-use, high-performance LLM inference framework for C#/.NET, supporting the LLaMA and LLaVA series of models.