using System;
using System.Buffers;
using System.Collections.Generic;
using System.IO;
using System.Runtime.InteropServices;
using System.Text;
using System.Text.Json;
using LLama.Exceptions;

#pragma warning disable IDE1006 // Naming Styles

namespace LLama.Native
{
    using llama_token = Int32;

    /// <summary>
    /// Callback from llama.cpp with log messages
    /// </summary>
    /// <param name="level"></param>
    /// <param name="message"></param>
    public delegate void LLamaLogCallback(LLamaLogLevel level, string message);

    /// <summary>
    /// Direct translation of the llama.cpp API
    /// </summary>
    public unsafe partial class NativeApi
    {
        static NativeApi()
        {
            // Try to load a preferred library, based on CPU feature detection
            TryLoadLibrary();

            try
            {
                llama_empty_call();
            }
            catch (DllNotFoundException)
            {
                throw new RuntimeError("The native library cannot be found. It could be one of the following reasons: \n" +
                                       "1. No LLamaSharp backend was installed. Please search LLamaSharp.Backend and install one of them. \n" +
                                       "2. You are using a device with only CPU but installed the cuda backend. Please install the cpu backend instead. \n" +
                                       "3. The backend is not compatible with your system cuda environment. Please check and fix it. If the environment is " +
                                       "expected not to be changed, then consider building llama.cpp from source or submitting an issue to LLamaSharp.\n" +
                                       "4. One of the dependencies of the native library is missing.\n");
            }

            llama_backend_init(false);
        }

        private static int GetCudaMajorVersion()
        {
            string? cudaPath;
            string version = "";
            if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
            {
                cudaPath = Environment.GetEnvironmentVariable("CUDA_PATH");
                if (cudaPath is null)
                {
                    return -1;
                }
                version = GetCudaVersionFromPath(cudaPath);
            }
            else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
            {
                // Try the default location first
                cudaPath = "/usr/local/bin/cuda";
                version = GetCudaVersionFromPath(cudaPath);
                if (string.IsNullOrEmpty(version))
                {
                    // Fall back to searching the directories on LD_LIBRARY_PATH
                    cudaPath = Environment.GetEnvironmentVariable("LD_LIBRARY_PATH");
                    if (cudaPath is null)
                    {
                        return -1;
                    }
                    foreach (var path in cudaPath.Split(':'))
                    {
                        version = GetCudaVersionFromPath(Path.Combine(path, ".."));
                        if (!string.IsNullOrEmpty(version))
                        {
                            // Stop at the first path that yields a version
                            break;
                        }
                    }
                }
            }

            if (string.IsNullOrEmpty(version))
            {
                return -1;
            }

            version = version.Split('.')[0];
            if (int.TryParse(version, out var majorVersion))
            {
                return majorVersion;
            }
            return -1;
        }

        private static string GetCudaVersionFromPath(string cudaPath)
        {
            try
            {
                string json = File.ReadAllText(Path.Combine(cudaPath, cudaVersionFile));
                using (JsonDocument document = JsonDocument.Parse(json))
                {
                    JsonElement root = document.RootElement;
                    JsonElement cublasNode = root.GetProperty("libcublas");
                    JsonElement versionNode = cublasNode.GetProperty("version");
                    if (versionNode.ValueKind == JsonValueKind.Undefined)
                    {
                        return string.Empty;
                    }
                    return versionNode.GetString() ?? string.Empty;
                }
            }
            catch (Exception)
            {
                return string.Empty;
            }
        }
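        // For reference, GetCudaVersionFromPath expects the CUDA toolkit's "version.json" to look
        // roughly like the illustrative sketch below (only libcublas.version is read; the exact
        // schema is an assumption, not something this library guarantees):
        //
        //   {
        //     "libcublas": {
        //       "version": "12.2.5.6"
        //     }
        //   }
        //
        // GetCudaMajorVersion then takes the text before the first '.', i.e. 12 in this example.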
#if NET6_0_OR_GREATER
        private static string GetAvxLibraryPath(NativeLibraryConfig.AvxLevel avxLevel, string prefix, string suffix)
        {
            var avxStr = NativeLibraryConfig.AvxLevelToString(avxLevel);
            if (!string.IsNullOrEmpty(avxStr))
            {
                avxStr += "/";
            }
            return $"{prefix}{avxStr}{libraryName}{suffix}";
        }

        private static List<string> GetLibraryTryOrder(NativeLibraryConfig.Description configuration)
        {
            OSPlatform platform;
            string prefix, suffix;
            if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
            {
                platform = OSPlatform.Windows;
                prefix = "runtimes/win-x64/native/";
                suffix = ".dll";
            }
            else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
            {
                platform = OSPlatform.Linux;
                prefix = "runtimes/linux-x64/native/";
                suffix = ".so";
            }
            else if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
            {
                platform = OSPlatform.OSX;
                suffix = ".dylib";
                if (System.Runtime.Intrinsics.Arm.ArmBase.Arm64.IsSupported)
                {
                    prefix = "runtimes/osx-arm64/native/";
                }
                else
                {
                    prefix = "runtimes/osx-x64/native/";
                }
            }
            else
            {
                throw new RuntimeError("Your system platform is not supported, please open an issue in LLamaSharp.");
            }

            List<string> result = new();
            if (configuration.UseCuda && (platform == OSPlatform.Windows || platform == OSPlatform.Linux)) // no cuda on macos
            {
                int cudaVersion = GetCudaMajorVersion();

                // TODO: load cuda library with avx
                if (cudaVersion == -1 && !configuration.AllowFallback)
                {
                    // if the check is skipped, we just try to load the cuda libraries one by one.
                    if (configuration.SkipCheck)
                    {
                        result.Add($"{prefix}cuda12/{libraryName}{suffix}");
                        result.Add($"{prefix}cuda11/{libraryName}{suffix}");
                    }
                    else
                    {
                        throw new RuntimeError("Configured to load a cuda library but no cuda was detected on your device.");
                    }
                }
                else if (cudaVersion == 11)
                {
                    result.Add($"{prefix}cuda11/{libraryName}{suffix}");
                }
                else if (cudaVersion == 12)
                {
                    result.Add($"{prefix}cuda12/{libraryName}{suffix}");
                }
                else if (cudaVersion > 0)
                {
                    throw new RuntimeError($"Cuda version {cudaVersion} is not yet supported by LLamaSharp, please open an issue for it.");
                }
                // otherwise no cuda was detected, but fallback is allowed
            }

            // use cpu (or mac possibly with metal)
            if (!configuration.AllowFallback && platform != OSPlatform.OSX)
            {
                result.Add(GetAvxLibraryPath(configuration.AvxLevel, prefix, suffix));
            }
            else if (platform != OSPlatform.OSX) // on macos there's absolutely no avx
            {
#if NET8_0_OR_GREATER
                if (configuration.AvxLevel == NativeLibraryConfig.AvxLevel.Avx512)
                {
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx512, prefix, suffix));
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx2, prefix, suffix));
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx, prefix, suffix));
                }
                else
#endif
                if (configuration.AvxLevel == NativeLibraryConfig.AvxLevel.Avx2)
                {
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx2, prefix, suffix));
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx, prefix, suffix));
                }
                else if (configuration.AvxLevel == NativeLibraryConfig.AvxLevel.Avx)
                {
                    result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.Avx, prefix, suffix));
                }
                result.Add(GetAvxLibraryPath(NativeLibraryConfig.AvxLevel.None, prefix, suffix));
            }

            if (platform == OSPlatform.OSX)
            {
                result.Add($"{prefix}{libraryName}{suffix}");
            }

            return result;
        }
#endif
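        // To illustrate: on Windows with UseCuda = false, AvxLevel = Avx2 and AllowFallback = true,
        // and assuming NativeLibraryConfig.AvxLevelToString returns "avx2"/"avx"/"" for the
        // respective levels (an assumption based on GetAvxLibraryPath above), GetLibraryTryOrder
        // would produce this try-order:
        //
        //   runtimes/win-x64/native/avx2/libllama.dll
        //   runtimes/win-x64/native/avx/libllama.dll
        //   runtimes/win-x64/native/libllama.dll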
        /// <summary>
        /// Try to load libllama, using CPU feature detection to try and load a more specialised DLL if possible
        /// </summary>
        /// <returns>The library handle to unload later, or IntPtr.Zero if no library was loaded</returns>
        private static IntPtr TryLoadLibrary()
        {
#if NET6_0_OR_GREATER
            var configuration = NativeLibraryConfig.GetInstance().Desc;

            if (!string.IsNullOrEmpty(configuration.Path))
            {
                // When loading the user specified library, there's no fallback.
                var result = TryLoad(configuration.Path, true);
                if (result is null || result == IntPtr.Zero)
                {
                    throw new RuntimeError($"Failed to load the native library [{configuration.Path}] you specified.");
                }
                return result ?? IntPtr.Zero;
            }

            var libraryTryLoadOrder = GetLibraryTryOrder(configuration);

            foreach (var libraryPath in libraryTryLoadOrder)
            {
                var result = TryLoad(libraryPath, true);
                if (result is not null && result != IntPtr.Zero)
                {
                    Console.ForegroundColor = ConsoleColor.Red;
                    Console.WriteLine($"[Native Library] {libraryPath} is loaded.");
                    Console.ResetColor();
                    return result ?? IntPtr.Zero;
                }
                else
                {
                    Console.WriteLine($"Tried to load {libraryPath}");
                }
            }

            if (!configuration.AllowFallback)
            {
                throw new RuntimeError("Failed to load a library that matches your rule, please" +
                                       " 1) check your rule." +
                                       " 2) try to allow fallback." +
                                       " 3) or open an issue if it's expected to be successful.");
            }
#endif

            return IntPtr.Zero;

#if NET6_0_OR_GREATER
            // Try to load a DLL from the path if supported. Returns null if nothing is loaded.
            static IntPtr? TryLoad(string path, bool supported = true)
            {
                if (!supported)
                    return null;

                if (NativeLibrary.TryLoad(path, out var handle))
                    return handle;

                return null;
            }
#endif
        }

        private const string libraryName = "libllama";
        private const string cudaVersionFile = "version.json";

        /// <summary>
        /// A method that does nothing. This is a native method, calling it will force the llama native dependencies to be loaded.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, EntryPoint = "llama_mmap_supported", CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_empty_call();

        /// <summary>
        /// Get the maximum number of devices supported by llama.cpp
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_max_devices();

        /// <summary>
        /// Create a LLamaModelParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelParams llama_model_default_params();

        /// <summary>
        /// Create a LLamaContextParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaContextParams llama_context_default_params();

        /// <summary>
        /// Create a LLamaModelQuantizeParams with default values
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();

        /// <summary>
        /// Check if memory mapping is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mmap_supported();

        /// <summary>
        /// Check if memory locking is supported
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mlock_supported();

        /// <summary>
        /// Various functions for loading a ggml llama model.
        /// Allocate (almost) all memory needed for the model.
        /// Return NULL on failure
        /// </summary>
        /// <param name="path_model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_load_model_from_file(string path_model, LLamaModelParams @params);

        /// <summary>
        /// Create a new llama_context with the given model.
        /// Return value should always be wrapped in SafeLLamaContextHandle!
        /// </summary>
        /// <param name="model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_new_context_with_model(SafeLlamaModelHandle model, LLamaContextParams @params);
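        // Illustrative sketch (a hypothetical helper, not part of the public API): loading a
        // model with default parameters and freeing it again. In real code the returned pointer
        // should be wrapped in a SafeLlamaModelHandle (and contexts in a SafeLLamaContextHandle)
        // rather than released manually like this.
        private static void ExampleLoadAndFreeModel(string modelPath)
        {
            var modelParams = llama_model_default_params();
            var model = llama_load_model_from_file(modelPath, modelParams);
            if (model == IntPtr.Zero)
                throw new RuntimeError($"Failed to load model from [{modelPath}].");

            // ... wrap `model` in a SafeLlamaModelHandle, then create a context with
            // llama_new_context_with_model(handle, llama_context_default_params()) ...

            llama_free_model(model);
        }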
        /// <summary>
        /// not great API - very likely to change.
        /// Initialize the llama + ggml backend
        /// Call once at the start of the program
        /// </summary>
        /// <param name="numa"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_backend_init(bool numa);

        /// <summary>
        /// Frees all allocated memory in the given llama_context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free(IntPtr ctx);

        /// <summary>
        /// Frees all allocated memory associated with a model
        /// </summary>
        /// <param name="model"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free_model(IntPtr model);

        /// <summary>
        /// Apply a LoRA adapter to a loaded model
        /// path_base_model is the path to a higher quality model to use as a base for
        /// the layers modified by the adapter. Can be NULL to use the current loaded model.
        /// The model needs to be reloaded before applying a new adapter, otherwise the adapter
        /// will be applied on top of the previous one
        /// </summary>
        /// <param name="model_ptr"></param>
        /// <param name="path_lora"></param>
        /// <param name="scale"></param>
        /// <param name="path_base_model"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_apply_lora_from_file(SafeLlamaModelHandle model_ptr, string path_lora, float scale, string? path_base_model, int n_threads);

        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, uint seed);

        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);

        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);

        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens_out, ulong n_token_capacity, ulong* n_token_count_out);

        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens, ulong n_token_count);

        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process
        /// n_past is the number of tokens to use from previous eval calls
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        [Obsolete("use llama_decode() instead")]
        public static extern int llama_eval(SafeLLamaContextHandle ctx, llama_token* tokens, int n_tokens, int n_past);
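        // Illustrative sketch (a hypothetical helper): snapshotting the context state into a
        // managed array via llama_get_state_size/llama_copy_state_data. The snapshot could later
        // be restored with llama_set_state_data.
        private static byte[] ExampleCopyStateData(SafeLLamaContextHandle ctx)
        {
            // llama_get_state_size is an upper bound; llama_copy_state_data reports how many
            // bytes were actually written.
            var capacity = llama_get_state_size(ctx);
            var buffer = new byte[capacity];
            ulong written;
            fixed (byte* bufferPtr = buffer)
            {
                written = llama_copy_state_data(ctx, bufferPtr);
            }
            Array.Resize(ref buffer, checked((int)written));
            return buffer;
        }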
        /// <summary>
        /// Convert the provided text into tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="encoding"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
        public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, llama_token[] tokens, int n_max_tokens, bool add_bos, bool special)
        {
            // Calculate the number of bytes in the text and borrow an array that large (+1 for the nul byte)
            var byteCount = encoding.GetByteCount(text);
            var array = ArrayPool<byte>.Shared.Rent(byteCount + 1);
            try
            {
                // Convert to bytes
                fixed (char* textPtr = text)
                fixed (byte* arrayPtr = array)
                {
                    encoding.GetBytes(textPtr, text.Length, arrayPtr, array.Length);
                }

                // Add a zero byte to the end to terminate the string
                array[byteCount] = 0;

                // Do the actual tokenization
                fixed (byte* arrayPtr = array)
                fixed (llama_token* tokensPtr = tokens)
                    return llama_tokenize(ctx.ModelHandle, arrayPtr, byteCount, tokensPtr, n_max_tokens, add_bos, special);
            }
            finally
            {
                ArrayPool<byte>.Shared.Return(array);
            }
        }

        /// <summary>
        /// Get the size of the context window for the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx(SafeLLamaContextHandle ctx);
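        // Illustrative sketch (a hypothetical helper): tokenizing a prompt with the managed
        // llama_tokenize wrapper above, retrying once with a larger buffer when the negative
        // return value reports how many tokens were actually required.
        private static llama_token[] ExampleTokenize(SafeLLamaContextHandle ctx, string text)
        {
            var tokens = new llama_token[64];
            var count = llama_tokenize(ctx, text, Encoding.UTF8, tokens, tokens.Length, add_bos: true, special: false);
            if (count < 0)
            {
                // The buffer was too small; -count is the required token count.
                tokens = new llama_token[-count];
                count = llama_tokenize(ctx, text, Encoding.UTF8, tokens, tokens.Length, add_bos: true, special: false);
            }
            Array.Resize(ref tokens, count);
            return tokens;
        }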
        /// <summary>
        /// Token logits obtained from the last call to llama_eval()
        /// The logits for the last token are stored in the last row
        /// Can be mutated in order to change the probabilities of the next token.
        /// Rows: n_tokens
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits(SafeLLamaContextHandle ctx);
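        // Illustrative sketch (a hypothetical helper): greedy (argmax) selection over the logits
        // of the last evaluated token. This assumes the last batch produced logits for exactly
        // one token, so the buffer holds a single row of n_vocab values; for multi-token batches,
        // llama_get_logits_ith (below) selects a specific row.
        private static llama_token ExampleGreedySample(SafeLLamaContextHandle ctx, int n_vocab)
        {
            float* logits = llama_get_logits(ctx);
            var best = 0;
            for (var i = 1; i < n_vocab; i++)
            {
                if (logits[i] > logits[best])
                    best = i;
            }
            return best;
        }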
        /// <summary>
        /// Logits for the ith token. Equivalent to: llama_get_logits(ctx) + i*n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="i"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits_ith(SafeLLamaContextHandle ctx, int i);

        /// <summary>
        /// Get the embeddings for the input
        /// shape: [n_embd] (1-dimensional)
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_embeddings(SafeLLamaContextHandle ctx);
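        // Illustrative sketch (a hypothetical helper): copying the embedding vector out of native
        // memory. n_embd should come from llama_n_embd on the model; the null check is a
        // precaution for contexts that were not created with embeddings enabled.
        private static float[] ExampleGetEmbeddings(SafeLLamaContextHandle ctx, int n_embd)
        {
            float* embeddings = llama_get_embeddings(ctx);
            if (embeddings == null)
                throw new RuntimeError("No embeddings are available for this context.");

            var result = new float[n_embd];
            Marshal.Copy((IntPtr)embeddings, result, 0, n_embd);
            return result;
        }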
        /// <summary>
        /// Get the "Beginning of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_bos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "End of sentence" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_eos(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the "new line" token
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_nl(SafeLlamaModelHandle model);

        /// <summary>
        /// Print out timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Reset all collected timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);

        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_print_system_info();

        /// <summary>
        /// Get the number of tokens in the model vocabulary
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_vocab(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the context window the model was trained on
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx_train(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the dimension of embedding vectors from this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_embd(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the size of the model in bytes
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_model_size(SafeLlamaModelHandle model);

        /// <summary>
        /// Get the number of parameters in this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_model_n_params(SafeLlamaModelHandle model);

        /// <summary>
        /// Convert a single token into text
        /// </summary>
        /// <param name="model"></param>
        /// <param name="llamaToken"></param>
        /// <param name="buffer">buffer to write string into</param>
        /// <param name="length">size of the buffer</param>
        /// <returns>The length written, or if the buffer is too small a negative value that indicates the length required</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_token_to_piece(SafeLlamaModelHandle model, int llamaToken, byte* buffer, int length);
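        // Illustrative sketch (a hypothetical helper): converting one token to a string with
        // llama_token_to_piece, growing the buffer when the negative return value reports the
        // required length.
        private static string ExampleTokenToText(SafeLlamaModelHandle model, llama_token token, Encoding encoding)
        {
            var buffer = new byte[64];
            int length;
            fixed (byte* bufferPtr = buffer)
            {
                length = llama_token_to_piece(model, token, bufferPtr, buffer.Length);
            }

            if (length < 0)
            {
                // The buffer was too small; -length is the required size.
                buffer = new byte[-length];
                fixed (byte* bufferPtr = buffer)
                {
                    length = llama_token_to_piece(model, token, bufferPtr, buffer.Length);
                }
            }

            return encoding.GetString(buffer, 0, length);
        }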
        /// <summary>
        /// Convert text into tokens
        /// </summary>
        /// <param name="model"></param>
        /// <param name="text"></param>
        /// <param name="text_len"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
        /// <returns>Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_tokenize(SafeLlamaModelHandle model, byte* text, int text_len, int* tokens, int n_max_tokens, bool add_bos, bool special);

        /// <summary>
        /// Register a callback to receive llama log messages
        /// </summary>
        /// <param name="logCallback"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_log_set(LLamaLogCallback logCallback);

        /// <summary>
        /// Removes all token data of cells in [c0, c1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="c0"></param>
        /// <param name="c1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_tokens_rm(SafeLLamaContextHandle ctx, int c0, int c1);

        /// <summary>
        /// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Copy all tokens that belong to the specified sequence to another sequence
        /// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <param name="dest"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_cp(SafeLLamaContextHandle ctx, LLamaSeqId src, LLamaSeqId dest, LLamaPos p0, LLamaPos p1);

        /// <summary>
        /// Removes all tokens that do not belong to the specified sequence
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_keep(SafeLLamaContextHandle ctx, LLamaSeqId seq);

        /// <summary>
        /// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
        /// If the KV cache is RoPEd, the KV data is updated accordingly
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seq"></param>
        /// <param name="p0"></param>
        /// <param name="p1"></param>
        /// <param name="delta"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_kv_cache_seq_shift(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, LLamaPos delta);

        /// <summary>
        /// Allocates a batch of tokens on the heap
        /// Each token can be assigned up to n_seq_max sequence ids
        /// The batch has to be freed with llama_batch_free()
        /// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
        /// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
        /// The rest of the llama_batch members are allocated with size n_tokens
        /// All members are left uninitialized
        /// </summary>
        /// <param name="n_tokens"></param>
        /// <param name="embd"></param>
        /// <param name="n_seq_max">Each token can be assigned up to n_seq_max sequence ids</param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaNativeBatch llama_batch_init(int n_tokens, int embd, int n_seq_max);

        /// <summary>
        /// Frees a batch of tokens allocated with llama_batch_init()
        /// </summary>
        /// <param name="batch"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_batch_free(LLamaNativeBatch batch);
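        // Illustrative sketch (a hypothetical helper): the llama_batch_init/llama_batch_free
        // lifecycle. Populating the batch members (tokens, positions, sequence ids, logit flags)
        // is application-specific and only hinted at here.
        private static void ExampleBatchLifetime(SafeLLamaContextHandle ctx)
        {
            // embd == 0 means the batch carries token ids rather than raw embeddings.
            var batch = llama_batch_init(n_tokens: 512, embd: 0, n_seq_max: 1);
            try
            {
                // ... fill in the batch members, then call llama_decode(ctx, batch)
                //     and check its return value as documented below ...
            }
            finally
            {
                llama_batch_free(batch);
            }
        }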
        /// <summary>
        /// Process a batch of tokens with the model
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="batch"></param>
        /// <returns>Positive return values do not mean a fatal error, but rather a warning:
        /// - 0: success
        /// - 1: could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
        /// - &lt; 0: error
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_decode(SafeLLamaContextHandle ctx, LLamaNativeBatch batch);

        /// <summary>
        /// Set the number of threads used for decoding
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="n_threads">n_threads is the number of threads used for generation (single token)</param>
        /// <param name="n_threads_batch">n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)</param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_set_n_threads(SafeLLamaContextHandle ctx, uint n_threads, uint n_threads_batch);
    }
}