using System;
using System.Buffers;
using System.Runtime.InteropServices;
using System.Text;
using LLama.Common;
using LLama.Exceptions;
#pragma warning disable IDE1006 // Naming Styles
namespace LLama.Native
{
    using llama_token = Int32;
    public delegate void LLamaLogCallback(ILLamaLogger.LogLevel level, string message);
    public unsafe partial class NativeApi
    {
        public static readonly int LLAMA_MAX_DEVICES = 1;
        static NativeApi()
        {
            try
            {
                llama_empty_call();
            }
            catch (DllNotFoundException)
            {
                throw new RuntimeError("The native library cannot be found. This could be due to one of the following reasons: \n" +
                    "1. No LLamaSharp backend was installed. Please search for LLamaSharp.Backend and install one of them. \n" +
                    "2. You are using a CPU-only device but installed the CUDA backend. Please install the CPU backend instead. \n" +
                    "3. The backend is not compatible with your system's CUDA environment. Please check and fix it. If the environment " +
                    "cannot be changed, consider building llama.cpp from source or submitting an issue to LLamaSharp.\n" +
                    "4. One of the dependencies of the native library is missing.\n");
            }
            NativeApi.llama_backend_init(false);
        }
        private const string libraryName = "libllama";
        /// <summary>
        /// A method that does nothing. This is a native method, calling it will force the llama native dependencies to be loaded.
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, EntryPoint = "llama_mmap_supported", CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_empty_call();
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaContextParams llama_context_default_params();
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mmap_supported();
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_mlock_supported();
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_eval_export(SafeLLamaContextHandle ctx, string fname);
        /// <summary>
        /// Various functions for loading a ggml llama model.
        /// Allocate (almost) all memory needed for the model.
        /// Return NULL on failure.
        /// </summary>
        /// <param name="path_model"></param>
        /// <param name="params"></param>
        /// <returns></returns>
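        /// <example>
        /// A minimal loading sketch (the path and error handling are illustrative, not part of the API):
        /// <code>
        /// var ctxParams = NativeApi.llama_context_default_params();
        /// var model = NativeApi.llama_load_model_from_file("/path/to/model.bin", ctxParams);
        /// if (model == IntPtr.Zero)
        ///     throw new RuntimeError("Failed to load the model.");
        /// </code>
        /// </example>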
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_load_model_from_file(string path_model, LLamaContextParams @params);
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_new_context_with_model(SafeLlamaModelHandle model, LLamaContextParams @params);
        /// <summary>
        /// not great API - very likely to change.
        /// Initialize the llama + ggml backend.
        /// Call once at the start of the program.
        /// </summary>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_backend_init(bool numa);
        /// <summary>
        /// Frees all allocated memory
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free(IntPtr ctx);
        /// <summary>
        /// Frees all allocated memory associated with a model
        /// </summary>
        /// <param name="model"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_free_model(IntPtr model);
        /// <summary>
        /// Apply a LoRA adapter to a loaded model.
        /// path_base_model is the path to a higher quality model to use as a base for
        /// the layers modified by the adapter. Can be NULL to use the current loaded model.
        /// The model needs to be reloaded before applying a new adapter, otherwise the adapter
        /// will be applied on top of the previous one.
        /// </summary>
        /// <param name="model_ptr"></param>
        /// <param name="path_lora"></param>
        /// <param name="path_base_model"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
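        /// <example>
        /// A minimal sketch (assumes <c>model</c> is a valid model handle; the adapter path and thread count are illustrative):
        /// <code>
        /// if (NativeApi.llama_model_apply_lora_from_file(model, "/path/to/adapter.bin", null, 4) != 0)
        ///     throw new RuntimeError("Failed to apply the LoRA adapter.");
        /// </code>
        /// </example>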
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_apply_lora_from_file(SafeLlamaModelHandle model_ptr, string path_lora, string? path_base_model, int n_threads);
        /// <summary>
        /// Returns the number of tokens in the KV cache
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_get_kv_cache_token_count(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Sets the current rng seed.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="seed"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, int seed);
        /// <summary>
        /// Returns the maximum size in bytes of the state (rng, logits, embedding
        /// and kv_cache) - will often be smaller after compacting tokens
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);
        /// <summary>
        /// Copies the state to the specified destination address.
        /// Destination needs to have allocated enough memory (see llama_get_state_size)
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="dest"></param>
        /// <returns>the number of bytes copied</returns>
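        /// <example>
        /// A minimal save/restore sketch (assumes <c>ctx</c> is a valid context handle):
        /// <code>
        /// var size = NativeApi.llama_get_state_size(ctx);
        /// var state = new byte[size];
        /// NativeApi.llama_copy_state_data(ctx, state);
        /// // ... later, restore the captured state:
        /// NativeApi.llama_set_state_data(ctx, state);
        /// </code>
        /// </example>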
        public static ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte[] dest)
        {
            fixed (byte* dstPtr = &dest[0])
            {
                return llama_copy_state_data(ctx, dstPtr);
            }
        }
        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);
        /// <summary>
        /// Set the state reading from the specified address
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="src"></param>
        /// <returns>the number of bytes read</returns>
        public static ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte[] src)
        {
            fixed (byte* srcPtr = &src[0])
            {
                return llama_set_state_data(ctx, srcPtr);
            }
        }
        /// <summary>
        /// Load session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens_out"></param>
        /// <param name="n_token_capacity"></param>
        /// <param name="n_token_count_out"></param>
        /// <returns></returns>
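        /// <example>
        /// A minimal sketch (assumes <c>ctx</c> is valid and an unsafe context; the session path is illustrative):
        /// <code>
        /// var tokens = new llama_token[NativeApi.llama_n_ctx(ctx)];
        /// ulong* countPtr = stackalloc ulong[1];
        /// if (NativeApi.llama_load_session_file(ctx, "session.bin", tokens, (ulong)tokens.Length, countPtr))
        /// {
        ///     // countPtr[0] holds the number of tokens restored into tokens
        /// }
        /// </code>
        /// </example>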
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens_out, ulong n_token_capacity, ulong* n_token_count_out);
        /// <summary>
        /// Save session file
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="path_session"></param>
        /// <param name="tokens"></param>
        /// <param name="n_token_count"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, llama_token[] tokens, ulong n_token_count);
        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process.
        /// n_past is the number of tokens to use from previous eval calls.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
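        /// <example>
        /// A minimal sketch (assumes <c>tokens</c> holds a tokenized prompt; the thread count is illustrative):
        /// <code>
        /// if (NativeApi.llama_eval(ctx, tokens, tokens.Length, 0, 4) != 0)
        ///     throw new RuntimeError("Failed to eval.");
        /// </code>
        /// </example>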
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_eval(SafeLLamaContextHandle ctx, llama_token[] tokens, int n_tokens, int n_past, int n_threads);
        /// <summary>
        /// Run the llama inference to obtain the logits and probabilities for the next token.
        /// tokens + n_tokens is the provided batch of new tokens to process.
        /// n_past is the number of tokens to use from previous eval calls.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="tokens"></param>
        /// <param name="n_tokens"></param>
        /// <param name="n_past"></param>
        /// <param name="n_threads"></param>
        /// <returns>Returns 0 on success</returns>
        [DllImport(libraryName, EntryPoint = "llama_eval", CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_eval_with_pointer(SafeLLamaContextHandle ctx, llama_token* tokens, int n_tokens, int n_past, int n_threads);
        /// <summary>
        /// Convert the provided text into tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="encoding"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <returns>
        /// Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned.
        /// </returns>
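        /// <example>
        /// A minimal sketch (the buffer size is illustrative):
        /// <code>
        /// var tokens = new llama_token[256];
        /// var n = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, tokens, tokens.Length, true);
        /// // n is the number of tokens written on success, negative on failure
        /// </code>
        /// </example>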
        public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, llama_token[] tokens, int n_max_tokens, bool add_bos)
        {
            // Calculate number of bytes in text and borrow an array that large (+1 for nul byte)
            var byteCount = encoding.GetByteCount(text);
            var array = ArrayPool<byte>.Shared.Rent(byteCount + 1);
            try
            {
                // Convert to bytes
                fixed (char* textPtr = text)
                fixed (byte* arrayPtr = array)
                {
                    encoding.GetBytes(textPtr, text.Length, arrayPtr, array.Length);
                }
                // Add a zero byte to the end to terminate the string
                array[byteCount] = 0;
                // Do the actual tokenization
                fixed (byte* arrayPtr = array)
                fixed (llama_token* tokensPtr = tokens)
                    return llama_tokenize_native(ctx, arrayPtr, tokensPtr, n_max_tokens, add_bos);
            }
            finally
            {
                ArrayPool<byte>.Shared.Return(array);
            }
        }
        /// <summary>
        /// Convert the provided text into tokens.
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="text"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <returns>
        /// Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned.
        /// </returns>
        [DllImport(libraryName, EntryPoint = "llama_tokenize", CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_tokenize_native(SafeLLamaContextHandle ctx, byte* text, llama_token* tokens, int n_max_tokens, bool add_bos);
        /// <summary>
        /// Get the number of tokens in the model vocabulary for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_vocab(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Get the size of the context window for the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_ctx(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Get the dimension of embedding vectors from the model for this context
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_n_embd(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Token logits obtained from the last call to llama_eval().
        /// The logits for the last token are stored in the last row.
        /// Can be mutated in order to change the probabilities of the next token.
        /// Rows: n_tokens
        /// Cols: n_vocab
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
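        /// <example>
        /// A minimal sketch wrapping the raw pointer in a span; assumes only the last
        /// token's logits are stored (the default), i.e. one row of n_vocab values:
        /// <code>
        /// var logits = NativeApi.llama_get_logits(ctx);
        /// var row = new Span&lt;float&gt;(logits, NativeApi.llama_n_vocab(ctx));
        /// </code>
        /// </example>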
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_logits(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Get the embeddings for the input.
        /// shape: [n_embd] (1-dimensional)
        /// </summary>
        /// <param name="ctx"></param>
        /// <returns></returns>
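        /// <example>
        /// A minimal sketch (assumes the context was created with embeddings enabled):
        /// <code>
        /// var embeddings = NativeApi.llama_get_embeddings(ctx);
        /// var vector = new Span&lt;float&gt;(embeddings, NativeApi.llama_n_embd(ctx));
        /// </code>
        /// </example>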
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern float* llama_get_embeddings(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Token Id -> String. Uses the vocabulary in the provided context
        /// </summary>
        /// <param name="ctx"></param>
        /// <param name="token"></param>
        /// <returns>Pointer to a string.</returns>
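        /// <example>
        /// A minimal sketch marshalling the returned pointer (ANSI marshalling is illustrative;
        /// multi-byte UTF-8 pieces may need byte-level handling):
        /// <code>
        /// var ptr = NativeApi.llama_token_to_str(ctx, token);
        /// var piece = Marshal.PtrToStringAnsi(ptr);
        /// </code>
        /// </example>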
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_token_to_str(SafeLLamaContextHandle ctx, llama_token token);
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_bos();
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_eos();
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern llama_token llama_token_nl();
        /// <summary>
        /// Print out timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_print_timings(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Reset all collected timing information for this context
        /// </summary>
        /// <param name="ctx"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);
        /// <summary>
        /// Print system information
        /// </summary>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern IntPtr llama_print_system_info();
        /// <summary>
        /// Get the number of tokens in the model vocabulary
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_n_vocab(SafeLlamaModelHandle model);
        /// <summary>
        /// Get the size of the context window for the model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_n_ctx(SafeLlamaModelHandle model);
        /// <summary>
        /// Get the dimension of embedding vectors from this model
        /// </summary>
        /// <param name="model"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_model_n_embd(SafeLlamaModelHandle model);
        /// <summary>
        /// Convert a single token into text
        /// </summary>
        /// <param name="model"></param>
        /// <param name="llamaToken"></param>
        /// <returns></returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern byte* llama_token_to_str_with_model(SafeLlamaModelHandle model, int llamaToken);
        /// <summary>
        /// Convert text into tokens
        /// </summary>
        /// <param name="model"></param>
        /// <param name="text"></param>
        /// <param name="tokens"></param>
        /// <param name="n_max_tokens"></param>
        /// <param name="add_bos"></param>
        /// <returns>
        /// Returns the number of tokens on success, no more than n_max_tokens.
        /// Returns a negative number on failure - the number of tokens that would have been returned.
        /// </returns>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern int llama_tokenize_with_model(SafeLlamaModelHandle model, byte* text, int* tokens, int n_max_tokens, bool add_bos);
        /// <summary>
        /// Register a callback to receive llama log messages
        /// </summary>
        /// <param name="logCallback"></param>
        [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
        public static extern void llama_log_set(LLamaLogCallback logCallback);
    }
}