using System;
using System.Buffers;
using System.Runtime.InteropServices;
using System.Text;
#pragma warning disable IDE1006 // Naming Styles
namespace LLama.Native
{
/// <summary>
/// Callback from llama.cpp with log messages
/// </summary>
/// <param name="level"></param>
/// <param name="message"></param>
public delegate void LLamaLogCallback(LLamaLogLevel level, string message);
/// <summary>
/// Direct translation of the llama.cpp API
/// </summary>
public static partial class NativeApi
{
/// <summary>
/// A method that does nothing. This is a native method, calling it will force the llama native dependencies to be loaded.
/// </summary>
/// <returns></returns>
public static void llama_empty_call()
{
llama_mmap_supported();
}
/// <summary>
/// Get the maximum number of devices supported by llama.cpp
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_max_devices();
/// <summary>
/// Create a LLamaModelParams with default values
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaModelParams llama_model_default_params();
/// <summary>
/// Create a LLamaContextParams with default values
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaContextParams llama_context_default_params();
/// <summary>
/// Create a LLamaModelQuantizeParams with default values
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaModelQuantizeParams llama_model_quantize_default_params();
/// <summary>
/// Check if memory mapping is supported
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_mmap_supported();
/// <summary>
/// Check if memory locking is supported
/// </summary>
/// <returns></returns>
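/// <example>
/// A minimal sketch of querying both capability checks before deciding how to load a model:
/// <code>
/// // Ask llama.cpp what this platform supports before setting mmap/mlock options
/// bool canMap = NativeApi.llama_mmap_supported();
/// bool canLock = NativeApi.llama_mlock_supported();
/// Console.WriteLine($"mmap supported: {canMap}, mlock supported: {canLock}");
/// </code>
/// </example>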
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_mlock_supported();
/// <summary>
/// Initialize the llama + ggml backend
/// Call once at the start of the program
/// </summary>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
private static extern void llama_backend_init(bool numa);
/// <summary>
/// Apply a LoRA adapter to a loaded model
/// path_base_model is the path to a higher quality model to use as a base for
/// the layers modified by the adapter. Can be NULL to use the current loaded model.
/// The model needs to be reloaded before applying a new adapter, otherwise the adapter
/// will be applied on top of the previous one
/// </summary>
/// <param name="model_ptr"></param>
/// <param name="path_lora"></param>
/// <param name="scale"></param>
/// <param name="path_base_model"></param>
/// <param name="n_threads"></param>
/// <returns>Returns 0 on success</returns>
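/// <example>
/// A sketch of applying an adapter, assuming a hypothetical loaded <c>model</c> handle and a
/// hypothetical local path "adapter.bin":
/// <code>
/// // Passing null for path_base_model bases the adapter on the currently loaded model
/// int result = NativeApi.llama_model_apply_lora_from_file(model, "adapter.bin", 1.0f, null, 4);
/// if (result != 0)
///     throw new InvalidOperationException("Failed to apply LoRA adapter");
/// </code>
/// </example>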
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_model_apply_lora_from_file(SafeLlamaModelHandle model_ptr, string path_lora, float scale, string? path_base_model, int n_threads);
/// <summary>
/// Sets the current rng seed.
/// </summary>
/// <param name="ctx"></param>
/// <param name="seed"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_set_rng_seed(SafeLLamaContextHandle ctx, uint seed);
/// <summary>
/// Returns the maximum size in bytes of the state (rng, logits, embedding
/// and kv_cache) - will often be smaller after compacting tokens
/// </summary>
/// <param name="ctx"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern ulong llama_get_state_size(SafeLLamaContextHandle ctx);
/// <summary>
/// Copies the state to the specified destination address.
/// Destination needs to have allocated enough memory.
/// </summary>
/// <param name="ctx"></param>
/// <param name="dest"></param>
/// <returns>the number of bytes copied</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe ulong llama_copy_state_data(SafeLLamaContextHandle ctx, byte* dest);
/// <summary>
/// Set the state reading from the specified address
/// </summary>
/// <param name="ctx"></param>
/// <param name="src"></param>
/// <returns>the number of bytes read</returns>
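/// <example>
/// A minimal sketch of a save/restore round-trip built from the three state functions,
/// assuming a hypothetical valid <c>ctx</c>:
/// <code>
/// // Snapshot the full context state (rng, logits, embedding, kv_cache) into a managed buffer
/// var state = new byte[NativeApi.llama_get_state_size(ctx)];
/// unsafe
/// {
///     fixed (byte* ptr = state)
///         NativeApi.llama_copy_state_data(ctx, ptr);
///
///     // ... run some inference, then roll the context back to the snapshot
///     fixed (byte* ptr = state)
///         NativeApi.llama_set_state_data(ctx, ptr);
/// }
/// </code>
/// </example>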
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe ulong llama_set_state_data(SafeLLamaContextHandle ctx, byte* src);
/// <summary>
/// Load session file
/// </summary>
/// <param name="ctx"></param>
/// <param name="path_session"></param>
/// <param name="tokens_out"></param>
/// <param name="n_token_capacity"></param>
/// <param name="n_token_count_out"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens_out, ulong n_token_capacity, out ulong n_token_count_out);
/// <summary>
/// Save session file
/// </summary>
/// <param name="ctx"></param>
/// <param name="path_session"></param>
/// <param name="tokens"></param>
/// <param name="n_token_count"></param>
/// <returns></returns>
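/// <example>
/// A rough sketch of a session round-trip, assuming hypothetical <c>ctx</c> and <c>tokens</c>
/// values and a hypothetical file name:
/// <code>
/// // Persist the evaluated tokens so a later run can skip re-evaluating the prompt
/// NativeApi.llama_save_session_file(ctx, "prompt.session", tokens, (ulong)tokens.Length);
///
/// // Later: reload them into a fresh context
/// var loaded = new LLamaToken[2048];
/// if (NativeApi.llama_load_session_file(ctx, "prompt.session", loaded, (ulong)loaded.Length, out var count))
///     Console.WriteLine($"Restored {count} tokens");
/// </code>
/// </example>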
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens, ulong n_token_count);
/// <summary>
/// Run the llama inference to obtain the logits and probabilities for the next token.
/// tokens + n_tokens is the provided batch of new tokens to process
/// n_past is the number of tokens to use from previous eval calls
/// </summary>
/// <param name="ctx"></param>
/// <param name="tokens"></param>
/// <param name="n_tokens"></param>
/// <param name="n_past"></param>
/// <returns>Returns 0 on success</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
[Obsolete("use llama_decode() instead")]
public static extern unsafe int llama_eval(SafeLLamaContextHandle ctx, LLamaToken* tokens, int n_tokens, int n_past);
/// <summary>
/// Convert the provided text into tokens.
/// </summary>
/// <param name="ctx"></param>
/// <param name="text"></param>
/// <param name="encoding"></param>
/// <param name="tokens"></param>
/// <param name="n_max_tokens"></param>
/// <param name="add_bos"></param>
/// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
/// <returns>Returns the number of tokens on success, no more than n_max_tokens.
/// Returns a negative number on failure - the number of tokens that would have been returned
/// </returns>
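/// <example>
/// A minimal usage sketch, assuming a hypothetical <c>ctx</c>. A negative return value reports
/// how many tokens the buffer would have needed, so the call can be retried with a larger array:
/// <code>
/// var tokens = new LLamaToken[64];
/// var n = NativeApi.llama_tokenize(ctx, "Hello world", Encoding.UTF8, tokens, tokens.Length, add_bos: true, special: false);
/// if (n &lt; 0)
/// {
///     tokens = new LLamaToken[-n];
///     n = NativeApi.llama_tokenize(ctx, "Hello world", Encoding.UTF8, tokens, tokens.Length, add_bos: true, special: false);
/// }
/// </code>
/// </example>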
public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, LLamaToken[] tokens, int n_max_tokens, bool add_bos, bool special)
{
unsafe
{
// Calculate number of bytes in text and borrow an array that large (+1 for nul byte)
var byteCount = encoding.GetByteCount(text);
var array = ArrayPool<byte>.Shared.Rent(byteCount + 1);
try
{
// Convert to bytes
fixed (char* textPtr = text)
fixed (byte* arrayPtr = array)
{
encoding.GetBytes(textPtr, text.Length, arrayPtr, array.Length);
}
// Add a zero byte to the end to terminate the string
array[byteCount] = 0;
// Do the actual tokenization
fixed (byte* arrayPtr = array)
fixed (LLamaToken* tokensPtr = tokens)
return llama_tokenize(ctx.ModelHandle, arrayPtr, byteCount, tokensPtr, n_max_tokens, add_bos, special);
}
finally
{
ArrayPool<byte>.Shared.Return(array);
}
}
}
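/// <summary>
/// Get the raw text bytes for the given vocabulary token
/// </summary>
/// <param name="model"></param>
/// <param name="token"></param>
/// <returns></returns>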
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe byte* llama_token_get_text(SafeLlamaModelHandle model, LLamaToken token);
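/// <summary>
/// Get the score for the given vocabulary token
/// </summary>
/// <param name="model"></param>
/// <param name="token"></param>
/// <returns></returns>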
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern float llama_token_get_score(SafeLlamaModelHandle model, LLamaToken token);
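/// <summary>
/// Get the type of the given vocabulary token
/// </summary>
/// <param name="model"></param>
/// <param name="token"></param>
/// <returns></returns>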
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaTokenType llama_token_get_type(SafeLlamaModelHandle model, LLamaToken token);
/// <summary>
/// Get the size of the context window for this context
/// </summary>
/// <param name="ctx"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_n_ctx(SafeLLamaContextHandle ctx);
/// <summary>
/// Token logits obtained from the last call to llama_eval()
/// The logits for the last token are stored in the last row
/// Can be mutated in order to change the probabilities of the next token.
/// Rows: n_tokens
/// Cols: n_vocab
/// </summary>
/// <param name="ctx"></param>
/// <returns></returns>
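/// <example>
/// A sketch of wrapping the raw pointer in a span, assuming a hypothetical <c>ctx</c>:
/// <code>
/// unsafe
/// {
///     int n_vocab = NativeApi.llama_n_vocab(ctx.ModelHandle);
///     // One row of n_vocab logits per evaluated token; mutate to bias sampling
///     var logits = new Span&lt;float&gt;(NativeApi.llama_get_logits(ctx), n_vocab);
///     logits[0] = float.MinValue; // e.g. forbid token 0
/// }
/// </code>
/// </example>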
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe float* llama_get_logits(SafeLLamaContextHandle ctx);
/// <summary>
/// Logits for the ith token. Equivalent to: llama_get_logits(ctx) + i*n_vocab
/// </summary>
/// <param name="ctx"></param>
/// <param name="i"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe float* llama_get_logits_ith(SafeLLamaContextHandle ctx, int i);
/// <summary>
/// Get the embeddings for the input
/// </summary>
/// <param name="ctx"></param>
/// <returns></returns>
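/// <example>
/// A minimal sketch, assuming a hypothetical <c>ctx</c> created with embeddings enabled:
/// <code>
/// var embeddings = NativeApi.llama_get_embeddings(ctx);
/// Console.WriteLine($"Embedding dimension: {embeddings.Length}");
/// </code>
/// </example>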
public static Span<float> llama_get_embeddings(SafeLLamaContextHandle ctx)
{
unsafe
{
var ptr = llama_get_embeddings_native(ctx);
return new Span<float>(ptr, ctx.EmbeddingSize);
}
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_get_embeddings")]
static extern unsafe float* llama_get_embeddings_native(SafeLLamaContextHandle ctx);
}
/// <summary>
/// Get the "Beginning of sentence" token
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaToken llama_token_bos(SafeLlamaModelHandle model);
/// <summary>
/// Get the "End of sentence" token
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaToken llama_token_eos(SafeLlamaModelHandle model);
/// <summary>
/// Get the "new line" token
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaToken llama_token_nl(SafeLlamaModelHandle model);
/// <summary>
/// Check whether a BOS token should be prepended when tokenizing for this model
/// </summary>
/// <param name="model"></param>
/// <returns>Returns -1 if unknown, 1 for true or 0 for false.</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_add_bos_token(SafeLlamaModelHandle model);
/// <summary>
/// Check whether an EOS token should be appended when tokenizing for this model
/// </summary>
/// <param name="model"></param>
/// <returns>Returns -1 if unknown, 1 for true or 0 for false.</returns>
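/// <example>
/// A sketch of mapping the tri-state result to a nullable bool, assuming a hypothetical <c>model</c> handle:
/// <code>
/// int raw = NativeApi.llama_add_eos_token(model);
/// bool? addEos = raw &lt; 0 ? (bool?)null : raw != 0;
/// </code>
/// </example>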
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_add_eos_token(SafeLlamaModelHandle model);
/// <summary>
/// codellama infill tokens, Beginning of infill prefix
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_token_prefix(SafeLlamaModelHandle model);
/// <summary>
/// codellama infill tokens, Beginning of infill middle
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_token_middle(SafeLlamaModelHandle model);
/// <summary>
/// codellama infill tokens, Beginning of infill suffix
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_token_suffix(SafeLlamaModelHandle model);
/// <summary>
/// codellama infill tokens, End of infill middle
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_token_eot(SafeLlamaModelHandle model);
/// <summary>
/// Print out timing information for this context
/// </summary>
/// <param name="ctx"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_print_timings(SafeLLamaContextHandle ctx);
/// <summary>
/// Reset all collected timing information for this context
/// </summary>
/// <param name="ctx"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);
/// <summary>
/// Print system information
/// </summary>
/// <returns></returns>
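/// <example>
/// The returned pointer refers to a native string; a sketch of converting it to a managed string:
/// <code>
/// var info = Marshal.PtrToStringAnsi(NativeApi.llama_print_system_info());
/// Console.WriteLine(info);
/// </code>
/// </example>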
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern IntPtr llama_print_system_info();
/// <summary>
/// Get the number of tokens in the model vocabulary
/// </summary>
/// <param name="model"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_n_vocab(SafeLlamaModelHandle model);
/// <summary>
/// Get the size of the context window the model was trained on
/// </summary>
/// <param name="model"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_n_ctx_train(SafeLlamaModelHandle model);
/// <summary>
/// Get the dimension of embedding vectors from this model
/// </summary>
/// <param name="model"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_n_embd(SafeLlamaModelHandle model);
/// <summary>
/// Get the model's RoPE frequency scaling factor
/// </summary>
/// <param name="model"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern float llama_rope_freq_scale_train(SafeLlamaModelHandle model);
/// <summary>
/// Get metadata value as a string by key name
/// </summary>
/// <param name="model"></param>
/// <param name="key"></param>
/// <param name="buf"></param>
/// <param name="buf_size"></param>
/// <returns>The length of the string on success, or -1 on failure</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe int llama_model_meta_val_str(SafeLlamaModelHandle model, byte* key, byte* buf, long buf_size);
/// <summary>
/// Get the number of metadata key/value pairs
/// </summary>
/// <param name="model"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_model_meta_count(SafeLlamaModelHandle model);
/// <summary>
/// Get metadata key name by index
/// </summary>
/// <param name="model">Model to fetch from</param>
/// <param name="index">Index of key to fetch</param>
/// <param name="dest">Buffer to write result into</param>
/// <returns>The length of the string on success, or -1 on failure</returns>
public static int llama_model_meta_key_by_index(SafeLlamaModelHandle model, int index, Span<byte> dest)
{
unsafe
{
fixed (byte* destPtr = dest)
{
return llama_model_meta_key_by_index_native(model, index, destPtr, dest.Length);
}
}
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_model_meta_key_by_index")]
static extern unsafe int llama_model_meta_key_by_index_native(SafeLlamaModelHandle model, int index, byte* buf, long buf_size);
}
/// <summary>
/// Get metadata value as a string by index
/// </summary>
/// <param name="model">Model to fetch from</param>
/// <param name="index">Index of val to fetch</param>
/// <param name="dest">Buffer to write result into</param>
/// <returns>The length of the string on success, or -1 on failure</returns>
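/// <example>
/// A sketch of enumerating all metadata pairs, assuming a hypothetical <c>model</c> handle.
/// The 256-byte buffers are an arbitrary choice for illustration:
/// <code>
/// var key = new byte[256];
/// var val = new byte[256];
/// for (var i = 0; i &lt; NativeApi.llama_model_meta_count(model); i++)
/// {
///     var kLen = NativeApi.llama_model_meta_key_by_index(model, i, key);
///     var vLen = NativeApi.llama_model_meta_val_str_by_index(model, i, val);
///     if (kLen >= 0 &amp;&amp; vLen >= 0)
///         Console.WriteLine($"{Encoding.UTF8.GetString(key, 0, kLen)} = {Encoding.UTF8.GetString(val, 0, vLen)}");
/// }
/// </code>
/// </example>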
public static int llama_model_meta_val_str_by_index(SafeLlamaModelHandle model, int index, Span<byte> dest)
{
unsafe
{
fixed (byte* destPtr = dest)
{
return llama_model_meta_val_str_by_index_native(model, index, destPtr, dest.Length);
}
}
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_model_meta_val_str_by_index")]
static extern unsafe int llama_model_meta_val_str_by_index_native(SafeLlamaModelHandle model, int index, byte* buf, long buf_size);
}
/// <summary>
/// Get a string describing the model type
/// </summary>
/// <param name="model"></param>
/// <param name="buf"></param>
/// <param name="buf_size"></param>
/// <returns>The length of the string on success, or -1 on failure</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe int llama_model_desc(SafeLlamaModelHandle model, byte* buf, long buf_size);
/// <summary>
/// Get the size of the model in bytes
/// </summary>
/// <param name="model"></param>
/// <returns>The size of the model</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern ulong llama_model_size(SafeLlamaModelHandle model);
/// <summary>
/// Get the number of parameters in this model
/// </summary>
/// <param name="model"></param>
/// <returns>The number of parameters</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern ulong llama_model_n_params(SafeLlamaModelHandle model);
/// <summary>
/// Convert a single token into text
/// </summary>
/// <param name="model"></param>
/// <param name="llamaToken"></param>
/// <param name="buffer">buffer to write string into</param>
/// <returns>The length written, or if the buffer is too small a negative that indicates the length required</returns>
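/// <example>
/// A sketch of the grow-and-retry pattern the return value enables, assuming hypothetical
/// <c>model</c> and <c>token</c> values:
/// <code>
/// Span&lt;byte&gt; buffer = stackalloc byte[16];
/// var written = NativeApi.llama_token_to_piece(model, token, buffer);
/// if (written &lt; 0)
/// {
///     buffer = new byte[-written];
///     written = NativeApi.llama_token_to_piece(model, token, buffer);
/// }
/// var text = Encoding.UTF8.GetString(buffer.Slice(0, written).ToArray());
/// </code>
/// </example>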
public static int llama_token_to_piece(SafeLlamaModelHandle model, LLamaToken llamaToken, Span<byte> buffer)
{
unsafe
{
fixed (byte* bufferPtr = buffer)
{
return llama_token_to_piece_native(model, llamaToken, bufferPtr, buffer.Length);
}
}
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_token_to_piece")]
static extern unsafe int llama_token_to_piece_native(SafeLlamaModelHandle model, LLamaToken llamaToken, byte* buffer, int length);
}
/// <summary>
/// Convert text into tokens
/// </summary>
/// <param name="model"></param>
/// <param name="text"></param>
/// <param name="text_len"></param>
/// <param name="tokens"></param>
/// <param name="n_max_tokens"></param>
/// <param name="add_bos"></param>
/// <param name="special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
/// <returns>Returns the number of tokens on success, no more than n_max_tokens.
/// Returns a negative number on failure - the number of tokens that would have been returned
/// </returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe int llama_tokenize(SafeLlamaModelHandle model, byte* text, int text_len, LLamaToken* tokens, int n_max_tokens, bool add_bos, bool special);
/// <summary>
/// Register a callback to receive llama log messages
/// </summary>
/// <param name="logCallback"></param>
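/// <example>
/// A sketch of registering a logger. The delegate is kept in a static field so the GC cannot
/// collect it while native code still holds the callback:
/// <code>
/// private static readonly LLamaLogCallback _logger = (level, message) => Console.Write($"[{level}] {message}");
///
/// public static void EnableLogging() => NativeApi.llama_log_set(_logger);
/// </code>
/// </example>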
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_log_set(LLamaLogCallback logCallback);
/// <summary>
/// Clear the KV cache
/// </summary>
/// <param name="ctx"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_clear(SafeLLamaContextHandle ctx);
/// <summary>
/// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
/// </summary>
/// <param name="ctx"></param>
/// <param name="seq"></param>
/// <param name="p0"></param>
/// <param name="p1"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);
/// <summary>
/// Copy all tokens that belong to the specified sequence to another sequence
/// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
/// </summary>
/// <param name="ctx"></param>
/// <param name="src"></param>
/// <param name="dest"></param>
/// <param name="p0"></param>
/// <param name="p1"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_seq_cp(SafeLLamaContextHandle ctx, LLamaSeqId src, LLamaSeqId dest, LLamaPos p0, LLamaPos p1);
/// <summary>
/// Removes all tokens that do not belong to the specified sequence
/// </summary>
/// <param name="ctx"></param>
/// <param name="seq"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_seq_keep(SafeLLamaContextHandle ctx, LLamaSeqId seq);
/// <summary>
/// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
/// If the KV cache is RoPEd, the KV data is updated accordingly
/// </summary>
/// <param name="ctx"></param>
/// <param name="seq"></param>
/// <param name="p0"></param>
/// <param name="p1"></param>
/// <param name="delta"></param>
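/// <example>
/// A hedged sketch of a "context shift": drop the oldest tokens of a sequence and slide the rest
/// down to make room. The <c>seq</c>, <c>kept</c> and <c>discard</c> values are hypothetical,
/// and implicit int conversions on <c>LLamaPos</c>/<c>LLamaSeqId</c> are assumed:
/// <code>
/// // Remove positions [0, discard), then shift [discard, kept) left by discard
/// NativeApi.llama_kv_cache_seq_rm(ctx, seq, 0, discard);
/// NativeApi.llama_kv_cache_seq_shift(ctx, seq, discard, kept, -discard);
/// </code>
/// </example>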
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_seq_shift(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, LLamaPos delta);
/// <summary>
/// Allocates a batch of tokens on the heap
/// Each token can be assigned up to n_seq_max sequence ids
/// The batch has to be freed with llama_batch_free()
/// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
/// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
/// The rest of the llama_batch members are allocated with size n_tokens
/// All members are left uninitialized
/// </summary>
/// <param name="n_tokens"></param>
/// <param name="embd"></param>
/// <param name="n_seq_max">Each token can be assigned up to n_seq_max sequence ids</param>
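/// <example>
/// A minimal sketch of the allocate/use/free lifecycle (the sizes are arbitrary illustrations):
/// <code>
/// var batch = NativeApi.llama_batch_init(n_tokens: 512, embd: 0, n_seq_max: 1);
/// try
/// {
///     // ... fill in the batch members, then call llama_decode ...
/// }
/// finally
/// {
///     NativeApi.llama_batch_free(batch);
/// }
/// </code>
/// </example>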
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaNativeBatch llama_batch_init(int n_tokens, int embd, int n_seq_max);
/// <summary>
/// Frees a batch of tokens allocated with llama_batch_init()
/// </summary>
/// <param name="batch"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_batch_free(LLamaNativeBatch batch);
/// <summary>
/// Process a batch of tokens with the model and store the resulting logits
/// </summary>
/// <param name="ctx"></param>
/// <param name="batch"></param>
/// <returns>
/// Positive return values do not mean a fatal error, but rather a warning:
/// - 0: success
/// - 1: could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
/// - &lt; 0: error
/// </returns>
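/// <example>
/// A sketch of handling each outcome, assuming hypothetical <c>ctx</c> and <c>batch</c> values:
/// <code>
/// var rc = NativeApi.llama_decode(ctx, batch);
/// if (rc == 1)
///     Console.WriteLine("No KV slot available - shrink the batch or grow the context");
/// else if (rc &lt; 0)
///     throw new InvalidOperationException($"llama_decode failed: {rc}");
/// </code>
/// </example>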
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_decode(SafeLLamaContextHandle ctx, LLamaNativeBatch batch);
/// <summary>
/// Set the number of threads used for decoding
/// </summary>
/// <param name="ctx"></param>
/// <param name="n_threads">n_threads is the number of threads used for generation (single token)</param>
/// <param name="n_threads_batch">n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)</param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_set_n_threads(SafeLLamaContextHandle ctx, uint n_threads, uint n_threads_batch);
}
}