using System;
using System.Runtime.InteropServices;
#pragma warning disable IDE1006 // Naming Styles
namespace LLama.Native
{
/// <summary>
/// Direct translation of the llama.cpp API
/// </summary>
public static partial class NativeApi
{
/// <summary>
/// A method that does nothing. This is a native method, calling it will force the llama native dependencies to be loaded.
/// </summary>
/// <returns></returns>
public static void llama_empty_call()
{
llama_max_devices();
}
[DllImport("kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
private static extern int AddDllDirectory(string NewDirectory);
/// <summary>
/// Get the maximum number of devices supported by llama.cpp
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern long llama_max_devices();
/// <summary>
/// Check if memory mapping is supported
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_supports_mmap();
/// <summary>
/// Check if memory locking is supported
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_supports_mlock();
/// <summary>
/// Check if GPU offload is supported
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_supports_gpu_offload();
/// <summary>
/// Initialize the llama + ggml backend. Call once at the start of the program.
///
/// This is private because LLamaSharp automatically calls it, and it's only valid to call it once!
/// </summary>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
private static extern void llama_backend_init();
// Note: this is not implemented because we don't have a definition for `ggml_numa_strategy` in C#. That definition doesn't
// exist because it's not in llama.h, it's in ggml.h which we don't currently build a wrapper for. If there's demand
// for better NUMA support that will need adding.
///// <summary>
///// Optional, enable NUMA optimisations
///// </summary>
//[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
//public static extern void llama_numa_init(ggml_numa_strategy numa);
/// <summary>
/// Load session file
/// </summary>
/// <param name="ctx"></param>
/// <param name="path_session"></param>
/// <param name="tokens_out"></param>
/// <param name="n_token_capacity"></param>
/// <param name="n_token_count_out"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_state_load_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens_out, ulong n_token_capacity, out ulong n_token_count_out);
/// <summary>
/// Save session file
/// </summary>
/// <param name="ctx"></param>
/// <param name="path_session"></param>
/// <param name="tokens"></param>
/// <param name="n_token_count"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_state_save_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens, ulong n_token_count);
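// Illustrative sketch (helper name hypothetical, not part of the native API):
// the capacity/out-count calling pattern for llama_state_load_file - allocate
// up to maxTokens, then trim the array to the count actually loaded.
public static LLamaToken[]? ExampleLoadSession(SafeLLamaContextHandle ctx, string path, int maxTokens)
{
    var tokens = new LLamaToken[maxTokens];
    if (!llama_state_load_file(ctx, path, tokens, (ulong)maxTokens, out var count))
        return null;

    Array.Resize(ref tokens, (int)count);
    return tokens;
}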
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe nuint llama_state_seq_save_file(SafeLLamaContextHandle ctx, string filepath, LLamaSeqId seq_id, LLamaToken* tokens, nuint n_token_count);
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe nuint llama_state_seq_load_file(SafeLLamaContextHandle ctx, string filepath, LLamaSeqId dest_seq_id, LLamaToken* tokens_out, nuint n_token_capacity, out nuint n_token_count_out);
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe byte* llama_token_get_text(SafeLlamaModelHandle model, LLamaToken token);
/// <summary>
/// Set whether to use causal attention or not. If set to true, the model will only attend to the past tokens
/// </summary>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_set_causal_attn(SafeLLamaContextHandle ctx, bool causal_attn);
/// <summary>
/// Set abort callback
/// </summary>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_set_abort_callback(SafeLLamaContextHandle ctx, IntPtr /* ggml_abort_callback */ abort_callback, IntPtr abort_callback_data);
/// <summary>
/// Wait until all computations are finished. This is automatically done when using any of the functions to obtain computation results
/// and is not necessary to call it explicitly in most cases.
/// </summary>
/// <param name="ctx"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_synchronize(SafeLLamaContextHandle ctx);
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern float llama_token_get_score(SafeLlamaModelHandle model, LLamaToken token);
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaTokenType llama_token_get_type(SafeLlamaModelHandle model, LLamaToken token);
/// <summary>
/// Get the n_seq_max for this context
/// </summary>
/// <param name="ctx"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern uint llama_n_seq_max(SafeLLamaContextHandle ctx);
/// <summary>
/// Get the embeddings for a specific sequence.
/// Equivalent to: llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
/// </summary>
/// <returns>A pointer to the first float in an embedding, length = ctx.EmbeddingSize</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe float* llama_get_embeddings_seq(SafeLLamaContextHandle ctx, LLamaSeqId id);
/// <summary>
/// Get the embeddings for the ith token.
/// Equivalent to: llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
/// </summary>
/// <returns>A pointer to the first float in an embedding, length = ctx.EmbeddingSize</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe float* llama_get_embeddings_ith(SafeLLamaContextHandle ctx, int i);
/// <summary>
/// Get all output token embeddings.
/// When pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model, the embeddings for which
/// llama_batch.logits[i] != 0 are stored contiguously in the order they have appeared in the batch.
/// shape: [n_outputs*n_embd]
/// Otherwise, returns NULL.
/// </summary>
/// <param name="ctx"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe float* llama_get_embeddings(SafeLLamaContextHandle ctx);
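// Illustrative sketch (helper name hypothetical, not part of the native API):
// copy the unmanaged embedding buffer for a sequence into a managed array.
// `embeddingSize` is assumed to be the model's n_embd.
public static unsafe float[]? ExampleGetSequenceEmbedding(SafeLLamaContextHandle ctx, LLamaSeqId id, int embeddingSize)
{
    var ptr = llama_get_embeddings_seq(ctx, id);
    if (ptr == null)
        return null;

    // The native pointer is only valid until the next decode, so copy it out.
    return new ReadOnlySpan<float>(ptr, embeddingSize).ToArray();
}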
/// <summary>
/// Apply chat template. Inspired by hf apply_chat_template() on python.
/// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model".
/// NOTE: This function does not use a jinja parser. It only supports a pre-defined list of templates. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
/// </summary>
/// <param name="model"></param>
/// <param name="tmpl">A Jinja template to use for this chat. If this is nullptr, the model's default chat template will be used instead.</param>
/// <param name="chat">Pointer to a list of multiple llama_chat_message</param>
/// <param name="n_msg">Number of llama_chat_message in this chat</param>
/// <param name="add_ass">Whether to end the prompt with the token(s) that indicate the start of an assistant message.</param>
/// <param name="buf">A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)</param>
/// <param name="length">The size of the allocated buffer</param>
/// <returns>The total number of bytes of the formatted prompt. If it is larger than the size of buffer, you may need to re-alloc it and then re-apply the template.</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe int llama_chat_apply_template(SafeLlamaModelHandle model, char* tmpl, LLamaChatMessage* chat, nuint n_msg, bool add_ass, char* buf, int length);
/// <summary>
/// Returns -1 if unknown, 1 for true or 0 for false.
/// </summary>
/// <param name="model"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_add_bos_token(SafeLlamaModelHandle model);
/// <summary>
/// Returns -1 if unknown, 1 for true or 0 for false.
/// </summary>
/// <param name="model"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_add_eos_token(SafeLlamaModelHandle model);
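// Illustrative sketch (helper name hypothetical): fold the tri-state result
// of llama_add_bos_token into a nullable bool.
public static bool? ExampleShouldAddBos(SafeLlamaModelHandle model)
{
    return llama_add_bos_token(model) switch
    {
        0 => false,
        1 => true,
        _ => (bool?)null, // -1 means the model does not specify
    };
}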
/// <summary>
/// Print out timing information for this context
/// </summary>
/// <param name="ctx"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_print_timings(SafeLLamaContextHandle ctx);
/// <summary>
/// Reset all collected timing information for this context
/// </summary>
/// <param name="ctx"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_reset_timings(SafeLLamaContextHandle ctx);
/// <summary>
/// Print system information
/// </summary>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern IntPtr llama_print_system_info();
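// Illustrative sketch (helper name hypothetical): the native call returns a
// pointer to a null-terminated C string, which can be marshalled like this.
public static string? ExampleSystemInfo()
{
    return Marshal.PtrToStringAnsi(llama_print_system_info());
}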
/// <summary>
/// Convert a single token into text
/// </summary>
/// <param name="model"></param>
/// <param name="llamaToken"></param>
/// <param name="buffer">buffer to write string into</param>
/// <returns>The length written, or if the buffer is too small a negative that indicates the length required</returns>
public static int llama_token_to_piece(SafeLlamaModelHandle model, LLamaToken llamaToken, Span<byte> buffer)
{
    unsafe
    {
        fixed (byte* bufferPtr = buffer)
        {
            return llama_token_to_piece_native(model, llamaToken, bufferPtr, buffer.Length);
        }
    }

    [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl, EntryPoint = "llama_token_to_piece")]
    static extern unsafe int llama_token_to_piece_native(SafeLlamaModelHandle model, LLamaToken llamaToken, byte* buffer, int length);
}
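// Illustrative sketch (helper name hypothetical): decode a token using the
// negative-return convention documented above, retrying with the exact
// required buffer size when the first attempt is too small.
public static string ExampleTokenToString(SafeLlamaModelHandle model, LLamaToken token)
{
    Span<byte> buffer = stackalloc byte[32];
    var written = llama_token_to_piece(model, token, buffer);
    if (written < 0)
    {
        // A negative result is the required length, negated.
        buffer = new byte[-written];
        written = llama_token_to_piece(model, token, buffer);
    }
    return System.Text.Encoding.UTF8.GetString(buffer.Slice(0, written).ToArray());
}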
/// <summary>
/// Convert text into tokens
/// </summary>
/// <param name="model"></param>
/// <param name="text"></param>
/// <param name="text_len"></param>
/// <param name="tokens"></param>
/// <param name="n_max_tokens"></param>
/// <param name="add_special"></param>
/// <param name="parse_special">Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.</param>
/// <returns>Returns the number of tokens on success, no more than n_max_tokens.
/// Returns a negative number on failure - the number of tokens that would have been returned
/// </returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe int llama_tokenize(SafeLlamaModelHandle model, byte* text, int text_len, LLamaToken* tokens, int n_max_tokens, bool add_special, bool parse_special);
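// Illustrative sketch (helper name hypothetical): the usual two-pass
// llama_tokenize pattern - query the required count with an empty token
// buffer, then tokenize into an exactly-sized array.
public static unsafe LLamaToken[] ExampleTokenize(SafeLlamaModelHandle model, string text, bool addSpecial, bool parseSpecial)
{
    var bytes = System.Text.Encoding.UTF8.GetBytes(text);
    fixed (byte* textPtr = bytes)
    {
        // With n_max_tokens == 0 the call fails, returning the negated number
        // of tokens that would have been produced.
        var count = -llama_tokenize(model, textPtr, bytes.Length, null, 0, addSpecial, parseSpecial);
        var tokens = new LLamaToken[count];
        fixed (LLamaToken* tokenPtr = tokens)
        {
            llama_tokenize(model, textPtr, bytes.Length, tokenPtr, count, addSpecial, parseSpecial);
        }
        return tokens;
    }
}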
/// <summary>
/// Register a callback to receive llama log messages
/// </summary>
/// <param name="logCallback"></param>
[Obsolete("Use `NativeLogConfig.llama_log_set` instead")]
public static void llama_log_set(NativeLogConfig.LLamaLogCallback logCallback)
{
NativeLogConfig.llama_log_set(logCallback);
}
/// <summary>
/// Clear the KV cache
/// </summary>
/// <param name="ctx"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_clear(SafeLLamaContextHandle ctx);
/// <summary>
/// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
/// </summary>
/// <param name="ctx"></param>
/// <param name="seq"></param>
/// <param name="p0"></param>
/// <param name="p1"></param>
/// <returns>Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern bool llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);
/// <summary>
/// Copy all tokens that belong to the specified sequence to another sequence
/// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
/// </summary>
/// <param name="ctx"></param>
/// <param name="src"></param>
/// <param name="dest"></param>
/// <param name="p0"></param>
/// <param name="p1"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_seq_cp(SafeLLamaContextHandle ctx, LLamaSeqId src, LLamaSeqId dest, LLamaPos p0, LLamaPos p1);
/// <summary>
/// Removes all tokens that do not belong to the specified sequence
/// </summary>
/// <param name="ctx"></param>
/// <param name="seq"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_seq_keep(SafeLLamaContextHandle ctx, LLamaSeqId seq);
/// <summary>
/// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
/// If the KV cache is RoPEd, the KV data is updated accordingly:
/// - lazily on next llama_decode()
/// - explicitly with llama_kv_cache_update()
/// </summary>
/// <param name="ctx"></param>
/// <param name="seq"></param>
/// <param name="p0"></param>
/// <param name="p1"></param>
/// <param name="delta"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_seq_add(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, int delta);
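// Illustrative sketch (helper name hypothetical): the common "context shift"
// built from llama_kv_cache_seq_rm and llama_kv_cache_seq_add - drop the
// oldest n_discard tokens after the first n_keep, then shift the survivors
// back so positions are contiguous again. Assumes LLamaSeqId and LLamaPos
// expose explicit conversions from int.
public static void ExampleShiftContext(SafeLLamaContextHandle ctx, int n_keep, int n_discard, int n_past)
{
    var seq = (LLamaSeqId)0;
    llama_kv_cache_seq_rm(ctx, seq, (LLamaPos)n_keep, (LLamaPos)(n_keep + n_discard));
    llama_kv_cache_seq_add(ctx, seq, (LLamaPos)(n_keep + n_discard), (LLamaPos)n_past, -n_discard);
}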
/// <summary>
/// Integer division of the positions by factor of `d > 1`
/// If the KV cache is RoPEd, the KV data is updated accordingly:
/// - lazily on next llama_decode()
/// - explicitly with llama_kv_cache_update()
/// <br />
/// p0 &lt; 0 : [0, p1]
/// <br />
/// p1 &lt; 0 : [p0, inf)
/// </summary>
/// <param name="ctx"></param>
/// <param name="seq"></param>
/// <param name="p0"></param>
/// <param name="p1"></param>
/// <param name="d"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_kv_cache_seq_div(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1, int d);
/// <summary>
/// Returns the largest position present in the KV cache for the specified sequence
/// </summary>
/// <param name="ctx"></param>
/// <param name="seq"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaPos llama_kv_cache_seq_pos_max(SafeLLamaContextHandle ctx, LLamaSeqId seq);
/// <summary>
/// Allocates a batch of tokens on the heap
/// Each token can be assigned up to n_seq_max sequence ids
/// The batch has to be freed with llama_batch_free()
/// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
/// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
/// The rest of the llama_batch members are allocated with size n_tokens
/// All members are left uninitialized
/// </summary>
/// <param name="n_tokens"></param>
/// <param name="embd"></param>
/// <param name="n_seq_max">Each token can be assigned up to n_seq_max sequence ids</param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern LLamaNativeBatch llama_batch_init(int n_tokens, int embd, int n_seq_max);
/// <summary>
/// Frees a batch of tokens allocated with llama_batch_init()
/// </summary>
/// <param name="batch"></param>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern void llama_batch_free(LLamaNativeBatch batch);
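// Illustrative sketch: every llama_batch_init must be paired with a
// llama_batch_free, so callers typically guard the pair with try/finally.
public static void ExampleBatchLifetime()
{
    var batch = llama_batch_init(n_tokens: 512, embd: 0, n_seq_max: 1);
    try
    {
        // ... fill the batch and submit it for decoding ...
    }
    finally
    {
        llama_batch_free(batch);
    }
}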
/// <summary>
/// Apply a loaded control vector to a llama_context, or if data is NULL, clear
/// the currently loaded vector.
/// n_embd should be the size of a single layer's control, and data should point
/// to an n_embd x n_layers buffer starting from layer 1.
/// il_start and il_end are the layer range the vector should apply to (both inclusive)
/// See llama_control_vector_load in common to load a control vector.
/// </summary>
/// <param name="ctx"></param>
/// <param name="data"></param>
/// <param name="len"></param>
/// <param name="n_embd"></param>
/// <param name="il_start"></param>
/// <param name="il_end"></param>
/// <returns></returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern unsafe int llama_control_vector_apply(SafeLLamaContextHandle ctx, float* data, nuint len, int n_embd, int il_start, int il_end);
/// <summary>
/// Build a split GGUF final path for this chunk.
/// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
/// </summary>
/// <param name="split_path"></param>
/// <param name="maxlen"></param>
/// <param name="path_prefix"></param>
/// <param name="split_no"></param>
/// <param name="split_count"></param>
/// <returns>Returns the split_path length.</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_split_path(string split_path, nuint maxlen, string path_prefix, int split_no, int split_count);
/// <summary>
/// Extract the path prefix from the split_path if and only if the split_no and split_count match.
/// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
/// </summary>
/// <param name="split_prefix"></param>
/// <param name="maxlen"></param>
/// <param name="split_path"></param>
/// <param name="split_no"></param>
/// <param name="split_count"></param>
/// <returns>Returns the split_prefix length.</returns>
[DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
public static extern int llama_split_prefix(string split_prefix, nuint maxlen, string split_path, int split_no, int split_count);
}
}