Namespace: LLama.Native
A C# representation of the llama.cpp llama_context_params struct
public struct LLamaContextParams
Inheritance Object → ValueType → LLamaContextParams
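For orientation, here is a minimal sketch of populating the struct before handing it to the native layer; the field values are illustrative, not the library's actual defaults (those come from the native llama_context_default_params):

```csharp
using LLama.Native;

// Illustrative values only: the real defaults may differ.
var p = new LLamaContextParams
{
    seed = -1,          // -1 => let llama.cpp pick a random seed
    n_ctx = 2048,       // context window, in tokens
    n_batch = 512,      // prompt-processing batch size
    n_gpu_layers = 0,   // 0 => run entirely on the CPU
    use_mmap = true,    // map the model file instead of copying it
    f16_kv = true       // half-precision KV cache
};
```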
RNG seed, -1 for random
public int seed;
text context (the size of the context window, in tokens)
public int n_ctx;
prompt processing batch size
public int n_batch;
number of layers to store in VRAM
public int n_gpu_layers;
the GPU that is used for scratch and small tensors
public int main_gpu;
how to split layers across multiple GPUs (pointer to a native array of floats, one proportion per GPU)
public IntPtr tensor_split;
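Because tensor_split is a raw pointer, a managed float[] has to be pinned for as long as native code may read it. A sketch using standard GCHandle pinning; the two-GPU ratio is purely illustrative:

```csharp
using System;
using System.Runtime.InteropServices;
using LLama.Native;

// Give roughly 60% of the layers to GPU 0 and 40% to GPU 1 (example values).
float[] split = { 0.6f, 0.4f };

// Pin the array so the GC cannot move it while native code holds the pointer.
GCHandle handle = GCHandle.Alloc(split, GCHandleType.Pinned);
try
{
    var p = new LLamaContextParams();
    p.tensor_split = handle.AddrOfPinnedObject();
    // ... pass p to the native context-creation call here ...
}
finally
{
    handle.Free(); // only safe once the native side no longer reads the array
}
```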
ref: https://github.com/ggerganov/llama.cpp/pull/2054
RoPE base frequency
public float rope_freq_base;
ref: https://github.com/ggerganov/llama.cpp/pull/2054
RoPE frequency scaling factor
public float rope_freq_scale;
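The two RoPE fields implement the scaling technique from the PR linked above: to stretch a model past its trained context, divide the frequency scale. A sketch with commonly cited LLaMA values; the base frequency of 10000 is an assumption and your model may differ:

```csharp
using LLama.Native;

// Linear RoPE scaling (ggerganov/llama.cpp#2054): to run a model trained
// at 2048 tokens with a 4096-token window, halve the frequency scale.
var p = new LLamaContextParams();
p.n_ctx = 4096;
p.rope_freq_base = 10000.0f; // typical LLaMA base frequency (assumption)
p.rope_freq_scale = 0.5f;    // trained_ctx / desired_ctx = 2048 / 4096
```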
called with a progress value between 0 and 1; pass IntPtr.Zero to disable
public IntPtr progress_callback;
context pointer passed to the progress callback
public IntPtr progress_callback_user_data;
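Since the callback is exposed as a bare IntPtr, a managed delegate must be marshaled to a function pointer and kept alive for the lifetime of the context. A sketch assuming the native signature is void (*)(float progress, void* ctx), which is what llama.cpp declared for llama_progress_callback at the time:

```csharp
using System;
using System.Runtime.InteropServices;
using LLama.Native;

static class ProgressExample
{
    // Assumed native signature: void (*llama_progress_callback)(float, void*).
    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
    private delegate void LlamaProgressCallback(float progress, IntPtr ctx);

    // Keep a strong reference so the GC cannot collect the delegate while
    // native code still holds the function pointer.
    private static readonly LlamaProgressCallback OnProgress =
        (progress, ctx) => Console.WriteLine($"Loading: {progress:P0}");

    public static LLamaContextParams WithProgress()
    {
        var p = new LLamaContextParams();
        p.progress_callback = Marshal.GetFunctionPointerForDelegate(OnProgress);
        p.progress_callback_user_data = IntPtr.Zero; // nothing to pass through
        return p;
    }
}
```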
if true, reduce VRAM usage at the cost of performance
public bool low_vram { get; set; }
if true, use experimental mul_mat_q kernels
public bool mul_mat_q { get; set; }
use fp16 for KV cache
public bool f16_kv { get; set; }
if true, the llama_eval() call computes all logits, not just the last one
public bool logits_all { get; set; }
only load the vocabulary, no weights
public bool vocab_only { get; set; }
use mmap if possible
public bool use_mmap { get; set; }
force the system to keep the model in RAM
public bool use_mlock { get; set; }
embedding mode only
public bool embedding { get; set; }
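The boolean flags combine to describe different loading modes; one illustrative combination loads just the vocabulary so text can be tokenized without paying for the weights:

```csharp
using LLama.Native;

var p = new LLamaContextParams();
p.vocab_only = true;  // load the tokenizer/vocabulary, skip the weights
p.use_mmap   = true;  // map the file rather than reading it into RAM
p.use_mlock  = false; // nothing large stays resident, so no need to pin pages
p.embedding  = false; // normal mode, not embedding-only
```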