using System;
using LLama.Abstractions;

namespace LLama.Web.Common
{
    public class ModelOptions : IModelParams
    {
        /// <summary>
        /// Model name
        /// </summary>
        public string Name { get; set; }

        /// <summary>
        /// Maximum number of concurrent instances of this model
        /// </summary>
        public int MaxInstances { get; set; }

        /// <summary>
        /// Model context size (n_ctx)
        /// </summary>
        public int ContextSize { get; set; } = 512;

        /// <summary>
        /// The GPU that is used for scratch and small tensors
        /// </summary>
        public int MainGpu { get; set; } = 0;

        /// <summary>
        /// If true, reduce VRAM usage at the cost of performance
        /// </summary>
        public bool LowVram { get; set; } = false;

        /// <summary>
        /// Number of layers to run in VRAM / GPU memory (n_gpu_layers)
        /// </summary>
        public int GpuLayerCount { get; set; } = 20;

        /// <summary>
        /// Seed for the random number generator (seed)
        /// </summary>
        public int Seed { get; set; } = 1686349486;

        /// <summary>
        /// Use f16 instead of f32 for the memory KV cache (memory_f16)
        /// </summary>
        public bool UseFp16Memory { get; set; } = true;

        /// <summary>
        /// Use mmap for faster loads (use_mmap)
        /// </summary>
        public bool UseMemorymap { get; set; } = true;

        /// <summary>
        /// Use mlock to keep the model in memory (use_mlock)
        /// </summary>
        public bool UseMemoryLock { get; set; } = false;

        /// <summary>
        /// Compute perplexity over the prompt (perplexity)
        /// </summary>
        public bool Perplexity { get; set; } = false;

        /// <summary>
        /// Model path (model)
        /// </summary>
        public string ModelPath { get; set; }

        /// <summary>
        /// Model alias
        /// </summary>
        public string ModelAlias { get; set; } = "unknown";

        /// <summary>
        /// LoRA adapter path (lora_adapter)
        /// </summary>
        public string LoraAdapter { get; set; } = string.Empty;

        /// <summary>
        /// Base model path for the LoRA adapter (lora_base)
        /// </summary>
        public string LoraBase { get; set; } = string.Empty;

        /// <summary>
        /// Number of threads (-1 = autodetect) (n_threads)
        /// </summary>
        public int Threads { get; set; } = Math.Max(Environment.ProcessorCount / 2, 1);

        /// <summary>
        /// Batch size for prompt processing (must be >= 32 to use BLAS) (n_batch)
        /// </summary>
        public int BatchSize { get; set; } = 512;

        /// <summary>
        /// Whether to convert EOS to a newline during inference.
        /// </summary>
        public bool ConvertEosToNewLine { get; set; } = false;

        /// <summary>
        /// Whether to use embedding mode (embedding). Note that if this is set to true,
        /// the LLamaModel won't produce a text response anymore.
        /// </summary>
        public bool EmbeddingMode { get; set; } = false;

        /// <summary>
        /// How split tensors should be distributed across GPUs
        /// </summary>
        public float[] TensorSplits { get; set; }

        /// <summary>
        /// Grouped-Query Attention
        /// </summary>
        public int GroupedQueryAttention { get; set; } = 1;

        /// <summary>
        /// RMS Norm Epsilon
        /// </summary>
        public float RmsNormEpsilon { get; set; } = 5e-6f;

        /// <summary>
        /// RoPE base frequency
        /// </summary>
        public float RopeFrequencyBase { get; set; } = 10000.0f;

        /// <summary>
        /// RoPE frequency scaling factor
        /// </summary>
        public float RopeFrequencyScale { get; set; } = 1.0f;

        /// <summary>
        /// Use experimental mul_mat_q kernels
        /// </summary>
        public bool MulMatQ { get; set; }
    }
}
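
// A minimal usage sketch (an assumption, not part of the original file): ModelOptions
// is a plain options class, so it can simply be constructed and selected defaults
// overridden in code. The factory class, model name, and model path below are
// hypothetical placeholders for illustration only.
namespace LLama.Web.Common
{
    public static class ModelOptionsExample
    {
        public static ModelOptions CreateDefault()
        {
            return new ModelOptions
            {
                Name = "llama-7b",                              // hypothetical model name
                MaxInstances = 1,                               // allow a single concurrent instance
                ModelPath = "/models/llama-7b.ggmlv3.q4_0.bin", // hypothetical path to a GGML model
                ContextSize = 2048,                             // raise n_ctx above the 512 default
                GpuLayerCount = 20                              // offload 20 layers to the GPU
            };
        }
    }
}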