using System.Text;
using LLama.Native;

namespace LLama.Abstractions;

/// <summary>
/// The parameters for initializing a LLama context from a model.
/// </summary>
public interface IContextParams
{
    /// <summary>
    /// Model context size (n_ctx)
    /// </summary>
    uint? ContextSize { get; }

    /// <summary>
    /// Batch size for prompt processing (must be >= 32 to use BLAS) (n_batch)
    /// </summary>
    uint BatchSize { get; }

    /// <summary>
    /// Seed for the random number generator (seed)
    /// </summary>
    uint Seed { get; }

    /// <summary>
    /// Whether to use embedding mode. (embedding) Note that if this is set to true,
    /// the LLamaModel won't produce a text response anymore.
    /// </summary>
    bool EmbeddingMode { get; }

    /// <summary>
    /// RoPE base frequency (null to fetch from the model)
    /// </summary>
    float? RopeFrequencyBase { get; }

    /// <summary>
    /// RoPE frequency scaling factor (null to fetch from the model)
    /// </summary>
    float? RopeFrequencyScale { get; }

    /// <summary>
    /// The encoding to use for models
    /// </summary>
    Encoding Encoding { get; }

    /// <summary>
    /// Number of threads (null = autodetect) (n_threads)
    /// </summary>
    uint? Threads { get; }

    /// <summary>
    /// Number of threads to use for batch processing (null = autodetect) (n_threads_batch)
    /// </summary>
    uint? BatchThreads { get; }

    /// <summary>
    /// YaRN extrapolation mix factor (null = from model)
    /// </summary>
    float? YarnExtrapolationFactor { get; }

    /// <summary>
    /// YaRN magnitude scaling factor (null = from model)
    /// </summary>
    float? YarnAttentionFactor { get; }

    /// <summary>
    /// YaRN low correction dim (null = from model)
    /// </summary>
    float? YarnBetaFast { get; }

    /// <summary>
    /// YaRN high correction dim (null = from model)
    /// </summary>
    float? YarnBetaSlow { get; }

    /// <summary>
    /// YaRN original context length (null = from model)
    /// </summary>
    uint? YarnOriginalContext { get; }

    /// <summary>
    /// YaRN scaling method to use.
    /// </summary>
    RopeScalingType? YarnScalingType { get; }

    /// <summary>
    /// Override the type of the K cache
    /// </summary>
    GGMLType? TypeK { get; }

    /// <summary>
    /// Override the type of the V cache
    /// </summary>
    GGMLType? TypeV { get; }

    /// <summary>
    /// Whether to disable offloading the KQV cache to the GPU
    /// </summary>
    bool NoKqvOffload { get; }
}
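
// ---------------------------------------------------------------------------
// Example (illustrative only): a minimal sketch of an IContextParams
// implementation with conservative defaults. The class name and the chosen
// values are assumptions for demonstration; only the interface members
// declared above are taken from this file. Nullable members are left null so
// the consumer can fall back to the model's own metadata, which is why most
// of the RoPE/YaRN settings default to null here.
// ---------------------------------------------------------------------------
public sealed class ExampleContextParams : IContextParams
{
    public uint? ContextSize => 2048;              // n_ctx: 2048-token context window
    public uint BatchSize => 512;                  // n_batch: >= 32 so BLAS can be used
    public uint Seed => 1337;                      // fixed seed for reproducible sampling
    public bool EmbeddingMode => false;            // false = produce text, not embeddings
    public float? RopeFrequencyBase => null;       // null = fetch from the model
    public float? RopeFrequencyScale => null;      // null = fetch from the model
    public Encoding Encoding => Encoding.UTF8;     // decode model output as UTF-8
    public uint? Threads => null;                  // null = autodetect
    public uint? BatchThreads => null;             // null = autodetect
    public float? YarnExtrapolationFactor => null; // all YaRN settings taken from the model
    public float? YarnAttentionFactor => null;
    public float? YarnBetaFast => null;
    public float? YarnBetaSlow => null;
    public uint? YarnOriginalContext => null;
    public RopeScalingType? YarnScalingType => null;
    public GGMLType? TypeK => null;                // null = keep the default K cache type
    public GGMLType? TypeV => null;                // null = keep the default V cache type
    public bool NoKqvOffload => false;             // false = allow KQV cache offload to the GPU
}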