using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Text;

namespace LLama.Native
{
    /// <summary>
    /// Native progress callback. Receives a progress value between 0 and 1 and the
    /// user-supplied context pointer (see
    /// <see cref="LLamaContextParams.progress_callback_user_data"/>).
    /// </summary>
    /// <param name="progress">Load progress in the range [0, 1].</param>
    /// <param name="ctx">Opaque user context pointer passed through unchanged.</param>
    public delegate void LlamaProgressCallback(float progress, IntPtr ctx);

    /// <summary>
    /// Context-creation parameters marshaled to native code.
    /// Layout is <see cref="LayoutKind.Sequential"/>: field order and types must match
    /// the native struct exactly — do not reorder, remove, or retype fields.
    /// NOTE(review): presumably mirrors llama.cpp's <c>llama_context_params</c>;
    /// confirm field-for-field against the pinned native library version.
    /// </summary>
    [StructLayout(LayoutKind.Sequential)]
    public struct LLamaContextParams
    {
        /// <summary>
        /// text context
        /// </summary>
        public int n_ctx;

        /// <summary>
        /// -1 for default
        /// </summary>
        public int n_parts;

        /// <summary>
        /// RNG seed, -1 for random
        /// </summary>
        public int seed;

        /// <summary>
        /// use fp16 for KV cache
        /// </summary>
        /// <remarks>Marshaled as a single byte (<see cref="UnmanagedType.I1"/>) to match the native C bool.</remarks>
        [MarshalAs(UnmanagedType.I1)]
        public bool f16_kv;

        /// <summary>
        /// the llama_eval() call computes all logits, not just the last one
        /// </summary>
        [MarshalAs(UnmanagedType.I1)]
        public bool logits_all;

        /// <summary>
        /// only load the vocabulary, no weights
        /// </summary>
        [MarshalAs(UnmanagedType.I1)]
        public bool vocab_only;

        /// <summary>
        /// use mmap if possible
        /// </summary>
        [MarshalAs(UnmanagedType.I1)]
        public bool use_mmap;

        /// <summary>
        /// force system to keep model in RAM
        /// </summary>
        [MarshalAs(UnmanagedType.I1)]
        public bool use_mlock;

        /// <summary>
        /// embedding mode only
        /// </summary>
        [MarshalAs(UnmanagedType.I1)]
        public bool embedding;

        /// <summary>
        /// called with a progress value between 0 and 1, pass NULL to disable
        /// </summary>
        /// <remarks>
        /// Kept as a raw <see cref="IntPtr"/> rather than a delegate field; callers are
        /// responsible for obtaining a native function pointer (e.g. via
        /// <see cref="Marshal.GetFunctionPointerForDelegate{TDelegate}(TDelegate)"/>)
        /// and keeping the delegate alive for the duration of the native call.
        /// </remarks>
        public IntPtr progress_callback;

        /// <summary>
        /// context pointer passed to the progress callback
        /// </summary>
        public IntPtr progress_callback_user_data;
    }
}