
IModelParams.cs

using System;

namespace LLama.Abstractions
{
    public interface IModelParams
    {
        /// <summary>
        /// Model context size (n_ctx)
        /// </summary>
        int ContextSize { get; set; }

        /// <summary>
        /// The GPU that is used for scratch and small tensors
        /// </summary>
        int MainGpu { get; set; }

        /// <summary>
        /// If true, reduce VRAM usage at the cost of performance
        /// </summary>
        bool LowVram { get; set; }

        /// <summary>
        /// Number of layers to run in VRAM / GPU memory (n_gpu_layers)
        /// </summary>
        int GpuLayerCount { get; set; }

        /// <summary>
        /// Seed for the random number generator (seed)
        /// </summary>
        int Seed { get; set; }

        /// <summary>
        /// Use f16 instead of f32 for the memory KV cache (memory_f16)
        /// </summary>
        bool UseFp16Memory { get; set; }

        /// <summary>
        /// Use mmap for faster loads (use_mmap)
        /// </summary>
        bool UseMemorymap { get; set; }

        /// <summary>
        /// Use mlock to keep the model in memory (use_mlock)
        /// </summary>
        bool UseMemoryLock { get; set; }

        /// <summary>
        /// Compute perplexity over the prompt (perplexity)
        /// </summary>
        bool Perplexity { get; set; }

        /// <summary>
        /// Model path (model)
        /// </summary>
        string ModelPath { get; set; }

        /// <summary>
        /// Model alias
        /// </summary>
        string ModelAlias { get; set; }

        /// <summary>
        /// LoRA adapter path (lora_adapter)
        /// </summary>
        string LoraAdapter { get; set; }

        /// <summary>
        /// Base model path for the LoRA adapter (lora_base)
        /// </summary>
        string LoraBase { get; set; }

        /// <summary>
        /// Number of threads (-1 = autodetect) (n_threads)
        /// </summary>
        int Threads { get; set; }

        /// <summary>
        /// Batch size for prompt processing (must be >= 32 to use BLAS) (n_batch)
        /// </summary>
        int BatchSize { get; set; }

        /// <summary>
        /// Whether to convert EOS to newline during inference.
        /// </summary>
        bool ConvertEosToNewLine { get; set; }

        /// <summary>
        /// Whether to use embedding mode (embedding). Note that if this is set to true,
        /// the LLamaModel won't produce text responses anymore.
        /// </summary>
        bool EmbeddingMode { get; set; }

        /// <summary>
        /// How split tensors should be distributed across GPUs
        /// </summary>
        nint TensorSplits { get; set; }

        /// <summary>
        /// Grouped-Query Attention
        /// </summary>
        int GroupedQueryAttention { get; set; }

        /// <summary>
        /// RMS Norm Epsilon
        /// </summary>
        float RmsNormEpsilon { get; set; }

        /// <summary>
        /// RoPE base frequency
        /// </summary>
        float RopeFrequencyBase { get; set; }

        /// <summary>
        /// RoPE frequency scaling factor
        /// </summary>
        float RopeFrequencyScale { get; set; }

        /// <summary>
        /// Use experimental mul_mat_q kernels
        /// </summary>
        bool MulMatQ { get; set; }
    }
}
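
For reference, a concrete implementation of this interface is just a class exposing these properties. The sketch below is illustrative only, not the library's own parameter class: the type name BasicModelParams and the default values are assumptions chosen to mirror common llama.cpp defaults, and only ModelPath is required from the caller.

using LLama.Abstractions;

namespace LLama.Examples
{
    /// <summary>
    /// A minimal, hypothetical IModelParams implementation. The defaults below
    /// are illustrative guesses, not values taken from the library itself.
    /// </summary>
    public class BasicModelParams : IModelParams
    {
        public int ContextSize { get; set; } = 512;
        public int MainGpu { get; set; } = 0;
        public bool LowVram { get; set; } = false;
        public int GpuLayerCount { get; set; } = 0;          // CPU-only by default
        public int Seed { get; set; } = 1337;
        public bool UseFp16Memory { get; set; } = true;
        public bool UseMemorymap { get; set; } = true;
        public bool UseMemoryLock { get; set; } = false;
        public bool Perplexity { get; set; } = false;
        public string ModelPath { get; set; }
        public string ModelAlias { get; set; } = "unknown";
        public string LoraAdapter { get; set; } = string.Empty;
        public string LoraBase { get; set; } = string.Empty;
        public int Threads { get; set; } = -1;               // -1 = autodetect
        public int BatchSize { get; set; } = 512;
        public bool ConvertEosToNewLine { get; set; } = false;
        public bool EmbeddingMode { get; set; } = false;
        public nint TensorSplits { get; set; } = 0;          // 0 = no explicit split
        public int GroupedQueryAttention { get; set; } = 1;
        public float RmsNormEpsilon { get; set; } = 5e-6f;
        public float RopeFrequencyBase { get; set; } = 10000.0f;
        public float RopeFrequencyScale { get; set; } = 1.0f;
        public bool MulMatQ { get; set; } = false;

        public BasicModelParams(string modelPath)
        {
            ModelPath = modelPath;
        }
    }
}

A caller would then construct it with only the essentials, e.g. new BasicModelParams("path/to/model.bin") { GpuLayerCount = 20 }, overriding individual properties while leaving the rest at their defaults (the path shown is a placeholder).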