
IModelParams.cs 3.5 kB

namespace LLama.Abstractions
{
    public interface IModelParams
    {
        /// <summary>
        /// Model context size (n_ctx)
        /// </summary>
        int ContextSize { get; set; }

        /// <summary>
        /// the GPU that is used for scratch and small tensors
        /// </summary>
        int MainGpu { get; set; }

        /// <summary>
        /// if true, reduce VRAM usage at the cost of performance
        /// </summary>
        bool LowVram { get; set; }

        /// <summary>
        /// Number of layers to run in VRAM / GPU memory (n_gpu_layers)
        /// </summary>
        int GpuLayerCount { get; set; }

        /// <summary>
        /// Seed for the random number generator (seed)
        /// </summary>
        int Seed { get; set; }

        /// <summary>
        /// Use f16 instead of f32 for memory kv (memory_f16)
        /// </summary>
        bool UseFp16Memory { get; set; }

        /// <summary>
        /// Use mmap for faster loads (use_mmap)
        /// </summary>
        bool UseMemorymap { get; set; }

        /// <summary>
        /// Use mlock to keep model in memory (use_mlock)
        /// </summary>
        bool UseMemoryLock { get; set; }

        /// <summary>
        /// Compute perplexity over the prompt (perplexity)
        /// </summary>
        bool Perplexity { get; set; }

        /// <summary>
        /// Model path (model)
        /// </summary>
        string ModelPath { get; set; }

        /// <summary>
        /// model alias
        /// </summary>
        string ModelAlias { get; set; }

        /// <summary>
        /// lora adapter path (lora_adapter)
        /// </summary>
        string LoraAdapter { get; set; }

        /// <summary>
        /// base model path for the lora adapter (lora_base)
        /// </summary>
        string LoraBase { get; set; }

        /// <summary>
        /// Number of threads (-1 = autodetect) (n_threads)
        /// </summary>
        int Threads { get; set; }

        /// <summary>
        /// batch size for prompt processing (must be >=32 to use BLAS) (n_batch)
        /// </summary>
        int BatchSize { get; set; }

        /// <summary>
        /// Whether to convert eos to newline during the inference.
        /// </summary>
        bool ConvertEosToNewLine { get; set; }

        /// <summary>
        /// Whether to use embedding mode. (embedding) Note that if this is set to true,
        /// the LLamaModel won't produce text response anymore.
        /// </summary>
        bool EmbeddingMode { get; set; }

        /// <summary>
        /// how split tensors should be distributed across GPUs
        /// </summary>
        float[]? TensorSplits { get; set; }

        /// <summary>
        /// Grouped-Query Attention
        /// </summary>
        int GroupedQueryAttention { get; set; }

        /// <summary>
        /// RMS Norm Epsilon
        /// </summary>
        float RmsNormEpsilon { get; set; }

        /// <summary>
        /// RoPE base frequency
        /// </summary>
        float RopeFrequencyBase { get; set; }

        /// <summary>
        /// RoPE frequency scaling factor
        /// </summary>
        float RopeFrequencyScale { get; set; }

        /// <summary>
        /// Use experimental mul_mat_q kernels
        /// </summary>
        bool MulMatQ { get; set; }

        /// <summary>
        /// The encoding to use for models
        /// </summary>
        string Encoding { get; set; }
    }
}
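For reference, here is a minimal sketch of how this interface could be implemented as a plain parameter object. The class name, namespace and default values below are illustrative assumptions and are not part of LLamaSharp; they only depend on the interface shown above.

using LLama.Abstractions;

namespace MyApp
{
    // Hypothetical parameter object implementing IModelParams.
    // Defaults below are illustrative assumptions, not library-provided values.
    public class BasicModelParams : IModelParams
    {
        public int ContextSize { get; set; } = 512;
        public int MainGpu { get; set; } = 0;
        public bool LowVram { get; set; } = false;
        public int GpuLayerCount { get; set; } = 20;
        public int Seed { get; set; } = 1337;
        public bool UseFp16Memory { get; set; } = true;
        public bool UseMemorymap { get; set; } = true;
        public bool UseMemoryLock { get; set; } = false;
        public bool Perplexity { get; set; } = false;
        public string ModelPath { get; set; }
        public string ModelAlias { get; set; } = "unknown";
        public string LoraAdapter { get; set; } = string.Empty;
        public string LoraBase { get; set; } = string.Empty;
        public int Threads { get; set; } = -1;         // -1 = autodetect
        public int BatchSize { get; set; } = 512;      // must be >= 32 to use BLAS
        public bool ConvertEosToNewLine { get; set; } = false;
        public bool EmbeddingMode { get; set; } = false;
        public float[]? TensorSplits { get; set; }
        public int GroupedQueryAttention { get; set; } = 1;
        public float RmsNormEpsilon { get; set; } = 5e-6f;
        public float RopeFrequencyBase { get; set; } = 10000.0f;
        public float RopeFrequencyScale { get; set; } = 1.0f;
        public bool MulMatQ { get; set; } = false;
        public string Encoding { get; set; } = "UTF-8";

        public BasicModelParams(string modelPath)
        {
            ModelPath = modelPath;
        }
    }
}

Because consuming code is written against IModelParams rather than a concrete class, any such implementation can be passed wherever the library expects these parameters.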