You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

IContextParams.cs 2.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101
  1. using System.Text;
  2. using LLama.Native;
  3. namespace LLama.Abstractions;
  4. /// <summary>
  5. /// The parameters for initializing a LLama context from a model.
  6. /// </summary>
  7. public interface IContextParams
  8. {
  9. /// <summary>
  10. /// Model context size (n_ctx)
  11. /// </summary>
  12. uint? ContextSize { get; }
  13. /// <summary>
  14. /// batch size for prompt processing (must be >=32 to use BLAS) (n_batch)
  15. /// </summary>
  16. uint BatchSize { get; }
  17. /// <summary>
  18. /// Seed for the random number generator (seed)
  19. /// </summary>
  20. uint Seed { get; }
  21. /// <summary>
  22. /// Whether to use embedding mode. (embedding) Note that if this is set to true,
  23. /// The LLamaModel won't produce text response anymore.
  24. /// </summary>
  25. bool EmbeddingMode { get; }
  26. /// <summary>
  27. /// RoPE base frequency (null to fetch from the model)
  28. /// </summary>
  29. float? RopeFrequencyBase { get; }
  30. /// <summary>
  31. /// RoPE frequency scaling factor (null to fetch from the model)
  32. /// </summary>
  33. float? RopeFrequencyScale { get; }
  34. /// <summary>
  35. /// The encoding to use for models
  36. /// </summary>
  37. Encoding Encoding { get; }
  38. /// <summary>
  39. /// Number of threads (null = autodetect) (n_threads)
  40. /// </summary>
  41. uint? Threads { get; }
  42. /// <summary>
  43. /// Number of threads to use for batch processing (null = autodetect) (n_threads)
  44. /// </summary>
  45. uint? BatchThreads { get; }
  46. /// <summary>
  47. /// YaRN extrapolation mix factor (null = from model)
  48. /// </summary>
  49. float? YarnExtrapolationFactor { get; }
  50. /// <summary>
  51. /// YaRN magnitude scaling factor (null = from model)
  52. /// </summary>
  53. float? YarnAttentionFactor { get; }
  54. /// <summary>
  55. /// YaRN low correction dim (null = from model)
  56. /// </summary>
  57. float? YarnBetaFast { get; }
  58. /// <summary>
  59. /// YaRN high correction dim (null = from model)
  60. /// </summary>
  61. float? YarnBetaSlow { get; }
  62. /// <summary>
  63. /// YaRN original context length (null = from model)
  64. /// </summary>
  65. uint? YarnOriginalContext { get; }
  66. /// <summary>
  67. /// YaRN scaling method to use.
  68. /// </summary>
  69. RopeScalingType? YarnScalingType { get; }
  70. /// <summary>
  71. /// Override the type of the K cache
  72. /// </summary>
  73. GGMLType? TypeK { get; }
  74. /// <summary>
  75. /// Override the type of the V cache
  76. /// </summary>
  77. GGMLType? TypeV { get; }
  78. /// <summary>
  79. /// Whether to disable offloading the KQV cache to the GPU
  80. /// </summary>
  81. bool NoKqvOffload { get; }
  82. }