
InferenceOptions.cs

using System;
using System.Collections.Generic;
using LLama.Common;
using LLama.Abstractions;
using LLama.Native;

namespace LLama.Web.Common
{
    public class InferenceOptions : IInferenceParams
    {
        /// <summary>
        /// Number of tokens to keep from the initial prompt.
        /// </summary>
        public int TokensKeep { get; set; } = 0;

        /// <summary>
        /// How many new tokens to predict (n_predict); set to -1 to generate indefinitely
        /// until the response is complete.
        /// </summary>
        public int MaxTokens { get; set; } = -1;

        /// <summary>
        /// Logit bias for specific tokens.
        /// </summary>
        public Dictionary<int, float>? LogitBias { get; set; } = null;

        /// <summary>
        /// Sequences where the model will stop generating further tokens.
        /// </summary>
        public IReadOnlyList<string> AntiPrompts { get; set; } = Array.Empty<string>();

        /// <summary>
        /// Path to a file for saving/loading the model eval state.
        /// </summary>
        public string PathSession { get; set; } = string.Empty;

        /// <summary>
        /// String to suffix user inputs with.
        /// </summary>
        public string InputSuffix { get; set; } = string.Empty;

        /// <summary>
        /// String to prefix user inputs with.
        /// </summary>
        public string InputPrefix { get; set; } = string.Empty;

        /// <summary>
        /// Top-K sampling; 0 or lower to use the vocab size.
        /// </summary>
        public int TopK { get; set; } = 40;

        /// <summary>
        /// Top-P (nucleus) sampling; 1.0 = disabled.
        /// </summary>
        public float TopP { get; set; } = 0.95f;

        /// <summary>
        /// Tail-free sampling; 1.0 = disabled.
        /// </summary>
        public float TfsZ { get; set; } = 1.0f;

        /// <summary>
        /// Locally typical sampling; 1.0 = disabled.
        /// </summary>
        public float TypicalP { get; set; } = 1.0f;

        /// <summary>
        /// Sampling temperature; 1.0 = disabled.
        /// </summary>
        public float Temperature { get; set; } = 0.8f;

        /// <summary>
        /// Repetition penalty; 1.0 = disabled.
        /// </summary>
        public float RepeatPenalty { get; set; } = 1.1f;

        /// <summary>
        /// Last n tokens to penalize (0 = disable penalty, -1 = context size) (repeat_last_n).
        /// </summary>
        public int RepeatLastTokensCount { get; set; } = 64;

        /// <summary>
        /// Frequency penalty coefficient; 0.0 = disabled.
        /// </summary>
        public float FrequencyPenalty { get; set; } = .0f;

        /// <summary>
        /// Presence penalty coefficient; 0.0 = disabled.
        /// </summary>
        public float PresencePenalty { get; set; } = .0f;

        /// <summary>
        /// Mirostat uses tokens instead of words.
        /// Algorithm described in the paper https://arxiv.org/abs/2007.14966.
        /// 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0.
        /// </summary>
        public MirostatType Mirostat { get; set; } = MirostatType.Disable;

        /// <summary>
        /// Mirostat target entropy.
        /// </summary>
        public float MirostatTau { get; set; } = 5.0f;

        /// <summary>
        /// Mirostat learning rate.
        /// </summary>
        public float MirostatEta { get; set; } = 0.1f;

        /// <summary>
        /// Consider newlines as a repeatable token (penalize_nl).
        /// </summary>
        public bool PenalizeNL { get; set; } = true;

        /// <summary>
        /// A grammar to constrain possible tokens.
        /// </summary>
        public SafeLLamaGrammarHandle? Grammar { get; set; } = null;
    }
}
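
A minimal usage sketch (not part of this file): InferenceOptions implements IInferenceParams, so an instance can be passed wherever LLamaSharp expects inference parameters. The model path, the chosen sampling values, and the ModelParams/LLamaWeights/InteractiveExecutor setup below are illustrative assumptions about the hosting code, not something defined in the class above.

// Sketch only: assumes a LLamaSharp build that exposes ModelParams,
// LLamaWeights and InteractiveExecutor; the model path is a placeholder.
using System;
using LLama;
using LLama.Common;
using LLama.Web.Common;

var options = new InferenceOptions
{
    MaxTokens = 256,                 // stop after 256 new tokens
    Temperature = 0.7f,              // slightly less random than the 0.8 default
    AntiPrompts = new[] { "User:" }, // stop generating when the model emits "User:"
};

var modelParams = new ModelParams("path/to/model.gguf"); // placeholder path
using var weights = LLamaWeights.LoadFromFile(modelParams);
using var context = weights.CreateContext(modelParams);
var executor = new InteractiveExecutor(context);

// InferenceOptions implements IInferenceParams, so it can be passed directly.
await foreach (var token in executor.InferAsync("Hello!", options))
    Console.Write(token);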