You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

IInferenceParams.cs 2.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118
using System.Collections.Generic;
using LLama.Common;
using LLama.Native;
using LLama.Sampling;
  5. namespace LLama.Abstractions
  6. {
  7. /// <summary>
  8. /// The paramters used for inference.
  9. /// </summary>
  10. public interface IInferenceParams
  11. {
  12. /// <summary>
  13. /// number of tokens to keep from initial prompt
  14. /// </summary>
  15. public int TokensKeep { get; set; }
  16. /// <summary>
  17. /// how many new tokens to predict (n_predict), set to -1 to inifinitely generate response
  18. /// until it complete.
  19. /// </summary>
  20. public int MaxTokens { get; set; }
  21. /// <summary>
  22. /// logit bias for specific tokens
  23. /// </summary>
  24. public Dictionary<LLamaToken, float>? LogitBias { get; set; }
  25. /// <summary>
  26. /// Sequences where the model will stop generating further tokens.
  27. /// </summary>
  28. public IReadOnlyList<string> AntiPrompts { get; set; }
  29. /// <summary>
  30. /// 0 or lower to use vocab size
  31. /// </summary>
  32. public int TopK { get; set; }
  33. /// <summary>
  34. /// 1.0 = disabled
  35. /// </summary>
  36. public float TopP { get; set; }
  37. /// <summary>
  38. /// 0.0 = disabled
  39. /// </summary>
  40. public float MinP { get; set; }
  41. /// <summary>
  42. /// 1.0 = disabled
  43. /// </summary>
  44. public float TfsZ { get; set; }
  45. /// <summary>
  46. /// 1.0 = disabled
  47. /// </summary>
  48. public float TypicalP { get; set; }
  49. /// <summary>
  50. /// 1.0 = disabled
  51. /// </summary>
  52. public float Temperature { get; set; }
  53. /// <summary>
  54. /// 1.0 = disabled
  55. /// </summary>
  56. public float RepeatPenalty { get; set; }
  57. /// <summary>
  58. /// last n tokens to penalize (0 = disable penalty, -1 = context size) (repeat_last_n)
  59. /// </summary>
  60. public int RepeatLastTokensCount { get; set; }
  61. /// <summary>
  62. /// frequency penalty coefficient
  63. /// 0.0 = disabled
  64. /// </summary>
  65. public float FrequencyPenalty { get; set; }
  66. /// <summary>
  67. /// presence penalty coefficient
  68. /// 0.0 = disabled
  69. /// </summary>
  70. public float PresencePenalty { get; set; }
  71. /// <summary>
  72. /// Mirostat uses tokens instead of words.
  73. /// algorithm described in the paper https://arxiv.org/abs/2007.14966.
  74. /// 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
  75. /// </summary>
  76. public MirostatType Mirostat { get; set; }
  77. /// <summary>
  78. /// target entropy
  79. /// </summary>
  80. public float MirostatTau { get; set; }
  81. /// <summary>
  82. /// learning rate
  83. /// </summary>
  84. public float MirostatEta { get; set; }
  85. /// <summary>
  86. /// consider newlines as a repeatable token (penalize_nl)
  87. /// </summary>
  88. public bool PenalizeNL { get; set; }
  89. /// <summary>
  90. /// Grammar to constrain possible tokens
  91. /// </summary>
  92. SafeLLamaGrammarHandle? Grammar { get; set; }
  93. /// <summary>
  94. /// Set a custom sampling pipeline to use. <b>If this is set All other sampling parameters are ignored!</b>
  95. /// </summary>
  96. ISamplingPipeline? SamplingPipeline { get; set; }
  97. }
  98. }