You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

InferenceParams.cs 3.7 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117
  1. using LLama.Abstractions;
  2. using System;
  3. using System.Collections.Generic;
  4. using LLama.Native;
  5. namespace LLama.Common
  6. {
  7. using llama_token = Int32;
  8. /// <summary>
  9. /// The paramters used for inference.
  10. /// </summary>
  11. public record InferenceParams : IInferenceParams
  12. {
  13. /// <summary>
  14. /// number of tokens to keep from initial prompt
  15. /// </summary>
  16. public int TokensKeep { get; set; } = 0;
  17. /// <summary>
  18. /// how many new tokens to predict (n_predict), set to -1 to inifinitely generate response
  19. /// until it complete.
  20. /// </summary>
  21. public int MaxTokens { get; set; } = -1;
  22. /// <summary>
  23. /// logit bias for specific tokens
  24. /// </summary>
  25. public Dictionary<llama_token, float>? LogitBias { get; set; } = null;
  26. /// <summary>
  27. /// Sequences where the model will stop generating further tokens.
  28. /// </summary>
  29. public IReadOnlyList<string> AntiPrompts { get; set; } = Array.Empty<string>();
  30. /// <summary>
  31. /// 0 or lower to use vocab size
  32. /// </summary>
  33. public int TopK { get; set; } = 40;
  34. /// <summary>
  35. /// 1.0 = disabled
  36. /// </summary>
  37. public float TopP { get; set; } = 0.95f;
  38. /// <summary>
  39. /// 1.0 = disabled
  40. /// </summary>
  41. public float TfsZ { get; set; } = 1.0f;
  42. /// <summary>
  43. /// 1.0 = disabled
  44. /// </summary>
  45. public float TypicalP { get; set; } = 1.0f;
  46. /// <summary>
  47. /// 1.0 = disabled
  48. /// </summary>
  49. public float Temperature { get; set; } = 0.8f;
  50. /// <summary>
  51. /// 1.0 = disabled
  52. /// </summary>
  53. public float RepeatPenalty { get; set; } = 1.1f;
  54. /// <summary>
  55. /// last n tokens to penalize (0 = disable penalty, -1 = context size) (repeat_last_n)
  56. /// </summary>
  57. public int RepeatLastTokensCount { get; set; } = 64;
  58. /// <summary>
  59. /// frequency penalty coefficient
  60. /// 0.0 = disabled
  61. /// </summary>
  62. public float FrequencyPenalty { get; set; } = .0f;
  63. /// <summary>
  64. /// presence penalty coefficient
  65. /// 0.0 = disabled
  66. /// </summary>
  67. public float PresencePenalty { get; set; } = .0f;
  68. /// <summary>
  69. /// Mirostat uses tokens instead of words.
  70. /// algorithm described in the paper https://arxiv.org/abs/2007.14966.
  71. /// 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
  72. /// </summary>
  73. public MirostatType Mirostat { get; set; } = MirostatType.Disable;
  74. /// <summary>
  75. /// target entropy
  76. /// </summary>
  77. public float MirostatTau { get; set; } = 5.0f;
  78. /// <summary>
  79. /// learning rate
  80. /// </summary>
  81. public float MirostatEta { get; set; } = 0.1f;
  82. /// <summary>
  83. /// consider newlines as a repeatable token (penalize_nl)
  84. /// </summary>
  85. public bool PenalizeNL { get; set; } = true;
  86. /// <summary>
  87. /// A grammar to constrain the possible tokens
  88. /// </summary>
  89. public SafeLLamaGrammarHandle? Grammar { get; set; }
  90. }
  91. /// <summary>
  92. /// Type of "mirostat" sampling to use.
  93. /// https://github.com/basusourya/mirostat
  94. /// </summary>
  95. public enum MirostatType
  96. {
  97. /// <summary>
  98. /// Disable Mirostat sampling
  99. /// </summary>
  100. Disable = 0,
  101. /// <summary>
  102. /// Original mirostat algorithm
  103. /// </summary>
  104. Mirostat = 1,
  105. /// <summary>
  106. /// Mirostat 2.0 algorithm
  107. /// </summary>
  108. Mirostat2 = 2
  109. }
  110. }