
LLamaQuantizer.cs 6.2 kB

using LLama.Native;
using System;
using System.Collections.Generic;

namespace LLama
{
    /// <summary>
    /// The quantizer to quantize the model.
    /// </summary>
    public static class LLamaQuantizer
    {
        /// <summary>
        /// Quantize the model.
        /// </summary>
        /// <param name="srcFileName">The model file to be quantized.</param>
        /// <param name="dstFilename">The path to save the quantized model.</param>
        /// <param name="ftype">The type of quantization.</param>
        /// <param name="nthread">Number of threads to use during quantization. Defaults to the number of physical cores.</param>
        /// <param name="allowRequantize">Whether to allow requantizing tensors that have already been quantized.</param>
        /// <param name="quantizeOutputTensor">Whether to quantize the output tensor.</param>
        /// <returns>Whether the quantization is successful.</returns>
        /// <exception cref="ArgumentException"></exception>
        public static bool Quantize(string srcFileName, string dstFilename, LLamaFtype ftype, int nthread = -1, bool allowRequantize = true, bool quantizeOutputTensor = false)
        {
            if (!ValidateFtype(ftype))
            {
                throw new ArgumentException($"The type {Enum.GetName(typeof(LLamaFtype), ftype)} is not a valid type " +
                                            $"to perform quantization.");
            }

            var quantizeParams = NativeApi.llama_model_quantize_default_params();
            quantizeParams.ftype = ftype;
            quantizeParams.nthread = nthread;
            quantizeParams.allow_requantize = allowRequantize;
            quantizeParams.quantize_output_tensor = quantizeOutputTensor;

            unsafe
            {
                return NativeApi.llama_model_quantize(srcFileName, dstFilename, &quantizeParams) == 0;
            }
        }

        /// <summary>
        /// Quantize the model.
        /// </summary>
        /// <param name="srcFileName">The model file to be quantized.</param>
        /// <param name="dstFilename">The path to save the quantized model.</param>
        /// <param name="ftype">The type of quantization.</param>
        /// <param name="nthread">Number of threads to use during quantization. Defaults to the number of physical cores.</param>
        /// <param name="allowRequantize">Whether to allow requantizing tensors that have already been quantized.</param>
        /// <param name="quantizeOutputTensor">Whether to quantize the output tensor.</param>
        /// <returns>Whether the quantization is successful.</returns>
        /// <exception cref="ArgumentException"></exception>
        public static bool Quantize(string srcFileName, string dstFilename, string ftype, int nthread = -1, bool allowRequantize = true,
                                    bool quantizeOutputTensor = false)
        {
            return Quantize(srcFileName, dstFilename, StringToFtype(ftype), nthread, allowRequantize, quantizeOutputTensor);
        }

        private static bool ValidateFtype(LLamaFtype ftype)
        {
            // Validation copied from here:
            // https://github.com/ggerganov/llama.cpp/blob/d71ac90985854b0905e1abba778e407e17f9f887/llama.cpp#L9613
            switch (ftype)
            {
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_0:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_1:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_0:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_1:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q8_0:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_F16:
                case LLamaFtype.LLAMA_FTYPE_ALL_F32:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q2_K_S:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q2_K:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q3_K_XS:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q3_K_S:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q3_K_M:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q3_K_L:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_K_S:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_K_M:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_K_S:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_K_M:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q6_K:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_IQ2_XXS:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_IQ2_XS:
                case LLamaFtype.LLAMA_FTYPE_MOSTLY_IQ3_XXS:
                    return true;

                case LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
                case LLamaFtype.LLAMA_FTYPE_GUESSED:
                default:
                    return false;
            }
        }

        /// <summary>
        /// Parse a string into a LLamaFtype. This is a "relaxed" parsing, which allows any string which is contained within
        /// the enum name to be used.
        ///
        /// For example "Q5_K_M" will convert to "LLAMA_FTYPE_MOSTLY_Q5_K_M"
        /// </summary>
        /// <param name="str">The string to parse.</param>
        /// <returns>The single matching LLamaFtype.</returns>
        /// <exception cref="ArgumentException"></exception>
        private static LLamaFtype StringToFtype(string str)
        {
            // Find all variants which contain the input string
            var matches = new List<LLamaFtype>();
            foreach (LLamaFtype ftype in Enum.GetValues(typeof(LLamaFtype)))
            {
                var name = Enum.GetName(typeof(LLamaFtype), ftype);

                // Note: this is using "IndexOf" instead of "Contains" to be compatible with netstandard2.0
#pragma warning disable CA2249
                if (name != null && name.IndexOf(str, StringComparison.OrdinalIgnoreCase) >= 0)
                    matches.Add(ftype);
#pragma warning restore CA2249
            }

            // If there was just one match, success!
            if (matches.Count == 1)
                return matches[0];

            // If none matched throw a generic error
            if (matches.Count == 0)
                throw new ArgumentException($"Unknown ftype \"{str}\" for quantization.");

            // There were several matches, throw an error asking the user to be more specific
            throw new ArgumentException($"\"{str}\" matches multiple potential ftypes: {string.Join(",", matches)}");
        }
    }
}
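
For context, a minimal usage sketch of this class is shown below. The file paths are placeholders, and a native llama.cpp backend (for example the LLamaSharp.Backend.Cpu package) is assumed to be available at runtime; the string overload relies on the relaxed ftype parsing implemented by StringToFtype.

using System;
using LLama;

// A quick sketch of calling LLamaQuantizer; the paths below are hypothetical.
public static class QuantizeExample
{
    public static void Main()
    {
        // "Q4_K_M" resolves to LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_K_M via the
        // relaxed string parsing shown above.
        var ok = LLamaQuantizer.Quantize(
            srcFileName: "model-f16.gguf",      // hypothetical source model
            dstFilename: "model-q4_k_m.gguf",   // hypothetical output path
            ftype: "Q4_K_M");

        Console.WriteLine(ok ? "Quantization succeeded." : "Quantization failed.");
    }
}

Note that an ambiguous ftype string such as "Q4_K" would match both Q4_K_S and Q4_K_M and therefore throw an ArgumentException, so the full suffix should be given.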