You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

LLamaQuantizer.cs 4.3 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394
  1. using LLama.Native;
  2. using System;
  3. using System.Collections.Generic;
  4. using System.Linq;
  5. using System.Text;
  6. namespace LLama
  7. {
  8. /// <summary>
  9. /// The quantizer to quantize the model.
  10. /// </summary>
  11. public static class LLamaQuantizer
  12. {
  13. /// <summary>
  14. /// Quantize the model.
  15. /// </summary>
  16. /// <param name="srcFileName">The model file to be quantized.</param>
  17. /// <param name="dstFilename">The path to save the quantized model.</param>
  18. /// <param name="ftype">The type of quantization.</param>
  19. /// <param name="nthread">Thread to be used during the quantization. By default it's the physical core number.</param>
  20. /// <returns>Whether the quantization is successful.</returns>
  21. /// <exception cref="ArgumentException"></exception>
  22. public static unsafe bool Quantize(string srcFileName, string dstFilename, LLamaFtype ftype, int nthread = -1, bool allowRequantize = true,
  23. bool quantizeOutputTensor = false)
  24. {
  25. if (!ValidateFtype(ftype))
  26. {
  27. throw new ArgumentException($"The type {Enum.GetName(typeof(LLamaFtype), ftype)} is not a valid type " +
  28. $"to perform quantization.");
  29. }
  30. var quantizeParams = NativeApi.llama_model_quantize_default_params();
  31. quantizeParams.ftype = ftype;
  32. quantizeParams.nthread = nthread;
  33. quantizeParams.allow_requantize = allowRequantize;
  34. quantizeParams.quantize_output_tensor = quantizeOutputTensor;
  35. LLamaModelQuantizeParams* p = &quantizeParams;
  36. return NativeApi.llama_model_quantize(srcFileName, dstFilename, p) == 0;
  37. }
  38. /// <summary>
  39. /// Quantize the model.
  40. /// </summary>
  41. /// <param name="srcFileName">The model file to be quantized.</param>
  42. /// <param name="dstFilename">The path to save the quantized model.</param>
  43. /// <param name="ftype">The type of quantization.</param>
  44. /// <param name="nthread">Thread to be used during the quantization. By default it's the physical core number.</param>
  45. /// <returns>Whether the quantization is successful.</returns>
  46. /// <exception cref="ArgumentException"></exception>
  47. public static bool Quantize(string srcFileName, string dstFilename, string ftype, int nthread = -1, bool allowRequantize = true,
  48. bool quantizeOutputTensor = false)
  49. {
  50. return Quantize(srcFileName, dstFilename, StringToFtype(ftype), nthread, allowRequantize, quantizeOutputTensor);
  51. }
  52. private static bool ValidateFtype(string ftype)
  53. {
  54. return new string[] { "q4_0", "q4_1", "q5_0", "q5_1", "q8_0" }.Contains(ftype);
  55. }
  56. private static bool ValidateFtype(LLamaFtype ftype)
  57. {
  58. return ftype is LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_0 or LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_1
  59. or LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_0 or LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_1 or LLamaFtype.LLAMA_FTYPE_MOSTLY_Q8_0;
  60. }
  61. private static string FtypeToString(LLamaFtype ftype)
  62. {
  63. return ftype switch
  64. {
  65. LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_0 => "q4_0",
  66. LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_1 => "q4_1",
  67. LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_0 => "q5_0",
  68. LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_1 => "q5_1",
  69. LLamaFtype.LLAMA_FTYPE_MOSTLY_Q8_0 => "q8_0",
  70. _ => throw new ArgumentException($"The type {Enum.GetName(typeof(LLamaFtype), ftype)} is not a valid type " +
  71. $"to perform quantization.")
  72. };
  73. }
  74. private static LLamaFtype StringToFtype(string str)
  75. {
  76. return str switch
  77. {
  78. "q4_0" => LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_0,
  79. "q4_1" => LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_1,
  80. "q5_0" => LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_0,
  81. "q5_1" => LLamaFtype.LLAMA_FTYPE_MOSTLY_Q5_1,
  82. "q8_0" => LLamaFtype.LLAMA_FTYPE_MOSTLY_Q8_0,
  83. _ => throw new ArgumentException($"Invalid ftype {str} to quantize.")
  84. };
  85. }
  86. }
  87. }

C#/.NET上易用的LLM高性能推理框架,支持LLaMA和LLaVA系列模型。