
reduce_sum_algorithm.maca

#include "test_utils.h"
#include "performance_utils.h"
#include "yaml_reporter.h"
#include <iostream>
#include <vector>
#include <iomanip>
#include <algorithm> // std::min
#include <cmath>     // std::abs, std::isfinite
#include <map>
#include <string>

// ============================================================================
// Implementation switch - contestants should set this macro to 0 when
// substituting their own implementation.
// ============================================================================
#ifndef USE_DEFAULT_REF_IMPL
#define USE_DEFAULT_REF_IMPL 1 // 1 = default reference implementation, 0 = contestant's custom implementation
#endif

#if USE_DEFAULT_REF_IMPL
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#endif

// Error tolerance
constexpr double REDUCE_ERROR_TOLERANCE = 0.005; // 0.5%
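
// ============================================================================
// Illustrative two-stage reduction kernels - a minimal sketch, NOT the
// required solution and not part of the harness. The kernel names match the
// commented-out launch example inside ReduceSumAlgorithm::reduce(); the
// BLOCK_SIZE template parameter and the grid-stride structure are assumptions
// added here for illustration, and the sketch assumes the MACA toolchain
// accepts CUDA-style kernel syntax as that example implies. Launches must use
// BLOCK_SIZE threads per block. A competitive implementation would likely add
// vectorized loads, warp-level primitives and tuning on top of this.
// ============================================================================
template <typename InputT, typename OutputT, int BLOCK_SIZE = 256>
__global__ void blockReduceKernel(const InputT* d_in, OutputT* d_partial,
                                  int num_items, OutputT init_value) {
    __shared__ OutputT smem[BLOCK_SIZE];
    const int tid = threadIdx.x;

    // Grid-stride accumulation: each thread sums a strided slice of the input.
    OutputT sum = static_cast<OutputT>(0);
    for (int i = blockIdx.x * BLOCK_SIZE + tid; i < num_items;
         i += gridDim.x * BLOCK_SIZE) {
        sum += static_cast<OutputT>(d_in[i]);
    }
    smem[tid] = sum;
    __syncthreads();

    // Tree reduction in shared memory.
    for (int stride = BLOCK_SIZE / 2; stride > 0; stride >>= 1) {
        if (tid < stride) smem[tid] += smem[tid + stride];
        __syncthreads();
    }

    if (tid == 0) {
        // Fold the initial value into the first block's partial sum.
        d_partial[blockIdx.x] = (blockIdx.x == 0) ? smem[0] + init_value : smem[0];
    }
}

template <typename OutputT, int BLOCK_SIZE = 256>
__global__ void finalReduceKernel(const OutputT* d_partial, OutputT* d_out,
                                  int num_blocks) {
    __shared__ OutputT smem[BLOCK_SIZE];
    const int tid = threadIdx.x;

    // Each thread accumulates a strided subset of the per-block partial sums.
    OutputT sum = static_cast<OutputT>(0);
    for (int i = tid; i < num_blocks; i += BLOCK_SIZE) {
        sum += d_partial[i];
    }
    smem[tid] = sum;
    __syncthreads();

    for (int stride = BLOCK_SIZE / 2; stride > 0; stride >>= 1) {
        if (tid < stride) smem[tid] += smem[tid + stride];
        __syncthreads();
    }

    if (tid == 0) *d_out = smem[0];
}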
// ============================================================================
// ReduceSum algorithm interface
// Contestants should replace the Thrust implementation with their own
// high-performance kernels.
// ============================================================================
template <typename InputT = float, typename OutputT = float>
class ReduceSumAlgorithm {
public:
    // Main entry point - contestants must implement this function.
    void reduce(const InputT* d_in, OutputT* d_out, int num_items, OutputT init_value) {
#if !USE_DEFAULT_REF_IMPL
        // ========================================
        // Contestant implementation area
        // ========================================
        // TODO: implement your own high-performance reduction here.
        // Example: one or more custom kernels may be launched
        // (see the illustrative kernel sketch above this class), e.g.:
        // blockReduceKernel<<<grid, block>>>(d_in, temp_results, num_items, init_value);
        // finalReduceKernel<<<1, block>>>(temp_results, d_out, grid.x);
#else
        // ========================================
        // Default reference implementation
        // ========================================
        auto input_ptr = thrust::device_pointer_cast(d_in);
        auto output_ptr = thrust::device_pointer_cast(d_out);
        // Reduce directly with thrust::reduce.
        *output_ptr = thrust::reduce(
            thrust::device,
            input_ptr,
            input_ptr + num_items,
            static_cast<OutputT>(init_value));
#endif
    }

    // Report which implementation is currently compiled in.
    static const char* getImplementationStatus() {
#if USE_DEFAULT_REF_IMPL
        return "DEFAULT_REF_IMPL";
#else
        return "CUSTOM_IMPL";
#endif
    }

private:
    // Contestants may add helper functions and member variables here,
    // e.g. intermediate result buffers or multi-stage reduction state.
};
// ============================================================================
// Testing and performance evaluation
// ============================================================================
bool testCorrectness() {
    std::cout << "ReduceSum correctness test..." << std::endl;
    TestDataGenerator generator;
    ReduceSumAlgorithm<float, float> algorithm;
    bool allPassed = true;

    // Test different data sizes (limited here to keep the test fast).
    for (int i = 0; i < NUM_TEST_SIZES && i < 2; i++) {
        int size = std::min(TEST_SIZES[i], 10000);
        std::cout << "  Test size: " << size << std::endl;

        // Ordinary data
        {
            auto data = generator.generateRandomFloats(size, -10.0f, 10.0f);
            float init_value = 1.0f;

            // CPU reference result
            double cpu_result = cpuReduceSum(data, static_cast<double>(init_value));

            // GPU result
            float *d_in;
            float *d_out;
            MACA_CHECK(mcMalloc(&d_in, size * sizeof(float)));
            MACA_CHECK(mcMalloc(&d_out, sizeof(float)));
            MACA_CHECK(mcMemcpy(d_in, data.data(), size * sizeof(float), mcMemcpyHostToDevice));
            algorithm.reduce(d_in, d_out, size, init_value);
            float gpu_result;
            MACA_CHECK(mcMemcpy(&gpu_result, d_out, sizeof(float), mcMemcpyDeviceToHost));

            // Check the relative error
            double relative_error = std::abs(gpu_result - cpu_result) / std::abs(cpu_result);
            if (relative_error > REDUCE_ERROR_TOLERANCE) {
                std::cout << "    FAILED: error too large: " << relative_error << std::endl;
                allPassed = false;
            } else {
                std::cout << "    Passed (error: " << relative_error << ")" << std::endl;
            }
            mcFree(d_in);
            mcFree(d_out);
        }

        // Special values (NaN, Inf)
        if (size > 100) {
            std::cout << "  Testing special values..." << std::endl;
            auto data = generator.generateSpecialFloats(size);
            float init_value = 0.0f;
            double cpu_result = cpuReduceSum(data, static_cast<double>(init_value));

            float *d_in;
            float *d_out;
            MACA_CHECK(mcMalloc(&d_in, size * sizeof(float)));
            MACA_CHECK(mcMalloc(&d_out, sizeof(float)));
            MACA_CHECK(mcMemcpy(d_in, data.data(), size * sizeof(float), mcMemcpyHostToDevice));
            algorithm.reduce(d_in, d_out, size, init_value);
            float gpu_result;
            MACA_CHECK(mcMemcpy(&gpu_result, d_out, sizeof(float), mcMemcpyDeviceToHost));

            // For inputs containing special values, check they are handled consistently.
            if (std::isfinite(cpu_result) && std::isfinite(gpu_result)) {
                double relative_error = std::abs(gpu_result - cpu_result) / std::abs(cpu_result);
                if (relative_error > REDUCE_ERROR_TOLERANCE) {
                    std::cout << "    FAILED: special values handled incorrectly" << std::endl;
                    allPassed = false;
                } else {
                    std::cout << "    Passed (special-value handling)" << std::endl;
                }
            } else {
                std::cout << "    Passed (non-finite result from special values)" << std::endl;
            }
            mcFree(d_in);
            mcFree(d_out);
        }
    }
    return allPassed;
}
void benchmarkPerformance() {
    PerformanceDisplay::printReduceSumHeader();
    TestDataGenerator generator;
    PerformanceMeter meter;
    ReduceSumAlgorithm<float, float> algorithm;
    const int WARMUP_ITERATIONS = 5;
    const int BENCHMARK_ITERATIONS = 10;

    // Collected entries for the YAML report
    std::vector<std::map<std::string, std::string>> perf_data;

    for (int i = 0; i < NUM_TEST_SIZES; i++) {
        int size = TEST_SIZES[i];

        // Generate test data
        auto data = generator.generateRandomFloats(size);
        float init_value = 0.0f;

        // Allocate GPU memory
        float *d_in;
        float *d_out;
        MACA_CHECK(mcMalloc(&d_in, size * sizeof(float)));
        MACA_CHECK(mcMalloc(&d_out, sizeof(float)));
        MACA_CHECK(mcMemcpy(d_in, data.data(), size * sizeof(float), mcMemcpyHostToDevice));

        // Warmup
        for (int iter = 0; iter < WARMUP_ITERATIONS; iter++) {
            algorithm.reduce(d_in, d_out, size, init_value);
        }

        // Timed iterations
        float total_time = 0;
        for (int iter = 0; iter < BENCHMARK_ITERATIONS; iter++) {
            meter.startTiming();
            algorithm.reduce(d_in, d_out, size, init_value);
            total_time += meter.stopTiming();
        }
        float avg_time = total_time / BENCHMARK_ITERATIONS;

        // Compute performance metrics
        auto metrics = PerformanceCalculator::calculateReduceSum(size, avg_time);

        // Print performance data
        PerformanceDisplay::printReduceSumData(size, avg_time, metrics);

        // Collect data for the YAML report
        auto entry = YAMLPerformanceReporter::createEntry();
        entry["data_size"] = std::to_string(size);
        entry["time_ms"] = std::to_string(avg_time);
        entry["throughput_gps"] = std::to_string(metrics.throughput_gps);
        entry["data_type"] = "float";
        perf_data.push_back(entry);

        mcFree(d_in);
        mcFree(d_out);
    }

    // Generate the YAML performance report
    YAMLPerformanceReporter::generateReduceSumYAML(perf_data, "reduce_sum_performance.yaml");
    PerformanceDisplay::printSavedMessage("reduce_sum_performance.yaml");
}
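
// ----------------------------------------------------------------------------
// Illustrative only: a hypothetical helper showing how effective bandwidth for
// a sum reduction is commonly estimated (each input element is read once, so
// bytes moved is roughly num_items * sizeof(float)). This is an assumption for
// reference, not necessarily the formula used by
// PerformanceCalculator::calculateReduceSum.
// ----------------------------------------------------------------------------
static inline double estimateReduceBandwidthGBps(int num_items, float time_ms) {
    // (bytes read once) / (elapsed seconds), expressed in GB/s
    return (static_cast<double>(num_items) * sizeof(float)) /
           (static_cast<double>(time_ms) * 1.0e6);
}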
// ============================================================================
// Main
// ============================================================================
int main(int argc, char* argv[]) {
    std::cout << "=== ReduceSum algorithm test ===" << std::endl;

    // Parse the run mode argument
    std::string mode = "all";
    if (argc > 1) {
        mode = argv[1];
    }

    bool correctness_passed = true;
    bool performance_completed = true;
    try {
        if (mode == "correctness" || mode == "all") {
            correctness_passed = testCorrectness();
        }
        if (mode == "performance" || mode == "all") {
            if (correctness_passed || mode == "performance") {
                benchmarkPerformance();
            } else {
                std::cout << "Skipping the performance benchmark because the correctness test failed" << std::endl;
                performance_completed = false;
            }
        }

        std::cout << "\n=== Test complete ===" << std::endl;
        std::cout << "Implementation status: " << ReduceSumAlgorithm<float, float>::getImplementationStatus() << std::endl;
        if (mode == "all") {
            std::cout << "Correctness: " << (correctness_passed ? "passed" : "failed") << std::endl;
            std::cout << "Performance benchmark: " << (performance_completed ? "completed" : "skipped") << std::endl;
        }
        return correctness_passed ? 0 : 1;
    } catch (const std::exception& e) {
        std::cerr << "Test error: " << e.what() << std::endl;
        return 1;
    }
}
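
// ----------------------------------------------------------------------------
// Usage (the binary name is illustrative; the actual target name depends on
// the build setup):
//   ./reduce_sum_algorithm               # "all": correctness test + benchmark
//   ./reduce_sum_algorithm correctness   # correctness test only
//   ./reduce_sum_algorithm performance   # performance benchmark only
// The process exits with 0 when the correctness test passes, 1 otherwise.
// ----------------------------------------------------------------------------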