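"""Learnware market workflow on the M5 sales-forecasting dataset.

For each data index provided by the m5 DataLoader, this script builds a
learnware (an RKME statistical specification plus a trained model, zipped
with its package files), uploads all of them to an "easy" learnware market,
then evaluates single-learnware search and multi-learnware reuse (job
selector and averaging ensemble) on each user's test split.
"""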
import os
import time
import zipfile
from shutil import copyfile, rmtree

import fire
import numpy as np
from tqdm import tqdm

import learnware
from learnware.logger import get_module_logger
from learnware.market import BaseUserInfo, instantiate_learnware_market
from learnware.reuse import AveragingReuser, JobSelectorReuser
from learnware.specification import generate_rkme_table_spec

from m5 import DataLoader

logger = get_module_logger("m5_test", level="INFO")
# Input/output schemas shared by all uploaded learnwares and by the user query.
output_description = {
    "Dimension": 1,
    "Description": {},
}
input_description = {
    "Dimension": 82,
    "Description": {},
}

# Semantic specification template for uploaded learnwares; "Name" and
# "Description" are filled in per learnware before upload.
semantic_specs = [
    {
        "Data": {"Values": ["Table"], "Type": "Class"},
        "Task": {"Values": ["Regression"], "Type": "Class"},
        "Library": {"Values": ["Scikit-learn"], "Type": "Class"},
        "Scenario": {"Values": ["Business"], "Type": "Tag"},
        "Description": {"Values": "", "Type": "String"},
        "Name": {"Values": "learnware_1", "Type": "String"},
        "Input": input_description,
        "Output": output_description,
    }
]

# Semantic specification of the user task; "Name" stays empty because the
# search is driven by the semantic tags plus the RKME statistical spec.
user_semantic = {
    "Data": {"Values": ["Table"], "Type": "Class"},
    "Task": {"Values": ["Regression"], "Type": "Class"},
    "Library": {"Values": ["Scikit-learn"], "Type": "Class"},
    "Scenario": {"Values": ["Business"], "Type": "Tag"},
    "Description": {"Values": "", "Type": "String"},
    "Name": {"Values": "", "Type": "String"},
    "Input": input_description,
    "Output": output_description,
}
class M5DatasetWorkflow:
    def _init_m5_dataset(self):
        """Regenerate the M5 data splits and retrain one model per algorithm."""
        m5 = DataLoader()
        m5.regenerate_data()
        algo_list = ["ridge", "lgb"]
        for algo in algo_list:
            m5.set_algo(algo)
            m5.retrain_models()

    def _init_learnware_market(self):
        """Initialize the learnware market and upload every zipped learnware."""
        learnware.init()
        easy_market = instantiate_learnware_market(name="easy", rebuild=True)
        print("Total Items:", len(easy_market))

        curr_root = os.path.dirname(os.path.abspath(__file__))
        curr_root = os.path.join(curr_root, "learnware_pool")
        zip_path_list = [os.path.join(curr_root, zip_path) for zip_path in os.listdir(curr_root)]

        # Fill the per-learnware fields of the semantic spec template, then upload.
        for idx, zip_path in enumerate(zip_path_list):
            semantic_spec = semantic_specs[0]
            semantic_spec["Name"]["Values"] = "learnware_%d" % (idx)
            semantic_spec["Description"]["Values"] = "test_learnware_number_%d" % (idx)
            easy_market.add_learnware(zip_path, semantic_spec)
        print("Total Items:", len(easy_market))
    def prepare_learnware(self, regenerate_flag=False):
        """Generate an RKME spec per data index and package spec + model into a zip."""
        if regenerate_flag:
            self._init_m5_dataset()

        m5 = DataLoader()
        idx_list = m5.get_idx_list()
        algo_list = ["lgb"]  # full list: ["ridge", "lgb"]
        curr_root = os.path.dirname(os.path.abspath(__file__))
        curr_root = os.path.join(curr_root, "learnware_pool")
        os.makedirs(curr_root, exist_ok=True)

        for idx in tqdm(idx_list):
            train_x, train_y, test_x, test_y = m5.get_idx_data(idx)

            # Reduced-set kernel mean embedding (RKME) over the training features.
            st = time.time()
            spec = generate_rkme_table_spec(X=train_x, gamma=0.1, cuda_idx=0)
            ed = time.time()
            logger.info("Stat spec generated in %.3f s" % (ed - st))

            for algo in algo_list:
                m5.set_algo(algo)
                dir_path = os.path.join(curr_root, f"{algo}_{idx}")
                os.makedirs(dir_path, exist_ok=True)

                # A learnware bundle: statistical spec, trained model, and the
                # package files (__init__.py, learnware.yaml).
                spec_path = os.path.join(dir_path, "rkme.json")
                spec.save(spec_path)
                model_path = m5.get_model_path(idx)
                model_file = os.path.join(dir_path, "model.out")
                copyfile(model_path, model_file)
                init_file = os.path.join(dir_path, "__init__.py")
                copyfile("example_init.py", init_file)
                yaml_file = os.path.join(dir_path, "learnware.yaml")
                copyfile("example.yaml", yaml_file)

                # Zip the bundle with files stored flat and uncompressed, then
                # drop the staging directory.
                zip_file = dir_path + ".zip"
                with zipfile.ZipFile(zip_file, "w") as zip_obj:
                    for foldername, _, filenames in os.walk(dir_path):
                        for filename in filenames:
                            file_path = os.path.join(foldername, filename)
                            zip_info = zipfile.ZipInfo(filename)
                            zip_info.compress_type = zipfile.ZIP_STORED
                            with open(file_path, "rb") as file:
                                zip_obj.writestr(zip_info, file.read())
                rmtree(dir_path)
    def test(self, regenerate_flag=False):
        self.prepare_learnware(regenerate_flag)
        self._init_learnware_market()
        easy_market = instantiate_learnware_market(name="easy")
        print("Total Items:", len(easy_market))

        m5 = DataLoader()
        idx_list = m5.get_idx_list()
        os.makedirs("./user_spec", exist_ok=True)

        single_score_list = []
        random_score_list = []
        job_selector_score_list = []
        ensemble_score_list = []
        improve_list = []

        for idx in idx_list:
            train_x, train_y, test_x, test_y = m5.get_idx_data(idx)

            # Treat the test split as the user's unlabeled data: build an RKME
            # spec from it and search the market with it.
            user_spec = generate_rkme_table_spec(X=test_x, gamma=0.1, cuda_idx=0)
            user_spec_path = f"./user_spec/user_{idx}.json"
            user_spec.save(user_spec_path)
            user_info = BaseUserInfo(semantic_spec=user_semantic, stat_info={"RKMETableSpecification": user_spec})
            search_result = easy_market.search_learnware(user_info)
            single_result = search_result.get_single_results()
            multiple_result = search_result.get_multiple_results()

            print(f"search result of user {idx}:")
            print(
                f"single model num: {len(single_result)}, max_score: {single_result[0].score}, min_score: {single_result[-1].score}"
            )

            # Evaluate every returned single learnware on the user's test data.
            loss_list = []
            for single_item in single_result:
                pred_y = single_item.learnware.predict(test_x)
                loss_list.append(m5.score(test_y, pred_y))
            print(
                f"Top1-score: {single_result[0].score}, learnware_id: {single_result[0].learnware.id}, loss: {loss_list[0]}"
            )

            # Fall back to the top single learnware when no mixture is returned.
            if len(multiple_result) > 0:
                mixture_id = " ".join([item.id for item in multiple_result[0].learnwares])
                print(f"mixture_score: {multiple_result[0].score}, mixture_learnware: {mixture_id}")
                mixture_learnware_list = multiple_result[0].learnwares
            else:
                mixture_learnware_list = [single_result[0].learnware]

            # Job selector reuse: route each sample to the learnware whose
            # training distribution it most resembles.
            reuse_job_selector = JobSelectorReuser(learnware_list=mixture_learnware_list, use_herding=False)
            job_selector_predict_y = reuse_job_selector.predict(user_data=test_x)
            job_selector_score = m5.score(test_y, job_selector_predict_y)
            print(f"mixture reuse loss (job selector): {job_selector_score}")
            # Averaging-ensemble reuse; "mean" averages the raw model outputs,
            # which suits this regression task ("vote_by_prob" is meant for
            # classifier probability outputs).
            reuse_ensemble = AveragingReuser(learnware_list=mixture_learnware_list, mode="mean")
            ensemble_predict_y = reuse_ensemble.predict(user_data=test_x)
            ensemble_score = m5.score(test_y, ensemble_predict_y)
            print(f"mixture reuse loss (ensemble): {ensemble_score}\n")

            single_score_list.append(loss_list[0])
            random_score_list.append(np.mean(loss_list))
            job_selector_score_list.append(job_selector_score)
            ensemble_score_list.append(ensemble_score)
            # Relative loss reduction of the top-1 search pick over the average
            # (i.e., randomly picked) single learnware.
            improve_list.append((np.mean(loss_list) - loss_list[0]) / np.mean(loss_list))

        logger.info("Single search score: %.3f +/- %.3f" % (np.mean(single_score_list), np.std(single_score_list)))
        logger.info("Random search score: %.3f +/- %.3f" % (np.mean(random_score_list), np.std(random_score_list)))
        logger.info("Average score improvement: %.3f" % (np.mean(improve_list)))
        logger.info(
            "Job selector score: %.3f +/- %.3f" % (np.mean(job_selector_score_list), np.std(job_selector_score_list))
        )
        logger.info(
            "Average ensemble score: %.3f +/- %.3f" % (np.mean(ensemble_score_list), np.std(ensemble_score_list))
        )


if __name__ == "__main__":
    fire.Fire(M5DatasetWorkflow)
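
# Example invocation through the python-fire CLI above (a sketch; flag syntax
# follows fire's default argument parsing):
#   python main.py prepare_learnware
#   python main.py test --regenerate_flag=True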