
evaluate.py

import os

# Runtime configuration for the Sedna lifelong-learning evaluation job.
os.environ["TEST_DATASET_URL"] = "/home/lsq/RFNet/data_index/test.txt"
os.environ["MODEL_URLS"] = "s3://kubeedge/sedna-robo/kb/index.pkl"
os.environ["OUTPUT_URL"] = "s3://kubeedge/sedna-robo/kb_next/"
os.environ["KB_SERVER"] = "http://0.0.0.0:9020"
# Comparison operator and threshold applied to the evaluation metric.
os.environ["operator"] = "<"
os.environ["model_threshold"] = "0.01"
# S3-compatible object storage endpoint and credentials (Huawei Cloud OBS).
os.environ["S3_ENDPOINT_URL"] = "https://obs.cn-north-1.myhuaweicloud.com"
os.environ["SECRET_ACCESS_KEY"] = "OYPxi4uD9k5E90z0Od3Ug99symbJZ0AfyB4oveQc"
os.environ["ACCESS_KEY_ID"] = "EMPTKHQUGPO2CDUFD2YR"

from sedna.core.lifelong_learning import LifelongLearning
from sedna.datasources import TxtDataParse
from sedna.common.config import Context

from accuracy import accuracy
from basemodel import Model


def _load_txt_dataset(dataset_url):
    """Resolve the relative paths on one index line against the original dataset directory."""
    # use original dataset url
    original_dataset_url = Context.get_parameters('original_dataset_url', "")
    dataset_urls = dataset_url.split()
    dataset_urls = [
        os.path.join(
            os.path.dirname(original_dataset_url),
            dataset_url) for dataset_url in dataset_urls]
    # Every entry except the last is an input; the last entry is the label.
    return dataset_urls[:-1], dataset_urls[-1]
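
# Worked example, to make the path resolution above concrete (hypothetical
# paths, not taken from this repository): if original_dataset_url were
# "/home/lsq/RFNet/data_index/test.txt", then
#   _load_txt_dataset("rgb/0001.png depth/0001.png label/0001.png")
# would return
#   (["/home/lsq/RFNet/data_index/rgb/0001.png",
#     "/home/lsq/RFNet/data_index/depth/0001.png"],
#    "/home/lsq/RFNet/data_index/label/0001.png")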
def eval():
    estimator = Model(num_class=31)

    # Parse the evaluation index file into (inputs, label) samples.
    eval_dataset_url = Context.get_parameters("test_dataset_url")
    eval_data = TxtDataParse(data_type="eval", func=_load_txt_dataset)
    eval_data.parse(eval_dataset_url, use_raw=False)

    task_allocation = {
        "method": "TaskAllocationSimple"
    }

    ll_job = LifelongLearning(estimator,
                              task_definition=None,
                              task_relationship_discovery=None,
                              task_allocation=task_allocation,
                              task_remodeling=None,
                              inference_integrate=None,
                              task_update_decision=None,
                              unseen_task_allocation=None,
                              unseen_sample_recognition=None,
                              unseen_sample_re_recognition=None)

    # Return the evaluation report so the caller can print it.
    return ll_job.evaluate(eval_data, metrics=accuracy)


if __name__ == '__main__':
    print(eval())
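
The metric handed to ll_job.evaluate() is imported from the local accuracy module, which is not shown on this page. As a rough illustration only, a metric compatible with that call is assumed here to be a plain callable that takes ground-truth and predicted label arrays and returns a scalar; the actual accuracy.py in this repository may compute a different score (for example per-class accuracy or mIoU). A minimal hypothetical sketch under that assumption:

import numpy as np

def accuracy(y_true, y_pred, **kwargs):
    """Hypothetical pixel-accuracy metric: fraction of matching labels."""
    y_true = np.asarray(y_true).ravel()
    y_pred = np.asarray(y_pred).ravel()
    return float((y_true == y_pred).mean())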