
train.py

# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from sedna.common.config import Context
from sedna.core.incremental_learning import IncrementalLearning

from interface import Estimator
from dataset import ImgDataset


def main():
    # base_model_url points to the low-accuracy base model.
    base_model_url = Context.get_parameters("base_model_url")
    # model_url is the checkpoint path the trained model is saved to;
    # it is used by estimator.save, not during training itself.
    trained_ckpt_url = Context.get_parameters("model_url")

    # Read training parameters from the deployment config.
    input_shape = int(Context.get_parameters("input_shape"))
    epochs = int(Context.get_parameters("epochs"))
    batch_size = int(Context.get_parameters("batch_size"))
    num_parallel_workers = int(Context.get_parameters("num_parallel_workers"))
    print("num_parallel_workers=" + str(num_parallel_workers))

    # Load the datasets: the train/ and val/ splits sit next to the
    # directory of the original dataset index.
    dataset_root = os.path.dirname(Context.get_parameters("ORIGINAL_DATASET_URL"))
    train_dataset_url = dataset_root + "/train"
    valid_dataset_url = dataset_root + "/val"
    if train_dataset_url:
        print("train_dataset_url: " + train_dataset_url)
    else:
        print("train_dataset_url: NULL")
    if valid_dataset_url:
        print("valid_dataset_url: " + valid_dataset_url)
    else:
        print("valid_dataset_url: NULL")

    train_data = ImgDataset(data_type="train").parse(
        path=train_dataset_url,
        train=True,
        image_shape=input_shape,
        batch_size=batch_size,
        num_parallel_workers=num_parallel_workers)
    valid_data = ImgDataset(data_type="eval").parse(
        path=valid_dataset_url,
        train=False,
        image_shape=input_shape,
        batch_size=batch_size,
        num_parallel_workers=num_parallel_workers)

    incremental_instance = IncrementalLearning(estimator=Estimator)
    return incremental_instance.train(train_data=train_data,
                                      base_model_url=base_model_url,
                                      trained_ckpt_url=trained_ckpt_url,
                                      valid_data=valid_data,
                                      epochs=epochs)


if __name__ == "__main__":
    main()
    print("train_phase_done")
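
The parameters above are resolved through Context.get_parameters, which in a Sedna deployment is typically backed by environment variables injected from the job configuration. The sketch below shows one way to run the script locally; it assumes Context.get_parameters falls back to environment variables, and the file name launch_train.py as well as every path and value are placeholders, not the settings of the real deployment.

# launch_train.py -- hypothetical local launcher for the train.py above.
# Assumes Context.get_parameters() resolves the named parameters from
# environment variables; all paths and values below are placeholders.
import os

os.environ.setdefault("base_model_url", "/models/base_model.ckpt")    # low-accuracy base model
os.environ.setdefault("model_url", "/models/trained_model.ckpt")      # where the trained checkpoint is written
os.environ.setdefault("input_shape", "352")
os.environ.setdefault("epochs", "3")
os.environ.setdefault("batch_size", "32")
os.environ.setdefault("num_parallel_workers", "4")
# train/ and val/ are expected next to the directory of this index file.
os.environ.setdefault("ORIGINAL_DATASET_URL", "/data/dataset/index.txt")

from train import main

if __name__ == "__main__":
    main()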