
train.py 5.0 kB

# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import tensorflow as tf

from sedna.datasources import TxtDataParse
from sedna.common.config import Context, BaseConfig
from sedna.core.incremental_learning import IncrementalLearning

from interface import Estimator


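# Rebase a relative dataset path onto the directory of the original dataset
# index supplied via 'original_dataset_url'; passed to TxtDataParse below as
# its path-resolution hook.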
def _load_txt_dataset(dataset_url):
    # use original dataset url,
    # see https://github.com/kubeedge/sedna/issues/35
    original_dataset_url = Context.get_parameters('original_dataset_url')
    return os.path.join(os.path.dirname(original_dataset_url), dataset_url)


def main():
    tf.set_random_seed(22)

    class_names = Context.get_parameters("class_names")

    # load dataset.
    train_dataset_url = BaseConfig.train_dataset_url
    train_data = TxtDataParse(data_type="train", func=_load_txt_dataset)
    train_data.parse(train_dataset_url, use_raw=True)

    # read parameters from deployment config.
    obj_threshold = Context.get_parameters("obj_threshold")
    nms_threshold = Context.get_parameters("nms_threshold")
    input_shape = Context.get_parameters("input_shape")
    epochs = Context.get_parameters('epochs')
    batch_size = Context.get_parameters('batch_size')

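    # TensorFlow flags for the underlying model; defaults are taken from the
    # deployment config values read above.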
    tf.flags.DEFINE_string('train_url', default=BaseConfig.model_url,
                           help='train url for model')
    tf.flags.DEFINE_string('log_url', default=None, help='log url for model')
    tf.flags.DEFINE_string('checkpoint_url', default=None,
                           help='checkpoint url for model')
    tf.flags.DEFINE_string('model_name', default=None,
                           help='name of the model')
    tf.flags.DEFINE_list('class_names', default=class_names.split(','),
                         # e.g. 'helmet,helmet-on,person,helmet-off'
                         help='label names for the training datasets')
    tf.flags.DEFINE_list('input_shape',
                         default=[int(x) for x in input_shape.split(',')],
                         help='input shape')  # e.g. [352, 640]
    tf.flags.DEFINE_integer('max_epochs', default=epochs,
                            help='training number of epochs')
    tf.flags.DEFINE_integer('batch_size', default=batch_size,
                            help='training batch size')
    tf.flags.DEFINE_boolean('load_imagenet_weights', default=False,
                            help='whether to load imagenet pretrained weights')
    tf.flags.DEFINE_string('inference_device',
                           default='GPU',
                           help='which type of device is used to do inference,'
                                ' only CPU, GPU or 310D')
    tf.flags.DEFINE_boolean('copy_to_local', default=True,
                            help='whether to copy data to local storage first')
    tf.flags.DEFINE_integer('num_gpus', default=1, help='number of gpus to use')
    tf.flags.DEFINE_boolean('finetuning', default=False,
                            help='whether to finetune from an existing model')
    tf.flags.DEFINE_boolean('label_changed', default=False,
                            help='whether the number of labels has changed')
    tf.flags.DEFINE_string('learning_rate', default='0.001',
                           help='learning rate used by the optimizer')
    tf.flags.DEFINE_string('obj_threshold', default=obj_threshold,
                           help='object confidence threshold')
    tf.flags.DEFINE_string('nms_threshold', default=nms_threshold,
                           help='nms threshold')
    tf.flags.DEFINE_string('net_type', default='resnet18',
                           help='backbone type: resnet18 or resnet18_nas')
    tf.flags.DEFINE_string('nas_sequence', default='64_1-2111-2-1112',
                           help='architecture sequence used with resnet18_nas')
    tf.flags.DEFINE_string('deploy_model_format', default=None,
                           help='the format for the converted model')
    tf.flags.DEFINE_string('result_url', default=None,
                           help='result url for training')

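    # Wrap the user-defined Estimator in Sedna's incremental-learning job and
    # start training with the dataset and hyper-parameters gathered above.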
    incremental_instance = IncrementalLearning(estimator=Estimator)
    return incremental_instance.train(train_data=train_data, epochs=epochs,
                                      batch_size=batch_size,
                                      class_names=class_names,
                                      input_shape=input_shape,
                                      obj_threshold=obj_threshold,
                                      nms_threshold=nms_threshold)


if __name__ == '__main__':
    main()
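
For context, `IncrementalLearning(estimator=Estimator)` delegates all model-specific work to the `Estimator` class imported from the example's `interface.py`, which is not shown here. The sketch below is a minimal sketch of that contract, assuming the hook names Sedna estimators usually expose; the method names and signatures are inferred from how `incremental_instance.train()` is called above, not copied from this repository.

```python
class Estimator:
    """Sketch of the estimator contract (assumed, not from interface.py)."""

    def __init__(self, **kwargs):
        # Build (or lazily create) the underlying network here.
        self.model = None

    def train(self, train_data, valid_data=None, **kwargs):
        # kwargs carries epochs, batch_size, class_names, input_shape,
        # obj_threshold and nms_threshold as passed to incremental_instance.train().
        raise NotImplementedError

    def predict(self, data, **kwargs):
        raise NotImplementedError

    def evaluate(self, data, **kwargs):
        raise NotImplementedError

    def save(self, model_path):
        # Persist weights so the incremental-learning job can publish them.
        raise NotImplementedError

    def load(self, model_url):
        raise NotImplementedError
```

In the real example, `interface.py` appears to implement these hooks around a TensorFlow 1.x object detector; the sketch only illustrates which hooks the incremental-learning job is expected to call.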