You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_datasets_voc.py 6.1 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ==============================================================================
  15. import mindspore.dataset as ds
  16. import mindspore.dataset.transforms.vision.c_transforms as vision
  17. DATA_DIR = "../data/dataset/testVOC2012"
  18. IMAGE_SHAPE = [2268, 2268, 2268, 2268, 642, 607, 561, 596, 612, 2268]
  19. TARGET_SHAPE = [680, 680, 680, 680, 642, 607, 561, 596, 612, 680]
  20. def test_voc_segmentation():
  21. data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", decode=True, shuffle=False)
  22. num = 0
  23. for item in data1.create_dict_iterator():
  24. assert item["image"].shape[0] == IMAGE_SHAPE[num]
  25. assert item["target"].shape[0] == TARGET_SHAPE[num]
  26. num += 1
  27. assert num == 10
  28. def test_voc_detection():
  29. data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
  30. num = 0
  31. count = [0, 0, 0, 0, 0, 0]
  32. for item in data1.create_dict_iterator():
  33. assert item["image"].shape[0] == IMAGE_SHAPE[num]
  34. for label in item["label"]:
  35. count[label[0]] += 1
  36. num += 1
  37. assert num == 9
  38. assert count == [3, 2, 1, 2, 4, 3]
  39. def test_voc_class_index():
  40. class_index = {'car': 0, 'cat': 1, 'train': 5}
  41. data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", class_indexing=class_index, decode=True)
  42. class_index1 = data1.get_class_indexing()
  43. assert (class_index1 == {'car': 0, 'cat': 1, 'train': 5})
  44. data1 = data1.shuffle(4)
  45. class_index2 = data1.get_class_indexing()
  46. assert (class_index2 == {'car': 0, 'cat': 1, 'train': 5})
  47. num = 0
  48. count = [0, 0, 0, 0, 0, 0]
  49. for item in data1.create_dict_iterator():
  50. for label in item["label"]:
  51. count[label[0]] += 1
  52. assert label[0] in (0, 1, 5)
  53. num += 1
  54. assert num == 6
  55. assert count == [3, 2, 0, 0, 0, 3]
  56. def test_voc_get_class_indexing():
  57. data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True)
  58. class_index1 = data1.get_class_indexing()
  59. assert (class_index1 == {'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5})
  60. data1 = data1.shuffle(4)
  61. class_index2 = data1.get_class_indexing()
  62. assert (class_index2 == {'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5})
  63. num = 0
  64. count = [0, 0, 0, 0, 0, 0]
  65. for item in data1.create_dict_iterator():
  66. for label in item["label"]:
  67. count[label[0]] += 1
  68. assert label[0] in (0, 1, 2, 3, 4, 5)
  69. num += 1
  70. assert num == 9
  71. assert count == [3, 2, 1, 2, 4, 3]
  72. def test_case_0():
  73. data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", decode=True)
  74. resize_op = vision.Resize((224, 224))
  75. data1 = data1.map(input_columns=["image"], operations=resize_op)
  76. data1 = data1.map(input_columns=["target"], operations=resize_op)
  77. repeat_num = 4
  78. data1 = data1.repeat(repeat_num)
  79. batch_size = 2
  80. data1 = data1.batch(batch_size, drop_remainder=True)
  81. num = 0
  82. for _ in data1.create_dict_iterator():
  83. num += 1
  84. assert num == 20
  85. def test_case_1():
  86. data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True)
  87. resize_op = vision.Resize((224, 224))
  88. data1 = data1.map(input_columns=["image"], operations=resize_op)
  89. repeat_num = 4
  90. data1 = data1.repeat(repeat_num)
  91. batch_size = 2
  92. data1 = data1.batch(batch_size, drop_remainder=True, pad_info={})
  93. num = 0
  94. for _ in data1.create_dict_iterator():
  95. num += 1
  96. assert num == 18
  97. def test_case_2():
  98. data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", decode=True)
  99. sizes = [0.5, 0.5]
  100. randomize = False
  101. dataset1, dataset2 = data1.split(sizes=sizes, randomize=randomize)
  102. num_iter = 0
  103. for _ in dataset1.create_dict_iterator():
  104. num_iter += 1
  105. assert num_iter == 5
  106. num_iter = 0
  107. for _ in dataset2.create_dict_iterator():
  108. num_iter += 1
  109. assert num_iter == 5
  110. def test_voc_exception():
  111. try:
  112. data1 = ds.VOCDataset(DATA_DIR, task="InvalidTask", mode="train", decode=True)
  113. for _ in data1.create_dict_iterator():
  114. pass
  115. assert False
  116. except ValueError:
  117. pass
  118. try:
  119. data2 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", class_indexing={"cat": 0}, decode=True)
  120. for _ in data2.create_dict_iterator():
  121. pass
  122. assert False
  123. except ValueError:
  124. pass
  125. try:
  126. data3 = ds.VOCDataset(DATA_DIR, task="Detection", mode="notexist", decode=True)
  127. for _ in data3.create_dict_iterator():
  128. pass
  129. assert False
  130. except ValueError:
  131. pass
  132. try:
  133. data4 = ds.VOCDataset(DATA_DIR, task="Detection", mode="xmlnotexist", decode=True)
  134. for _ in data4.create_dict_iterator():
  135. pass
  136. assert False
  137. except RuntimeError:
  138. pass
  139. try:
  140. data5 = ds.VOCDataset(DATA_DIR, task="Detection", mode="invalidxml", decode=True)
  141. for _ in data5.create_dict_iterator():
  142. pass
  143. assert False
  144. except RuntimeError:
  145. pass
  146. try:
  147. data6 = ds.VOCDataset(DATA_DIR, task="Detection", mode="xmlnoobject", decode=True)
  148. for _ in data6.create_dict_iterator():
  149. pass
  150. assert False
  151. except RuntimeError:
  152. pass
  153. if __name__ == '__main__':
  154. test_voc_segmentation()
  155. test_voc_detection()
  156. test_voc_class_index()
  157. test_voc_get_class_indexing()
  158. test_case_0()
  159. test_case_1()
  160. test_case_2()
  161. test_voc_exception()