
test_allreduce_fusion.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.common.api import _executor
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim.momentum import Momentum
from mindspore.parallel import _cost_model_context as cost_model_context
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.train import Model, ParallelMode
from tests.dataset_mock import MindData


class Dataset(MindData):
    """Mock dataset that yields the same (predict, label) pair `length` times."""

    def __init__(self, predict, label, length=3):
        super(Dataset, self).__init__(size=length)
        self.predict = predict
        self.label = label
        self.index = 0
        self.length = length

    def __iter__(self):
        return self

    def __next__(self):
        if self.index >= self.length:
            raise StopIteration
        self.index += 1
        return self.predict, self.label

    def reset(self):
        self.index = 0


class DenseNet1(nn.Cell):
    def __init__(self, has_bias=True, activation='relu'):
        super(DenseNet1, self).__init__()
        self.fc1 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc2 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc3 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc4 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)

    def construct(self, x):
        q = self.fc1(x)
        k = self.fc2(q)
        v = self.fc3(k)
        s = self.fc4(v)
        return s


class DenseNet2(nn.Cell):
    def __init__(self, has_bias=True, activation='relu'):
        super(DenseNet2, self).__init__()
        self.fc1 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc2 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc3 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc4 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc5 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc6 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc7 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)
        self.fc8 = nn.Dense(128, 128, has_bias=has_bias, activation=activation)

    def construct(self, x):
        q = self.fc1(x)
        k = self.fc2(q)
        v = self.fc3(k)
        s = self.fc4(v)
        t = self.fc5(s)
        u = self.fc6(t)
        w = self.fc7(u)
        z = self.fc8(w)
        return z


class SimpleDMLNet(nn.Cell):
    def __init__(self, net1, net2):
        super(SimpleDMLNet, self).__init__()
        self.backbone1 = net1
        self.backbone2 = net2

    def construct(self, x):
        x1 = self.backbone1(x)
        x2 = self.backbone2(x)
        return x1 + x2


def train_common(net):
    """Run a short SEMI_AUTO_PARALLEL training job and return the allreduce fusion dict."""
    batch_size = 32
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    device_num = 4

    context.reset_auto_parallel_context()
    auto_parallel_context().set_enable_all_reduce_fusion(enable_all_reduce_fusion=True)
    context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_num,
                                      parameter_broadcast=False)
    context.set_context(mode=context.GRAPH_MODE)

    predict = Tensor(np.ones([batch_size, 128]), dtype=ms.float32)
    label = Tensor(np.ones([batch_size]), dtype=ms.int32)
    dataset = Dataset(predict, label, 2)

    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss, opt)
    model.train(epoch_size, dataset, dataset_sink_mode=False)

    allreduce_fusion_dict = _executor._get_allreduce_fusion(model._train_network)
    print(allreduce_fusion_dict)
    return allreduce_fusion_dict


def test_allreduce_fusion_parameters():
    cost_model_context.reset_cost_model_context()
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=2)
    algorithm = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_algorithm')
    assert algorithm == 2
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=1)
    algorithm = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_algorithm')
    assert algorithm == 1
    cost_model_context.reset_cost_model_context()
    algorithm = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_algorithm')
    assert algorithm == 0

    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_times=2)
    fusion_times = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_times')
    assert fusion_times == 2

    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_percent=0.2)
    tail_percent = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_tail_percent')
    assert tail_percent == 0.2
    cost_model_context.reset_cost_model_context()
    tail_percent = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_tail_percent')
    assert tail_percent == 0.1

    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_time=0.2)
    tail_time = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_tail_time')
    assert tail_time == 0.2
    cost_model_context.reset_cost_model_context()
    tail_time = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_tail_time')
    assert tail_time == 0.1

    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_allreduce_inherent_time=0.2)
    allreduce_inherent_time = cost_model_context.get_cost_model_context(
        'costmodel_allreduce_fusion_allreduce_inherent_time')
    assert allreduce_inherent_time == 0.2
    cost_model_context.reset_cost_model_context()
    allreduce_inherent_time = cost_model_context.get_cost_model_context(
        'costmodel_allreduce_fusion_allreduce_inherent_time')
    assert allreduce_inherent_time == 0.1

    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_allreduce_bandwidth=0.2)
    allreduce_bandwidth = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_allreduce_bandwidth')
    assert allreduce_bandwidth == 0.2
    cost_model_context.reset_cost_model_context()
    allreduce_bandwidth = cost_model_context.get_cost_model_context('costmodel_allreduce_fusion_allreduce_bandwidth')
    assert allreduce_bandwidth == 0.1

    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_computation_time_parameter=0.2)
    computation_time_parameter = cost_model_context.get_cost_model_context(
        'costmodel_allreduce_fusion_computation_time_parameter')
    assert computation_time_parameter == 0.2
    cost_model_context.reset_cost_model_context()
    computation_time_parameter = cost_model_context.get_cost_model_context(
        'costmodel_allreduce_fusion_computation_time_parameter')
    assert computation_time_parameter == 0.1


def test_allreduce_fusion1():
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=1)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_times=2)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_percent=0.5)
    net = SimpleDMLNet(DenseNet1(has_bias=False, activation=None), DenseNet2(has_bias=False, activation=None))
    allreduce_fusion_dict = train_common(net)
    expect_dict = {'backbone2.fc8.weight': 2,
                   'backbone2.fc7.weight': 2,
                   'backbone2.fc6.weight': 2,
                   'backbone1.fc4.weight': 2,
                   'backbone1.fc3.weight': 2,
                   'backbone1.fc2.weight': 2,
                   'backbone2.fc5.weight': 1,
                   'backbone2.fc4.weight': 1,
                   'backbone2.fc3.weight': 1,
                   'backbone2.fc2.weight': 1,
                   'backbone2.fc1.weight': 1,
                   'backbone1.fc1.weight': 1}
    assert allreduce_fusion_dict == expect_dict
    cost_model_context.reset_cost_model_context()


# Because reset_cost_model_context() is called, costmodel_allreduce_fusion_times falls back to its default of 0,
# so step_allreduce_fusion is bypassed and no fusion dict is produced.
def test_allreduce_fusion2():
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_times=2)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_percent=0.5)
    cost_model_context.reset_cost_model_context()
    net = SimpleDMLNet(DenseNet1(has_bias=False, activation=None), DenseNet2(has_bias=False, activation=None))
    allreduce_fusion_dict = train_common(net)
    expect_dict = {}
    assert allreduce_fusion_dict == expect_dict
    cost_model_context.reset_cost_model_context()


def test_allreduce_fusion3():
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=1)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_times=3)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_percent=0.3333333)
    net = SimpleDMLNet(DenseNet1(has_bias=True, activation='relu'), DenseNet2(has_bias=False, activation='relu'))
    allreduce_fusion_dict = train_common(net)
    expect_dict = {'backbone2.fc8.weight': 3,
                   'backbone2.fc7.weight': 3,
                   'backbone2.fc6.weight': 2,
                   'backbone2.fc5.weight': 2,
                   'backbone2.fc4.weight': 2,
                   'backbone2.fc3.weight': 1,
                   'backbone2.fc2.weight': 1,
                   'backbone2.fc1.weight': 1,
                   'backbone1.fc4.bias': 3,
                   'backbone1.fc4.weight': 3,
                   'backbone1.fc3.bias': 3,
                   'backbone1.fc3.weight': 2,
                   'backbone1.fc2.bias': 2,
                   'backbone1.fc2.weight': 2,
                   'backbone1.fc1.bias': 2,
                   'backbone1.fc1.weight': 2}
    assert allreduce_fusion_dict == expect_dict
    cost_model_context.reset_cost_model_context()


def test_allreduce_fusion4():
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=1)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_times=2)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_percent=0.5)
    net = SimpleDMLNet(DenseNet2(has_bias=False, activation=None), DenseNet2(has_bias=False, activation=None))
    allreduce_fusion_dict = train_common(net)
    expect_dict = {'backbone2.fc8.weight': 2,
                   'backbone2.fc7.weight': 2,
                   'backbone2.fc6.weight': 2,
                   'backbone1.fc8.weight': 2,
                   'backbone1.fc7.weight': 2,
                   'backbone1.fc6.weight': 2,
                   'backbone2.fc5.weight': 1,
                   'backbone2.fc4.weight': 1,
                   'backbone2.fc3.weight': 1,
                   'backbone2.fc2.weight': 1,
                   'backbone2.fc1.weight': 1,
                   'backbone1.fc5.weight': 1,
                   'backbone1.fc4.weight': 1,
                   'backbone1.fc3.weight': 1,
                   'backbone1.fc2.weight': 1,
                   'backbone1.fc1.weight': 1}
    assert allreduce_fusion_dict == expect_dict
    cost_model_context.reset_cost_model_context()


def test_allreduce_fusion5():
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=2)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_time=0.1)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_allreduce_inherent_time=0.05)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_allreduce_bandwidth=0.000001)
    cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_computation_time_parameter=0.0000015)
    net = SimpleDMLNet(DenseNet2(has_bias=False, activation=None), DenseNet2(has_bias=False, activation=None))
    allreduce_fusion_dict = train_common(net)
    expect_dict = {'backbone2.fc8.weight': 3,
                   'backbone2.fc7.weight': 3,
                   'backbone2.fc6.weight': 3,
                   'backbone2.fc5.weight': 3,
                   'backbone2.fc4.weight': 2,
                   'backbone2.fc3.weight': 2,
                   'backbone2.fc2.weight': 1,
                   'backbone2.fc1.weight': 1,
                   'backbone1.fc8.weight': 3,
                   'backbone1.fc7.weight': 3,
                   'backbone1.fc6.weight': 3,
                   'backbone1.fc5.weight': 3,
                   'backbone1.fc4.weight': 2,
                   'backbone1.fc3.weight': 2,
                   'backbone1.fc2.weight': 1,
                   'backbone1.fc1.weight': 1}
    assert allreduce_fusion_dict == expect_dict
    cost_model_context.reset_cost_model_context()