You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

mobilenetv2.py 3.9 kB

5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """MobileNetV2"""
  16. from mindspore import nn
  17. from mindspore.ops import operations as P
  18. def make_divisible(input_x, div_by=8):
  19. return int((input_x + div_by) // div_by)
  20. def _conv_bn(in_channel,
  21. out_channel,
  22. ksize,
  23. stride=1):
  24. """Get a conv2d batchnorm and relu layer."""
  25. return nn.SequentialCell(
  26. [nn.Conv2d(in_channel,
  27. out_channel,
  28. kernel_size=ksize,
  29. stride=stride),
  30. nn.BatchNorm2d(out_channel)])
  31. class InvertedResidual(nn.Cell):
  32. def __init__(self, inp, oup, stride, expend_ratio):
  33. super(InvertedResidual, self).__init__()
  34. self.stride = stride
  35. assert stride in [1, 2]
  36. hidden_dim = int(inp * expend_ratio)
  37. self.use_res_connect = self.stride == 1 and inp == oup
  38. if expend_ratio == 1:
  39. self.conv = nn.SequentialCell([
  40. nn.Conv2d(hidden_dim, hidden_dim, 3, stride, group=hidden_dim),
  41. nn.BatchNorm2d(hidden_dim),
  42. nn.ReLU6(),
  43. nn.Conv2d(hidden_dim, oup, 1, 1),
  44. nn.BatchNorm2d(oup)
  45. ])
  46. else:
  47. self.conv = nn.SequentialCell([
  48. nn.Conv2d(inp, hidden_dim, 1, 1),
  49. nn.BatchNorm2d(hidden_dim),
  50. nn.ReLU6(),
  51. nn.Conv2d(hidden_dim, hidden_dim, 3, stride, group=hidden_dim),
  52. nn.BatchNorm2d(hidden_dim),
  53. nn.ReLU6(),
  54. nn.Conv2d(hidden_dim, oup, 1, 1),
  55. nn.BatchNorm2d(oup)
  56. ])
  57. def construct(self, input_x):
  58. out = self.conv(input_x)
  59. if self.use_res_connect:
  60. out = input_x + out
  61. return out
  62. class MobileNetV2(nn.Cell):
  63. def __init__(self, num_class=1000, input_size=224, width_mul=1.):
  64. super(MobileNetV2, self).__init__()
  65. _ = input_size
  66. block = InvertedResidual
  67. input_channel = 32
  68. last_channel = 1280
  69. inverted_residual_setting = [
  70. [1, 16, 1, 1],
  71. [6, 24, 2, 2],
  72. [6, 32, 3, 2],
  73. [6, 64, 4, 2],
  74. [6, 96, 3, 1],
  75. [6, 160, 3, 2],
  76. [6, 230, 1, 1],
  77. ]
  78. if width_mul > 1.0:
  79. last_channel = make_divisible(last_channel * width_mul)
  80. self.last_channel = last_channel
  81. features = [_conv_bn(3, input_channel, 3, 2)]
  82. for t, c, n, s in inverted_residual_setting:
  83. out_channel = make_divisible(c * width_mul) if t > 1 else c
  84. for i in range(n):
  85. if i == 0:
  86. features.append(block(input_channel, out_channel, s, t))
  87. else:
  88. features.append(block(input_channel, out_channel, 1, t))
  89. input_channel = out_channel
  90. features.append(_conv_bn(input_channel, self.last_channel, 1))
  91. self.features = nn.SequentialCell(features)
  92. self.mean = P.ReduceMean(keep_dims=False)
  93. self.classifier = nn.Dense(self.last_channel, num_class)
  94. def construct(self, input_x):
  95. out = input_x
  96. out = self.features(out)
  97. out = self.mean(out, (2, 3))
  98. out = self.classifier(out)
  99. return out