import tensorflow as tf
import tensorlayer as tl

# Let TensorFlow allocate GPU memory on demand instead of reserving it all up front.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
# Bottleneck layer: BN -> PReLU -> 1x1 conv -> BN -> PReLU -> 3x3 conv -> dropout,
# then concatenate the input with the new feature maps (dense connectivity).
class BottleNeck(tl.layers.Module):
    def __init__(self, growth_rate, drop_rate):
        super(BottleNeck, self).__init__()
        self.bn1 = tl.layers.BatchNorm()
        self.conv1 = tl.layers.Conv2d(n_filter=4 * growth_rate,
                                      filter_size=(1, 1),
                                      strides=(1, 1),
                                      padding="SAME")
        self.bn2 = tl.layers.BatchNorm()
        self.conv2 = tl.layers.Conv2d(n_filter=growth_rate,
                                      filter_size=(3, 3),
                                      strides=(1, 1),
                                      padding="SAME")
        # tl.layers.Dropout takes the *keep* probability, so convert the drop rate.
        self.dropout = tl.layers.Dropout(keep=1 - drop_rate)
        self.listLayers = [self.bn1,
                           tl.layers.PRelu(channel_shared=True),
                           self.conv1,
                           self.bn2,
                           tl.layers.PRelu(channel_shared=True),
                           self.conv2,
                           self.dropout]

    def forward(self, x):
        y = x
        for layer in self.listLayers:
            y = layer(y)
        # Concatenate the input with the new features along the channel axis.
        y = tf.concat([x, y], axis=-1)
        return y
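# Because of the concatenation above, a BottleNeck maps C input channels to
# C + growth_rate output channels; stacking L of them in a dense block
# therefore yields C + L * growth_rate channels.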
# Dense block: a stack of bottleneck layers, each fed all preceding feature maps.
class DenseBlock(tl.layers.Module):
    def __init__(self, num_layers, growth_rate, drop_rate=0.5):
        super(DenseBlock, self).__init__()
        self.num_layers = num_layers
        self.growth_rate = growth_rate
        self.drop_rate = drop_rate
        self.listLayers = []
        for _ in range(num_layers):
            self.listLayers.append(BottleNeck(growth_rate=self.growth_rate, drop_rate=self.drop_rate))

    def forward(self, x):
        for layer in self.listLayers:
            x = layer(x)
        return x
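# Worked example (DenseNet-121, first block): 64 stem channels + 6 layers * 32
# growth = 256 channels out, which the following transition layer compresses
# to int(0.5 * 256) = 128.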
# Transition layer: a 1x1 conv compresses the channels, then 2x2 stride-2
# pooling halves the spatial resolution between dense blocks.
class TransitionLayer(tl.layers.Module):
    def __init__(self, out_channels):
        super(TransitionLayer, self).__init__()
        self.bn = tl.layers.BatchNorm()
        self.conv = tl.layers.Conv2d(n_filter=out_channels,
                                     filter_size=(1, 1),
                                     strides=(1, 1),
                                     padding="SAME")
        self.pool = tl.layers.MaxPool2d(filter_size=(2, 2),
                                        strides=(2, 2),
                                        padding="SAME")

    def forward(self, inputs):
        x = self.bn(inputs)
        x = tf.nn.relu(x)
        x = self.conv(x)
        x = self.pool(x)
        return x
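# Design note: the original DenseNet paper uses 2x2 average pooling in its
# transitions; this implementation uses max pooling instead, which changes the
# downsampling behaviour but not the output shapes.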
# DenseNet-121/169/201/264 models: stem conv -> 4 dense blocks with 3
# transitions -> global average pooling -> 10-way softmax classifier.
class DenseNet(tl.layers.Module):
    def __init__(self, num_init_features, growth_rate, block_layers, compression_rate, drop_rate):
        super(DenseNet, self).__init__()
        self.conv = tl.layers.Conv2d(n_filter=num_init_features,
                                     filter_size=(7, 7),
                                     strides=(2, 2),
                                     padding="SAME")
        self.bn = tl.layers.BatchNorm()
        self.pool = tl.layers.MaxPool2d(filter_size=(3, 3),
                                        strides=(2, 2),
                                        padding="SAME")
        # Track the running channel count: each dense block adds
        # growth_rate * num_layers channels, each transition compresses it.
        self.num_channels = num_init_features
        self.dense_block_1 = DenseBlock(num_layers=block_layers[0], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[0]
        self.num_channels = compression_rate * self.num_channels
        self.transition_1 = TransitionLayer(out_channels=int(self.num_channels))
        self.dense_block_2 = DenseBlock(num_layers=block_layers[1], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[1]
        self.num_channels = compression_rate * self.num_channels
        self.transition_2 = TransitionLayer(out_channels=int(self.num_channels))
        self.dense_block_3 = DenseBlock(num_layers=block_layers[2], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[2]
        self.num_channels = compression_rate * self.num_channels
        self.transition_3 = TransitionLayer(out_channels=int(self.num_channels))
        self.dense_block_4 = DenseBlock(num_layers=block_layers[3], growth_rate=growth_rate, drop_rate=drop_rate)
        self.avgpool = tl.layers.GlobalMeanPool2d()
        self.fc = tl.layers.Dense(n_units=10, act=tf.nn.softmax)

    def forward(self, inputs):
        x = self.conv(inputs)
        x = self.bn(x)
        x = tf.nn.relu(x)
        x = self.pool(x)
        x = self.dense_block_1(x)
        x = self.transition_1(x)
        x = self.dense_block_2(x)
        x = self.transition_2(x)
        x = self.dense_block_3(x)
        x = self.transition_3(x)
        x = self.dense_block_4(x)
        x = self.avgpool(x)
        x = self.fc(x)
        return x
# DenseNet-100 model: three dense blocks (no fourth block), otherwise the same layout.
class DenseNet_100(tl.layers.Module):
    def __init__(self, num_init_features, growth_rate, block_layers, compression_rate, drop_rate):
        super(DenseNet_100, self).__init__()
        self.conv = tl.layers.Conv2d(n_filter=num_init_features,
                                     filter_size=(7, 7),
                                     strides=(2, 2),
                                     padding="SAME")
        self.bn = tl.layers.BatchNorm()
        self.pool = tl.layers.MaxPool2d(filter_size=(3, 3),
                                        strides=(2, 2),
                                        padding="SAME")
        self.num_channels = num_init_features
        self.dense_block_1 = DenseBlock(num_layers=block_layers[0], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[0]
        self.num_channels = compression_rate * self.num_channels
        self.transition_1 = TransitionLayer(out_channels=int(self.num_channels))
        self.dense_block_2 = DenseBlock(num_layers=block_layers[1], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[1]
        self.num_channels = compression_rate * self.num_channels
        self.transition_2 = TransitionLayer(out_channels=int(self.num_channels))
        self.dense_block_3 = DenseBlock(num_layers=block_layers[2], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[2]
        self.num_channels = compression_rate * self.num_channels
        self.transition_3 = TransitionLayer(out_channels=int(self.num_channels))
        self.avgpool = tl.layers.GlobalMeanPool2d()
        self.fc = tl.layers.Dense(n_units=10, act=tf.nn.softmax)

    def forward(self, inputs):
        x = self.conv(inputs)
        x = self.bn(x)
        x = tf.nn.relu(x)
        x = self.pool(x)
        x = self.dense_block_1(x)
        x = self.transition_1(x)
        x = self.dense_block_2(x)
        x = self.transition_2(x)
        x = self.dense_block_3(x)
        x = self.transition_3(x)
        x = self.avgpool(x)
        x = self.fc(x)
        return x
# Factory: map a model name to its DenseNet configuration.
def densenet(x):
    if x == 'densenet-121':
        return DenseNet(num_init_features=64, growth_rate=32, block_layers=[6, 12, 24, 16], compression_rate=0.5,
                        drop_rate=0.5)
    elif x == 'densenet-169':
        return DenseNet(num_init_features=64, growth_rate=32, block_layers=[6, 12, 32, 32], compression_rate=0.5,
                        drop_rate=0.5)
    elif x == 'densenet-201':
        return DenseNet(num_init_features=64, growth_rate=32, block_layers=[6, 12, 48, 32], compression_rate=0.5,
                        drop_rate=0.5)
    elif x == 'densenet-264':
        return DenseNet(num_init_features=64, growth_rate=32, block_layers=[6, 12, 64, 48], compression_rate=0.5,
                        drop_rate=0.5)
    elif x == 'densenet-100':
        return DenseNet_100(num_init_features=64, growth_rate=12, block_layers=[16, 16, 16], compression_rate=0.5,
                            drop_rate=0.5)
    else:
        raise ValueError("Unknown DenseNet variant: %s" % x)
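
# Minimal usage sketch (illustrative, not part of the original file): build a
# DenseNet-121 and push a dummy NHWC batch through it, assuming eager execution
# and that a tensorlayer Module can be called directly on a tensor. The 10-unit
# head suggests a 10-class dataset such as CIFAR-10.
if __name__ == '__main__':
    model = densenet('densenet-121')
    images = tf.random.uniform((2, 224, 224, 3))  # two dummy 224x224 RGB images
    probs = model(images)  # forward pass; dropout stays active unless switched to eval mode
    print(probs.shape)  # expected: (2, 10)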