import tensorflow as tf
class BottleNeck(tf.keras.layers.Layer):
    """DenseNet bottleneck unit: BN -> ReLU -> 1x1 conv -> BN -> ReLU -> 3x3 conv
    -> dropout, with the output concatenated onto the input (dense connectivity)."""
    def __init__(self, growth_rate, drop_rate):
        super(BottleNeck, self).__init__()
        self.bn1 = tf.keras.layers.BatchNormalization()
        # The 1x1 "bottleneck" conv reduces computation before the 3x3 conv.
        self.conv1 = tf.keras.layers.Conv2D(filters=4 * growth_rate,
                                            kernel_size=(1, 1),
                                            strides=1,
                                            padding="same")
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv2D(filters=growth_rate,
                                            kernel_size=(3, 3),
                                            strides=1,
                                            padding="same")
        self.dropout = tf.keras.layers.Dropout(rate=drop_rate)
        self.listLayers = [self.bn1,
                           tf.keras.layers.Activation("relu"),
                           self.conv1,
                           self.bn2,
                           tf.keras.layers.Activation("relu"),
                           self.conv2,
                           self.dropout]

    def call(self, x, training=None):
        y = x
        # listLayers is a plain Python list, so iterate it directly (it has
        # no .layers attribute); forward `training` so BatchNorm and Dropout
        # switch between train- and inference-time behavior.
        for layer in self.listLayers:
            y = layer(y, training=training)
        # Dense connectivity: concatenate the input with the new feature maps.
        y = tf.keras.layers.concatenate([x, y], axis=-1)
        return y
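# Quick shape check (an illustrative sketch; the input shape and growth_rate
# below are arbitrary choices, not fixed by the model): a BottleNeck adds
# exactly growth_rate channels to its input.
_bn_demo = BottleNeck(growth_rate=32, drop_rate=0.5)
_bn_out = _bn_demo(tf.random.normal((1, 8, 8, 16)))
assert tuple(_bn_out.shape) == (1, 8, 8, 16 + 32)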
class DenseBlock(tf.keras.layers.Layer):
    """A stack of num_layers BottleNecks; because each one concatenates its
    input, output channels = input channels + num_layers * growth_rate."""
    def __init__(self, num_layers, growth_rate, drop_rate=0.5):
        super(DenseBlock, self).__init__()
        self.num_layers = num_layers
        self.growth_rate = growth_rate
        self.drop_rate = drop_rate
        self.listLayers = []
        for _ in range(num_layers):
            self.listLayers.append(BottleNeck(growth_rate=self.growth_rate,
                                              drop_rate=self.drop_rate))

    def call(self, x, training=None):
        # Same fix as in BottleNeck: iterate the plain list directly and
        # forward the `training` flag.
        for layer in self.listLayers:
            x = layer(x, training=training)
        return x
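# Shape check for a whole block (illustrative values): four bottlenecks with
# growth_rate 32 add 4 * 32 = 128 channels on top of the 16 input channels.
_db_demo = DenseBlock(num_layers=4, growth_rate=32, drop_rate=0.5)
_db_out = _db_demo(tf.random.normal((1, 8, 8, 16)))
assert tuple(_db_out.shape) == (1, 8, 8, 16 + 4 * 32)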
class TransitionLayer(tf.keras.layers.Layer):
    """Transition between dense blocks: BN -> ReLU -> 1x1 conv (channel
    compression) -> 2x2 pooling (halves the spatial resolution). Note the
    original DenseNet paper uses average pooling here; max pooling is a
    variant choice kept from this implementation."""
    def __init__(self, out_channels):
        super(TransitionLayer, self).__init__()
        self.bn = tf.keras.layers.BatchNormalization()
        self.conv = tf.keras.layers.Conv2D(filters=out_channels,
                                           kernel_size=(1, 1),
                                           strides=1,
                                           padding="same")
        self.pool = tf.keras.layers.MaxPool2D(pool_size=(2, 2),
                                              strides=2,
                                              padding="same")

    def call(self, inputs, training=None):
        x = self.bn(inputs, training=training)
        x = tf.keras.activations.relu(x)
        x = self.conv(x)
        x = self.pool(x)
        return x
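# Shape check (illustrative): the transition halves height and width and
# projects to the requested channel count.
_tl_demo = TransitionLayer(out_channels=8)
_tl_out = _tl_demo(tf.random.normal((1, 8, 8, 16)))
assert tuple(_tl_out.shape) == (1, 4, 4, 8)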
class DenseNet(tf.keras.Model):
    """DenseNet backbone: a conv/pool stem, four dense blocks separated by
    three transition layers, then global average pooling and a softmax head."""
    def __init__(self, num_init_features, growth_rate, block_layers, compression_rate, drop_rate):
        super(DenseNet, self).__init__()
        # Stem: 7x7 conv (stride 2) followed by BN/ReLU and a 3x3 max pool.
        self.conv = tf.keras.layers.Conv2D(filters=num_init_features,
                                           kernel_size=(7, 7),
                                           strides=2,
                                           padding="same")
        self.bn = tf.keras.layers.BatchNormalization()
        self.pool = tf.keras.layers.MaxPool2D(pool_size=(3, 3),
                                              strides=2,
                                              padding="same")
        # Track the running channel count: each dense block adds
        # growth_rate * num_layers channels, and each transition layer then
        # compresses the total by compression_rate.
        self.num_channels = num_init_features
        self.dense_block_1 = DenseBlock(num_layers=block_layers[0], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[0]
        self.num_channels = compression_rate * self.num_channels
        self.transition_1 = TransitionLayer(out_channels=int(self.num_channels))
        self.dense_block_2 = DenseBlock(num_layers=block_layers[1], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[1]
        self.num_channels = compression_rate * self.num_channels
        self.transition_2 = TransitionLayer(out_channels=int(self.num_channels))
        self.dense_block_3 = DenseBlock(num_layers=block_layers[2], growth_rate=growth_rate, drop_rate=drop_rate)
        self.num_channels += growth_rate * block_layers[2]
        self.num_channels = compression_rate * self.num_channels
        self.transition_3 = TransitionLayer(out_channels=int(self.num_channels))
        self.dense_block_4 = DenseBlock(num_layers=block_layers[3], growth_rate=growth_rate, drop_rate=drop_rate)
        self.avgpool = tf.keras.layers.GlobalAveragePooling2D()
        # 10 output units for the 10 Fashion-MNIST classes.
        self.fc = tf.keras.layers.Dense(units=10,
                                        activation=tf.keras.activations.softmax)

    def call(self, inputs, training=None):
        x = self.conv(inputs)
        x = self.bn(x, training=training)
        x = tf.keras.activations.relu(x)
        x = self.pool(x)
        x = self.dense_block_1(x, training=training)
        x = self.transition_1(x, training=training)
        x = self.dense_block_2(x, training=training)
        x = self.transition_2(x, training=training)
        x = self.dense_block_3(x, training=training)
        x = self.transition_3(x, training=training)
        x = self.dense_block_4(x, training=training)
        x = self.avgpool(x)
        x = self.fc(x)
        return x
def densenet():
    # A small DenseNet for Fashion-MNIST; for comparison, the paper's
    # DenseNet-121 uses block_layers=[6, 12, 24, 16].
    return DenseNet(num_init_features=64, growth_rate=32,
                    block_layers=[4, 4, 4, 4],
                    compression_rate=0.5, drop_rate=0.5)

mynet = densenet()
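# Optional: materialize the weights and print the architecture (a quick
# sketch; build() only needs the NHWC input shape used for training below).
mynet.build(input_shape=(None, 28, 28, 1))
mynet.summary()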
# Fashion-MNIST: 60,000 training and 10,000 test 28x28 grayscale images in
# 10 classes; add a channel axis and scale pixel values to [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train = x_train.reshape((60000, 28, 28, 1)).astype('float32') / 255
x_test = x_test.reshape((10000, 28, 28, 1)).astype('float32') / 255

# Labels are integer class indices, hence the sparse categorical loss.
mynet.compile(loss='sparse_categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
history = mynet.fit(x_train, y_train,
                    batch_size=64,
                    epochs=5,
                    validation_split=0.2)
test_scores = mynet.evaluate(x_test, y_test, verbose=2)
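# evaluate() returns [loss, accuracy] in the order of the compiled metrics.
print("Test loss:", test_scores[0])
print("Test accuracy:", test_scores[1])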