
LayersTest.cs

using Microsoft.VisualStudio.TestTools.UnitTesting;
using System.Collections.Generic;
using Tensorflow.NumPy;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;

namespace Tensorflow.Keras.UnitTest.Layers
{
    /// <summary>
    /// https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/keras/layers
    /// </summary>
    [TestClass]
    public class LayersTest : EagerModeTestBase
    {
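        // AveragePooling2D over the 3x3 input with a 2x2 window, stride 1 and "valid" padding
        // yields a 2x2 map of window means: (1+2+4+5)/4 = 3, (2+3+5+6)/4 = 4,
        // (4+5+7+8)/4 = 6, (5+6+8+9)/4 = 7.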
        [TestMethod]
        public void AveragePooling2D()
        {
            var x = tf.constant(new float[,]
            {
                { 1, 2, 3 },
                { 4, 5, 6 },
                { 7, 8, 9 }
            });
            x = tf.reshape(x, (1, 3, 3, 1));
            var avg_pool_2d = keras.layers.AveragePooling2D(pool_size: (2, 2),
                strides: (1, 1), padding: "valid");
            Tensor avg = avg_pool_2d.Apply(x);
            Assert.AreEqual((1, 2, 2, 1), avg.shape);
            Equal(new float[] { 3, 4, 6, 7 }, avg.ToArray<float>());
        }

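        // InputLayer: builds a small Sequential model (InputLayer -> Dense(8)), compiles it,
        // and runs a single fit() on dummy zeros/ones data to exercise the training path.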
        [TestMethod]
        public void InputLayer()
        {
            var model = keras.Sequential(new List<ILayer>
            {
                keras.layers.InputLayer(input_shape: 4),
                keras.layers.Dense(8)
            });
            model.compile(optimizer: keras.optimizers.RMSprop(0.001f),
                loss: keras.losses.MeanSquaredError(),
                metrics: new[] { "accuracy" });
            model.fit(np.zeros((10, 4), dtype: tf.float32), np.ones((10, 8), dtype: tf.float32));
        }

        [TestMethod]
        public void Sequential()
        {
            var model = keras.Sequential();
            model.add(keras.Input(shape: 16));
        }

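        // Functional: builds a model with the functional API (Input -> Dense -> Dense -> Dense);
        // the -1 in the input shape assertion is the unspecified batch dimension.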
        [TestMethod]
        public void Functional()
        {
            var layers = keras.layers;
            var inputs = keras.Input(shape: 784);
            Assert.AreEqual((-1, 784), inputs.shape);
            var dense = layers.Dense(64, activation: keras.activations.Relu);
            var x = dense.Apply(inputs);
            x = layers.Dense(64, activation: keras.activations.Relu).Apply(x);
            var outputs = layers.Dense(10).Apply(x);
            var model = keras.Model(inputs, outputs, name: "mnist_model");
            model.summary();
        }

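        // TensorFlowOpLayer: the raw tensor arithmetic below (the subtraction and reduce_mean on
        // symbolic Keras tensors) is what gets wrapped into TensorFlowOpLayer nodes; together with the
        // Subtract/Add layers it forms a dueling-DQN style combination of the value and advantage streams.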
        /// <summary>
        /// Custom layer test, used in Dueling DQN
        /// </summary>
        [TestMethod, Ignore]
        public void TensorFlowOpLayer()
        {
            var layers = keras.layers;
            var inputs = layers.Input(shape: 24);
            var x = layers.Dense(128, activation: "relu").Apply(inputs);
            var value = layers.Dense(24).Apply(x);
            var adv = layers.Dense(1).Apply(x);
            var mean = adv - tf.reduce_mean(adv, axis: 1, keepdims: true);
            adv = layers.Subtract().Apply((adv, mean));
            var outputs = layers.Add().Apply((value, adv));
            var model = keras.Model(inputs, outputs);
            model.compile(optimizer: keras.optimizers.RMSprop(0.001f),
                loss: keras.losses.MeanSquaredError(),
                metrics: new[] { "acc" });
            model.summary();
            Assert.AreEqual(model.Layers.Count, 8);
            var result = model.predict(tf.constant(np.arange(24).astype(np.float32)[np.newaxis, Slice.All]));
            Assert.AreEqual(result.shape, new Shape(1, 24));
            model.fit(np.arange(24).astype(np.float32)[np.newaxis, Slice.All], np.arange(24).astype(np.float32)[np.newaxis, Slice.All], verbose: 0);
        }

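        // Embedding: maps integer indices in [0, 1000) to 64-dimensional vectors, so a (32, 10)
        // batch of index sequences comes out as a (32, 10, 64) tensor.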
        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding
        /// </summary>
        [TestMethod]
        public void Embedding()
        {
            var model = keras.Sequential();
            var layer = keras.layers.Embedding(1000, 64, input_length: 10);
            model.add(layer);
            var input_array = np.random.randint(1000, size: (32, 10));
            model.compile("rmsprop", "mse", new[] { "accuracy" });
            var output_array = model.predict(input_array);
            Assert.AreEqual((32, 10, 64), output_array.shape);
        }

        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
        /// </summary>
        [TestMethod]
        public void Dense()
        {
            // Create a `Sequential` model and add a Dense layer as the first layer.
            var model = keras.Sequential();
            model.add(keras.Input(shape: 16));
            model.add(keras.layers.Dense(32, activation: keras.activations.Relu));
            // Now the model will take as input arrays of shape (None, 16)
            // and output arrays of shape (None, 32).
            // Note that after the first layer, you don't need to specify
            // the size of the input anymore:
            model.add(keras.layers.Dense(32));
            Assert.AreEqual((-1, 32), model.output_shape);
        }

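        // EinsumDense: with every kernel weight fixed at 0.5 and every bias at 0.03, each of the 4
        // outputs is 0.5 * (sum of the 2 inputs) + 0.03, i.e. 1.53 for row { 1, 2 } and 3.53 for row { 3, 4 }.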
        [TestMethod]
        public void EinsumDense()
        {
            var ed = keras.layers.EinsumDense(
                equation: "...b,bc->...c",
                output_shape: 4,
                bias_axes: "c",
                bias_initializer: tf.constant_initializer(0.03),
                kernel_initializer: tf.constant_initializer(0.5)
            );
            var inp = np.array(new[,] { { 1f, 2f }, { 3f, 4f } });
            var expected_output = np.array(new[,]
            {
                { 1.53f, 1.53f, 1.53f, 1.53f },
                { 3.53f, 3.53f, 3.53f, 3.53f }
            });
            var actual_output = ed.Apply(inp)[0].numpy();
            Assert.AreEqual(expected_output, actual_output);
        }

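        // Resizing: rescales each 32x32 image in the batch to 16x16, leaving the batch and channel dims unchanged.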
        [TestMethod]
        public void Resizing()
        {
            var inputs = tf.random.uniform((10, 32, 32, 3));
            var layer = keras.layers.preprocessing.Resizing(16, 16);
            var output = layer.Apply(inputs);
            Assert.AreEqual((10, 16, 16, 3), output.shape);
        }

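        // LayerNormalization with axis: 1 normalizes each row independently, so every 2-element row
        // maps to roughly { -1, 1 } (slightly smaller in magnitude because of the epsilon term).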
        [TestMethod]
        public void LayerNormalization()
        {
            var inputs = tf.constant(np.arange(10).reshape((5, 2)) * 10, dtype: tf.float32);
            var layer = keras.layers.LayerNormalization(axis: 1);
            Tensor output = layer.Apply(inputs);
            Assert.AreEqual((5, 2), output.shape);
            Assert.IsTrue(output[0].numpy().Equals(new[] { -0.99998f, 0.99998f }));
        }

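        // Normalization: adapt() estimates mean and variance from the data and the layer then outputs
        // (x - mean) / sqrt(variance); e.g. adapt([1..5]) gives mean 3 and variance 2, hence
        // (1 - 3) / sqrt(2) = -1.4142135 for the first element below.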
        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Normalization
        /// </summary>
        [TestMethod]
        public void Normalization()
        {
            // Calculate a global mean and variance by analyzing the dataset in adapt().
            var adapt_data = np.array(new[] { 1f, 2f, 3f, 4f, 5f });
            var input_data = np.array(new[] { 1f, 2f, 3f });
            var layer = tf.keras.layers.Normalization(axis: null);
            layer.adapt(adapt_data);
            var x = layer.Apply(input_data);
            Assert.AreEqual(x.numpy(), new[] { -1.4142135f, -0.70710677f, 0f });

            // Calculate a mean and variance for each index on the last axis.
            adapt_data = np.array(new[,]
            {
                { 0, 7, 4 },
                { 2, 9, 6 },
                { 0, 7, 4 },
                { 2, 9, 6 }
            }, dtype: tf.float32);
            input_data = np.array(new[,] { { 0, 7, 4 } }, dtype: tf.float32);
            layer = tf.keras.layers.Normalization(axis: -1);
            layer.adapt(adapt_data);
            x = layer.Apply(input_data);
            Equal(x.numpy().ToArray<float>(), new[] { -1f, -1f, -1f });

            // Pass the mean and variance directly.
            input_data = np.array(new[,] { { 1f }, { 2f }, { 3f } }, dtype: tf.float32);
            layer = tf.keras.layers.Normalization(mean: 3f, variance: 2f);
            x = layer.Apply(input_data);
            Equal(x.numpy().ToArray<float>(), new[] { -1.4142135f, -0.70710677f, 0f });

            // Use the layer to de-normalize inputs (after adapting the layer).
            adapt_data = np.array(new[,]
            {
                { 0, 7, 4 },
                { 2, 9, 6 },
                { 0, 7, 4 },
                { 2, 9, 6 }
            }, dtype: tf.float32);
            input_data = np.array(new[,] { { 1, 2, 3 } }, dtype: tf.float32);
            layer = tf.keras.layers.Normalization(axis: -1, invert: true);
            layer.adapt(adapt_data);
            x = layer.Apply(input_data);
            Equal(x.numpy().ToArray<float>(), new[] { -2f, -10f, -8f });
        }

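        // CategoryEncoding: "one_hot" turns each scalar index into a length-4 indicator vector,
        // "multi_hot" marks every index that appears in a row, and "count" with count_weights sums
        // the per-index weights into the corresponding category slots.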
        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/CategoryEncoding
        /// </summary>
        [TestMethod]
        public void CategoryEncoding()
        {
            // one-hot
            var inputs = np.array(new[] { 3, 2, 0, 1 });
            var layer = tf.keras.layers.CategoryEncoding(4);
            Tensor output = layer.Apply(inputs);
            Assert.AreEqual((4, 4), output.shape);
            Assert.IsTrue(output[0].numpy().Equals(new[] { 0, 0, 0, 1f }));
            Assert.IsTrue(output[1].numpy().Equals(new[] { 0, 0, 1, 0f }));
            Assert.IsTrue(output[2].numpy().Equals(new[] { 1, 0, 0, 0f }));
            Assert.IsTrue(output[3].numpy().Equals(new[] { 0, 1, 0, 0f }));

            // multi-hot
            inputs = np.array(new[,]
            {
                { 0, 1 },
                { 0, 0 },
                { 1, 2 },
                { 3, 1 }
            });
            layer = tf.keras.layers.CategoryEncoding(4, output_mode: "multi_hot");
            output = layer.Apply(inputs);
            Assert.IsTrue(output[0].numpy().Equals(new[] { 1, 1, 0, 0f }));
            Assert.IsTrue(output[1].numpy().Equals(new[] { 1, 0, 0, 0f }));
            Assert.IsTrue(output[2].numpy().Equals(new[] { 0, 1, 1, 0f }));
            Assert.IsTrue(output[3].numpy().Equals(new[] { 0, 1, 0, 1f }));

            // using weighted inputs in "count" mode
            inputs = np.array(new[,]
            {
                { 0, 1 },
                { 0, 0 },
                { 1, 2 },
                { 3, 1 }
            });
            var weights = np.array(new[,]
            {
                { 0.1f, 0.2f },
                { 0.1f, 0.1f },
                { 0.2f, 0.3f },
                { 0.4f, 0.2f }
            });
            layer = tf.keras.layers.CategoryEncoding(4, output_mode: "count", count_weights: weights);
            output = layer.Apply(inputs);
            Assert.IsTrue(output[0].numpy().Equals(new[] { 0.1f, 0.2f, 0f, 0f }));
            Assert.IsTrue(output[1].numpy().Equals(new[] { 0.2f, 0f, 0f, 0f }));
            Assert.IsTrue(output[2].numpy().Equals(new[] { 0f, 0.2f, 0.3f, 0f }));
            Assert.IsTrue(output[3].numpy().Equals(new[] { 0f, 0.2f, 0f, 0.4f }));
        }
    }
}