
LayersTest.cs

using Microsoft.VisualStudio.TestTools.UnitTesting;
using System;
using System.Collections.Generic;
using System.Linq;
using Tensorflow.NumPy;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;

namespace Tensorflow.Keras.UnitTest.Layers
{
    /// <summary>
    /// https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/keras/layers
    /// </summary>
    [TestClass]
    public class LayersTest : EagerModeTestBase
    {
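        /// <summary>
        /// 2x2 average pooling with stride 1 and "valid" padding over a 3x3 input
        /// should produce a 2x2 output of window means.
        /// </summary>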
        [TestMethod]
        public void AveragePooling2D()
        {
            var x = tf.constant(new float[,]
            {
                { 1, 2, 3 },
                { 4, 5, 6 },
                { 7, 8, 9 }
            });
            x = tf.reshape(x, (1, 3, 3, 1));
            var avg_pool_2d = keras.layers.AveragePooling2D(pool_size: (2, 2),
                strides: (1, 1), padding: "valid");
            Tensor avg = avg_pool_2d.Apply(x);
            Assert.AreEqual((1, 2, 2, 1), avg.shape);
            Equal(new float[] { 3, 4, 6, 7 }, avg.ToArray<float>());
        }
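
        /// <summary>
        /// Builds a Sequential model from an explicit InputLayer plus a Dense layer,
        /// then compiles and fits it on dummy data.
        /// </summary>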
        [TestMethod]
        public void InputLayer()
        {
            var model = keras.Sequential(new List<ILayer>
            {
                keras.layers.InputLayer(input_shape: 4),
                keras.layers.Dense(8)
            });
            model.compile(optimizer: keras.optimizers.RMSprop(0.001f),
                loss: keras.losses.MeanSquaredError(),
                metrics: new[] { "accuracy" });
            model.fit(np.zeros((10, 4), dtype: tf.float32), np.ones((10, 8), dtype: tf.float32));
        }
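
        /// <summary>
        /// An Input of shape 16 can be added to an empty Sequential model.
        /// </summary>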
        [TestMethod]
        public void Sequential()
        {
            var model = keras.Sequential();
            model.add(keras.Input(shape: 16));
        }
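
        /// <summary>
        /// Functional API: builds an MNIST-style model (784 -> 64 -> 64 -> 10)
        /// and prints its summary.
        /// </summary>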
        [TestMethod]
        public void Functional()
        {
            var layers = keras.layers;
            var inputs = keras.Input(shape: 784);
            Assert.AreEqual((-1, 784), inputs.shape);
            var dense = layers.Dense(64, activation: keras.activations.Relu);
            var x = dense.Apply(inputs);
            x = layers.Dense(64, activation: keras.activations.Relu).Apply(x);
            var outputs = layers.Dense(10).Apply(x);
            var model = keras.Model(inputs, outputs, name: "mnist_model");
            model.summary();
        }

        /// <summary>
        /// Custom layer test, used in Dueling DQN
        /// </summary>
        [TestMethod, Ignore]
        public void TensorFlowOpLayer()
        {
            var layers = keras.layers;
            var inputs = layers.Input(shape: 24);
            var x = layers.Dense(128, activation: "relu").Apply(inputs);
            var value = layers.Dense(24).Apply(x);
            var adv = layers.Dense(1).Apply(x);
            var mean = adv - tf.reduce_mean(adv, axis: 1, keepdims: true);
            adv = layers.Subtract().Apply((adv, mean));
            var outputs = layers.Add().Apply((value, adv));
            var model = keras.Model(inputs, outputs);
            model.compile(optimizer: keras.optimizers.RMSprop(0.001f),
                loss: keras.losses.MeanSquaredError(),
                metrics: new[] { "acc" });
            model.summary();
            Assert.AreEqual(model.Layers.Count, 8);
            var result = model.predict(tf.constant(np.arange(24).astype(np.float32)[np.newaxis, Slice.All]));
            Assert.AreEqual(result.shape, new Shape(1, 24));
            model.fit(np.arange(24).astype(np.float32)[np.newaxis, Slice.All], np.arange(24).astype(np.float32)[np.newaxis, Slice.All], verbose: 0);
        }

        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding
        /// </summary>
        [TestMethod]
        public void Embedding()
        {
            var model = keras.Sequential();
            var layer = keras.layers.Embedding(1000, 64, input_length: 10);
            model.add(layer);
            var input_array = np.random.randint(1000, size: (32, 10));
            model.compile("rmsprop", "mse", new[] { "accuracy" });
            var output_array = model.predict(input_array);
            Assert.AreEqual((32, 10, 64), output_array.shape);
        }
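
        /// <summary>
        /// Gradients propagate through an Embedding layer when the model is fit.
        /// </summary>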
        [TestMethod]
        public void EmbeddingGrad()
        {
            var inputs = keras.layers.Input(shape: new[] { 32, 10 });
            var outputs = keras.layers.Embedding(1000, 64, input_length: 10).Apply(inputs);
            var model = keras.Model(inputs: inputs, outputs: outputs);
            var input_array = np.random.randint(1000, size: (1, 32, 10));
            var output_array = np.random.random(size: (1, 32, 10, 64));
            model.compile("rmsprop", "mse", new[] { "accuracy" });
            model.fit(input_array, output_array);
        }

        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
        /// </summary>
        [TestMethod]
        public void Dense()
        {
            // Create a `Sequential` model and add a Dense layer as the first layer.
            var model = keras.Sequential();
            model.add(keras.Input(shape: 16));
            model.add(keras.layers.Dense(32, activation: keras.activations.Relu));
            // Now the model will take as input arrays of shape (None, 16)
            // and output arrays of shape (None, 32).
            // Note that after the first layer, you don't need to specify
            // the size of the input anymore:
            model.add(keras.layers.Dense(32));
            Assert.AreEqual((-1, 32), model.output_shape);
        }
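
        /// <summary>
        /// EinsumDense with constant initializers produces a deterministic output
        /// that is compared element-wise against the expected values.
        /// </summary>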
        [TestMethod]
        public void EinsumDense()
        {
            var ed = keras.layers.EinsumDense(
                equation: "...b,bc->...c",
                output_shape: 4,
                bias_axes: "c",
                bias_initializer: tf.constant_initializer(0.03),
                kernel_initializer: tf.constant_initializer(0.5)
            );
            var inp = np.array(new[,] { { 1f, 2f }, { 3f, 4f } });
            var expected_output = np.array(new[,] { { 1.53f, 1.53f, 1.53f, 1.53f },
                                                    { 3.53f, 3.53f, 3.53f, 3.53f } });
            var actual_output = ed.Apply(inp)[0].numpy();
            Assert.AreEqual(expected_output, actual_output);
        }
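
        /// <summary>
        /// The preprocessing Resizing layer rescales a batch of 32x32 images to 16x16.
        /// </summary>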
        [TestMethod]
        public void Resizing()
        {
            var inputs = tf.random.uniform((10, 32, 32, 3));
            var layer = keras.layers.preprocessing.Resizing(16, 16);
            var output = layer.Apply(inputs);
            Assert.AreEqual((10, 16, 16, 3), output.shape);
        }
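
        /// <summary>
        /// LayerNormalization: checks output shape and values, the beta/gamma weights,
        /// and that normalized outputs have roughly zero mean and unit variance.
        /// </summary>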
        [TestMethod]
        public void LayerNormalization()
        {
            var inputs = tf.constant(np.arange(10).reshape((5, 2)) * 10, dtype: tf.float32);
            var layer = keras.layers.LayerNormalization(axis: 1);
            Tensor output = layer.Apply(inputs);
            Assert.AreEqual((5, 2), output.shape);
            Assert.IsTrue(output[0].numpy().Equals(new[] { -0.99998f, 0.99998f }));
            // test_layernorm_weights
            Assert.AreEqual(len(layer.TrainableWeights), 2);
            Assert.AreEqual(len(layer.Weights), 2);
            var beta = layer.Weights.Where(x => x.Name.StartsWith("beta")).Single();
            var gamma = layer.Weights.Where(x => x.Name.StartsWith("gamma")).Single();
            // correctness_test
            layer = keras.layers.LayerNormalization(axis: -1, epsilon: (float)1e-12);
            var x = np.random.normal(loc: 5.0f, scale: 10.0f, size: (1000, 2, 2, 2)).astype(tf.float32);
            output = layer.Apply(x);
            var y = (output - beta.numpy()) / gamma.numpy();
            var y_mean = np.mean(y.numpy());
            var y_std = np.sqrt(np.sum(np.power(y.numpy() - np.mean(y.numpy()), 2)) / 8000);
            Assert.IsTrue(tf.greater(np.array(0.1f), tf.abs(y_std - 1.0)).ToArray<bool>()[0]);
            Assert.IsTrue(tf.greater(np.array(0.1f), tf.abs(y_mean)).ToArray<bool>()[0]);
        }

        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Normalization
        /// </summary>
        [TestMethod]
        public void Normalization()
        {
            // Calculate a global mean and variance by analyzing the dataset in adapt().
            var adapt_data = np.array(new[] { 1f, 2f, 3f, 4f, 5f });
            var input_data = np.array(new[] { 1f, 2f, 3f });
            var layer = tf.keras.layers.Normalization(axis: null);
            layer.adapt(adapt_data);
            var x = layer.Apply(input_data);
            Assert.AreEqual(x.numpy(), new[] { -1.4142135f, -0.70710677f, 0f });
            // Calculate a mean and variance for each index on the last axis.
            adapt_data = np.array(new[,]
            {
                { 0, 7, 4 },
                { 2, 9, 6 },
                { 0, 7, 4 },
                { 2, 9, 6 }
            }, dtype: tf.float32);
            input_data = np.array(new[,] { { 0, 7, 4 } }, dtype: tf.float32);
            layer = tf.keras.layers.Normalization(axis: -1);
            layer.adapt(adapt_data);
            x = layer.Apply(input_data);
            Equal(x.numpy().ToArray<float>(), new[] { -1f, -1f, -1f });
            // Pass the mean and variance directly.
            input_data = np.array(new[,] { { 1f }, { 2f }, { 3f } }, dtype: tf.float32);
            layer = tf.keras.layers.Normalization(mean: 3f, variance: 2f);
            x = layer.Apply(input_data);
            Equal(x.numpy().ToArray<float>(), new[] { -1.4142135f, -0.70710677f, 0f });
            // Use the layer to de-normalize inputs (after adapting the layer).
            adapt_data = np.array(new[,]
            {
                { 0, 7, 4 },
                { 2, 9, 6 },
                { 0, 7, 4 },
                { 2, 9, 6 }
            }, dtype: tf.float32);
            input_data = np.array(new[,] { { 1, 2, 3 } }, dtype: tf.float32);
            layer = tf.keras.layers.Normalization(axis: -1, invert: true);
            layer.adapt(adapt_data);
            x = layer.Apply(input_data);
            Equal(x.numpy().ToArray<float>(), new[] { -2f, -10f, -8f });
        }

        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/CategoryEncoding
        /// </summary>
        [TestMethod]
        public void CategoryEncoding()
        {
            // one-hot
            var inputs = np.array(new[] { 3, 2, 0, 1 });
            var layer = tf.keras.layers.CategoryEncoding(4);
            Tensor output = layer.Apply(inputs);
            Assert.AreEqual((4, 4), output.shape);
            Assert.IsTrue(output[0].numpy().Equals(new[] { 0, 0, 0, 1f }));
            Assert.IsTrue(output[1].numpy().Equals(new[] { 0, 0, 1, 0f }));
            Assert.IsTrue(output[2].numpy().Equals(new[] { 1, 0, 0, 0f }));
            Assert.IsTrue(output[3].numpy().Equals(new[] { 0, 1, 0, 0f }));
            // multi-hot
            inputs = np.array(new[,]
            {
                { 0, 1 },
                { 0, 0 },
                { 1, 2 },
                { 3, 1 }
            });
            layer = tf.keras.layers.CategoryEncoding(4, output_mode: "multi_hot");
            output = layer.Apply(inputs);
            Assert.IsTrue(output[0].numpy().Equals(new[] { 1, 1, 0, 0f }));
            Assert.IsTrue(output[1].numpy().Equals(new[] { 1, 0, 0, 0f }));
            Assert.IsTrue(output[2].numpy().Equals(new[] { 0, 1, 1, 0f }));
            Assert.IsTrue(output[3].numpy().Equals(new[] { 0, 1, 0, 1f }));
            // using weighted inputs in "count" mode
            inputs = np.array(new[,]
            {
                { 0, 1 },
                { 0, 0 },
                { 1, 2 },
                { 3, 1 }
            });
            var weights = np.array(new[,]
            {
                { 0.1f, 0.2f },
                { 0.1f, 0.1f },
                { 0.2f, 0.3f },
                { 0.4f, 0.2f }
            });
            layer = tf.keras.layers.CategoryEncoding(4, output_mode: "count", count_weights: weights);
            output = layer.Apply(inputs);
            Assert.IsTrue(output[0].numpy().Equals(new[] { 0.1f, 0.2f, 0f, 0f }));
            Assert.IsTrue(output[1].numpy().Equals(new[] { 0.2f, 0f, 0f, 0f }));
            Assert.IsTrue(output[2].numpy().Equals(new[] { 0f, 0.2f, 0.3f, 0f }));
            Assert.IsTrue(output[3].numpy().Equals(new[] { 0f, 0.2f, 0f, 0.4f }));
        }
    }
}