LayersTest.cs

using Microsoft.VisualStudio.TestTools.UnitTesting;
using NumSharp;
using Tensorflow;
using static Tensorflow.KerasApi;

namespace TensorFlowNET.Keras.UnitTest
{
    /// <summary>
    /// https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/keras/layers
    /// </summary>
    [TestClass]
    public class LayersTest : EagerModeTestBase
    {
        [TestMethod]
        public void Sequential()
        {
            var model = keras.Sequential();
            model.add(keras.Input(shape: 16));
        }

        [TestMethod]
        public void Functional()
        {
            var layers = keras.layers;
            var inputs = keras.Input(shape: 784);
            Assert.AreEqual((-1, 784), inputs.TensorShape);

            var dense = layers.Dense(64, activation: keras.activations.Relu);
            var x = dense.Apply(inputs);
            x = layers.Dense(64, activation: keras.activations.Relu).Apply(x);
            var outputs = layers.Dense(10).Apply(x);

            var model = keras.Model(inputs, outputs, name: "mnist_model");
            model.summary();
        }

        /// <summary>
        /// Custom layer test, used in Dueling DQN.
        /// </summary>
        [TestMethod, Ignore]
        public void FunctionalTest()
        {
            var layers = keras.layers;
            var inputs = layers.Input(shape: 24);
            var x = layers.Dense(128, activation: "relu").Apply(inputs);
            var value = layers.Dense(24).Apply(x);
            var adv = layers.Dense(1).Apply(x);
            var adv_out = adv - Binding.tf.reduce_mean(adv, axis: 1, keepdims: true); // This is where the problem occurs.
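            // The subtraction above follows the dueling-DQN combination
            // Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)): the per-sample mean of the advantage
            // stream is removed, and keepdims: true keeps the (None, 1) shape so adv_out can
            // broadcast against `value` in the Add layer below.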
            var outputs = layers.Add().Apply(new Tensors(adv_out, value));
            var model = keras.Model(inputs, outputs);
            model.summary();
            model.compile(optimizer: keras.optimizers.RMSprop(0.001f),
                loss: keras.losses.MeanSquaredError(),
                metrics: new[] { "acc" });

            // Here we treat adv_out as a single layer, which differs slightly from the Python
            // version below, where the mean (Lambda) and the subtraction (Subtract) are separate layers.
            Assert.AreEqual(6, model.Layers.Count);

            // Python reference code:
            //from tensorflow.keras.layers import Input, Dense, Add, Subtract, Lambda
            //from tensorflow.keras.models import Model
            //from tensorflow.keras.optimizers import RMSprop
            //import tensorflow.keras.backend as K
            //inputs = Input(24)
            //x = Dense(128, activation = "relu")(inputs)
            //value = Dense(24)(x)
            //adv = Dense(1)(x)
            //mean = Lambda(lambda x: K.mean(x, axis = 1, keepdims = True))(adv)
            //adv = Subtract()([adv, mean])
            //outputs = Add()([value, adv])
            //model = Model(inputs, outputs)
            //model.compile(loss = "mse", optimizer = RMSprop(1e-3))
            //model.summary()

            // Python output:
            //Model: "functional_3"
            //__________________________________________________________________________________________________
            //Layer (type)             Output Shape     Param #     Connected to
            //==================================================================================================
            //input_2 (InputLayer)     [(None, 24)]     0
            //__________________________________________________________________________________________________
            //dense_3 (Dense)          (None, 128)      3200        input_2[0][0]
            //__________________________________________________________________________________________________
            //dense_5 (Dense)          (None, 1)        129         dense_3[0][0]
            //__________________________________________________________________________________________________
            //lambda_1 (Lambda)        (None, 1)        0           dense_5[0][0]
            //__________________________________________________________________________________________________
            //dense_4 (Dense)          (None, 24)       3096        dense_3[0][0]
            //__________________________________________________________________________________________________
            //subtract_1 (Subtract)    (None, 1)        0           dense_5[0][0]
            //                                                      lambda_1[0][0]
            //__________________________________________________________________________________________________
            //add_1 (Add)              (None, 24)       0           dense_4[0][0]
            //                                                      subtract_1[0][0]
            //==================================================================================================
            //Total params: 6,425
            //Trainable params: 6,425
            //Non-trainable params: 0
            //__________________________________________________________________________________________________
        }

        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding
        /// </summary>
        [TestMethod, Ignore]
        public void Embedding()
        {
            var model = keras.Sequential();
            var layer = keras.layers.Embedding(7, 2, input_length: 4);
            model.add(layer);
            // The model takes as input an integer matrix of shape (batch, input_length),
            // i.e. (batch, 4) here. The largest integer (i.e. word index) in the input
            // should be no larger than 6 (vocabulary size 7). Each index is mapped to a
            // learned 2-dimensional vector, so model.output_shape == (None, 4, 2), where
            // None is the batch dimension.
            var input_array = np.array(new int[,]
            {
                { 1, 2, 3, 4 },
                { 2, 3, 4, 5 },
                { 3, 4, 5, 6 }
            });
            // model.compile("rmsprop", "mse");
            var output_array = model.predict(input_array);
            Assert.AreEqual((3, 4, 2), output_array.shape);
        }

        /// <summary>
        /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
        /// </summary>
        [TestMethod]
        public void Dense()
        {
            // Create a `Sequential` model and add a Dense layer as the first layer.
            var model = keras.Sequential();
            model.add(keras.Input(shape: 16));
            model.add(keras.layers.Dense(32, activation: keras.activations.Relu));
            // Now the model will take as input arrays of shape (None, 16)
            // and output arrays of shape (None, 32).
            // Note that after the first layer, you don't need to specify
            // the size of the input anymore:
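            // Illustrative note (not asserted by this test): with 16 input features, the first
            // Dense(32) layer above has 16 * 32 + 32 = 544 parameters, and the second Dense(32)
            // added below has 32 * 32 + 32 = 1056 parameters (weights plus biases).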
            model.add(keras.layers.Dense(32));
            Assert.AreEqual((-1, 32), model.output_shape);
        }

        [TestMethod]
        public void SimpleRNN()
        {
        }
    }
}