You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

ActivationTest.cs 4.4 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107
  1. using Microsoft.VisualStudio.TestTools.UnitTesting;
  2. using Tensorflow.NumPy;
  3. using static Tensorflow.Binding;
  4. using static Tensorflow.KerasApi;
  5. namespace Tensorflow.Keras.UnitTest.Layers
  6. {
  7. [TestClass]
  8. public class ActivationTest : EagerModeTestBase
  9. {
  10. [TestMethod]
  11. public void LeakyReLU()
  12. {
  13. var layer = keras.layers.LeakyReLU();
  14. Tensor output = layer.Apply(np.array(-3.0f, -1.0f, 0.0f, 2.0f));
  15. Equal(new[] { -0.9f, -0.3f, 0.0f, 2.0f }, output.ToArray<float>());
  16. }
  17. [TestMethod]
  18. public void ELU()
  19. {
  20. Tensors input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  21. Tensor output = keras.layers.ELU().Apply(input);
  22. NDArray expected = new NDArray(new float[] { -0.0950213f, -0.08646648f, -0.06321206f, 0f, 1f, 2f });
  23. Assert.AreEqual(expected.numpy(), output.numpy());
  24. }
  25. [TestMethod]
  26. public void SELU()
  27. {
  28. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  29. Tensor output = keras.layers.SELU().Apply(input);
  30. NDArray expected = new NDArray(new float[] { -1.6705688f, -1.5201665f, -1.1113307f, 0f, 1.050701f, 2.101402f });
  31. Assert.AreEqual(expected.numpy(), output.numpy());
  32. }
  33. [TestMethod]
  34. public void Softmax()
  35. {
  36. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  37. Tensor output = keras.layers.Softmax(new Axis(-1)).Apply(input);
  38. var expected = new float[] { 0.0042697787f, 0.011606461f, 0.031549633f, 0.085760795f, 0.23312202f, 0.6336913f };
  39. Assert.IsTrue(Equal(expected, output.ToArray<float>()));
  40. }
  41. [TestMethod]
  42. public void Softplus()
  43. {
  44. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  45. Tensor output = keras.layers.Softplus().Apply(input);
  46. NDArray expected = new NDArray(new float[] { 0.04858733f, 0.12692805f, 0.31326166f, 0.6931472f, 1.3132616f, 2.126928f });
  47. Assert.IsTrue(expected == output.numpy());
  48. }
  49. [TestMethod]
  50. public void Softsign()
  51. {
  52. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  53. Tensor output = keras.layers.Softsign().Apply(input);
  54. NDArray expected = new NDArray(new float[] { -0.75f, -0.66666667f, -0.5f, 0f, 0.5f, 0.66666667f });
  55. Assert.AreEqual(expected, output.numpy());
  56. }
  57. [TestMethod]
  58. public void Exponential()
  59. {
  60. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  61. Tensor output = keras.layers.Exponential().Apply(input);
  62. var expected = new float[] { 0.049787067f, 0.13533528f, 0.36787945f, 1f, 2.7182817f, 7.389056f };
  63. Assert.IsTrue(Equal(expected, output.ToArray<float>()));
  64. }
  65. [TestMethod]
  66. public void HardSigmoid()
  67. {
  68. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  69. Tensor output = keras.layers.HardSigmoid().Apply(input);
  70. // Note, this should be [0, 0.1, 0.3, 0.5, 0.7, 0.9]
  71. // But somehow the second element will have 0.099999994
  72. // Probably because there is an accuracy loss somewhere
  73. NDArray expected = new NDArray(new float[] { 0f, 0.099999994f, 0.3f, 0.5f, 0.7f, 0.9f });
  74. Assert.AreEqual(expected, output.numpy());
  75. }
  76. [TestMethod]
  77. public void Swish()
  78. {
  79. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  80. Tensor output = keras.layers.Swish().Apply(input);
  81. NDArray expected = new NDArray(new float[] { -0.14227762f, -0.23840584f, -0.26894143f, 0f, 0.7310586f, 1.761594f });
  82. Assert.AreEqual(expected, output.numpy());
  83. }
  84. /// <summary>
  85. /// https://www.tensorflow.org/addons/api_docs/python/tfa/activations/mish
  86. /// </summary>
  87. [TestMethod]
  88. public void Mish()
  89. {
  90. var x = tf.constant(new[] { 1.0, 0.0, 1.0 }, dtype: tf.float32);
  91. var output = keras.activations.Mish.Apply(x);
  92. Assert.AreEqual(new[] { 0.86509836f, 0f, 0.86509836f }, output.numpy());
  93. }
  94. }
  95. }