You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

ActivationTest.cs 4.3 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788
  1. using Microsoft.VisualStudio.TestTools.UnitTesting;
  2. using System;
  3. using System.Collections.Generic;
  4. using static Tensorflow.Binding;
  5. using Tensorflow.NumPy;
  6. using static Tensorflow.KerasApi;
  7. using Tensorflow;
  8. namespace TensorFlowNET.Keras.UnitTest {
  9. [TestClass]
  10. public class ActivationTest : EagerModeTestBase {
  11. [TestMethod]
  12. public void LeakyReLU () {
  13. var layer = keras.layers.LeakyReLU();
  14. Tensor output = layer.Apply(np.array(-3.0f, -1.0f, 0.0f, 2.0f));
  15. Equal(new[] { -0.9f, -0.3f, 0.0f, 2.0f }, output.ToArray<float>());
  16. }
  17. [TestMethod]
  18. public void ELU () {
  19. Tensors input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  20. Tensor output = keras.layers.ELU().Apply(input);
  21. NDArray expected = new NDArray(new float[] { -0.0950213f, -0.08646648f, -0.06321206f, 0f, 1f, 2f });
  22. Assert.AreEqual(expected.numpy(), output.numpy());
  23. }
  24. [TestMethod]
  25. public void SELU () {
  26. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  27. Tensor output = keras.layers.SELU().Apply(input);
  28. NDArray expected = new NDArray(new float[] { -1.6705688f, -1.5201665f, -1.1113307f, 0f, 1.050701f, 2.101402f });
  29. Assert.AreEqual(expected.numpy(), output.numpy());
  30. }
  31. [TestMethod]
  32. public void Softmax () {
  33. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  34. Tensor output = keras.layers.Softmax(new Axis(-1)).Apply(input);
  35. NDArray expected = new NDArray(new float[] { 0.0042697787f, 0.011606461f, 0.031549633f, 0.085760795f, 0.23312202f, 0.6336913f });
  36. Assert.AreEqual(expected.numpy(), output.numpy());
  37. }
  38. [TestMethod]
  39. public void Softplus () {
  40. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  41. Tensor output = keras.layers.Softplus().Apply(input);
  42. NDArray expected = new NDArray(new float[] { 0.04858733f, 0.12692805f, 0.31326166f, 0.6931472f, 1.3132616f, 2.126928f });
  43. Assert.AreEqual(expected, output.numpy());
  44. }
  45. [TestMethod]
  46. public void Softsign () {
  47. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  48. Tensor output = keras.layers.Softsign().Apply(input);
  49. NDArray expected = new NDArray(new float[] { -0.75f, -0.66666667f, -0.5f, 0f, 0.5f, 0.66666667f });
  50. Assert.AreEqual(expected, output.numpy());
  51. }
  52. [TestMethod]
  53. public void Exponential () {
  54. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  55. Tensor output = keras.layers.Exponential().Apply(input);
  56. NDArray expected = new NDArray(new float[] { 0.049787067f, 0.13533528f, 0.36787945f, 1f, 2.7182817f, 7.389056f });
  57. Assert.AreEqual(expected, output.numpy());
  58. }
  59. [TestMethod]
  60. public void HardSigmoid () {
  61. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  62. Tensor output = keras.layers.HardSigmoid().Apply(input);
  63. // Note, this should be [0, 0.1, 0.3, 0.5, 0.7, 0.9]
  64. // But somehow the second element will have 0.099999994
  65. // Probably because there is an accuracy loss somewhere
  66. NDArray expected = new NDArray(new float[] { 0f, 0.099999994f, 0.3f, 0.5f, 0.7f, 0.9f });
  67. Assert.AreEqual(expected, output.numpy());
  68. }
  69. [TestMethod]
  70. public void Swish () {
  71. Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
  72. Tensor output = keras.layers.Swish().Apply(input);
  73. NDArray expected = new NDArray(new float[] { -0.14227762f, -0.23840584f, -0.26894143f, 0f, 0.7310586f, 1.761594f });
  74. Assert.AreEqual(expected, output.numpy());
  75. }
  76. }
  77. }