
GradientEagerTest.cs

using Microsoft.VisualStudio.TestTools.UnitTesting;
using System;
using System.Linq;
using Tensorflow;
using Tensorflow.UnitTest;
using static Tensorflow.Binding;

namespace TensorFlowNET.UnitTest.Gradient
{
    [TestClass]
    public class GradientEagerTest : EagerModeTestBase
    {
        [TestMethod]
        public void ConstantSquare()
        {
            // Calculate the gradient of w * w
            // by automatic differentiation in eager mode
            // in TensorFlow.NET 2.x, which is under intensive development.
            var w = tf.constant(1.5f);
            using var tape = tf.GradientTape();
            tape.watch(w);
            var loss = w * w;
            var grad = tape.gradient(loss, w);
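            // d(w * w)/dw = 2w = 2 * 1.5 = 3.0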
            Assert.AreEqual((float)grad, 3.0f);
        }

        /// <summary>
        /// Calculate the gradient of w * w * w
        /// (higher-order gradients).
        /// </summary>
        [TestMethod]
        public void HighGradient()
        {
            var x = tf.Variable(1.0f);
            using var tape1 = tf.GradientTape();
            using var tape2 = tf.GradientTape();
            var y = x * x * x;
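            // dy/dx = 3x^2 = 3 and d2y/dx2 = 6x = 6 at x = 1.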
            tape2.Dispose();
            var dy_dx = tape2.gradient(y, x);
            Assert.AreEqual((float)dy_dx, 3.0f);
            tape1.Dispose();
            var d2y_d2x = tape1.gradient(dy_dx, x);
            Assert.AreEqual((float)d2y_d2x, 6.0f);
        }

        [TestMethod]
        public void ConstantMultiply()
        {
            var x = tf.ones((2, 2));
            using var tape = tf.GradientTape();
            tape.watch(x);
            var y = tf.reduce_sum(x);
            var z = tf.multiply(y, y);
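            // y = sum(x) = 4 and z = y * y, so dz/dx is 2y = 8 for every element of x.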
            var dz_dx = tape.gradient(z, x);
            var expected = new float[] { 8.0f, 8.0f, 8.0f, 8.0f };
            Assert.IsTrue(Enumerable.SequenceEqual(dz_dx.ToArray<float>(), expected));
        }

        [TestMethod]
        public void PersistentTape()
        {
            var x = tf.ones((2, 2));
            using var tape = tf.GradientTape(persistent: true);
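            // persistent: true allows this tape to be queried for gradients more than once below.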
            tape.watch(x);
            var y = tf.reduce_sum(x);
            var z = tf.multiply(y, y);
            tape.Dispose();
            var dz_dx = tape.gradient(z, x);
            var expected = new float[] { 8.0f, 8.0f, 8.0f, 8.0f };
            Assert.IsTrue(Enumerable.SequenceEqual(dz_dx.ToArray<float>(), expected));
            var dz_dy = tape.gradient(z, y);
            Assert.AreEqual((float)dz_dy, 8.0f);
        }

        [TestMethod]
        public void ConditionalMultiply()
        {
            Func<Tensor, int, Tensor> func = (x, y) =>
            {
                Tensor output = tf.constant(1.0f);
                foreach (var i in range(y))
                {
                    if (i > 1)
                        output = tf.multiply(output, x);
                }
                return output;
            };

            Func<Tensor, int, Tensor> grad = (x, y) =>
            {
                using var tape = tf.GradientTape();
                tape.watch(x);
                var output = func(x, y);
                var g = tape.gradient(output, x);
                return g;
            };

            var input = tf.constant(2.0f);
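            // For y = 4, only i = 2 and i = 3 pass the i > 1 check, so func computes x * x;
            // the gradient is then 2x = 4.0 at x = 2.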
            var result = grad(input, 4);
            Assert.AreEqual((float)result, 4.0f);
        }
    }
}