You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

GradientEagerTest.cs 3.3 kB

5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106
  1. using Microsoft.VisualStudio.TestTools.UnitTesting;
  2. using System;
  3. using System.Linq;
  4. using Tensorflow;
  5. using static Tensorflow.Binding;
  6. namespace TensorFlowNET.UnitTest.Gradient
  7. {
  8. [TestClass]
  9. public class GradientEagerTest : PythonTest
  10. {
  11. [TestMethod]
  12. public void ConstantSquare()
  13. {
  14. // Calcute the gradient of w * w
  15. // by Automatic Differentiation in Eager mode
  16. // in tensorflow.net 2.x that is in development intensively
  17. var w = tf.constant(1.5f);
  18. using var tape = tf.GradientTape();
  19. tape.watch(w);
  20. var loss = w * w;
  21. var grad = tape.gradient(loss, w);
  22. Assert.AreEqual((float)grad, 3.0f);
  23. }
  24. /// <summary>
  25. /// Calcute the gradient of w * w * w
  26. /// 高阶梯度
  27. /// </summary>
  28. [TestMethod]
  29. public void HighGradient()
  30. {
  31. var x = tf.Variable(1.0f);
  32. using var tape1 = tf.GradientTape();
  33. using var tape2 = tf.GradientTape();
  34. var y = x * x * x;
  35. tape2.Dispose();
  36. var dy_dx = tape2.gradient(y, x);
  37. Assert.AreEqual((float)dy_dx, 3.0f);
  38. tape1.Dispose();
  39. var d2y_d2x = tape1.gradient(dy_dx, x);
  40. Assert.AreEqual((float)d2y_d2x, 6.0f);
  41. }
  42. [TestMethod]
  43. public void ConstantMultiply()
  44. {
  45. var x = tf.ones((2, 2));
  46. using var tape = tf.GradientTape();
  47. tape.watch(x);
  48. var y = tf.reduce_sum(x);
  49. var z = tf.multiply(y, y);
  50. var dz_dx = tape.gradient(z, x);
  51. var expected = new float[] { 8.0f, 8.0f, 8.0f, 8.0f };
  52. Assert.IsTrue(Enumerable.SequenceEqual(dz_dx.ToArray<float>(), expected));
  53. }
  54. [TestMethod]
  55. public void PersistentTape()
  56. {
  57. var x = tf.ones((2, 2));
  58. using var tape = tf.GradientTape(persistent: true);
  59. tape.watch(x);
  60. var y = tf.reduce_sum(x);
  61. var z = tf.multiply(y, y);
  62. tape.Dispose();
  63. var dz_dx = tape.gradient(z, x);
  64. var expected = new float[] { 8.0f, 8.0f, 8.0f, 8.0f };
  65. Assert.IsTrue(Enumerable.SequenceEqual(dz_dx.ToArray<float>(), expected));
  66. var dz_dy = tape.gradient(z, y);
  67. Assert.AreEqual((float)dz_dy, 8.0f);
  68. }
  69. [TestMethod]
  70. public void ConditionalMultiply()
  71. {
  72. Func<Tensor, int, Tensor> func = (x, y) =>
  73. {
  74. Tensor output = tf.constant(1.0f);
  75. foreach (var i in range(y))
  76. {
  77. if (i > 1)
  78. output = tf.multiply(output, x);
  79. }
  80. return output;
  81. };
  82. Func<Tensor, int, Tensor> grad = (x, y) =>
  83. {
  84. using var tape = tf.GradientTape();
  85. tape.watch(x);
  86. var output = func(x, y);
  87. var grad = tape.gradient(output, x);
  88. return grad;
  89. };
  90. var x = tf.constant(2.0f);
  91. var result = grad(x, 4);
  92. Assert.AreEqual((float)result, 4.0f);
  93. }
  94. }
  95. }