
SwitchTestCase.cs

using Microsoft.VisualStudio.TestTools.UnitTesting;
using Tensorflow;

namespace TensorFlowNET.UnitTest.control_flow_ops_test
{
    /// <summary>
    /// Excerpt of tensorflow/python/ops/control_flow_ops_test.py
    /// </summary>
    [TestClass]
    public class SwitchTestCase : PythonTest
    {
        [Ignore("TODO")]
        [TestMethod]
        public void testResourceReadInLoop()
        {
            //var embedding_matrix = variable_scope.get_variable(
            //    "embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true);

            Tensor cond(Tensor it, Tensor _)
            {
                return it < 5;
            }

            // TODO: the code below doesn't compile yet
            //(Tensor, Tensor) body(Tensor it, Tensor cost)
            //{
            //    var embedding = embedding_ops.embedding_lookup(embedding_matrix, new int[] { 0 });
            //    cost += math_ops.reduce_sum(embedding);
            //    return (it + 1, cost);
            //}
            //var (_, cost1) = control_flow_ops.while_loop(
            //    cond, body, new[]
            //    {
            //        constant_op.constant(0),
            //        constant_op.constant(0.0)
            //    });
            //with<Session>(this.cached_session(), sess =>
            //{
            //    self.evaluate(variables.global_variables_initializer());
            //    self.assertAllEqual(10.0, self.evaluate(cost1));
            //});
        }
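
        // Hedged sketch (not part of the original port): with embedding_matrix = {{2.0}, {3.0}},
        // embedding_lookup(embedding_matrix, [0]) selects row 0, whose reduce_sum is 2.0, and the
        // while_loop above adds it to cost on each of its 5 iterations, so the test expects
        // cost1 == 10.0. The plain C# below mirrors that arithmetic without relying on any
        // TensorFlow.NET API.
        [TestMethod]
        public void testResourceReadInLoop_ExpectedValueSketch()
        {
            var embedding_matrix = new double[,] { { 2.0 }, { 3.0 } };
            var cost = 0.0;
            for (var it = 0; it < 5; it++)
            {
                // embedding_lookup(embedding_matrix, new[] { 0 }) -> row 0: { 2.0 }
                var embedding_sum = embedding_matrix[0, 0];
                cost += embedding_sum;
            }
            Assert.AreEqual(10.0, cost);
        }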

        [Ignore("TODO")]
        [TestMethod]
        public void testIndexedSlicesGradientInCondInWhileLoop()
        {
            doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: false);
        }

        [Ignore("TODO")]
        [TestMethod]
        public void testIndexedSlicesGradientInCondInWhileLoopResource()
        {
            doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: true);
        }

        private void doTestIndexedSlicesGradientInCondInWhileLoop(bool use_resource = false)
        {
            // Python original, not yet ported:
            //def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False):
            //  embedding_matrix = variable_scope.get_variable(
            //      "embedding_matrix", [5, 5],
            //      initializer=init_ops.random_normal_initializer(),
            //      use_resource=use_resource)

            //  def cond(it, _):
            //    return it < 5

            //  def body(it, cost):
            //    embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
            //    cost = control_flow_ops.cond(
            //        math_ops.equal(it, 3), lambda: math_ops.square(cost),
            //        (lambda: cost + math_ops.reduce_sum(embedding)))
            //    return it + 1, cost

            //  _, cost = control_flow_ops.while_loop(
            //      cond, body, [constant_op.constant(0),
            //                   constant_op.constant(0.0)])

            //  dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
            //  dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
            //                                       dynamic_grads.indices)

            //  embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
            //  static = math_ops.square(
            //      math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
            //      math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
            //  static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
            //  static_grads = math_ops.segment_sum(static_grads.values,
            //                                      static_grads.indices)

            //  with self.cached_session():
            //    self.evaluate(variables.global_variables_initializer())
            //    self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads]))
        }
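
        // Hedged sketch (illustration only, no TensorFlow involved): unrolling the loop in the
        // Python excerpt above with a scalar s standing in for reduce_sum(embedding) shows why
        // the dynamic gradient must match the "static" expression square(s + s + s) + s:
        // iterations 0-2 each add s (cost = 3s), iteration 3 squares (cost = (3s)^2), and
        // iteration 4 adds the final s.
        [TestMethod]
        public void doTestIndexedSlicesGradient_UnrolledLoopSketch()
        {
            var s = 1.5; // stand-in for math_ops.reduce_sum(embedding)
            var cost = 0.0;
            for (var it = 0; it < 5; it++)
            {
                cost = it == 3 ? cost * cost : cost + s;
            }
            var @static = (s + s + s) * (s + s + s) + s;
            Assert.AreEqual(@static, cost, 1e-12);
        }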

        [Ignore("TODO")]
        [TestMethod]
        public void testIndexedSlicesWithShapeGradientInWhileLoop()
        {
            // Python original, not yet ported:
            //@test_util.run_v1_only("b/120545219")
            //def testIndexedSlicesWithShapeGradientInWhileLoop(self):
            //  for dtype in [dtypes.float32, dtypes.float64]:
            //    with self.cached_session() as sess:
            //      num_steps = 9

            //      inputs = array_ops.placeholder(dtype=dtype, shape=[num_steps])
            //      initial_outputs = tensor_array_ops.TensorArray(
            //          dtype=dtype, size=num_steps)
            //      initial_i = constant_op.constant(0, dtype=dtypes.int32)

            //      def cond(i, _):
            //        return i < num_steps  # pylint: disable=cell-var-from-loop

            //      def body(i, outputs):
            //        x = array_ops.gather(inputs, i)  # pylint: disable=cell-var-from-loop
            //        outputs = outputs.write(i, x)
            //        return i + 1, outputs

            //      _, outputs = control_flow_ops.while_loop(cond, body,
            //                                               [initial_i, initial_outputs])
            //      outputs = math_ops.reduce_sum(outputs.stack())
            //      r = gradients_impl.gradients([outputs], [inputs])[0]
            //      grad_wr_inputs = ops.convert_to_tensor(r)
            //      o, grad = sess.run([outputs, grad_wr_inputs],
            //                         feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]})
            //      self.assertEquals(o, 20)
            //      self.assertAllEqual(grad, [1] * num_steps)
        }

        [Ignore("TODO")]
        [TestMethod]
        public void testIndexedSlicesWithDynamicShapeGradientInWhileLoop()
        {
            // Python original, not yet ported:
            //@test_util.run_v1_only("b/120545219")
            //def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self):
            //  for dtype in [dtypes.float32, dtypes.float64]:
            //    with self.cached_session() as sess:
            //      inputs = array_ops.placeholder(dtype=dtype)
            //      initial_outputs = tensor_array_ops.TensorArray(
            //          dtype=dtype, dynamic_size=True, size=1)
            //      initial_i = constant_op.constant(0, dtype=dtypes.int32)

            //      def cond(i, _):
            //        return i < array_ops.size(inputs)  # pylint: disable=cell-var-from-loop

            //      def body(i, outputs):
            //        x = array_ops.gather(inputs, i)  # pylint: disable=cell-var-from-loop
            //        outputs = outputs.write(i, x)
            //        return i + 1, outputs

            //      _, outputs = control_flow_ops.while_loop(cond, body,
            //                                               [initial_i, initial_outputs])
            //      outputs = math_ops.reduce_sum(outputs.stack())
            //      r = gradients_impl.gradients([outputs], [inputs])[0]
            //      grad_wr_inputs = ops.convert_to_tensor(r)
            //      o, grad = sess.run([outputs, grad_wr_inputs],
            //                         feed_dict={inputs: [1, 3, 2]})
            //      self.assertEquals(o, 6)
            //      self.assertAllEqual(grad, [1] * 3)
        }
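
        // Hedged sketch (plain C#, no TensorFlow): in both TensorArray tests above the loop
        // gathers inputs[i] one element at a time, so reduce_sum(outputs.stack()) is just the
        // sum of the feed (20 for the fixed-shape test, 6 for the dynamic one), and the
        // gradient of a plain sum with respect to each input element is 1, which is what
        // assertAllEqual(grad, [1] * n) checks.
        [TestMethod]
        public void testIndexedSlicesGradient_SumAndOnesGradientSketch()
        {
            var inputs = new double[] { 4, 6, 0, 7, 0, 0, 1, 2, 0 };
            var sum = 0.0;
            var grad = new double[inputs.Length];
            for (var i = 0; i < inputs.Length; i++)
            {
                sum += inputs[i]; // forward pass: reduce_sum over the feed
                grad[i] = 1.0;    // backward pass: d(sum)/d(inputs[i]) == 1
            }
            Assert.AreEqual(20.0, sum);
            CollectionAssert.AreEqual(new double[] { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, grad);
        }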
    }
}