
SwitchTestCase.cs

using Microsoft.VisualStudio.TestTools.UnitTesting;
using Tensorflow;

namespace TensorFlowNET.UnitTest.control_flow_ops_test
{
    /// <summary>
    /// excerpt of tensorflow/python/ops/control_flow_ops_test.py
    /// </summary>
    [TestClass]
    public class SwitchTestCase : PythonTest
    {
        [Ignore("TODO")]
        [TestMethod]
        public void testResourceReadInLoop()
        {
            //var embedding_matrix = variable_scope.get_variable(
            //    "embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true);

            /*
            Tensor cond(Tensor it, Tensor _)
            {
                return it < 5;
            }
            */

            // TODO: the code below doesn't compile yet
            //(Tensor, Tensor) body(Tensor it, Tensor cost)
            //{
            //    var embedding = embedding_ops.embedding_lookup(embedding_matrix, new int[] { 0 });
            //    cost += math_ops.reduce_sum(embedding);
            //    return (it + 1, cost);
            //}

            //var (_, cost1) = control_flow_ops.while_loop(
            //    cond, body, new[]
            //    {
            //        constant_op.constant(0),
            //        constant_op.constant(0.0)
            //    });

            //with<Session>(this.cached_session(), sess =>
            //{
            //    self.evaluate(variables.global_variables_initializer());
            //    self.assertAllEqual(10.0, self.evaluate(cost1));
            //});
        }
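
        // A minimal sketch of how the TODO body above might be phrased so it
        // compiles, assuming a while_loop overload that takes Tensor[] loop
        // variables and Func<Tensor[], ...> delegates (hypothetical; the
        // actual TensorFlow.NET signature may differ). Kept commented out,
        // like the body above, until such an overload is confirmed:
        //
        //Tensor cond(Tensor[] loopVars)
        //    => loopVars[0] < 5;
        //
        //Tensor[] body(Tensor[] loopVars)
        //{
        //    var embedding = embedding_ops.embedding_lookup(embedding_matrix, new int[] { 0 });
        //    // increment the counter and accumulate the embedding sum
        //    return new[] { loopVars[0] + 1, loopVars[1] + math_ops.reduce_sum(embedding) };
        //}
        //
        //var results = control_flow_ops.while_loop(cond, body, new[]
        //{
        //    constant_op.constant(0),
        //    constant_op.constant(0.0)
        //});
        //var cost1 = results[1];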

        [Ignore("TODO")]
        [TestMethod]
        public void testIndexedSlicesGradientInCondInWhileLoop()
        {
            doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: false);
        }

        [Ignore("TODO")]
        [TestMethod]
        public void testIndexedSlicesGradientInCondInWhileLoopResource()
        {
            doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: true);
        }

        private void doTestIndexedSlicesGradientInCondInWhileLoop(bool use_resource = false)
        {
            //def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False):
            //  embedding_matrix = variable_scope.get_variable(
            //      "embedding_matrix", [5, 5],
            //      initializer=init_ops.random_normal_initializer(),
            //      use_resource=use_resource)

            //  def cond(it, _):
            //    return it < 5

            //  def body(it, cost):
            //    embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
            //    cost = control_flow_ops.cond(
            //        math_ops.equal(it, 3), lambda: math_ops.square(cost),
            //        (lambda: cost + math_ops.reduce_sum(embedding)))
            //    return it + 1, cost

            //  _, cost = control_flow_ops.while_loop(
            //      cond, body, [constant_op.constant(0),
            //                   constant_op.constant(0.0)])

            //  dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
            //  dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
            //                                       dynamic_grads.indices)

            //  embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
            //  static = math_ops.square(
            //      math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
            //      math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
            //  static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
            //  static_grads = math_ops.segment_sum(static_grads.values,
            //                                      static_grads.indices)

            //  with self.cached_session():
            //    self.evaluate(variables.global_variables_initializer())
            //    self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads]))
        }
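
        // The Python body above nests control_flow_ops.cond inside the loop.
        // A minimal sketch of the corresponding C# loop body, assuming a
        // cond(Tensor, Func<Tensor>, Func<Tensor>) overload and Tensor[]
        // loop variables (both hypothetical here), kept commented out like
        // the reference code:
        //
        //Tensor[] body(Tensor[] loopVars)
        //{
        //    var it = loopVars[0];
        //    var cost = loopVars[1];
        //    var embedding = embedding_ops.embedding_lookup(embedding_matrix, new int[] { 0 });
        //    // square the cost on iteration 3, otherwise accumulate the embedding sum
        //    cost = control_flow_ops.cond(
        //        math_ops.equal(it, 3),
        //        () => math_ops.square(cost),
        //        () => cost + math_ops.reduce_sum(embedding));
        //    return new[] { it + 1, cost };
        //}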

        [Ignore("TODO")]
        [TestMethod]
        public void testIndexedSlicesWithShapeGradientInWhileLoop()
        {
            //@test_util.run_v1_only("b/120545219")
            //def testIndexedSlicesWithShapeGradientInWhileLoop(self):
            //  for dtype in [dtypes.float32, dtypes.float64]:
            //    with self.cached_session() as sess:
            //      num_steps = 9

            //      inputs = array_ops.placeholder(dtype=dtype, shape=[num_steps])
            //      initial_outputs = tensor_array_ops.TensorArray(
            //          dtype=dtype, size=num_steps)
            //      initial_i = constant_op.constant(0, dtype=dtypes.int32)

            //      def cond(i, _):
            //        return i < num_steps  # pylint: disable=cell-var-from-loop

            //      def body(i, outputs):
            //        x = array_ops.gather(inputs, i)  # pylint: disable=cell-var-from-loop
            //        outputs = outputs.write(i, x)
            //        return i + 1, outputs

            //      _, outputs = control_flow_ops.while_loop(cond, body,
            //                                               [initial_i, initial_outputs])
            //      outputs = math_ops.reduce_sum(outputs.stack())
            //      r = gradients_impl.gradients([outputs], [inputs])[0]
            //      grad_wr_inputs = ops.convert_to_tensor(r)
            //      o, grad = sess.run([outputs, grad_wr_inputs],
            //                         feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]})
            //      self.assertEquals(o, 20)
            //      self.assertAllEqual(grad, [1] * num_steps)
        }
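
        // Both TensorArray tests (above and below) follow the same
        // gather/write pattern: the loop body reads inputs[i] and writes it
        // into the array, so the summed output equals the input sum and the
        // gradient w.r.t. each input element is 1. A minimal C# sketch of
        // the loop pair, assuming a TensorArray type and while_loop support
        // for it in TensorFlow.NET (hypothetical at the time of this TODO):
        //
        //Tensor cond(Tensor i, TensorArray _)
        //    => i < num_steps;
        //
        //(Tensor, TensorArray) body(Tensor i, TensorArray outputs)
        //{
        //    var x = array_ops.gather(inputs, i);   // read inputs[i]
        //    outputs = outputs.write(i, x);         // store it at position i
        //    return (i + 1, outputs);
        //}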

        [Ignore("TODO")]
        [TestMethod]
        public void testIndexedSlicesWithDynamicShapeGradientInWhileLoop()
        {
            //@test_util.run_v1_only("b/120545219")
            //def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self):
            //  for dtype in [dtypes.float32, dtypes.float64]:
            //    with self.cached_session() as sess:
            //      inputs = array_ops.placeholder(dtype=dtype)
            //      initial_outputs = tensor_array_ops.TensorArray(
            //          dtype=dtype, dynamic_size=True, size=1)
            //      initial_i = constant_op.constant(0, dtype=dtypes.int32)

            //      def cond(i, _):
            //        return i < array_ops.size(inputs)  # pylint: disable=cell-var-from-loop

            //      def body(i, outputs):
            //        x = array_ops.gather(inputs, i)  # pylint: disable=cell-var-from-loop
            //        outputs = outputs.write(i, x)
            //        return i + 1, outputs

            //      _, outputs = control_flow_ops.while_loop(cond, body,
            //                                               [initial_i, initial_outputs])
            //      outputs = math_ops.reduce_sum(outputs.stack())
            //      r = gradients_impl.gradients([outputs], [inputs])[0]
            //      grad_wr_inputs = ops.convert_to_tensor(r)
            //      o, grad = sess.run([outputs, grad_wr_inputs],
            //                         feed_dict={inputs: [1, 3, 2]})
            //      self.assertEquals(o, 6)
            //      self.assertAllEqual(grad, [1] * 3)
        }
    }
}