You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

SwitchTestCase.cs 7.2 kB

(file listing: lines 1–162)
  1. using Microsoft.VisualStudio.TestTools.UnitTesting;
  2. using Tensorflow;
  3. namespace TensorFlowNET.UnitTest.control_flow_ops_test
  4. {
  5. /// <summary>
  6. /// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py
  7. /// </summary>
  8. [TestClass]
  9. public class SwitchTestCase : PythonTest
  10. {
  11. [Ignore("TODO")]
  12. [TestMethod]
  13. public void testResourceReadInLoop()
  14. {
  15. //def testResourceReadInLoop(self):
  16. // embedding_matrix = variable_scope.get_variable(
  17. // "embedding_matrix", initializer=[[2.0], [3.0]], use_resource=True)
  18. //
  19. // def cond(it, _):
  20. // return it < 5
  21. //
  22. // def body(it, cost):
  23. // embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
  24. // cost += math_ops.reduce_sum(embedding)
  25. // return it + 1, cost
  26. //
  27. // _, cost = control_flow_ops.while_loop(
  28. // cond, body, [constant_op.constant(0),
  29. // constant_op.constant(0.0)])
  30. // with self.cached_session():
  31. // self.evaluate(variables.global_variables_initializer())
  32. // self.assertAllEqual(10.0, self.evaluate(cost))
  33. }
  34. [Ignore("TODO")]
  35. [TestMethod]
  36. public void testIndexedSlicesGradientInCondInWhileLoop()
  37. {
  38. doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: false);
  39. }
  40. [Ignore("TODO")]
  41. [TestMethod]
  42. public void testIndexedSlicesGradientInCondInWhileLoopResource()
  43. {
  44. doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: true);
  45. }
  46. private void doTestIndexedSlicesGradientInCondInWhileLoop(bool use_resource= false)
  47. {
  48. //def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False):
  49. // embedding_matrix = variable_scope.get_variable(
  50. // "embedding_matrix", [5, 5],
  51. // initializer=init_ops.random_normal_initializer(),
  52. // use_resource=use_resource)
  53. // def cond(it, _):
  54. // return it < 5
  55. // def body(it, cost):
  56. // embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
  57. // cost = control_flow_ops.cond(
  58. // math_ops.equal(it, 3), lambda: math_ops.square(cost),
  59. // (lambda: cost + math_ops.reduce_sum(embedding)))
  60. // return it + 1, cost
  61. // _, cost = control_flow_ops.while_loop(
  62. // cond, body, [constant_op.constant(0),
  63. // constant_op.constant(0.0)])
  64. // dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
  65. // dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
  66. // dynamic_grads.indices)
  67. // embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
  68. // static = math_ops.square(
  69. // math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
  70. // math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
  71. // static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
  72. // static_grads = math_ops.segment_sum(static_grads.values,
  73. // static_grads.indices)
  74. // with self.cached_session():
  75. // self.evaluate(variables.global_variables_initializer())
  76. // self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads]))
  77. }
  78. [Ignore("TODO")]
  79. [TestMethod]
  80. public void testIndexedSlicesWithShapeGradientInWhileLoop()
  81. {
  82. //@test_util.run_v1_only("b/120545219")
  83. //def testIndexedSlicesWithShapeGradientInWhileLoop(self):
  84. // for dtype in [dtypes.float32, dtypes.float64]:
  85. // with self.cached_session() as sess:
  86. // num_steps = 9
  87. // inputs = array_ops.placeholder(dtype=dtype, shape=[num_steps])
  88. // initial_outputs = tensor_array_ops.TensorArray(
  89. // dtype=dtype, size=num_steps)
  90. // initial_i = constant_op.constant(0, dtype=dtypes.int32)
  91. // def cond(i, _):
  92. // return i < num_steps # pylint: disable=cell-var-from-loop
  93. // def body(i, outputs):
  94. // x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop
  95. // outputs = outputs.write(i, x)
  96. // return i + 1, outputs
  97. // _, outputs = control_flow_ops.while_loop(cond, body,
  98. // [initial_i, initial_outputs])
  99. // outputs = math_ops.reduce_sum(outputs.stack())
  100. // r = gradients_impl.gradients([outputs], [inputs])[0]
  101. // grad_wr_inputs = ops.convert_to_tensor(r)
  102. // o, grad = sess.run([outputs, grad_wr_inputs],
  103. // feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]})
  104. // self.assertEquals(o, 20)
  105. // self.assertAllEqual(grad, [1] * num_steps)
  106. }
  107. [Ignore("TODO")]
  108. [TestMethod]
  109. public void testIndexedSlicesWithDynamicShapeGradientInWhileLoop()
  110. {
  111. //@test_util.run_v1_only("b/120545219")
  112. //def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self):
  113. // for dtype in [dtypes.float32, dtypes.float64]:
  114. // with self.cached_session() as sess:
  115. // inputs = array_ops.placeholder(dtype=dtype)
  116. // initial_outputs = tensor_array_ops.TensorArray(
  117. // dtype=dtype, dynamic_size=True, size=1)
  118. // initial_i = constant_op.constant(0, dtype=dtypes.int32)
  119. // def cond(i, _):
  120. // return i < array_ops.size(inputs) # pylint: disable=cell-var-from-loop
  121. // def body(i, outputs):
  122. // x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop
  123. // outputs = outputs.write(i, x)
  124. // return i + 1, outputs
  125. // _, outputs = control_flow_ops.while_loop(cond, body,
  126. // [initial_i, initial_outputs])
  127. // outputs = math_ops.reduce_sum(outputs.stack())
  128. // r = gradients_impl.gradients([outputs], [inputs])[0]
  129. // grad_wr_inputs = ops.convert_to_tensor(r)
  130. // o, grad = sess.run([outputs, grad_wr_inputs],
  131. // feed_dict={inputs: [1, 3, 2]})
  132. // self.assertEquals(o, 6)
  133. // self.assertAllEqual(grad, [1] * 3)
  134. }
  135. }
  136. }

tensorflow框架的.NET版本,提供了丰富的特性和API,可以借此很方便地在.NET平台下搭建深度学习训练与推理流程。