
Attempting to use uninitialized value v1 #172

tags/v0.8.0
Oceania2018, 6 years ago
commit 9bc0c86b0d
16 changed files with 48 additions and 44 deletions
 1. +20 -20  src/TensorFlowNET.Core/Gradients/gradients_impl.py.cs
 2.  +4  -3  src/TensorFlowNET.Core/Graphs/Graph.cs
 3.  +1  -0  src/TensorFlowNET.Core/ITensorOrOperation.cs
 4.  +2  -2  src/TensorFlowNET.Core/Operations/Operation.cs
 5.  +1  -1  src/TensorFlowNET.Core/Sessions/_FetchHandler.cs
 6.  +1  -1  src/TensorFlowNET.Core/Tensors/Tensor.cs
 7.  +1  -1  src/TensorFlowNET.Core/Train/Optimizer.cs
 8.  +1  -1  src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs
 9.  +2  -2  src/TensorFlowNET.Core/Train/Saving/saveable_object_util.py.cs
10.  +3  -3  src/TensorFlowNET.Core/Variables/RefVariable.cs
11.  +1  -1  src/TensorFlowNET.Core/Variables/gen_state_ops.py.cs
12.  +1  -1  src/TensorFlowNET.Core/ops.name_scope.cs
13.  +3  -1  src/TensorFlowNET.Core/ops.py.cs
14.  +2  -1  src/TensorFlowNET.Core/tf.cs
15.  +2  -2  test/TensorFlowNET.UnitTest/GraphTest.cs
16.  +3  -4  test/TensorFlowNET.UnitTest/VariableTest.cs

+20 -20  src/TensorFlowNET.Core/Gradients/gradients_impl.py.cs

@@ -95,9 +95,9 @@ namespace Tensorflow
 {
     // 'ready' handles the case where one output gradient relies on
     // another output's gradient.
-    if (!pending_count.ContainsKey(op.Name))
-        pending_count[op.Name] = 0;
-    bool ready = pending_count[op.Name] == 0;
+    if (!pending_count.ContainsKey(op.name))
+        pending_count[op.name] = 0;
+    bool ready = pending_count[op.name] == 0;
     if (ready && !to_ops_set.Contains(op) && reachable_to_ops.Contains(op))
     {
         to_ops_set.Add(op);
@@ -131,7 +131,7 @@ namespace Tensorflow
 // for ops that do not have gradients.
 var grad_fn = ops.get_gradient_function(op);

-Python.with<ops.name_scope>(new ops.name_scope(op.Name + "_grad"), scope1 =>
+Python.with<ops.name_scope>(new ops.name_scope(op.name + "_grad"), scope1 =>
 {
     string name1 = scope1;
     if (grad_fn != null)
@@ -193,12 +193,12 @@ namespace Tensorflow
 {
     foreach (var x in _NonEagerInputs(op, xs))
     {
-        if (!pending_count.ContainsKey(x.op.Name))
-            pending_count[x.op.Name] = 0;
+        if (!pending_count.ContainsKey(x.op.name))
+            pending_count[x.op.name] = 0;

-        pending_count[x.op.Name] -= 1;
+        pending_count[x.op.name] -= 1;

-        var ready = pending_count[x.op.Name] == 0;
+        var ready = pending_count[x.op.name] == 0;

         if (loop_state != null && !ready)
         {
@@ -281,10 +281,10 @@ namespace Tensorflow
 bool is_stop_op = true;
 foreach (var inp in _NonEagerInputs(op, xs))
 {
-    if (!pending_count.ContainsKey(inp.op.Name))
-        pending_count[inp.op.Name] = 0;
+    if (!pending_count.ContainsKey(inp.op.name))
+        pending_count[inp.op.name] = 0;

-    if (pending_count[inp.op.Name] > 0)
+    if (pending_count[inp.op.name] > 0)
     {
         is_stop_op = false;
         break;
@@ -300,17 +300,17 @@ namespace Tensorflow
 private static Tensor _GetGrad(Dictionary<string, Tensor[][]> grads, Tensor t)
 {
     var op = t.op;
-    if (!grads.ContainsKey(op.Name))
+    if (!grads.ContainsKey(op.name))
         return null;
-    Tensor[][] op_grads = grads[op.Name];
+    Tensor[][] op_grads = grads[op.name];
     var t_grad = op_grads[t.value_index];
     return t_grad[0];
 }

 private static Tensor[][] _GetGrads(Dictionary<string, Tensor[][]> grads, Operation op)
 {
-    if (grads.ContainsKey(op.Name))
-        return grads[op.Name];
+    if (grads.ContainsKey(op.name))
+        return grads[op.name];
     else
         return op.outputs.Select(x => new Tensor[0]).ToArray();
 }
@@ -324,11 +324,11 @@ namespace Tensorflow
 private static void _SetGrad(Dictionary<string, Tensor[][]> grads, Tensor t, Tensor grad)
 {
     var op = t.op;
-    Tensor[][] op_grads = grads.ContainsKey(op.Name) ? grads[op.Name] : null;
+    Tensor[][] op_grads = grads.ContainsKey(op.name) ? grads[op.name] : null;
     if (op_grads == null)
     {
         op_grads = op.outputs.Select(x => new Tensor[1]).ToArray();
-        grads[op.Name] = op_grads;
+        grads[op.name] = op_grads;
     }
     var t_grads = op_grads[t.value_index];
     t_grads[0] = grad;
@@ -421,10 +421,10 @@ namespace Tensorflow
 {
     if (between_ops.Contains(x.op))
     {
-        if (!pending_count.ContainsKey(x.op.Name))
-            pending_count[x.op.Name] = 0;
+        if (!pending_count.ContainsKey(x.op.name))
+            pending_count[x.op.name] = 0;

-        pending_count[x.op.Name] += 1;
+        pending_count[x.op.name] += 1;
     }
 }
}
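Note: every change in this file applies the same guard of defaulting a missing pending_count entry to zero before reading it. A minimal sketch of that recurring pattern as a stand-alone helper (hypothetical, not part of this commit):

using System.Collections.Generic;

namespace Tensorflow
{
    // Hypothetical helper, not part of this commit: it expresses the
    // "ContainsKey, else default to 0" guard used above in one place.
    internal static class PendingCountExtensions
    {
        public static int GetOrZero(this Dictionary<string, int> counts, string key)
            => counts.TryGetValue(key, out var value) ? value : 0;
    }
}

With such a helper, `bool ready = pending_count.GetOrZero(op.name) == 0;` would replace the three-line guard shown in each hunk.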


+4 -3  src/TensorFlowNET.Core/Graphs/Graph.cs

@@ -150,8 +150,9 @@ namespace Tensorflow
 _create_op_helper(op, true);

 Console.Write($"create_op: {op_type} '{node_def.Name}'");
-Console.Write($", inputs: {(inputs.Length == 0 ? "empty" : String.Join(",", inputs.Select(x => x.name)))}");
-Console.Write($", outputs: {(op.outputs.Length == 0 ? "empty" : String.Join(",", op.outputs.Select(x => x.name)))}");
+Console.Write($", inputs: {(inputs.Length == 0 ? "empty" : String.Join(", ", inputs.Select(x => x.name)))}");
+Console.Write($", control_inputs: {(control_inputs.Length == 0 ? "empty" : String.Join(", ", control_inputs.Select(x => x.name)))}");
+Console.Write($", outputs: {(op.outputs.Length == 0 ? "empty" : String.Join(", ", op.outputs.Select(x => x.name)))}");
 Console.WriteLine();

 return op;
@@ -182,7 +183,7 @@ namespace Tensorflow
 }
 else if (tensor_or_op is Operation)
 {
-    return !_unfetchable_ops.Contains((tensor_or_op as Operation).Name);
+    return !_unfetchable_ops.Contains((tensor_or_op as Operation).name);
 }

 return false;


+1 -0  src/TensorFlowNET.Core/ITensorOrOperation.cs

@@ -12,5 +12,6 @@ namespace Tensorflow
 {
     string Device { get; }
     Operation op { get; }
+    string name { get; }
 }
}
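The new `name` member on ITensorOrOperation is what allows the `Name` to `name` renames elsewhere in this commit to be consumed through the interface, whether the target is a Tensor or an Operation. A small illustrative consumer (hypothetical helper, relying only on the interface members shown above):

using System;
using System.Collections.Generic;

namespace Tensorflow
{
    internal static class FetchDebug
    {
        // Hypothetical helper: uses only ITensorOrOperation.name and .Device,
        // so tensors and operations can be logged uniformly.
        public static void Print(IEnumerable<ITensorOrOperation> fetches)
        {
            foreach (var fetch in fetches)
                Console.WriteLine($"fetch '{fetch.name}' on device '{fetch.Device}'");
        }
    }
}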

+2 -2  src/TensorFlowNET.Core/Operations/Operation.cs

@@ -20,7 +20,7 @@ namespace Tensorflow

 private Status status = new Status();

-public string Name => c_api.StringPiece(c_api.TF_OperationName(_handle));
+public string name => c_api.StringPiece(c_api.TF_OperationName(_handle));
 public string OpType => c_api.StringPiece(c_api.TF_OperationOpType(_handle));
 public string Device => c_api.StringPiece(c_api.TF_OperationDevice(_handle));
@@ -210,7 +210,7 @@ namespace Tensorflow

 public override string ToString()
 {
-    return _handle == IntPtr.Zero ? "tf.Operation Undefined" : $"tf.Operation '{Name}' type={OpType}";
+    return _handle == IntPtr.Zero ? "tf.Operation Undefined" : $"tf.Operation '{name}' type={OpType}";
 }

 public static implicit operator Operation(IntPtr handle) => new Operation(handle);


+1 -1  src/TensorFlowNET.Core/Sessions/_FetchHandler.cs

@@ -73,7 +73,7 @@ namespace Tensorflow
 {
     if (!graph.is_fetchable(op))
     {
-        throw new Exception($"Operation {op.Name} has been marked as not fetchable.");
+        throw new Exception($"Operation {op.name} has been marked as not fetchable.");
     }
 }




+1 -1  src/TensorFlowNET.Core/Tensors/Tensor.cs

@@ -25,7 +25,7 @@ namespace Tensorflow
 /// <summary>
 /// The string name of this tensor.
 /// </summary>
-public string name => $"{(op == null ? "Operation was not named" : $"{op.Name}:{value_index}")}";
+public string name => $"{(op == null ? "Operation was not named" : $"{op.name}:{value_index}")}";

 public int value_index { get; }




+1 -1  src/TensorFlowNET.Core/Train/Optimizer.cs

@@ -97,7 +97,7 @@ namespace Tensorflow
 if (grad == null)
     continue;

-var scope_name = var.op.Name;
+var scope_name = var.op.name;
 Python.with<ops.name_scope>(new ops.name_scope("update_" + scope_name), scope2 =>
 {
     update_ops.Add(processor.update_op(this, grad));


+1 -1  src/TensorFlowNET.Core/Train/Saving/BaseSaverBuilder.cs

@@ -116,7 +116,7 @@ namespace Tensorflow
 {
     FilenameTensorName = filename_tensor.name,
     SaveTensorName = save_tensor.name,
-    RestoreOpName = restore_op.Name,
+    RestoreOpName = restore_op.name,
     MaxToKeep = max_to_keep,
     Sharded = sharded,
     KeepCheckpointEveryNHours = keep_checkpoint_every_n_hours,


+2 -2  src/TensorFlowNET.Core/Train/Saving/saveable_object_util.py.cs

@@ -84,9 +84,9 @@ namespace Tensorflow
 }

 if (var.op.type == "ReadVariableOp")
-    name = var.op.inputs[0].op.Name;
+    name = var.op.inputs[0].op.name;
 else
-    name = var.op.Name;
+    name = var.op.name;

 if (names_to_saveables.ContainsKey(name))
     throw new ValueError($"At least two variables have the same name: {name}");


+3 -3  src/TensorFlowNET.Core/Variables/RefVariable.cs

@@ -143,11 +143,11 @@ namespace Tensorflow
 private Tensor _safe_initial_value_from_tensor(Tensor tensor, Dictionary<string, Operation> op_cache)
 {
     var op = tensor.op;
-    var new_op = op_cache.ContainsKey(op.Name) ? op_cache[op.Name] : null;
+    var new_op = op_cache.ContainsKey(op.name) ? op_cache[op.name] : null;
     if (new_op == null)
     {
         new_op = _safe_initial_value_from_op(op, op_cache);
-        op_cache[op.Name] = new_op;
+        op_cache[op.name] = new_op;
     }
     return new_op.outputs[tensor.value_index];
 }
@@ -185,7 +185,7 @@ namespace Tensorflow
 /// A `Tensor` that will hold the new value of this variable after
 /// the assignment has completed.
 /// </returns>
-public ITensorOrOperation assign(Tensor value, bool use_locking = false, string name = "", bool read_value = true)
+public ITensorOrOperation assign(object value, bool use_locking = false, string name = "", bool read_value = true)
 {
     var assign = gen_state_ops.assign(_variable, value, use_locking: use_locking, name: name);
     if (read_value)


+1 -1  src/TensorFlowNET.Core/Variables/gen_state_ops.py.cs

@@ -47,7 +47,7 @@ namespace Tensorflow
 /// <param name="validate_shape"></param>
 /// <param name="use_locking"></param>
 /// <param name="name"></param>
-public static Tensor assign(Tensor tensor, Tensor value,
+public static Tensor assign(Tensor tensor, object value,
     bool validate_shape = true,
     bool use_locking = true,
     string name = "")


+1 -1  src/TensorFlowNET.Core/ops.name_scope.cs

@@ -46,7 +46,7 @@ namespace Tensorflow
 public void Dispose()
 {
     var g = get_default_graph();
-    Console.WriteLine($"name_scope: {g._name_stack} -> {old_stack}");
+    // Console.WriteLine($"name_scope: {g._name_stack} -> {old_stack}");
     g._name_stack = old_stack;
 }




+3 -1  src/TensorFlowNET.Core/ops.py.cs

@@ -319,7 +319,7 @@ namespace Tensorflow

 return (oper, out_grads) =>
 {
-    Console.WriteLine($"get_gradient_function: {oper.type} '{oper.Name}'");
+    Console.WriteLine($"get_gradient_function: {oper.type} '{oper.name}'");

     switch (oper.type)
     {
@@ -414,6 +414,8 @@ namespace Tensorflow
     return constant_op.constant(value as string[], name);
 case "Int32":
     return constant_op.constant(Convert.ToInt32(value), name);
+case "Single":
+    return constant_op.constant(Convert.ToSingle(value), name);
 case "Double":
     return constant_op.constant(Convert.ToDouble(value), name);
 case "RefVariable":


+2 -1  src/TensorFlowNET.Core/tf.cs

@@ -17,6 +17,7 @@ namespace Tensorflow
 public static Context context = new Context(new ContextOptions(), new Status());

 public static Graph g = new Graph();
+public static Session session = new Session();

 public static RefVariable Variable<T>(T data, string name = "", TF_DataType dtype = TF_DataType.DtInvalid)
 {
@@ -48,7 +49,7 @@ namespace Tensorflow

 public static Session Session()
 {
-    return new Session();
+    return session;
 }
}
}
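`tf.Session()` now hands back one shared Session instead of a fresh instance per call, presumably so the session that runs the initializer is the same one that later evaluates the variable, which is what the "Attempting to use uninitialized value v1" error points at. An illustrative sketch of the new behaviour:

using System;
using Tensorflow;

public static class SharedSessionSketch
{
    public static void Run()
    {
        // Both calls now return the same object held in the static tf.session field.
        var s1 = tf.Session();
        var s2 = tf.Session();
        Console.WriteLine(ReferenceEquals(s1, s2)); // True
    }
}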

+2 -2  test/TensorFlowNET.UnitTest/GraphTest.cs

@@ -23,7 +23,7 @@ namespace TensorFlowNET.UnitTest

 // Make a placeholder operation.
 var feed = c_test_util.Placeholder(graph, s);
-EXPECT_EQ("feed", feed.Name);
+EXPECT_EQ("feed", feed.name);
 EXPECT_EQ("Placeholder", feed.OpType);
 EXPECT_EQ("", feed.Device);
 EXPECT_EQ(1, feed.NumOutputs);
@@ -53,7 +53,7 @@ namespace TensorFlowNET.UnitTest
 EXPECT_EQ(TF_Code.TF_OK, s.Code);

 // Test TF_Operation*() query functions.
-EXPECT_EQ("add", add.Name);
+EXPECT_EQ("add", add.name);
 EXPECT_EQ("AddN", add.OpType);
 EXPECT_EQ("", add.Device);
 EXPECT_EQ(1, add.NumOutputs);


+3 -4  test/TensorFlowNET.UnitTest/VariableTest.cs

@@ -49,8 +49,7 @@ namespace TensorFlowNET.UnitTest
 [TestMethod]
 public void Assign()
 {
-    var v1 = tf.get_variable("v1", shape: new TensorShape(3), initializer: tf.zeros_initializer);
-
+    var v1 = tf.Variable(10.0f, name: "v1"); //tf.get_variable("v1", shape: new TensorShape(3), initializer: tf.zeros_initializer);
     var inc_v1 = v1.assign(v1 + 1.0f);

     // Add an op to initialize the variables.
@@ -73,10 +72,10 @@ namespace TensorFlowNET.UnitTest
 int result = 0;
 Tensor x = tf.Variable(10, name: "x");

-var model = tf.global_variables_initializer();
+var init_op = tf.global_variables_initializer();
 using (var session = tf.Session())
 {
-    session.run(model);
+    session.run(init_op);
     for (int i = 0; i < 5; i++)
     {
         x = x + 1;

