@@ -95,9 +95,9 @@ namespace Tensorflow
             {
                 // 'ready' handles the case where one output gradient relies on
                 // another output's gradient.
-                if (!pending_count.ContainsKey(op.Name))
-                    pending_count[op.Name] = 0;
-                bool ready = pending_count[op.Name] == 0;
+                if (!pending_count.ContainsKey(op.name))
+                    pending_count[op.name] = 0;
+                bool ready = pending_count[op.name] == 0;
                 if(ready && !to_ops_set.Contains(op) && reachable_to_ops.Contains(op))
                 {
                     to_ops_set.Add(op);
@@ -131,7 +131,7 @@ namespace Tensorflow
                 // for ops that do not have gradients.
                 var grad_fn = ops.get_gradient_function(op);
-                Python.with<ops.name_scope>(new ops.name_scope(op.Name + "_grad"), scope1 =>
+                Python.with<ops.name_scope>(new ops.name_scope(op.name + "_grad"), scope1 =>
                 {
                     string name1 = scope1;
                     if (grad_fn != null)
@@ -193,12 +193,12 @@ namespace Tensorflow
             {
                 foreach(var x in _NonEagerInputs(op, xs))
                 {
-                    if (!pending_count.ContainsKey(x.op.Name))
-                        pending_count[x.op.Name] = 0;
+                    if (!pending_count.ContainsKey(x.op.name))
+                        pending_count[x.op.name] = 0;
-                    pending_count[x.op.Name] -= 1;
+                    pending_count[x.op.name] -= 1;
-                    var ready = pending_count[x.op.Name] == 0;
+                    var ready = pending_count[x.op.name] == 0;
                     if(loop_state != null && !ready)
                     {
@@ -281,10 +281,10 @@ namespace Tensorflow
                 bool is_stop_op = true;
                 foreach(var inp in _NonEagerInputs(op, xs))
                 {
-                    if (!pending_count.ContainsKey(inp.op.Name))
-                        pending_count[inp.op.Name] = 0;
+                    if (!pending_count.ContainsKey(inp.op.name))
+                        pending_count[inp.op.name] = 0;
-                    if (pending_count[inp.op.Name] > 0)
+                    if (pending_count[inp.op.name] > 0)
                     {
                         is_stop_op = false;
                         break;
@@ -300,17 +300,17 @@ namespace Tensorflow
         private static Tensor _GetGrad(Dictionary<string, Tensor[][]> grads, Tensor t)
         {
             var op = t.op;
-            if (!grads.ContainsKey(op.Name))
+            if (!grads.ContainsKey(op.name))
                 return null;
-            Tensor[][] op_grads = grads[op.Name];
+            Tensor[][] op_grads = grads[op.name];
             var t_grad = op_grads[t.value_index];
             return t_grad[0];
         }
         private static Tensor[][] _GetGrads(Dictionary<string, Tensor[][]> grads, Operation op)
         {
-            if (grads.ContainsKey(op.Name))
-                return grads[op.Name];
+            if (grads.ContainsKey(op.name))
+                return grads[op.name];
             else
                 return op.outputs.Select(x => new Tensor[0]).ToArray();
         }
@@ -324,11 +324,11 @@ namespace Tensorflow
         private static void _SetGrad(Dictionary<string, Tensor[][]> grads, Tensor t, Tensor grad)
         {
             var op = t.op;
-            Tensor[][] op_grads = grads.ContainsKey(op.Name) ? grads[op.Name] : null;
+            Tensor[][] op_grads = grads.ContainsKey(op.name) ? grads[op.name] : null;
             if (op_grads == null)
             {
                 op_grads = op.outputs.Select(x => new Tensor[1]).ToArray();
-                grads[op.Name] = op_grads;
+                grads[op.name] = op_grads;
             }
             var t_grads = op_grads[t.value_index];
             t_grads[0] = grad;
@@ -421,10 +421,10 @@ namespace Tensorflow
             {
                 if (between_ops.Contains(x.op))
                 {
-                    if (!pending_count.ContainsKey(x.op.Name))
-                        pending_count[x.op.Name] = 0;
+                    if (!pending_count.ContainsKey(x.op.name))
+                        pending_count[x.op.name] = 0;
-                    pending_count[x.op.Name] += 1;
+                    pending_count[x.op.name] += 1;
                 }
             }
         }
@@ -150,8 +150,9 @@ namespace Tensorflow
             _create_op_helper(op, true);
             Console.Write($"create_op: {op_type} '{node_def.Name}'");
-            Console.Write($", inputs: {(inputs.Length == 0 ? "empty" : String.Join(",", inputs.Select(x => x.name)))}");
-            Console.Write($", outputs: {(op.outputs.Length == 0 ? "empty" : String.Join(",", op.outputs.Select(x => x.name)))}");
+            Console.Write($", inputs: {(inputs.Length == 0 ? "empty" : String.Join(", ", inputs.Select(x => x.name)))}");
+            Console.Write($", control_inputs: {(control_inputs.Length == 0 ? "empty" : String.Join(", ", control_inputs.Select(x => x.name)))}");
+            Console.Write($", outputs: {(op.outputs.Length == 0 ? "empty" : String.Join(", ", op.outputs.Select(x => x.name)))}");
             Console.WriteLine();
             return op;
@@ -182,7 +183,7 @@ namespace Tensorflow
             }
             else if (tensor_or_op is Operation)
             {
-                return !_unfetchable_ops.Contains((tensor_or_op as Operation).Name);
+                return !_unfetchable_ops.Contains((tensor_or_op as Operation).name);
             }
             return false;
@@ -12,5 +12,6 @@ namespace Tensorflow
    {
        string Device { get; }
        Operation op { get; }
+       string name { get; }
    }
}
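The hunk above adds a lower-case `name` member to `ITensorOrOperation`, so call sites can read a name without knowing whether they hold a `Tensor` or an `Operation` (both expose the shared members, matching the Python API's lower-case naming). A minimal consumption sketch, not part of the patch; the `NameDump.Describe` helper is hypothetical and assumes both `Tensor` and `Operation` implement the interface:

    using System;
    using Tensorflow;

    static class NameDump
    {
        // Works for either a Tensor ("op:0") or an Operation ("op"),
        // because both now expose ITensorOrOperation.name and .Device.
        public static void Describe(ITensorOrOperation x)
            => Console.WriteLine($"{x.name} (device: '{x.Device}')");
    }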
@@ -20,7 +20,7 @@ namespace Tensorflow
        private Status status = new Status();
-       public string Name => c_api.StringPiece(c_api.TF_OperationName(_handle));
+       public string name => c_api.StringPiece(c_api.TF_OperationName(_handle));
        public string OpType => c_api.StringPiece(c_api.TF_OperationOpType(_handle));
        public string Device => c_api.StringPiece(c_api.TF_OperationDevice(_handle));
@@ -210,7 +210,7 @@ namespace Tensorflow
        public override string ToString()
        {
-            return _handle == IntPtr.Zero ? "tf.Operation Undefined" : $"tf.Operation '{Name}' type={OpType}";
+            return _handle == IntPtr.Zero ? "tf.Operation Undefined" : $"tf.Operation '{name}' type={OpType}";
        }
        public static implicit operator Operation(IntPtr handle) => new Operation(handle);
@@ -73,7 +73,7 @@ namespace Tensorflow
            {
                if (!graph.is_fetchable(op))
                {
-                    throw new Exception($"Operation {op.Name} has been marked as not fetchable.");
+                    throw new Exception($"Operation {op.name} has been marked as not fetchable.");
                }
            }
@@ -25,7 +25,7 @@ namespace Tensorflow
        /// <summary>
        /// The string name of this tensor.
        /// </summary>
-       public string name => $"{(op == null ? "Operation was not named" : $"{op.Name}:{value_index}")}";
+       public string name => $"{(op == null ? "Operation was not named" : $"{op.name}:{value_index}")}";
        public int value_index { get; }
@@ -97,7 +97,7 @@ namespace Tensorflow
                if (grad == null)
                    continue;
-                var scope_name = var.op.Name;
+                var scope_name = var.op.name;
                Python.with<ops.name_scope>(new ops.name_scope("update_" + scope_name), scope2 =>
                {
                    update_ops.Add(processor.update_op(this, grad));
@@ -116,7 +116,7 @@ namespace Tensorflow
            {
                FilenameTensorName = filename_tensor.name,
                SaveTensorName = save_tensor.name,
-                RestoreOpName = restore_op.Name,
+                RestoreOpName = restore_op.name,
                MaxToKeep = max_to_keep,
                Sharded = sharded,
                KeepCheckpointEveryNHours = keep_checkpoint_every_n_hours,
@@ -84,9 +84,9 @@ namespace Tensorflow
                }
                if (var.op.type == "ReadVariableOp")
-                    name = var.op.inputs[0].op.Name;
+                    name = var.op.inputs[0].op.name;
                else
-                    name = var.op.Name;
+                    name = var.op.name;
                if (names_to_saveables.ContainsKey(name))
                    throw new ValueError($"At least two variables have the same name: {name}");
@@ -143,11 +143,11 @@ namespace Tensorflow
        private Tensor _safe_initial_value_from_tensor(Tensor tensor, Dictionary<string, Operation> op_cache)
        {
            var op = tensor.op;
-            var new_op = op_cache.ContainsKey(op.Name) ? op_cache[op.Name] : null;
+            var new_op = op_cache.ContainsKey(op.name) ? op_cache[op.name] : null;
            if(new_op == null)
            {
                new_op = _safe_initial_value_from_op(op, op_cache);
-                op_cache[op.Name] = new_op;
+                op_cache[op.name] = new_op;
            }
            return new_op.outputs[tensor.value_index];
        }
@@ -185,7 +185,7 @@ namespace Tensorflow
        /// A `Tensor` that will hold the new value of this variable after
        /// the assignment has completed.
        /// </returns>
-       public ITensorOrOperation assign(Tensor value, bool use_locking = false, string name = "", bool read_value = true)
+       public ITensorOrOperation assign(object value, bool use_locking = false, string name = "", bool read_value = true)
        {
            var assign = gen_state_ops.assign(_variable, value, use_locking: use_locking, name: name);
            if (read_value)
@@ -47,7 +47,7 @@ namespace Tensorflow
        /// <param name="validate_shape"></param>
        /// <param name="use_locking"></param>
        /// <param name="name"></param>
-       public static Tensor assign(Tensor tensor, Tensor value,
+       public static Tensor assign(Tensor tensor, object value,
            bool validate_shape = true,
            bool use_locking = true,
            string name = "")
@@ -46,7 +46,7 @@ namespace Tensorflow
        public void Dispose()
        {
            var g = get_default_graph();
-            Console.WriteLine($"name_scope: {g._name_stack} -> {old_stack}");
+            // Console.WriteLine($"name_scope: {g._name_stack} -> {old_stack}");
            g._name_stack = old_stack;
        }
@@ -319,7 +319,7 @@ namespace Tensorflow
            return (oper, out_grads) =>
            {
-                Console.WriteLine($"get_gradient_function: {oper.type} '{oper.Name}'");
+                Console.WriteLine($"get_gradient_function: {oper.type} '{oper.name}'");
                switch (oper.type)
                {
@@ -414,6 +414,8 @@ namespace Tensorflow
                    return constant_op.constant(value as string[], name);
                case "Int32":
                    return constant_op.constant(Convert.ToInt32(value), name);
+               case "Single":
+                   return constant_op.constant(Convert.ToSingle(value), name);
                case "Double":
                    return constant_op.constant(Convert.ToDouble(value), name);
                case "RefVariable":
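Taken together, widening `assign` from `Tensor value` to `object value` and adding the `"Single"` branch above suggests a variable can now be assigned from a bare float, with the scalar routed through `constant_op.constant(Convert.ToSingle(...))`. A hedged sketch of that call pattern, built only from APIs visible in this diff; the conversion path is an assumption, not verified behavior:

    using Tensorflow;

    class AssignScalarSketch
    {
        static void Build()
        {
            var v1 = tf.Variable(10.0f, name: "v1");
            // assign(object value) now accepts a bare float; presumably it is
            // turned into a constant via the new "Single" switch case above.
            var assign_op = v1.assign(11.0f);
        }
    }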
@@ -17,6 +17,7 @@ namespace Tensorflow
        public static Context context = new Context(new ContextOptions(), new Status());
        public static Graph g = new Graph();
+       public static Session session = new Session();
        public static RefVariable Variable<T>(T data, string name = "", TF_DataType dtype = TF_DataType.DtInvalid)
        {
@@ -48,7 +49,7 @@ namespace Tensorflow
        public static Session Session()
        {
-            return new Session();
+            return session;
        }
    }
}
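With the change above, `tf.Session()` hands back the shared `tf.session` field instead of constructing a new session on every call, so successive calls observe the same session state. A short usage sketch mirroring the variable unit test further down (nothing here beyond APIs already visible in the diff):

    using Tensorflow;

    class SharedSessionSketch
    {
        static void Run()
        {
            var x = tf.Variable(10, name: "x");
            var init_op = tf.global_variables_initializer();

            // tf.Session() now returns the shared tf.session instance.
            using (var session = tf.Session())
            {
                session.run(init_op);
            }
        }
    }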
@@ -23,7 +23,7 @@ namespace TensorFlowNET.UnitTest
            // Make a placeholder operation.
            var feed = c_test_util.Placeholder(graph, s);
-            EXPECT_EQ("feed", feed.Name);
+            EXPECT_EQ("feed", feed.name);
            EXPECT_EQ("Placeholder", feed.OpType);
            EXPECT_EQ("", feed.Device);
            EXPECT_EQ(1, feed.NumOutputs);
@@ -53,7 +53,7 @@ namespace TensorFlowNET.UnitTest
            EXPECT_EQ(TF_Code.TF_OK, s.Code);
            // Test TF_Operation*() query functions.
-            EXPECT_EQ("add", add.Name);
+            EXPECT_EQ("add", add.name);
            EXPECT_EQ("AddN", add.OpType);
            EXPECT_EQ("", add.Device);
            EXPECT_EQ(1, add.NumOutputs);
@@ -49,8 +49,7 @@ namespace TensorFlowNET.UnitTest
        [TestMethod]
        public void Assign()
        {
-            var v1 = tf.get_variable("v1", shape: new TensorShape(3), initializer: tf.zeros_initializer);
+            var v1 = tf.Variable(10.0f, name: "v1"); //tf.get_variable("v1", shape: new TensorShape(3), initializer: tf.zeros_initializer);
            var inc_v1 = v1.assign(v1 + 1.0f);
            // Add an op to initialize the variables.
@@ -73,10 +72,10 @@ namespace TensorFlowNET.UnitTest
            int result = 0;
            Tensor x = tf.Variable(10, name: "x");
-            var model = tf.global_variables_initializer();
+            var init_op = tf.global_variables_initializer();
            using (var session = tf.Session())
            {
-                session.run(model);
+                session.run(init_op);
                for(int i = 0; i < 5; i++)
                {
                    x = x + 1;