@@ -9,6 +9,7 @@ namespace Tensorflow.Eager
        public int default_execution_mode;
        public string device_name = "";
        public string scope_name = "";
        bool _initialized = false;
        public Context(ContextOptions opts, Status status)
@@ -33,6 +34,11 @@ namespace Tensorflow.Eager
        public bool executing_eagerly() => true;
        public string shared_name(string name = null)
            => !string.IsNullOrEmpty(name) || !executing_eagerly() ?
                name :
                "cd2c89b7-88b7-44c8-ad83-06c2a9158347";
        public static implicit operator IntPtr(Context ctx)
            => ctx._handle;
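The new shared_name fallback is easiest to read as a pair of expected results; the calls below are illustrative only (an eager-mode Context named ctx is assumed), and the GUID literal is the fixed placeholder introduced above for anonymous eager variables:

    // Illustrative expectations, not test code; assumes ctx.executing_eagerly() == true.
    ctx.shared_name("weights");   // -> "weights": a non-empty name is returned unchanged
    ctx.shared_name();            // -> "cd2c89b7-88b7-44c8-ad83-06c2a9158347" (anonymous placeholder)

The implicit IntPtr operator mirrors the ones on the other eager wrapper types below, so a Context can be handed straight to P/Invoke entry points that expect a raw handle.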
@@ -18,6 +18,6 @@ namespace Tensorflow.Eager
            => tensor._handle;
        public override string ToString()
            => $"TFE_Op {_handle}";
            => $"TFE_Op 0x{_handle.ToString("x16")}";
    }
}
@@ -18,6 +18,6 @@ namespace Tensorflow.Eager
            => tensor._handle;
        public override string ToString()
            => $"TFE_TensorHandle {_handle}";
            => $"TFE_TensorHandle 0x{_handle.ToString("x16")}";
    }
}
@@ -89,7 +89,7 @@ namespace Tensorflow
        /// <param name="num_retvals">int*</param>
        /// <param name="status">TF_Status*</param>
        [DllImport(TensorFlowLibName)]
        public static extern void TFE_Execute(TFE_Op op, IntPtr[] retvals, ref int num_retvals, IntPtr status);
        public static extern void TFE_Execute(IntPtr op, IntPtr[] retvals, ref int num_retvals, IntPtr status);
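Changing the first parameter from TFE_Op to IntPtr makes the marshaling explicit at the P/Invoke boundary. Call sites should not need to change, assuming the eager wrapper types keep an implicit IntPtr conversion (the `=> tensor._handle;` context lines above each ToString appear to be the tails of such operators). A minimal call sketch, with the buffer size chosen purely for illustration:

    // Illustrative call shape; op is assumed to be a TFE_Op and status an IntPtr-compatible TF_Status handle.
    var retvals = new IntPtr[1];
    int num_retvals = 1;
    c_api.TFE_Execute(op, retvals, ref num_retvals, status);   // op converts to IntPtr implicitly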
        /// <summary>
        ///
@@ -147,6 +147,7 @@ namespace Tensorflow.Eager
            c_api.TFE_OpAddInput(op, input_handle, status);
            status.Check(true);
            return true;
        }
@@ -236,8 +237,13 @@ namespace Tensorflow.Eager
                case TF_AttrType.TF_ATTR_INT:
                    c_api.TFE_OpSetAttrInt(op, key, Convert.ToInt64(value));
                    break;
                case TF_AttrType.TF_ATTR_SHAPE:
                    var dims = (value as int[]).Select(x => (long)x).ToArray();
                    c_api.TFE_OpSetAttrShape(op, key, dims, dims.Length, status);
                    status.Check(true);
                    break;
                default:
                    throw new NotImplementedException("");
                    throw new NotImplementedException($"SetOpAttrScalar for {type}");
            }
            return true;
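The new TF_ATTR_SHAPE case is what allows the eager fast path below to pass "shape", shape.dims when building VarHandleOp: the attribute value arrives as an int[] and TFE_OpSetAttrShape wants 64-bit dims plus a length, hence the widening. As a standalone sketch of that conversion (values are illustrative; requires System.Linq):

    int[] shape = { 2, 3 };
    long[] dims = shape.Select(x => (long)x).ToArray();   // { 2L, 3L }, passed along with dims.Length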
@@ -14,6 +14,9 @@
   limitations under the License.
******************************************************************************/
using Tensorflow.Eager;
using static Tensorflow.Binding;
namespace Tensorflow
{
    public static class gen_resource_variable_ops
@@ -29,6 +32,14 @@ namespace Tensorflow
        public static Tensor var_is_initialized_op(Tensor resource, string name = null)
        {
            if (tf.context.executing_eagerly())
            {
                var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
                    "VarIsInitializedOp", name, null,
                    resource);
                return _result;
            }
            var _op = _op_def_lib._apply_op_helper("VarIsInitializedOp", name, new { resource });
            return _op.output;
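Both gen_resource_variable_ops entry points follow the same shape: when the context is eager they dispatch through wrap_tfe_src.TFE_FastPathExecute (context, device name, op name, the user-supplied name, a callbacks placeholder, then inputs and flattened attribute pairs) and return the resulting tensor; otherwise they fall through to the graph-mode _apply_op_helper path. A hedged usage sketch, assuming eager mode and a resource handle produced by var_handle_op below:

    // Illustrative only; `handle` is assumed to come from gen_resource_variable_ops.var_handle_op.
    var is_init = gen_resource_variable_ops.var_is_initialized_op(handle);   // boolean scalar tensor in eager mode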
@@ -46,6 +57,14 @@ namespace Tensorflow
        public static Tensor var_handle_op(TF_DataType dtype, TensorShape shape,
            string container = "", string shared_name = "", string name = null)
        {
            if (tf.context.executing_eagerly())
            {
                var _result = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
                    "VarHandleOp", name, null,
                    "container", container, "shared_name", shared_name, "dtype", dtype, "shape", shape.dims);
                return _result;
            }
            var _op = _op_def_lib._apply_op_helper("VarHandleOp", name, new {
                dtype,
                shape,
@@ -100,10 +100,10 @@ namespace Tensorflow
        /// <param name="shared_name"></param>
        /// <param name="name"></param>
        /// <param name="graph_mode"></param>
        /// <param name="extra_handle_data"></param>
        /// <param name="initial_value"></param>
        /// <returns></returns>
        public static Tensor variable_handle_from_shape_and_dtype(TensorShape shape, TF_DataType dtype,
            string shared_name, string name, bool graph_mode, Tensor extra_handle_data = null)
            string shared_name, string name, bool graph_mode, Tensor initial_value = null)
        {
            var container = ""; // ops.get_default_graph().container;
            var handle = gen_resource_variable_ops.var_handle_op(shape: shape,
@@ -112,17 +112,22 @@ namespace Tensorflow
                name: name,
                container: container);
            if (extra_handle_data == null)
                extra_handle_data = handle;
            if (initial_value == null)
                initial_value = handle;
            if (graph_mode)
            {
                var full_handle_data = _combine_handle_data(handle, extra_handle_data);
                var full_handle_data = _combine_handle_data(handle, initial_value);
                _set_handle_shapes_and_types(handle, full_handle_data, graph_mode);
                return handle;
            }
            else
            {
                // We do not want two distinct ResourceVariable objects for the same
                // underlying resource in the runtime.
                // When in eager mode, explicitly ensure so here. When in graph mode, it's
                // ensured by always generating different variable names.
                var exists = gen_resource_variable_ops.var_is_initialized_op(handle);
                throw new NotImplementedException("");
            }
        }
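Note that the eager branch still ends in NotImplementedException, so eager handle creation does not complete in this change; the var_is_initialized_op call is the uniqueness probe the comment above describes. For orientation only, a hypothetical call into the helper would look roughly like this (the enclosing class name resource_variable_ops and all argument values are assumptions, not shown in the hunk):

    // Hypothetical, illustrative call; with this change the eager branch would still throw.
    var handle = resource_variable_ops.variable_handle_from_shape_and_dtype(
        shape: new int[] { 2, 2 },               // int[] converts implicitly to TensorShape
        dtype: TF_DataType.TF_FLOAT,
        shared_name: tf.context.shared_name(),   // anonymous GUID when executing eagerly
        name: "Variable",
        graph_mode: !tf.context.executing_eagerly());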
@@ -71,5 +71,8 @@ namespace Tensorflow
        protected override void DisposeUnmanagedResources(IntPtr handle)
            => TF_DeleteStatus(handle);
        public override string ToString()
            => $"{Code} 0x{_handle.ToString("x16")}";
    }
}
@@ -90,13 +90,13 @@ namespace Tensorflow.Summaries
            string scope_base_name = string.IsNullOrEmpty(family) ? name : $"{family}/{name}";
            return tf_with(ops.name_scope(scope_base_name, default_name: default_name, values), scope =>
            {
                var tag = scope._name_scope;
                var tag = scope.scope_name;
                if (string.IsNullOrEmpty(family))
                    tag = tag.Remove(tag.Length - 1);
                else
                    tag = $"{family}/{tag.Remove(tag.Length - 1)}";
                return (tag, scope._name_scope);
                return (tag, scope.scope_name);
            });
        }
    }
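To make the renamed scope_name field easier to review, here is a worked example of the tag computation (values are illustrative and assume no enclosing name scope):

    // family = "metrics", name = "loss"
    // scope_base_name  = "metrics/loss"
    // scope.scope_name = "metrics/loss/"          (name scopes carry a trailing '/')
    // tag              = "metrics/metrics/loss"   (family prefix plus the scope name without its '/')
    // With an empty family, tag is simply "loss".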
@@ -181,6 +181,8 @@ namespace Tensorflow
                    return (NDArray)StringData()[0];
                case TF_DataType.TF_INT32:
                    return *(int*)buffer;
                case TF_DataType.TF_FLOAT:
                    return *(float*)buffer;
                case TF_DataType.TF_DOUBLE:
                    return *(double*)buffer;
                default:
@@ -253,7 +253,7 @@ namespace Tensorflow
        public static implicit operator Shape(TensorShape shape) => new Shape((int[])shape.dims.Clone());
        public static implicit operator int[](TensorShape shape) => shape == null ? null : (int[])shape.dims.Clone(); // we clone to avoid any changes
        public static implicit operator TensorShape(int[] dims) => new TensorShape(dims);
        public static implicit operator TensorShape(int[] dims) => dims == null ? new TensorShape(new int[0]) : new TensorShape(dims);
        public static explicit operator int(TensorShape shape) => shape.size;
        public static implicit operator TensorShape(int dim) => new TensorShape(dim);
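With the guarded conversion, a null int[] now produces an empty (rank-0) TensorShape instead of forwarding null into the constructor. A minimal illustration:

    int[] dims = null;
    TensorShape scalar = dims;            // empty shape, equivalent to new TensorShape(new int[0])
    TensorShape matrix = new[] { 2, 3 };  // non-null arrays still convert to a rank-2 shape as before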
@@ -91,6 +91,10 @@ namespace Tensorflow
                    return new EagerTensor(str, ctx.device_name);
                case int int32:
                    return new EagerTensor(int32, ctx.device_name);
                case float float32:
                    return new EagerTensor(float32, ctx.device_name);
                case double double64:
                    return new EagerTensor(double64, ctx.device_name);
                case float[] float32s:
                    return new EagerTensor(float32s, ctx.device_name);
                case double[] double64s:
@@ -94,20 +94,28 @@ namespace Tensorflow
            if (collections == null)
                collections = new List<string>() { tf.GraphKeys.GLOBAL_VARIABLES };
            _trainable = trainable;
            _graph_key = ops.get_default_graph().graph_key;
            if (trainable && !collections.Contains(tf.GraphKeys.TRAINABLE_VARIABLES))
                collections.Add(tf.GraphKeys.TRAINABLE_VARIABLES);
            ops.init_scope();
            _in_graph_mode = true;
            _in_graph_mode = !tf.context.executing_eagerly();
            tf_with(ops.name_scope(name, "Variable"), scope =>
            {
                name = scope;
                var handle_name = ops.name_from_scope_name(name);
                var shared_name = handle_name;
                var unique_id = shared_name;
                var unique_id = $"{handle_name}_{ops.uid()}";
                var shared_name = tf.context.shared_name();
                if (_in_graph_mode)
                {
                    shared_name = handle_name;
                    unique_id = shared_name;
                }
                var attr = new AttrValue();
                attr.List = new AttrValue.Types.ListValue();
                attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{handle_name}"));
                attr.List.S.Add(ByteString.CopyFromUtf8($"loc:@{handle_name}"));
                tf_with(ops.name_scope("Initializer"), delegate
                {
                    initial_value = ops.convert_to_tensor(init_from_fn ? (initial_value as Func<Tensor>)() : initial_value,
@@ -241,6 +241,9 @@ namespace Tensorflow
        /// <returns></returns>
        public static void init_scope()
        {
            if (tf.context.executing_eagerly())
                return;
            // Retrieve the active name scope: entering an `init_scope` preserves
            // the name scope of the current context.
            var default_graph = get_default_graph();
@@ -15,6 +15,8 @@
******************************************************************************/
using System.Collections.Generic;
using Tensorflow.Eager;
using static Tensorflow.Binding;
namespace Tensorflow
{
@@ -32,8 +34,8 @@ namespace Tensorflow
        public string _name;
        public string _default_name;
        public object _values;
        public string _name_scope;
        public string old_stack = "";
        public string scope_name;
        public string old_scope_name = "";
        public NameScope(string name, string default_name = "", object values = null)
        {
@@ -44,29 +46,54 @@ namespace Tensorflow
        public void __enter__()
        {
            _name = _name ?? _default_name;
            if (_name.EndsWith("basic_r_n_n_cell"))
            if (tf.context.executing_eagerly())
            {
                (scope_name, old_scope_name) = enter_eager_name_scope(tf.context, _name);
            }
            else
            {
                _name = _name ?? _default_name;
                Graph g = null;
                if (_values is List<Tensor> vList)
                    g = _get_graph_from_inputs(vList.ToArray());
                else if (_values is Tensor[] vArray)
                    g = _get_graph_from_inputs(vArray);
                if (g == null)
                    g = get_default_graph();
                old_scope_name = g._name_stack;
                scope_name = g.name_scope(_name);
            }
            Graph g = null;
        }
            if (_values is List<Tensor> vList)
                g = _get_graph_from_inputs(vList.ToArray());
            else if (_values is Tensor[] vArray)
                g = _get_graph_from_inputs(vArray);
        private (string, string) enter_eager_name_scope(Context ctx, string name)
        {
            if (name == null)
                name = "";
            if (g == null)
                g = get_default_graph();
            var scope_name = name;
            var old_name = ctx.scope_name;
            // A trailing slash breaks out of nested name scopes, indicating a
            // fully specified scope name, for compatibility with Graph.name_scope.
            if (!name.EndsWith("/"))
            {
                scope_name = name + "/";
                if (!string.IsNullOrEmpty(old_name))
                    scope_name = old_name + scope_name;
            }
            old_stack = g._name_stack;
            _name_scope = g.name_scope(_name);
            ctx.scope_name = scope_name;
            return (scope_name, old_name);
        }
        public void Dispose()
        {
            var g = get_default_graph();
            g._name_stack = old_stack;
            if (tf.context.executing_eagerly())
                tf.context.scope_name = old_scope_name;
            else
                get_default_graph()._name_stack = old_scope_name;
        }
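A worked example of the eager scope-name bookkeeping introduced above (assumes a fresh context with scope_name == ""):

    // Entering "layer1"     -> ctx.scope_name becomes "layer1/",       previous scope ""
    // Then entering "dense"  -> ctx.scope_name becomes "layer1/dense/", previous scope "layer1/"
    // Entering a name that already ends with "/" (e.g. "outer/") re-enters exactly that scope
    // without prefixing the current one, matching Graph.name_scope semantics.
    // Dispose restores the previous ctx.scope_name, so nested scopes unwind like a stack.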
        public void __exit__()
@@ -89,7 +116,7 @@ namespace Tensorflow
        /// <param name="ns"></param>
        public static implicit operator string(NameScope ns)
        {
            return ns._name_scope;
            return ns.scope_name;
        }
    }
}