@@ -11,7 +11,7 @@ namespace Tensorflow.Eager
 {
     public EagerTensor() : base(IntPtr.Zero)
     {
-        EagerTensorHandle = c_api.TFE_NewEagerTensor();
     }

     public EagerTensor(IntPtr handle) : base(IntPtr.Zero)
@@ -22,41 +22,28 @@ namespace Tensorflow.Eager
     public EagerTensor(string value, string device_name) : base(value)
     {
-        EagerTensorHandle = c_api.TFE_NewEagerTensor();
-        tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status);
-        c_api.TFE_SetEagerTensorHandle(EagerTensorHandle, tfe_tensor_handle);
+        EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, status);
         Resolve();
     }

     public EagerTensor(NDArray value, string device_name) : base(value)
     {
-        EagerTensorHandle = c_api.TFE_NewEagerTensor();
-        tfe_tensor_handle = c_api.TFE_NewTensorHandle(_handle, status);
-        c_api.TFE_SetEagerTensorHandle(EagerTensorHandle, tfe_tensor_handle);
+        EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, status);
         Resolve();
     }

     public EagerTensor Resolve()
     {
-        _id = c_api.TFE_EagerTensorId(EagerTensorHandle);
-        if (tfe_tensor_handle == IntPtr.Zero)
-            tfe_tensor_handle = c_api.TFE_EagerTensorHandle(EagerTensorHandle);
+        _id = get_uid();

         if (_handle == IntPtr.Zero)
-            _handle = c_api.TFE_TensorHandleResolve(tfe_tensor_handle, status);
-        /*print($"new Tensor {Id} {_handle.ToString("x16")}");
-        print($"new TensorHandle {Id} {tfe_tensor_handle.ToString("x16")}");
-        print($"new EagerTensor {Id} {EagerTensorHandle.ToString("x16")}");*/
+            _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, status);

-        if (tfe_tensor_handle == IntPtr.Zero && _id == 0)
-        {
-        }
+        //print($"new Tensor {Id} {_handle.ToString("x16")}");
+        //print($"new TensorHandle {Id} {EagerTensorHandle.ToString("x16")}");

-        GarbageCollector.Increase(_handle, GCItemType.TensorHandle);
-        GarbageCollector.Increase(tfe_tensor_handle, GCItemType.LocalTensorHandle);
-        GarbageCollector.Increase(EagerTensorHandle, GCItemType.EagerTensorHandle);
+        /*GarbageCollector.Increase(_handle, GCItemType.TensorHandle);
+        GarbageCollector.Increase(tfe_tensor_handle, GCItemType.LocalTensorHandle);*/

         return this;
     }

@@ -66,16 +53,17 @@ namespace Tensorflow.Eager
     protected override void DisposeUnmanagedResources(IntPtr handle)
     {
-        GarbageCollector.Decrease(_handle);
+        /*GarbageCollector.Decrease(_handle);
         GarbageCollector.Decrease(tfe_tensor_handle);
-        GarbageCollector.Decrease(EagerTensorHandle);
+        GarbageCollector.Decrease(EagerTensorHandle);*/

-        /*print($"deleting DeleteTensorHandle {Id} {_handle.ToString("x16")}");
+        //print($"deleting DeleteTensorHandle {Id} {_handle.ToString("x16")}");
         c_api.TF_DeleteTensor(_handle);
-        print($"deleting DeleteTensorHandle {Id} {tfe_tensor_handle.ToString("x16")}");
-        c_api.TFE_DeleteTensorHandle(tfe_tensor_handle);
-        print($"deleting DeleteEagerTensor {Id} {EagerTensorHandle.ToString("x16")}");
-        c_api.TFE_DeleteEagerTensor(EagerTensorHandle);*/
+        //print($"deleting DeleteTensorHandle {Id} {EagerTensorHandle.ToString("x16")}");
+        c_api.TFE_DeleteTensorHandle(EagerTensorHandle);
     }

+    static long _uid = 0;
+    long get_uid() => _uid++;
     }
 }
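Note (not part of the change set): a minimal lifecycle sketch of the reworked handle management, using only the calls visible above. It assumes NumSharp's np.array and the public Dispose() inherited from the existing disposable base; an initialized runtime is also assumed.

    // Illustrative sketch only: one TFE_TensorHandle per EagerTensor,
    // a managed uid assigned in Resolve(), both native objects freed on dispose.
    using System;
    using NumSharp;
    using Tensorflow.Eager;

    var t = new EagerTensor(np.array(new[] { 1f, 2f, 3f }), device_name: "");
    Console.WriteLine(t.Id);      // uid from get_uid(), no longer fetched from the native side
    Console.WriteLine(t.Device);  // read via TFE_TensorHandleDeviceName(EagerTensorHandle, status)
    t.Dispose();                  // TF_DeleteTensor(_handle) + TFE_DeleteTensorHandle(EagerTensorHandle)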
@@ -9,11 +9,10 @@ namespace Tensorflow.Eager
     public partial class EagerTensor : Tensor
     {
         Status status = new Status();

-        IntPtr tfe_tensor_handle;
-        public IntPtr EagerTensorHandle { get; set; }
-        public override string Device => c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(tfe_tensor_handle, status));
+        public IntPtr EagerTensorHandle;
+        public override string Device => c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(EagerTensorHandle, status));
-        public override int rank => c_api.TFE_TensorHandleNumDims(tfe_tensor_handle, status);
+        public override int rank => c_api.TFE_TensorHandleNumDims(EagerTensorHandle, status);

         public static int GetRank(IntPtr handle)
         {
@@ -33,17 +33,16 @@ namespace Tensorflow.Eager
         {
             ctx.ensure_initialized();

-            var results = Enumerable.Range(0, num_outputs).Select(x => new EagerTensor()).ToArray();
-            Status status = c_api.TFE_QuickExecute(ctx,
+            using var status = new Status();
+            var results = wrap_tfe_src.TFE_Execute(ctx,
                 ctx.device_name,
                 op_name,
-                inputs.Select(x => x.EagerTensorHandle).ToArray(),
-                inputs.Length,
-                op => wrap_tfe_src.SetOpAttrs(op, attrs),
-                results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-            status.Check(true);
+                inputs,
+                attrs,
+                num_outputs,
+                status);
-            return results.Select(x => x.Resolve()).ToArray();
+            return results;
         }

         public (TF_DataType, EagerTensor[]) args_to_matching_eager(Context ctx, TF_DataType default_dtype = TF_DataType.DtInvalid, object[] args = null)
@@ -0,0 +1,62 @@
+using System.Collections.Generic;
+using System.Linq;
+using System;
+using static Tensorflow.OpDef.Types;
+
+namespace Tensorflow.Eager
+{
+    /// <summary>
+    /// python\eager\pywrap_tfe_src.cc
+    /// </summary>
+    public partial class wrap_tfe_src
+    {
+        public static EagerTensor[] TFE_Execute(Context ctx,
+            string device_name,
+            string op_name,
+            Tensor[] inputs,
+            object[] attrs,
+            int num_outputs,
+            Status status)
+            => TFE_ExecuteCancelable(ctx, device_name, op_name, inputs, attrs, num_outputs, status);
+
+        public static EagerTensor[] TFE_ExecuteCancelable(Context ctx,
+            string device_name,
+            string op_name,
+            Tensor[] inputs,
+            object[] attrs,
+            int num_outputs,
+            Status status)
+        {
+            var op = GetOp(ctx, op_name, status);
+            status.Check(true);
+            c_api.TFE_OpSetDevice(op, device_name, status);
+            if (status.ok())
+            {
+                for (int i = 0; i < inputs.Length; ++i)
+                {
+                    IntPtr tensor_handle;
+                    switch (inputs[i])
+                    {
+                        case EagerTensor et:
+                            tensor_handle = et.EagerTensorHandle;
+                            break;
+                        default:
+                            tensor_handle = c_api.TFE_NewTensorHandle(inputs[i], status);
+                            break;
+                    }
+                    c_api.TFE_OpAddInput(op, tensor_handle, status);
+                }
+            }
+            if (status.ok())
+                SetOpAttrs(op, attrs, status);
+
+            var outputs = new IntPtr[num_outputs];
+            if (status.ok())
+            {
+                c_api.TFE_Execute(op, outputs, ref num_outputs, status);
+                status.Check(true);
+            }
+            return outputs.Select(x => new EagerTensor(x)).ToArray();
+        }
+    }
+}
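Note (not part of the change set): a hedged usage sketch of the new managed wrapper, mirroring the Execute() change earlier in this diff. The op name, inputs, and attr pairs are illustrative; ctx is an initialized Tensorflow.Eager.Context and a, b are existing EagerTensors.

    // Sketch only: attrs are passed as alternating name/value pairs, which is
    // what SetOpAttrs consumes elsewhere in this diff (e.g. "axis", axis).
    using var status = new Status();
    EagerTensor[] results = wrap_tfe_src.TFE_Execute(ctx,
        ctx.device_name,
        "MatMul",                                                     // illustrative op
        new Tensor[] { a, b },                                        // EagerTensor inputs reuse EagerTensorHandle;
                                                                      // other tensors get TFE_NewTensorHandle
        new object[] { "transpose_a", false, "transpose_b", false },  // attr name/value pairs
        1,                                                            // num_outputs
        status);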
@@ -11,6 +11,167 @@ namespace Tensorflow.Eager
     /// </summary>
     public partial class wrap_tfe_src
     {
+        static int kFastPathExecuteInputStartIndex = 0;
+
+        public static EagerTensor[] TFE_FastPathExecute(Context ctx,
+            string device_name,
+            string opName,
+            string name,
+            Action callbacks,
+            params object[] args)
+        {
+            int args_size = args.Length;
+            var attr_list_sizes = new Dictionary<string, long>();
+            using var status = new Status();
+            var op = GetOp(ctx, opName, status);
+
+            var op_def = Graph.TFE_GetOpDef(opName);
+
+            // Set non-inferred attrs, including setting defaults if the attr is passed in
+            // as None.
+            for (int i = kFastPathExecuteInputStartIndex + op_def.InputArg.Count; i < args_size; i += 2)
+            {
+                var attr_name = args[i].ToString();
+                var attr_value = args[i + 1];
+
+                foreach (var attr in op_def.Attr)
+                {
+                    if (attr_name == attr.Name)
+                    {
+                        SetOpAttrWithDefaults(ctx, op, attr, attr_name, attr_value, attr_list_sizes, status);
+                        status.Check(true);
+                        break;
+                    }
+                }
+            }
+
+            c_api.TFE_OpSetDevice(op, device_name, status);
+            status.Check(true);
+
+            // Add inferred attrs and inputs.
+            for (int i = 0; i < op_def.InputArg.Count; i++)
+            {
+                var input_arg = op_def.InputArg[i];
+                if (!string.IsNullOrEmpty(input_arg.NumberAttr))
+                {
+                    int len = (args[kFastPathExecuteInputStartIndex + i] as object[]).Length;
+                    c_api.TFE_OpSetAttrInt(op, input_arg.NumberAttr, len);
+                    attr_list_sizes[input_arg.NumberAttr] = len;
+
+                    if (len > 0)
+                    {
+                        var fast_input_array = (object[])args[i];
+                        // First item adds the type attr.
+                        if (!AddInputToOp(fast_input_array[i], true, input_arg, op, status))
+                            return null;
+
+                        for (var j = 1; j < len; j++)
+                        {
+                            // Since the list is homogeneous, we don't need to re-add the attr.
+                            if (!AddInputToOp(fast_input_array[j], false, input_arg, op, status))
+                                return null;
+                        }
+                    }
+                }
+                else if (!string.IsNullOrEmpty(input_arg.TypeListAttr))
+                {
+                }
+                else
+                {
+                    // The item is a single item.
+                    AddInputToOp(args[i], true, input_arg, op, status);
+                }
+            }
+
+            int num_retvals = 0;
+            for (int i = 0; i < op_def.OutputArg.Count; i++)
+            {
+                var output_arg = op_def.OutputArg[i];
+                var delta = 1L;
+                if (!string.IsNullOrEmpty(output_arg.NumberAttr))
+                    delta = attr_list_sizes[output_arg.NumberAttr];
+                else if (!string.IsNullOrEmpty(output_arg.TypeListAttr))
+                    delta = attr_list_sizes[output_arg.TypeListAttr];
+                if (delta < 0)
+                    throw new RuntimeError("Attributes suggest that the size of an output list is less than 0");
+                num_retvals += (int)delta;
+            }
+
+            var retVals = new IntPtr[num_retvals];
+            c_api.TFE_Execute(op, retVals, ref num_retvals, status);
+            status.Check(true);
+
+            return retVals.Select(x => new EagerTensor(x)).ToArray();
+        }
+
+        private static TFE_Op GetOp(Context ctx, string op_or_function_name, Status status)
+        {
+            var maybe_op = ReleaseThreadLocalOp();
+            if (maybe_op != IntPtr.Zero)
+            {
+                c_api.TFE_OpReset(maybe_op, op_or_function_name, ctx.device_name, status);
+            }
+            else
+            {
+                maybe_op = c_api.TFE_NewOp(ctx, op_or_function_name, status);
+                op = maybe_op;
+            }
+
+            status.Check(true);
+            return maybe_op;
+        }
+
+        static TFE_Op op;
+        private static TFE_Op ReleaseThreadLocalOp()
+        {
+            return op;
+        }
+
+        /// <summary>
+        /// Adds input and type attr to the op, and to the list of flattened
+        /// inputs/attrs.
+        /// </summary>
+        /// <param name="inputs"></param>
+        /// <param name="add_type_attr"></param>
+        /// <param name="input_arg"></param>
+        /// <param name="op"></param>
+        /// <param name="status"></param>
+        /// <returns></returns>
+        private static bool AddInputToOp(object inputs,
+            bool add_type_attr,
+            ArgDef input_arg,
+            IntPtr op,
+            Status status)
+        {
+            IntPtr input_handle;
+
+            // ConvertToTensor();
+            switch (inputs)
+            {
+                case EagerTensor input:
+                    input_handle = input.EagerTensorHandle;
+                    break;
+                case EagerTensor[] input_list:
+                    input_handle = input_list[0].EagerTensorHandle;
+                    break;
+                default:
+                    throw new NotImplementedException("");
+            }
+
+            if (add_type_attr && !string.IsNullOrEmpty(input_arg.TypeAttr))
+            {
+                var dtype = c_api.TFE_TensorHandleDataType(input_handle);
+                c_api.TFE_OpSetAttrType(op, input_arg.TypeAttr, dtype);
+            }
+
+            c_api.TFE_OpAddInput(op, input_handle, status);
+            status.Check(true);
+
+            return true;
+        }
+
         public static void SetOpAttrs(TFE_Op op, params object[] attrs)
         {
             using var status = new Status();
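Note (not part of the change set): the gen-op call sites later in this diff all follow the same flattened convention for TFE_FastPathExecute: after (ctx, device_name, opName, name, callbacks), the params array carries the op inputs in OpDef.InputArg order, followed by alternating attr-name/attr-value pairs for the non-inferred attrs. A sketch matching the Cast call site below:

    // Sketch of the flattened args convention (values illustrative):
    var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
        "Cast", name,
        null,                                 // callbacks (unused here)
        x,                                    // inputs first, in OpDef.InputArg order
        "DstT", DstT, "Truncate", Truncate);  // then attr name/value pairs
    return results[0];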
@@ -38,12 +38,11 @@ namespace Tensorflow
         public static OpDef TFE_GetOpDef(string type)
         {
             IntPtr handle = tf.get_default_graph();
-            using (var buffer = new Buffer())
-            using (var status = new Status())
-            {
-                c_api.TF_GraphGetOpDef(handle, type, buffer, status);
-                return OpDef.Parser.ParseFrom(buffer.MemoryBlock.Stream());
-            }
+            using var buffer = new Buffer();
+            using var status = new Status();
+            c_api.TF_GraphGetOpDef(handle, type, buffer, status);
+            using var stream = buffer.MemoryBlock.Stream();
+            return OpDef.Parser.ParseFrom(stream);
         }

         public OperationDescription NewOperation(string opType, string opName)
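Note (not part of the change set): TFE_FastPathExecute relies on this OpDef lookup to split its flattened args into inputs and attrs. An assumed usage sketch; the MatMul arg and attr names come from the standard TensorFlow op registry, not from this diff:

    // Sketch: inspect the OpDef that drives input/attr inference.
    var op_def = Graph.TFE_GetOpDef("MatMul");
    // op_def.InputArg  -> "a", "b"
    // op_def.Attr      -> "transpose_a", "transpose_b", plus the inferred type attr "T"
    // op_def.OutputArg -> "product"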
@@ -164,15 +164,12 @@ namespace Tensorflow
         {
             if(tf.context.executing_eagerly())
             {
-                var results = new[] { new EagerTensor() };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
                     "Pack", name,
-                    values.Select(x => (x as EagerTensor).EagerTensorHandle).ToArray(), values.Length,
-                    wrap_tfe_src.SetOpAttrs2("axis", axis),
-                    op => wrap_tfe_src.SetOpAttrs(op, "axis", axis),
-                    results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-                status.Check(true);
-                return results[0].Resolve();
+                    null,
+                    values,
+                    "axis", axis);
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("Pack", name: name, args: new { values, axis });
@@ -259,17 +259,11 @@ namespace Tensorflow
         {
             if (tf.context.executing_eagerly())
             {
-                var results = new[] { new EagerTensor() };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
-                    "Add", name, new IntPtr[]
-                    {
-                        x as EagerTensor,
-                        y as EagerTensor
-                    }, 2,
-                    null, null,
-                    results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-                status.Check(true);
-                return results[0].Resolve();
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                    "Add", name, null,
+                    x as EagerTensor,
+                    y as EagerTensor);
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y });
@@ -304,17 +298,11 @@ namespace Tensorflow
             // forward_compatible(2019, 6, 25):
             if (tf.context.executing_eagerly())
             {
-                var results = new[] { new EagerTensor() };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
-                    "AddV2", name, new IntPtr[]
-                    {
-                        x as EagerTensor,
-                        y as EagerTensor
-                    }, 2,
-                    null, null,
-                    results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-                status.Check(true);
-                return results[0].Resolve();
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                    "AddV2", name,
+                    null,
+                    x, y);
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("AddV2", name, args: new { x, y });
@@ -784,22 +772,13 @@ namespace Tensorflow
         {
             if (tf.context.executing_eagerly())
             {
-                var results = new[] { new EagerTensor() };
-                var attrs = new object[]
-                {
-                    "DstT", DstT,
-                    "Truncate", Truncate
-                };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
                     "Cast", name,
-                    new IntPtr[] { x as EagerTensor }, 1,
-                    wrap_tfe_src.SetOpAttrs2(attrs),
-                    op => wrap_tfe_src.SetOpAttrs(op, attrs),
-                    results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-                status.Check(true);
+                    null,
+                    x,
+                    "DstT", DstT, "Truncate", Truncate);
-                return results[0].Resolve();
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
@@ -854,17 +833,11 @@ namespace Tensorflow
         {
             if (tf.context.executing_eagerly())
             {
-                var results = new[] { new EagerTensor() };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
-                    "Sub", name, new IntPtr[]
-                    {
-                        x as EagerTensor,
-                        y as EagerTensor
-                    }, 2,
-                    null, null,
-                    results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-                status.Check(true);
-                return results[0].Resolve();
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                    "Sub", name,
+                    null,
+                    x, y);
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
@@ -978,17 +951,11 @@ namespace Tensorflow
         {
             if (tf.context.executing_eagerly())
             {
-                var results = new[] { new EagerTensor() };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
-                    "Mul", name, new IntPtr[]
-                    {
-                        x as EagerTensor,
-                        y as EagerTensor
-                    }, 2,
-                    null, null,
-                    results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-                status.Check(true);
-                return results[0].Resolve();
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                    "Mul", name,
+                    null,
+                    x, y);
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
@@ -1029,17 +996,11 @@ namespace Tensorflow
         {
             if (tf.context.executing_eagerly())
             {
-                var results = new[] { new EagerTensor() };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
-                    "RealDiv", name, new IntPtr[]
-                    {
-                        x as EagerTensor,
-                        y as EagerTensor
-                    }, 2,
-                    null, null,
-                    results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-                status.Check(true);
-                return results[0].Resolve();
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                    "RealDiv", name,
+                    null,
+                    x, y);
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y });
@@ -1126,21 +1087,12 @@ namespace Tensorflow
         {
             if (tf.context.executing_eagerly())
             {
-                var results = EagerTensorPass.Create();
-                var inputs = EagerTensorPass.From(a, b);
-                var attrs = new object[]
-                {
-                    "transpose_a", transpose_a,
-                    "transpose_b", transpose_b
-                };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
                     "MatMul", name,
-                    inputs.Points, inputs.Length,
-                    wrap_tfe_src.SetOpAttrs2(attrs),
-                    op => wrap_tfe_src.SetOpAttrs(op, attrs),
-                    results.Points, results.Length);
-                status.Check(true);
-                return results[0].Resolve();
+                    null,
+                    a, b,
+                    "transpose_a", transpose_a, "transpose_b", transpose_b);
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
@@ -1343,18 +1295,11 @@ namespace Tensorflow
         {
             if (tf.context.executing_eagerly())
             {
-                var results = new[] { new EagerTensor() };
-                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
-                    "Range", name, new IntPtr[]
-                    {
-                        start as EagerTensor,
-                        limit as EagerTensor,
-                        delta as EagerTensor
-                    }, 3,
-                    null, null,
-                    results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
-                status.Check(true);
-                return results[0].Resolve();
+                var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+                    "Range", name,
+                    null,
+                    start, limit, delta);
+                return results[0];
             }

             var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta });
@@ -16,15 +16,14 @@ namespace Tensorflow
         static object locker = new object();

         public static void Init()
         {
-            Task.Run(() =>
+            /*Task.Run(() =>
            {
                while (true)
                {
                    Thread.Sleep(100);
                    Recycle();
                }
-            });
+            });*/
         }

         public static void Increase(IntPtr handle, GCItemType type)
@@ -41,12 +41,12 @@ namespace Tensorflow
         ICanBeFlattened,
         IPointerInputs
     {
-        protected int _id;
+        protected long _id;
         private readonly Operation _op;
         private readonly int _value_index;
         private TF_Output? _tf_output;
         private readonly TF_DataType _override_dtype;
-        public int Id => _id;
+        public long Id => _id;

         /// <summary>
         /// The Graph that contains this tensor.
@@ -29,7 +29,7 @@ namespace Tensorflow
 {
     public partial class ops
     {
-        public static int tensor_id(Tensor tensor)
+        public static long tensor_id(Tensor tensor)
        {
            return tensor.Id;
        }
@@ -52,7 +52,7 @@ namespace Tensorflow
         {
             GarbageCollector.Init();

-            var vspace = c_api.VSpace_Handle((shape, dims, dtype) =>
+            /*var vspace = c_api.VSpace_Handle((shape, dims, dtype) =>
            {
                var ones = constant_op.constant(1.0f, dtype: dtype) as EagerTensor;
                return ones.EagerTensorHandle;
@@ -60,16 +60,13 @@ namespace Tensorflow
            {
                var add_n = gen_math_ops.add_n(gradients.Data);
                return add_n;
-            });
+            });*/

             ops.RegisterFromAssembly();
             // ops.RegisterFromAssemblyEager();

-            c_api.TFE_RegisterGradientFunction((op_name, op_inputs, op_outputs, attrs_string, output_grads, skip_input_indices) =>
+            /*c_api.TFE_RegisterGradientFunction((op_name, op_inputs, op_outputs, attrs_string, output_grads, skip_input_indices) =>
            {
-                /*var input_tensors = new BindingArray(op_inputs);
-                var output_tensors = new BindingArray(op_outputs);
-                var output_grad_tensors = new BindingArray(output_grads);*/
                var input_tensors = new BindingTensorArray(op_inputs)
                    .Data.Select(x => tf.tensorMgr.GetTensor(x)).ToArray();
                var output_tensors = new BindingTensorArray(op_outputs)
@@ -98,7 +95,7 @@ namespace Tensorflow
            }, (op_name, op_inputs, op_outputs) =>
            {

-            });
+            });*/
         }

         public ResourceVariable Variable<T>(T data,