diff --git a/src/TensorFlowNET.Console/Program.cs b/src/TensorFlowNET.Console/Program.cs
index ee754eba..32f85fb8 100644
--- a/src/TensorFlowNET.Console/Program.cs
+++ b/src/TensorFlowNET.Console/Program.cs
@@ -1,4 +1,5 @@
using System;
+using static Tensorflow.Binding;
namespace Tensorflow
{
@@ -14,18 +15,19 @@ namespace Tensorflow
int batchSize = 1000;
- // 1 million float tensor 58.5M.
+ // 1 million float tensor 68M.
mm.Execute(10, 100 * batchSize, cases.Constant);
- // 100K float variable 80.5M.
+ // 100K float variable 84M.
mm.Execute(10, 10 * batchSize, cases.Variable);
- // 1 million math add 36.5M.
+ // 1 million math add 39M.
mm.Execute(10, 100 * batchSize, cases.MathAdd);
- // 100K gradient 210M.
+ // 100K gradient 44M.
mm.Execute(10, 10 * batchSize, cases.Gradient);
+ // 120M
Console.WriteLine("Finished.");
Console.ReadLine();
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.gradients.cs b/src/TensorFlowNET.Core/APIs/tf.gradients.cs
index e99c7733..ef13b3f4 100644
--- a/src/TensorFlowNET.Core/APIs/tf.gradients.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.gradients.cs
@@ -20,8 +20,10 @@ namespace Tensorflow
{
public partial class tensorflow
{
- public GradientTape GradientTape()
- => new GradientTape();
+ public GradientTape GradientTape(bool persistent = false,
+ bool watch_accessed_variables = true)
+ => new GradientTape(persistent: persistent,
+ watch_accessed_variables: watch_accessed_variables);
public Tensor[] gradients(Tensor[] ys,
Tensor[] xs,
diff --git a/src/TensorFlowNET.Core/Eager/Context.cs b/src/TensorFlowNET.Core/Eager/Context.cs
index ca01361d..58d882ad 100644
--- a/src/TensorFlowNET.Core/Eager/Context.cs
+++ b/src/TensorFlowNET.Core/Eager/Context.cs
@@ -18,6 +18,9 @@ namespace Tensorflow.Eager
status.Check(true);
}
+        /// <summary>
+        /// Initialize handle and devices if not already done so.
+        /// </summary>
public void ensure_initialized()
{
if (_initialized)
@@ -25,14 +28,20 @@ namespace Tensorflow.Eager
_initialized = true;
}
+ public void start_step()
+ => c_api.TFE_ContextStartStep(_handle);
+
+ public void end_step()
+ => c_api.TFE_ContextEndStep(_handle);
+
        /// <summary>
        /// Dispose any unmanaged resources related to given <paramref name="handle"/>.
        /// </summary>
protected sealed override void DisposeUnmanagedResources(IntPtr handle)
=> c_api.TFE_DeleteContext(_handle);
-
- public bool executing_eagerly() => true;
+ public bool executing_eagerly()
+ => default_execution_mode == EAGER_MODE;
public string shared_name(string name = null)
=> !string.IsNullOrEmpty(name) || !executing_eagerly() ?
diff --git a/src/TensorFlowNET.Core/Eager/EagerOperation.cs b/src/TensorFlowNET.Core/Eager/EagerOperation.cs
index 88bcc796..ce742904 100644
--- a/src/TensorFlowNET.Core/Eager/EagerOperation.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerOperation.cs
@@ -8,7 +8,6 @@ namespace Tensorflow.Eager
{
public class EagerOperation : Operation
{
-        static Dictionary<string, OpDef> op_dict;
public string Name { get; set; }
public new int NumInputs;
public IntPtr[] InputHandles { get; set; }
@@ -16,14 +15,12 @@ namespace Tensorflow.Eager
public new int NumOutputs;
public IntPtr[] OutputHandles { get; set; }
public Tensor[] Outputs { get; set; }
- public BindingArray SkipInputIndicesArray { get; set; }
- public unsafe int[] SkipInputIndices => SkipInputIndicesArray.Data.Select(x => *(int*) x).ToArray();
- public string[] AttrsArray { get; set; }
+ public long[] SkipInputIndices { get; set; }
+ public object[] Attrs { get; set; }
public EagerOperation() : base(IntPtr.Zero)
{
- if (op_dict == null)
- op_dict = op_def_registry.get_registered_ops();
+
}
public override InputList inputs
@@ -72,9 +69,9 @@ namespace Tensorflow.Eager
public bool get_attr_bool(string attr_name)
{
- for (int i = 0; i < AttrsArray.Length; i = i + 2)
- if (AttrsArray[i] == attr_name)
- return AttrsArray[i + 1] == "1";
+ for (int i = 0; i < Attrs.Length; i = i + 2)
+ if (Attrs[i].Equals(attr_name))
+ return Attrs[i + 1].Equals("1");
throw new ValueError($"Can't find attr: {attr_name}");
}
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.cs
new file mode 100644
index 00000000..34bbd86d
--- /dev/null
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.cs
@@ -0,0 +1,25 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Gradients;
+
+namespace Tensorflow.Eager
+{
+ public class EagerRunner : IEagerRunner
+ {
+ public Tensor[] TFE_Execute(Context ctx, string device_name, string op_name, Tensor[] inputs, object[] attrs, int num_outputs)
+ {
+ throw new NotImplementedException();
+ }
+
+ public Tensor[] TFE_FastPathExecute(Context ctx, string device_name, string opName, string name, Action callbacks, params object[] args)
+ {
+ throw new NotImplementedException();
+ }
+
+ public Tensor[] TFE_TapeGradient(ITape tape, Tensor[] target, Tensor[] sources, Tensor[] output_gradients)
+ {
+ throw new NotImplementedException();
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs b/src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
index b4aa8fd4..b754c913 100644
--- a/src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
@@ -42,9 +42,6 @@ namespace Tensorflow.Eager
//print($"new Tensor {Id} {_handle.ToString("x16")}");
//print($"new TensorHandle {Id} {EagerTensorHandle.ToString("x16")}");
- /*GarbageCollector.Increase(_handle, GCItemType.TensorHandle);
- GarbageCollector.Increase(tfe_tensor_handle, GCItemType.LocalTensorHandle);*/
-
return this;
}
@@ -53,10 +50,6 @@ namespace Tensorflow.Eager
protected override void DisposeUnmanagedResources(IntPtr handle)
{
- /*GarbageCollector.Decrease(_handle);
- GarbageCollector.Decrease(tfe_tensor_handle);
- GarbageCollector.Decrease(EagerTensorHandle);*/
-
//print($"deleting DeleteTensorHandle {Id} {_handle.ToString("x16")}");
c_api.TF_DeleteTensor(_handle);
//print($"deleting DeleteTensorHandle {Id} {EagerTensorHandle.ToString("x16")}");
diff --git a/src/TensorFlowNET.Core/Eager/Execute.cs b/src/TensorFlowNET.Core/Eager/Execute.cs
index e1689e04..52df5a7d 100644
--- a/src/TensorFlowNET.Core/Eager/Execute.cs
+++ b/src/TensorFlowNET.Core/Eager/Execute.cs
@@ -1,6 +1,7 @@
using System.Collections.Generic;
using System;
using System.Linq;
+using static Tensorflow.Binding;
namespace Tensorflow.Eager
{
@@ -27,20 +28,18 @@ namespace Tensorflow.Eager
/// The value of context.context().
/// Customized name for the operation.
/// List of output Tensor objects. The list is empty if there are no outputs
- public EagerTensor[] execute(Context ctx, string op_name, int num_outputs,
- EagerTensor[] inputs, object[] attrs,
+ public Tensor[] execute(Context ctx, string op_name, int num_outputs,
+ Tensor[] inputs, object[] attrs,
string name = null)
{
ctx.ensure_initialized();
- using var status = new Status();
- var results = wrap_tfe_src.TFE_Execute(ctx,
+ var results = tf.Runner.TFE_Execute(ctx,
ctx.device_name,
op_name,
inputs,
attrs,
- num_outputs,
- status);
+ num_outputs);
return results;
}
diff --git a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs
new file mode 100644
index 00000000..e1dc1192
--- /dev/null
+++ b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs
@@ -0,0 +1,18 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Eager
+{
+ public class FastPathOpExecInfo
+ {
+ public Context ctx { get; set; }
+ public string device_name { get; set; }
+ public string op_name { get; set; }
+ public string name { get; set; }
+ public object[] args { get; set; }
+ public bool run_gradient_callback { get; set; }
+ public bool run_post_exec_callbacks { get; set; }
+ public bool run_callbacks { get; set; }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Eager/IEagerRunner.cs b/src/TensorFlowNET.Core/Eager/IEagerRunner.cs
new file mode 100644
index 00000000..0e0deb3c
--- /dev/null
+++ b/src/TensorFlowNET.Core/Eager/IEagerRunner.cs
@@ -0,0 +1,29 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Gradients;
+
+namespace Tensorflow.Eager
+{
+ public interface IEagerRunner
+ {
+ public Tensor[] TFE_FastPathExecute(Context ctx,
+ string device_name,
+ string opName,
+ string name,
+ Action callbacks,
+ params object[] args);
+
+ public Tensor[] TFE_Execute(Context ctx,
+ string device_name,
+ string op_name,
+ Tensor[] inputs,
+ object[] attrs,
+ int num_outputs);
+
+ public Tensor[] TFE_TapeGradient(ITape tape,
+ Tensor[] target,
+ Tensor[] sources,
+ Tensor[] output_gradients);
+ }
+}
diff --git a/src/TensorFlowNET.Core/Eager/c_api.eager.cs b/src/TensorFlowNET.Core/Eager/c_api.eager.cs
index 9971dd97..d12f9678 100644
--- a/src/TensorFlowNET.Core/Eager/c_api.eager.cs
+++ b/src/TensorFlowNET.Core/Eager/c_api.eager.cs
@@ -116,6 +116,12 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)]
public static extern TFE_Context TFE_NewContext(IntPtr opts, IntPtr status);
+ [DllImport(TensorFlowLibName)]
+ public static extern TFE_Context TFE_ContextStartStep(IntPtr ctx);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TFE_Context TFE_ContextEndStep(IntPtr ctx);
+
///
///
///
diff --git a/src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_Execute.cs b/src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_Execute.cs
deleted file mode 100644
index 91a9e40a..00000000
--- a/src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_Execute.cs
+++ /dev/null
@@ -1,62 +0,0 @@
-using System.Collections.Generic;
-using System.Linq;
-using System;
-using static Tensorflow.OpDef.Types;
-
-namespace Tensorflow.Eager
-{
- ///
- /// python\eager\pywrap_tfe_src.cc
- ///
- public partial class wrap_tfe_src
- {
- public static EagerTensor[] TFE_Execute(Context ctx,
- string device_name,
- string op_name,
- Tensor[] inputs,
- object[] attrs,
- int num_outputs,
- Status status)
- => TFE_ExecuteCancelable(ctx, device_name, op_name, inputs, attrs, num_outputs, status);
-
- public static EagerTensor[] TFE_ExecuteCancelable(Context ctx,
- string device_name,
- string op_name,
- Tensor[] inputs,
- object[] attrs,
- int num_outputs,
- Status status)
- {
- var op = GetOp(ctx, op_name, status);
- status.Check(true);
- c_api.TFE_OpSetDevice(op, device_name, status);
- if (status.ok())
- {
- for (int i = 0; i < inputs.Length; ++i)
- {
- IntPtr tensor_handle;
- switch (inputs[i])
- {
- case EagerTensor et:
- tensor_handle = et.EagerTensorHandle;
- break;
- default:
- tensor_handle = c_api.TFE_NewTensorHandle(inputs[i], status);
- break;
- }
- c_api.TFE_OpAddInput(op, tensor_handle, status);
- }
- }
- if (status.ok())
- SetOpAttrs(op, attrs, status);
-
- var outputs = new IntPtr[num_outputs];
- if (status.ok())
- {
- c_api.TFE_Execute(op, outputs, ref num_outputs, status);
- status.Check(true);
- }
- return outputs.Select(x => new EagerTensor(x)).ToArray();
- }
- }
-}
\ No newline at end of file
diff --git a/src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_FastPathExecute.cs
deleted file mode 100644
index a6084c4f..00000000
--- a/src/TensorFlowNET.Core/Eager/wrap_tfe_src.TFE_FastPathExecute.cs
+++ /dev/null
@@ -1,305 +0,0 @@
-using System.Collections.Generic;
-using System.Linq;
-using System;
-using static Tensorflow.OpDef.Types;
-using static Tensorflow.Binding;
-
-namespace Tensorflow.Eager
-{
- ///
- /// python\eager\pywrap_tfe_src.cc
- ///
- public partial class wrap_tfe_src
- {
- static int kFastPathExecuteInputStartIndex = 0;
-
- public static EagerTensor[] TFE_FastPathExecute(Context ctx,
- string device_name,
- string opName,
- string name,
- Action callbacks,
- params object[] args)
- {
- int args_size = args.Length;
- var attr_list_sizes = new Dictionary();
- using var status = new Status();
-
- var op = GetOp(ctx, opName, status);
-
- var op_def = Graph.TFE_GetOpDef(opName);
-
- // Set non-inferred attrs, including setting defaults if the attr is passed in
- // as None.
- for (int i = kFastPathExecuteInputStartIndex + op_def.InputArg.Count; i < args_size; i += 2)
- {
- var attr_name = args[i].ToString();
- var attr_value = args[i + 1];
-
- foreach (var attr in op_def.Attr)
- {
- if (attr_name == attr.Name)
- {
- SetOpAttrWithDefaults(ctx, op, attr, attr_name, attr_value, attr_list_sizes, status);
- status.Check(true);
- break;
- }
- }
- }
-
- c_api.TFE_OpSetDevice(op, device_name, status);
- status.Check(true);
-
- // Add inferred attrs and inputs.
- for (int i = 0; i < op_def.InputArg.Count; i++)
- {
- var input_arg = op_def.InputArg[i];
- if (!string.IsNullOrEmpty(input_arg.NumberAttr))
- {
- int len = (args[kFastPathExecuteInputStartIndex + i] as object[]).Length;
- c_api.TFE_OpSetAttrInt(op, input_arg.NumberAttr, len);
- attr_list_sizes[input_arg.NumberAttr] = len;
-
- if (len > 0)
- {
- var fast_input_array = (object[])args[i];
- // First item adds the type attr.
- if (!AddInputToOp(fast_input_array[i], true, input_arg, op, status))
- return null;
-
- for (var j = 1; j < len; j++)
- {
- // Since the list is homogeneous, we don't need to re-add the attr.
- if (!AddInputToOp(fast_input_array[j], false, input_arg, op, status))
- return null;
- }
- }
- }
- else if (!string.IsNullOrEmpty(input_arg.TypeListAttr))
- {
-
- }
- else
- {
- // The item is a single item.
- AddInputToOp(args[i], true, input_arg, op, status);
- }
- }
-
- int num_retvals = 0;
- for (int i = 0; i < op_def.OutputArg.Count; i++)
- {
- var output_arg = op_def.OutputArg[i];
- var delta = 1L;
- if (!string.IsNullOrEmpty(output_arg.NumberAttr))
- delta = attr_list_sizes[output_arg.NumberAttr];
- else if (!string.IsNullOrEmpty(output_arg.TypeListAttr))
- delta = attr_list_sizes[output_arg.TypeListAttr];
- if (delta < 0)
- throw new RuntimeError("Attributes suggest that the size of an output list is less than 0");
- num_retvals += (int)delta;
- }
-
- var retVals = new IntPtr[num_retvals];
- c_api.TFE_Execute(op, retVals, ref num_retvals, status);
- status.Check(true);
-
- return retVals.Select(x => new EagerTensor(x)).ToArray();
- }
-
- private static TFE_Op GetOp(Context ctx, string op_or_function_name, Status status)
- {
- var maybe_op = ReleaseThreadLocalOp();
- if (maybe_op != IntPtr.Zero)
- {
- c_api.TFE_OpReset(maybe_op, op_or_function_name, ctx.device_name, status);
- }
- else
- {
- maybe_op = c_api.TFE_NewOp(ctx, op_or_function_name, status);
- op = maybe_op;
- }
-
- status.Check(true);
- return maybe_op;
- }
-
- static TFE_Op op;
- private static TFE_Op ReleaseThreadLocalOp()
- {
- return op;
- }
-
- ///
- /// Adds input and type attr to the op, and to the list of flattened
- /// inputs/attrs.
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- private static bool AddInputToOp(object inputs,
- bool add_type_attr,
- ArgDef input_arg,
- IntPtr op,
- Status status)
- {
- IntPtr input_handle;
-
- // ConvertToTensor();
- switch (inputs)
- {
- case EagerTensor input:
- input_handle = input.EagerTensorHandle;
- break;
- case EagerTensor[] input_list:
- input_handle = input_list[0].EagerTensorHandle;
- break;
- default:
- throw new NotImplementedException("");
- }
-
- if (add_type_attr && !string.IsNullOrEmpty(input_arg.TypeAttr))
- {
- var dtype = c_api.TFE_TensorHandleDataType(input_handle);
- c_api.TFE_OpSetAttrType(op, input_arg.TypeAttr, dtype);
- }
-
- c_api.TFE_OpAddInput(op, input_handle, status);
- status.Check(true);
-
- return true;
- }
-
- public static void SetOpAttrs(TFE_Op op, params object[] attrs)
- {
- using var status = new Status();
- var len = attrs.Length;
- for (int i = 0; i < len; i += 2)
- {
- var key = attrs[i].ToString();
- var value = attrs[i + 1];
-
- byte is_list = 0;
- var type = c_api.TFE_OpGetAttrType(op, key, ref is_list, status);
- if (!status.ok()) return;
- if (is_list != 0)
- SetOpAttrList(tf.context, op, key, value, type, null, status);
- else
- SetOpAttrScalar(tf.context, op, key, value, type, null, status);
- status.Check(true);
- }
- }
-
- public static string SetOpAttrs2(params object[] attrs)
- {
- string attr_string = string.Empty;
- for(int i = 0; i < attrs.Length; i = i + 2)
- {
- object key = attrs[i];
- object value = attrs[i + 1];
-
- switch (value)
- {
- case TF_DataType dtype:
- value = (int)dtype;
- break;
- case bool bVal:
- value = bVal ? 1 : 0;
- break;
- case int[] shape:
- value = shape.Length == 0 ? "null" : string.Join(" ", shape);
- break;
- default:
- break;
- }
-
- attr_string += string.IsNullOrEmpty(attr_string) ?
- $"{key},{value}" :
- $",{key},{value}";
- }
-
- return attr_string;
- }
-
- ///
- /// This function will set the op attrs required. If an attr has the value of
- /// None, then it will read the AttrDef to get the default value and set that
- /// instead. Any failure in this function will simply fall back to the slow
- /// path.
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- private static void SetOpAttrWithDefaults(Context ctx, IntPtr op, AttrDef attr,
- string attr_name, object attr_value,
- Dictionary attr_list_sizes,
- Status status)
- {
- byte is_list = 0;
- var type = c_api.TFE_OpGetAttrType(op, attr_name, ref is_list, status);
- if (status.Code != TF_Code.TF_OK) return;
-
- if(attr_value == null)
- {
- if (is_list != 0)
- ;
- //SetOpAttrListDefault
- else
- ;
- //SetOpAttrScalarDefault
- }
- else
- {
- if (is_list != 0)
- ;// SetOpAttrList
- else
- SetOpAttrScalar(ctx, op, attr_name, attr_value, type, attr_list_sizes, status);
- }
- }
-
- private static bool SetOpAttrList(Context ctx, IntPtr op,
- string key, object value, TF_AttrType type,
- Dictionary attr_list_sizes,
- Status status)
- {
- return false;
- }
-
- private static bool SetOpAttrScalar(Context ctx, IntPtr op,
- string key, object value, TF_AttrType type,
- Dictionary attr_list_sizes,
- Status status)
- {
- switch(type)
- {
- case TF_AttrType.TF_ATTR_STRING:
- c_api.TFE_OpSetAttrString(op, key, value.ToString(), (uint)value.ToString().Length);
- break;
- case TF_AttrType.TF_ATTR_TYPE:
- c_api.TFE_OpSetAttrType(op, key, (TF_DataType)value);
- break;
- case TF_AttrType.TF_ATTR_BOOL:
- c_api.TFE_OpSetAttrBool(op, key, Convert.ToBoolean(value));
- break;
- case TF_AttrType.TF_ATTR_INT:
- c_api.TFE_OpSetAttrInt(op, key, Convert.ToInt64(value));
- break;
- case TF_AttrType.TF_ATTR_SHAPE:
- var dims = (value as int[]).Select(x => (long)x).ToArray();
- c_api.TFE_OpSetAttrShape(op, key, dims, dims.Length, status);
- status.Check(true);
- break;
- default:
- throw new NotImplementedException($"SetOpAttrScalar for {type}");
- }
-
- return true;
- }
- }
-}
diff --git a/src/TensorFlowNET.Core/Framework/op_def_registry.py.cs b/src/TensorFlowNET.Core/Framework/op_def_registry.py.cs
index 8a2bc5c3..953b5552 100644
--- a/src/TensorFlowNET.Core/Framework/op_def_registry.py.cs
+++ b/src/TensorFlowNET.Core/Framework/op_def_registry.py.cs
@@ -22,22 +22,27 @@ namespace Tensorflow
{
public class op_def_registry
{
-        private static Dictionary<string, OpDef> _registered_ops;
+        static Dictionary<string, OpDef> _registered_ops;
        public static Dictionary<string, OpDef> get_registered_ops()
{
if(_registered_ops == null)
{
                _registered_ops = new Dictionary<string, OpDef>();
- using (var buffer = new Buffer(c_api.TF_GetAllOpList()))
- {
- var op_list = OpList.Parser.ParseFrom(buffer.MemoryBlock.Stream());
- foreach (var op_def in op_list.Op)
- _registered_ops[op_def.Name] = op_def;
- }
+ using var buffer = new Buffer(c_api.TF_GetAllOpList());
+ using var stream = buffer.MemoryBlock.Stream();
+ var op_list = OpList.Parser.ParseFrom(stream);
+ foreach (var op_def in op_list.Op)
+ _registered_ops[op_def.Name] = op_def;
}
return _registered_ops;
}
+
+ public static OpDef GetOpDef(string type)
+ {
+ var ops = get_registered_ops();
+ return ops[type];
+ }
}
}
diff --git a/src/TensorFlowNET.Core/Gradients/AccumulatorCallState.cs b/src/TensorFlowNET.Core/Gradients/AccumulatorCallState.cs
new file mode 100644
index 00000000..919383d8
--- /dev/null
+++ b/src/TensorFlowNET.Core/Gradients/AccumulatorCallState.cs
@@ -0,0 +1,18 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Gradients
+{
+ public class AccumulatorCallState
+ {
+ GradientTape backward_tape;
+ bool accumulating;
+
+ public AccumulatorCallState(GradientTape backward_tape, bool accumulating)
+ {
+ this.backward_tape = backward_tape;
+ this.accumulating = accumulating;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Gradients/BackpropInitialState.cs b/src/TensorFlowNET.Core/Gradients/BackpropInitialState.cs
new file mode 100644
index 00000000..90bfb7cc
--- /dev/null
+++ b/src/TensorFlowNET.Core/Gradients/BackpropInitialState.cs
@@ -0,0 +1,30 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Util;
+using static Tensorflow.tensorflow;
+
+namespace Tensorflow.Gradients
+{
+ public class BackpropInitialState
+ {
+        public OpTape<BackwardFunction, TapeTensor> op_tape { get; set; }
+        /// <summary>
+        /// Map from tensor ID to how many references still exist for this tensor in
+        /// the tape.
+        /// </summary>
+        public UnorderedMap<long, long> tensor_usage_counts { get; set; }
+        /// <summary>
+        /// Maps from op ID to how many output tensors of this op still need to have
+        /// their gradients computed.
+        /// </summary>
+        public UnorderedMap<long, long> op_missing_tensor { get; set; }
+
+        public BackpropInitialState()
+        {
+            op_tape = new OpTape<BackwardFunction, TapeTensor>();
+            tensor_usage_counts = new UnorderedMap<long, long>();
+            op_missing_tensor = new UnorderedMap<long, long>();
+        }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Gradients/GradientTape.cs b/src/TensorFlowNET.Core/Gradients/GradientTape.cs
index 7fbb0cb9..32a5d415 100644
--- a/src/TensorFlowNET.Core/Gradients/GradientTape.cs
+++ b/src/TensorFlowNET.Core/Gradients/GradientTape.cs
@@ -30,7 +30,7 @@ namespace Tensorflow.Gradients
bool _watch_accessed_variables;
ResourceVariable[] _watched_variables;
bool _created_eagerly;
- Tape _tape;
+ ITape _tape;
public GradientTape(bool persistent = false,
bool watch_accessed_variables = true)
@@ -38,9 +38,20 @@ namespace Tensorflow.Gradients
_persistent = persistent;
_watch_accessed_variables = watch_accessed_variables;
_created_eagerly = tf.context.executing_eagerly();
+ _recording = false;
+ _created_eagerly = tf.context.executing_eagerly();
+ // Enters a context inside which operations are recorded on this tape.
+ if (_created_eagerly)
+ {
+ tf.context.ensure_initialized();
+ tf.context.start_step();
+ }
_push_tape();
}
+        /// <summary>
+        /// Pushes a new tape onto the tape stack.
+        /// </summary>
private void _push_tape()
{
if (_recording)
@@ -50,8 +61,8 @@ namespace Tensorflow.Gradients
if (_tape == null)
_tape = new Tape(_persistent, _watch_accessed_variables);
else
- throw new NotImplementedException("");
-
+ tf.GetTapeSet().Add(_tape);
+
_recording = true;
}
@@ -59,7 +70,7 @@ namespace Tensorflow.Gradients
{
if (!_recording)
throw new ValueError("Tape is not recording.");
- _tape.pop_tape(_tape);
+ _tape.PopTape(_tape);
_recording = false;
}
@@ -69,9 +80,15 @@ namespace Tensorflow.Gradients
///
public void watch(Tensor x)
{
- _tape.watch(x as EagerTensor);
+ _tape.Watch(x.Id);
}
+        /// <summary>
+        /// Computes the gradient using operations recorded in context of this tape.
+        /// </summary>
+        /// <param name="target"></param>
+        /// <param name="source"></param>
+        /// <returns></returns>
public Tensor gradient(Tensor target, Tensor source)
{
if (_recording)
@@ -80,34 +97,29 @@ namespace Tensorflow.Gradients
_pop_tape();
}
- var results = EagerTensorPass.Create();
- var targets = EagerTensorPass.From(target);
- var sources = EagerTensorPass.From(source);
+ var results = tf.Runner.TFE_TapeGradient(_tape,
+ new[] { target },
+ new[] { source },
+ null);
- Status status = c_api.TFE_TapeGradient(_tape,
- targets.Points, targets.Length,
- sources.Points, sources.Length,
- results.Points, results.Length);
- status.Check(true);
-
- return results[0].Resolve();
+ return results[0];
}
public Tensor gradient(Tensor target, ResourceVariable source)
{
- var results = gradient(target as EagerTensor, new[] { source });
+ var results = gradient(target, new[] { source });
return results[0];
}
public (Tensor, Tensor) gradient(Tensor target, (ResourceVariable, ResourceVariable) sources)
{
- var results = gradient(target as EagerTensor, new[] { sources.Item1, sources.Item2 });
+ var results = gradient(target, new[] { sources.Item1, sources.Item2 });
return (results[0], results[1]);
}
- public EagerTensor[] gradient(EagerTensor target, ResourceVariable[] sources)
+ public Tensor[] gradient(Tensor target, ResourceVariable[] sources)
{
if (_recording)
{
@@ -115,24 +127,19 @@ namespace Tensorflow.Gradients
_pop_tape();
}
- var results = EagerTensorPass.Create(sources.Length);
- var target_inputs = EagerTensorPass.From(target);
- var source_inputs = EagerTensorPass.From(sources.Select(x => x.Handle).ToArray());
-
- Status status = c_api.TFE_TapeGradient(_tape,
- target_inputs.Points, target_inputs.Length,
- source_inputs.Points, source_inputs.Length,
- results.Points, results.Length);
- status.Check(true);
+ var results = tf.Runner.TFE_TapeGradient(_tape,
+ new[] { target },
+ sources.Select(x => x.Handle).ToArray(),
+ null);
if (!_persistent)
{
// Keep track of watched variables before setting tape to None
- _watched_variables = _tape.watched_variables();
+ _watched_variables = _tape.WatchedVariables();
_tape = null;
}
- return results.Items.Select(x => x.Resolve()).ToArray();
+ return results;
}
public void Dispose()
@@ -140,7 +147,8 @@ namespace Tensorflow.Gradients
if (_recording)
_pop_tape();
- tf.tensorMgr.Reset();
+ if (_created_eagerly)
+ tf.context.end_step();
}
}
}
diff --git a/src/TensorFlowNET.Core/Gradients/ITape.cs b/src/TensorFlowNET.Core/Gradients/ITape.cs
new file mode 100644
index 00000000..fc0495f5
--- /dev/null
+++ b/src/TensorFlowNET.Core/Gradients/ITape.cs
@@ -0,0 +1,33 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Util;
+using static Tensorflow.tensorflow;
+
+namespace Tensorflow.Gradients
+{
+ public interface ITape
+ {
+ void PopTape(ITape tape);
+
+ bool ShouldRecord(long[] tensor_ids, TF_DataType[] dtypes);
+
+ void RecordOperation(string op_type,
+ Tensor[] input_tensors,
+ TapeTensor[] output_tensors,
+ long[] input_tensor_id,
+ TF_DataType[] input_dtypes,
+            Func<BackwardFunction> backward_function_getter);
+
+ void VariableAccessed(ResourceVariable variable);
+
+ void Watch(long tensor_id);
+
+ ResourceVariable[] WatchedVariables();
+
+ Tensor[] ComputeGradient(long[] target_tensor_ids,
+ long[] source_tensor_ids,
+            UnorderedMap<long, TapeTensor> sources_that_are_targets,
+ Tensor[] output_gradients);
+ }
+}
diff --git a/src/TensorFlowNET.Core/Gradients/OpTape.cs b/src/TensorFlowNET.Core/Gradients/OpTape.cs
new file mode 100644
index 00000000..d415caef
--- /dev/null
+++ b/src/TensorFlowNET.Core/Gradients/OpTape.cs
@@ -0,0 +1,19 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using Tensorflow.Util;
+
+namespace Tensorflow.Gradients
+{
+    /// <summary>
+    /// Map from operation-id to tape entry.
+    /// </summary>
+    /// <typeparam name="BackwardFunction"></typeparam>
+    /// <typeparam name="TapeTensor"></typeparam>
+    public class OpTape<BackwardFunction, TapeTensor> :
+        UnorderedMap<long, OpTapeEntry<BackwardFunction, TapeTensor>>
+ {
+
+ }
+}
diff --git a/src/TensorFlowNET.Core/Gradients/OpTapeEntry.cs b/src/TensorFlowNET.Core/Gradients/OpTapeEntry.cs
new file mode 100644
index 00000000..7f478e48
--- /dev/null
+++ b/src/TensorFlowNET.Core/Gradients/OpTapeEntry.cs
@@ -0,0 +1,19 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Gradients
+{
+    /// <summary>
+    /// Represents an entry in the tape.
+    /// </summary>
+    /// <typeparam name="BackwardFunction"></typeparam>
+    /// <typeparam name="TapeTensor"></typeparam>
+    public class OpTapeEntry<BackwardFunction, TapeTensor>
+ {
+ public string op_type { get; set; }
+ public TapeTensor[] output_tensor_info { get; set; }
+ public long[] input_tensor_id { get; set; }
+ public BackwardFunction backward_function { get; set; }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Gradients/Tape.cs b/src/TensorFlowNET.Core/Gradients/Tape.cs
index 9a52d743..663b3ef2 100644
--- a/src/TensorFlowNET.Core/Gradients/Tape.cs
+++ b/src/TensorFlowNET.Core/Gradients/Tape.cs
@@ -1,71 +1,50 @@
using System;
using System.Collections.Generic;
-using System.Linq;
-using System.Runtime.InteropServices;
using System.Text;
-using Tensorflow.Eager;
+using Tensorflow.Util;
namespace Tensorflow.Gradients
{
- public class Tape : DisposableObject
+ public class Tape : ITape
{
- public int nesting_id { get; set; }
-
public Tape(bool persistent, bool watch_accessed_variables)
{
- _handle = c_api.TFE_TapeSetNew(persistent, watch_accessed_variables);
+
}
- public void watch(EagerTensor x)
+        public Tensor[] ComputeGradient(long[] target_tensor_ids, long[] source_tensor_ids, UnorderedMap<long, TapeTensor> sources_that_are_targets, Tensor[] output_gradients)
{
- c_api.TFE_TapeWatch(_handle, x.EagerTensorHandle);
+ throw new NotImplementedException();
}
- public void pop_tape(Tape tape)
+ public void PopTape(ITape tape)
{
- c_api.TFE_TapeSetRemove(tape);
+ throw new NotImplementedException();
}
- public static void variable_accessed(ResourceVariable variable)
+        public void RecordOperation(string op_type, Tensor[] input_tensors, TapeTensor[] output_tensors, long[] input_tensor_id, TF_DataType[] input_dtypes, Func<BackwardFunction> backward_function_getter)
{
- c_api.TFE_TapeVariableAccessed(variable);
+ throw new NotImplementedException();
}
- public unsafe ResourceVariable[] watched_variables()
+ public bool ShouldRecord(long[] tensor_ids, TF_DataType[] dtypes)
{
- BindingArray result = c_api.TFE_TapeWatchedVariables(_handle);
- var variables = result.Data.Select(x =>
- {
- var tensor = c_api.ResourceVariable_Handle(x);
- return new ResourceVariable(x, tensor);
- }).ToArray();
-
- return variables;
+ throw new NotImplementedException();
}
- public static bool IsDtypeTrainable(DataType dtype)
+ public void VariableAccessed(ResourceVariable variable)
{
- switch (dtype)
- {
- case DataType.DtHalf:
- case DataType.DtBfloat16:
- case DataType.DtFloat:
- case DataType.DtDouble:
- case DataType.DtComplex64:
- case DataType.DtComplex128:
- case DataType.DtResource:
- case DataType.DtVariant:
- return true;
- default:
- return false;
- }
+ throw new NotImplementedException();
}
- protected override void DisposeUnmanagedResources(IntPtr handle)
+ public void Watch(long tensor_id)
{
+ throw new NotImplementedException();
}
- public static implicit operator IntPtr(Tape tape)
- => tape._handle;
+ public ResourceVariable[] WatchedVariables()
+ {
+ throw new NotImplementedException();
+ }
}
}
diff --git a/src/TensorFlowNET.Core/Gradients/TapeTensor.cs b/src/TensorFlowNET.Core/Gradients/TapeTensor.cs
new file mode 100644
index 00000000..2396ae25
--- /dev/null
+++ b/src/TensorFlowNET.Core/Gradients/TapeTensor.cs
@@ -0,0 +1,31 @@
+using NumSharp;
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Eager;
+using static Tensorflow.Binding;
+
+namespace Tensorflow.Gradients
+{
+ public class TapeTensor
+ {
+ long id;
+ TF_DataType dtype;
+ TensorShape shape;
+
+ public TapeTensor(long id, TF_DataType dtype, TensorShape shape)
+ {
+ this.id = id;
+ this.dtype = dtype;
+ this.shape = shape;
+ }
+
+ public long GetID() => id;
+
+ public Tensor ZerosLike(int[] shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT)
+ => tf.zeros(shape == null ? new int[0] : shape, dtype: dtype);
+
+ public Tensor OnesLike(int[] shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT)
+ => tf.ones(shape == null ? new int[0] : shape, dtype: dtype);
+ }
+}
diff --git a/src/TensorFlowNET.Core/Gradients/TensorTape.cs b/src/TensorFlowNET.Core/Gradients/TensorTape.cs
new file mode 100644
index 00000000..8fb0de41
--- /dev/null
+++ b/src/TensorFlowNET.Core/Gradients/TensorTape.cs
@@ -0,0 +1,19 @@
+using System;
+using System.Collections.Generic;
+using System.Dynamic;
+using System.Linq;
+using System.Text;
+using Tensorflow.Util;
+
+namespace Tensorflow.Gradients
+{
+    /// <summary>
+    /// Map from tensor_id to internally-defined operation-id of the operation which
+    /// produced this tensor. A value of -1 means that the tensor was directly
+    /// watched and not the result of any operation in the tape.
+    /// </summary>
+    public class TensorTape : UnorderedMap<long, long>
+ {
+
+ }
+}
diff --git a/src/TensorFlowNET.Core/Gradients/c_api.gradient.cs b/src/TensorFlowNET.Core/Gradients/c_api.gradient.cs
index c63783e5..2459626f 100644
--- a/src/TensorFlowNET.Core/Gradients/c_api.gradient.cs
+++ b/src/TensorFlowNET.Core/Gradients/c_api.gradient.cs
@@ -15,7 +15,10 @@
******************************************************************************/
using System;
+using System.Collections.Generic;
using System.Runtime.InteropServices;
+using System.Threading;
+using Tensorflow.Gradients;
namespace Tensorflow
{
diff --git a/src/TensorFlowNET.Core/Gradients/gradient_exclustions.cs b/src/TensorFlowNET.Core/Gradients/gradient_exclustions.cs
new file mode 100644
index 00000000..7e3449b6
--- /dev/null
+++ b/src/TensorFlowNET.Core/Gradients/gradient_exclustions.cs
@@ -0,0 +1,31 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Gradients
+{
+ public class gradient_exclustions
+ {
+ public static int[] OpGradientUnusedInputIndices(string op_name)
+ => op_name switch
+ {
+ "FusedBatchNorm" => new[] { 2 },
+ "FusedBatchNormGradV3" => new[] { 5 },
+ "FusedBatchNormV2" => new[] { 2 },
+ "FusedBatchNormV3" => new[] { 2 },
+ _ => null
+ };
+
+ public static int[] OpGradientUnusedOutputIndices(string op_name)
+ => op_name switch
+ {
+ "SoftmaxCrossEntropyWithLogits" => new[] { 0 },
+ "TensorArrayConcat" => new[] { 0 },
+ "TensorArrayConcatV2" => new[] { 0 },
+ "TensorArrayConcatV3" => new[] { 0 },
+ "Mul" => new int[0],
+ "Sum" => new int[0],
+ _ => null
+ };
+ }
+}
diff --git a/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs b/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs
index 2e5b65a9..a82cc699 100644
--- a/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs
+++ b/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs
@@ -30,7 +30,7 @@ namespace Tensorflow.Gradients
public class math_grad_eager
{
[RegisterGradientEager("Mul")]
- public static EagerTensor[] _MulGrad(EagerOperation op, IntPtr[] grads)
+ public static Tensor[] _MulGrad(EagerOperation op, IntPtr[] grads)
{
var x = op.InputHandles[0];
var y = op.InputHandles[1];
@@ -39,7 +39,7 @@ namespace Tensorflow.Gradients
if (op.SkipInputIndices.Contains(1) &&
EagerTensor.GetRank(grad) == 0)
{
- return new EagerTensor[]
+ return new Tensor[]
{
null,//gen_math_ops.mul(grad, math_ops.conj(y)),
null
@@ -48,7 +48,7 @@ namespace Tensorflow.Gradients
if (_ShapesFullySpecifiedAndEqual(x, y, grad))
{
- return new EagerTensor[]
+ return new Tensor[]
{
gen_math_ops.mul(grad, y),
gen_math_ops.mul(grad, x)
diff --git a/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping_eager.cs b/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping_eager.cs
deleted file mode 100644
index 432113e0..00000000
--- a/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping_eager.cs
+++ /dev/null
@@ -1,101 +0,0 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using System.Reflection;
-using Tensorflow.Eager;
-using Tensorflow.Gradients;
-
-namespace Tensorflow
-{
- public partial class ops
- {
- public static Dictionary<string, Func<EagerOperation, IntPtr[], EagerTensor[]>> gradientFunctionsEager = null;
-
- public static void RegisterFromAssemblyEager()
- {
- if (gradientFunctionsEager == null)
- {
- gradientFunctionsEager = new Dictionary<string, Func<EagerOperation, IntPtr[], EagerTensor[]>>();
-
- var gradGroups = Assembly.GetExecutingAssembly()
- .GetTypes()
- .Where(x => x.GetCustomAttribute() != null)
- .ToArray();
-
- foreach (var g in gradGroups)
- {
- var methods = g.GetMethods()
- .Where(x => x.GetCustomAttribute<RegisterGradientEager>() != null)
- .ToArray();
-
- foreach (var m in methods)
- {
- RegisterGradientFunctionEager(m.GetCustomAttribute<RegisterGradientEager>().Name,
- (oper, out_grads) =>
- g.InvokeMember(m.Name,
- BindingFlags.InvokeMethod,
- null,
- null,
- args: new object[] { oper, out_grads }) as EagerTensor[]
- );
- }
-
- // REGISTER_NO_GRADIENT_OP
- methods = g.GetMethods()
- .Where(x => x.GetCustomAttribute() != null)
- .ToArray();
-
- foreach (var m in methods)
- RegisterNoGradientFunctionEager(m.GetCustomAttribute().Name);
- }
- }
- }
-
- /// <summary>
- /// Regiter new gradient function
- /// </summary>
- /// <param name="name">operation type</param>
- /// <param name="func">function delegate</param>
- public static void RegisterGradientFunctionEager(string name, Func<EagerOperation, IntPtr[], EagerTensor[]> func)
- {
- RegisterFromAssemblyEager();
-
- gradientFunctionsEager[name] = func;
- }
-
- public static void RegisterNoGradientFunctionEager(string name)
- {
- RegisterFromAssemblyEager();
-
- gradientFunctionsEager[name] = null;
- }
-
- public static Func<EagerOperation, IntPtr[], EagerTensor[]> get_gradient_function_eager(EagerOperation op)
- {
- if (op.inputs == null) return null;
-
- RegisterFromAssemblyEager();
-
- if (!gradientFunctionsEager.ContainsKey(op.type))
- throw new LookupError($"can't get graident function through get_gradient_function {op.type}");
-
- return gradientFunctionsEager[op.type];
- }
- }
-}
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
index 4f682d77..d7f2ce2a 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs
@@ -26,24 +26,7 @@ namespace Tensorflow
public partial class Graph
{
public OpDef GetOpDef(string type)
- {
- using (var buffer = new Buffer())
- using (var status = new Status())
- {
- c_api.TF_GraphGetOpDef(_handle, type, buffer, status);
- return OpDef.Parser.ParseFrom(buffer.MemoryBlock.Stream());
- }
- }
-
- public static OpDef TFE_GetOpDef(string type)
- {
- IntPtr handle = tf.get_default_graph();
- using var buffer = new Buffer();
- using var status = new Status();
- c_api.TF_GraphGetOpDef(handle, type, buffer, status);
- using var stream = buffer.MemoryBlock.Stream();
- return OpDef.Parser.ParseFrom(stream);
- }
+ => op_def_registry.GetOpDef(type);
public OperationDescription NewOperation(string opType, string opName)
{
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
index 47641d4d..f733c97a 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
@@ -16,15 +16,12 @@
using System;
using System.Linq;
-using Tensorflow.Eager;
using static Tensorflow.Binding;
namespace Tensorflow.Operations
{
public class gen_nn_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
-
/// <summary>
/// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
/// </summary>
@@ -45,7 +42,7 @@ namespace Tensorflow.Operations
///
public static Tensor conv2d(Conv2dParams parameters)
{
- var _op = _op_def_lib._apply_op_helper("Conv2D", name: parameters.Name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("Conv2D", name: parameters.Name, args: new
{
input = parameters.Input,
filter = parameters.Filter,
@@ -67,7 +64,7 @@ namespace Tensorflow.Operations
///
public static Tensor conv2d_backprop_filter(Conv2dParams parameters)
{
- var _op = _op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new
{
input = parameters.Input,
filter_sizes = parameters.FilterSizes,
@@ -90,7 +87,7 @@ namespace Tensorflow.Operations
///
public static Tensor conv2d_backprop_input(Conv2dParams parameters)
{
- var _op = _op_def_lib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new
{
input_sizes = parameters.InputSizes,
filter = parameters.Filter,
@@ -114,7 +111,7 @@ namespace Tensorflow.Operations
if (data_format == null)
data_format = "NHWC";
- var _op = _op_def_lib._apply_op_helper("BiasAdd", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("BiasAdd", name: name, args: new
{
value,
bias,
@@ -131,7 +128,7 @@ namespace Tensorflow.Operations
if (data_format == null)
data_format = "NHWC";
- var _op = _op_def_lib._apply_op_helper("BiasAddGrad", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("BiasAddGrad", name: name, args: new
{
out_backprop,
data_format
@@ -157,7 +154,7 @@ namespace Tensorflow.Operations
///
public static Tensor elu(Tensor features, string name = "Elu")
{
- var op = _op_def_lib._apply_op_helper("Elu", name: name, args: new { features });
+ var op = tf._op_def_lib._apply_op_helper("Elu", name: name, args: new { features });
return op.output;
}
@@ -176,7 +173,7 @@ namespace Tensorflow.Operations
///
public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params)
{
- var op = _op_def_lib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new
+ var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new
{
y_backprop = @params.YBackprop,
x = @params.X,
@@ -192,7 +189,7 @@ namespace Tensorflow.Operations
public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params)
{
- var op = _op_def_lib._apply_op_helper("FusedBatchNormGradV3", name: @params.Name, args: new
+ var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGradV3", name: @params.Name, args: new
{
y_backprop = @params.YBackprop,
x = @params.X,
@@ -217,7 +214,7 @@ namespace Tensorflow.Operations
bool is_training = true,
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("FusedBatchNorm", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("FusedBatchNorm", name: name, args: new
{
x,
scale,
@@ -242,7 +239,7 @@ namespace Tensorflow.Operations
bool is_training = true,
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("FusedBatchNormV3", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("FusedBatchNormV3", name: name, args: new
{
x,
scale,
@@ -270,7 +267,7 @@ namespace Tensorflow.Operations
public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1,
int alpha = 1, float beta = 0.5f, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("LRN", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("LRN", name: name, args: new
{
input,
depth_radius,
@@ -284,7 +281,7 @@ namespace Tensorflow.Operations
public static Tensor log_softmax(Tensor logits, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("LogSoftmax", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("LogSoftmax", name: name, args: new
{
logits
});
@@ -302,7 +299,7 @@ namespace Tensorflow.Operations
/// A `Tensor` of type `bool`.
public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("InTopKV2", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("InTopKV2", name: name, args: new
{
predictions,
targets,
@@ -314,7 +311,7 @@ namespace Tensorflow.Operations
public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("LeakyRelu", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("LeakyRelu", name: name, args: new
{
features,
alpha
@@ -330,7 +327,7 @@ namespace Tensorflow.Operations
string data_format = "NHWC",
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("MaxPool", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("MaxPool", name: name, args: new
{
input,
ksize,
@@ -345,7 +342,7 @@ namespace Tensorflow.Operations
public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding,
string data_format= "NHWC", string name= null)
{
- var _op = _op_def_lib._apply_op_helper("MaxPoolGrad", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("MaxPoolGrad", name: name, args: new
{
orig_input,
orig_output,
@@ -361,7 +358,7 @@ namespace Tensorflow.Operations
public static Tensor[] top_kv2(Tensor input, int k, bool sorted = true, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("TopKV2", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("TopKV2", name: name, args: new
{
input,
k,
@@ -375,18 +372,15 @@ namespace Tensorflow.Operations
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(gradients, features);
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"ReluGrad", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ gradients, features);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("ReluGrad", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("ReluGrad", name: name, args: new
{
gradients,
features
@@ -397,7 +391,7 @@ namespace Tensorflow.Operations
public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("LeakyReluGrad", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("LeakyReluGrad", name: name, args: new
{
gradients,
features,
@@ -411,18 +405,15 @@ namespace Tensorflow.Operations
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(logits);
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Softmax", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ logits);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Softmax", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("Softmax", name: name, args: new
{
logits
});
@@ -439,7 +430,7 @@ namespace Tensorflow.Operations
///
public static (Tensor, Tensor) softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name: name, args: new
{
features,
labels
@@ -477,7 +468,7 @@ namespace Tensorflow.Operations
///
public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits")
{
- var op = _op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels });
+ var op = tf._op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels });
int _idx = 0;
var loss = op.outputs[_idx++];
var backprop = op.outputs[_idx++];
@@ -494,19 +485,15 @@ namespace Tensorflow.Operations
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Relu", name, new IntPtr[]
- {
- features as EagerTensor,
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Relu", name,
+ null,
+ features);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Relu", name: name, args: new { features });
+ var _op = tf._op_def_lib._apply_op_helper("Relu", name: name, args: new { features });
return _op.outputs[0];
}
@@ -514,19 +501,15 @@ namespace Tensorflow.Operations
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Tanh", name, new IntPtr[]
- {
- x as EagerTensor,
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Tanh", name,
+ null,
+ x);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Tanh", name: name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Tanh", name: name, args: new { x });
return _op.outputs[0];
}
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
index 82de9f11..e718c4a3 100644
--- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
@@ -14,32 +14,27 @@
limitations under the License.
******************************************************************************/
-using NumSharp;
using System;
using System.Collections.Generic;
using static Tensorflow.Binding;
using Tensorflow.Eager;
using System.Linq;
using static Tensorflow.Binding;
-using System.Security.Cryptography.X509Certificates;
namespace Tensorflow
{
public static class gen_array_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
- public static Execute _execute = new Execute();
-
public static Tensor batch_to_space_nd<T>(T input, int[] block_shape, int[,] crops, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops });
+ var _op = tf._op_def_lib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops });
return _op.output;
}
public static Tensor check_numerics(Tensor tensor, string message, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message });
+ var _op = tf._op_def_lib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message });
return _op.output;
}
@@ -55,20 +50,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "ConcatV2", name, new IntPtr[]
- {
- values as EagerTensor,
- axis as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "ConcatV2", name,
+ null,
+ values, axis);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
+ var _op = tf._op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
return _op.output;
}
@@ -79,24 +69,24 @@ namespace Tensorflow
return concat_v2_eager_fallback(values, axis, name, tf.context);
}
- var _op = _op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
+ var _op = tf._op_def_lib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
return _op.output;
}
private static Tensor concat_v2_eager_fallback<T1, T2>(T1[] values, T2 axis, string name, Context ctx)
{
var _attr_N = len(values);
- var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: values.Select(x => (object)x).ToArray());
- var (_attr_Tidx, axis1) = _execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new object[] { axis });
+ var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: values.Select(x => (object)x).ToArray());
+ var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new object[] { axis });
var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx };
- return _execute.execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0];
+ return tf._execute.execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0];
}
public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape });
+ var _op = tf._op_def_lib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape });
return _op.outputs;
}
@@ -134,28 +124,28 @@ namespace Tensorflow
///
public static Tensor diag(Tensor diagonal, string name = null)
{
- var op = _op_def_lib._apply_op_helper("Diag", name: name, args: new { diagonal });
+ var op = tf._op_def_lib._apply_op_helper("Diag", name: name, args: new { diagonal });
return op.output;
}
public static Tensor expand_dims(Tensor input, int axis, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ExpandDims", name: name, args: new { input, dim = axis });
+ var _op = tf._op_def_lib._apply_op_helper("ExpandDims", name: name, args: new { input, dim = axis });
return _op.outputs[0];
}
public static Tensor gather_v2<T1, T2>(T1 @params, T2 indices, int axis, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("GatherV2", name: name, new { @params, indices, axis });
+ var _op = tf._op_def_lib._apply_op_helper("GatherV2", name: name, new { @params, indices, axis });
return _op.outputs[0];
}
public static Tensor pad(Tensor input, Tensor paddings, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Pad", name: name, args: new { input, paddings });
+ var _op = tf._op_def_lib._apply_op_helper("Pad", name: name, args: new { input, paddings });
return _op.output;
}
@@ -164,7 +154,7 @@ namespace Tensorflow
{
if(tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Pack", name,
null,
values,
@@ -172,13 +162,13 @@ namespace Tensorflow
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Pack", name: name, args: new { values, axis });
+ var _op = tf._op_def_lib._apply_op_helper("Pack", name: name, args: new { values, axis });
return _op.output;
}
public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Placeholder", name: name, args: new { dtype, shape });
+ var _op = tf._op_def_lib._apply_op_helper("Placeholder", name: name, args: new { dtype, shape });
var _result = _op.outputs;
var _inputs_flat = _op.inputs;
@@ -217,7 +207,7 @@ namespace Tensorflow
///
public static Tensor prevent_gradient(Tensor input, string message = "", string name = null)
{
- var op = _op_def_lib._apply_op_helper("PreventGradient", name: name, args: new { input, message });
+ var op = tf._op_def_lib._apply_op_helper("PreventGradient", name: name, args: new { input, message });
return op.output;
}
@@ -230,40 +220,36 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Identity", name, new IntPtr[]
- {
- input as EagerTensor
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Identity", name,
+ null,
+ input);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Identity", name, new { input });
+ var _op = tf._op_def_lib._apply_op_helper("Identity", name, new { input });
return _op.output;
}
public static Tensor invert_permutation(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("InvertPermutation", name, new { x });
+ var _op = tf._op_def_lib._apply_op_helper("InvertPermutation", name, new { x });
return _op.outputs[0];
}
public static Tensor log(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Log", name: name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Log", name: name, args: new { x });
return _op.outputs[0];
}
public static Tensor rank(Tensor input, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Rank", name: name, args: new { input });
+ var _op = tf._op_def_lib._apply_op_helper("Rank", name: name, args: new { input });
return _op.outputs[0];
}
@@ -279,20 +265,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(dims, value);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Fill", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
+ null,
+ dims, value);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Fill", name, new { dims, value });
+ var _op = tf._op_def_lib._apply_op_helper("Fill", name, new { dims, value });
return _op.output;
}
@@ -307,27 +288,22 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor(), new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "BroadcastGradientArgs", name, new IntPtr[]
- {
- s0 as EagerTensor,
- s1 as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return (results[0].Resolve(), results[1].Resolve());
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "BroadcastGradientArgs", name,
+ null,
+ s0,s1);
+
+ return (results[0], results[1]);
}
- var _op = _op_def_lib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 });
+ var _op = tf._op_def_lib._apply_op_helper("BroadcastGradientArgs", name, new { s0, s1 });
return (_op.outputs[0], _op.outputs[1]);
}
public static Tensor reverse<T>(Tensor tensor, T axis, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ReverseV2", name, new { tensor, axis });
+ var _op = tf._op_def_lib._apply_op_helper("ReverseV2", name, new { tensor, axis });
return _op.output;
}
@@ -335,26 +311,21 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Reshape", name, new IntPtr[]
- {
- tensor as EagerTensor,
- shape as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Reshape", name,
+ null,
+ tensor, shape);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape });
+ var _op = tf._op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape });
return _op.output;
}
public static Tensor reshape(Tensor tensor, int[] shape, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape });
+ var _op = tf._op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape });
return _op.outputs[0];
}
@@ -367,7 +338,7 @@ namespace Tensorflow
///
public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Unique", name, new { x, out_idx });
+ var _op = tf._op_def_lib._apply_op_helper("Unique", name, new { x, out_idx });
// TODO
//var _result = _UniqueOutput._make(_op.outputs);
return (_op.outputs[0], _op.outputs[1]);
@@ -375,13 +346,13 @@ namespace Tensorflow
public static Tensor[] unpack(Tensor value, int num, int axis = 0, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Unpack", name, new { value, num, axis });
+ var _op = tf._op_def_lib._apply_op_helper("Unpack", name, new { value, num, axis });
return _op.outputs;
}
public static Tensor where(Tensor condition, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Where", name, new { input = condition });
+ var _op = tf._op_def_lib._apply_op_helper("Where", name, new { input = condition });
return _op.output;
}
@@ -394,22 +365,16 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(indices, depth, on_value, off_value);
- var attrs = new object[] { "axis", axis };
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"OneHot", name,
- inputs.Points, inputs.Length,
- wrap_tfe_src.SetOpAttrs2(attrs),
- op => wrap_tfe_src.SetOpAttrs(op, attrs),
- results.Points, results.Length);
- status.Check(true);
+ null,
+ indices, depth, on_value, off_value,
+ "axis", axis);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("OneHot", name, new { indices, depth, on_value, off_value, axis });
+ var _op = tf._op_def_lib._apply_op_helper("OneHot", name, new { indices, depth, on_value, off_value, axis });
return _op.outputs[0];
}
@@ -422,7 +387,7 @@ namespace Tensorflow
///
public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name });
+ var _op = tf._op_def_lib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name });
return _op.outputs[0];
}
@@ -430,26 +395,21 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(condition, t, e);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "SelectV2", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "SelectV2", name,
+ null,
+ condition, t, e);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Select", name, new { condition, t, e });
+ var _op = tf._op_def_lib._apply_op_helper("Select", name, new { condition, t, e });
return _op.outputs[0];
}
public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor[] shape, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ScatterNd", name, new { indices, updates, shape });
+ var _op = tf._op_def_lib._apply_op_helper("ScatterNd", name, new { indices, updates, shape });
return _op.outputs[0];
}
@@ -457,20 +417,16 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Shape", name, new IntPtr[]
- {
- input as EagerTensor,
- }, 1,
- wrap_tfe_src.SetOpAttrs2("out_type", out_type),
- op => wrap_tfe_src.SetOpAttrs(op, "out_type", out_type),
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Shape", name,
+ null,
+ input,
+ "out_type", out_type);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Shape", name, new { input, out_type });
+ var _op = tf._op_def_lib._apply_op_helper("Shape", name, new { input, out_type });
return _op.outputs[0];
}
@@ -483,13 +439,13 @@ namespace Tensorflow
///
public static Tensor[] shape_n(Tensor[] input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ShapeN", name, new { input, out_type });
+ var _op = tf._op_def_lib._apply_op_helper("ShapeN", name, new { input, out_type });
return _op.outputs;
}
public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Size", name, new { input, out_type });
+ var _op = tf._op_def_lib._apply_op_helper("Size", name, new { input, out_type });
return _op.outputs[0];
}
@@ -503,13 +459,13 @@ namespace Tensorflow
///
public static Tensor slice(Tensor input, Tensor begin, Tensor size, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Slice", name, new { input, begin, size });
+ var _op = tf._op_def_lib._apply_op_helper("Slice", name, new { input, begin, size });
return _op.outputs[0];
}
public static Tensor[] split(Tensor axis, Tensor value, int num_split, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Split", name, new { split_dim = axis, value, num_split });
+ var _op = tf._op_def_lib._apply_op_helper("Split", name, new { split_dim = axis, value, num_split });
return _op.outputs;
}
@@ -517,38 +473,33 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Tile", name, new IntPtr[]
- {
- input as EagerTensor,
- multiples as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Tile", name,
+ null,
+ input, multiples);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Tile", name, new { input, multiples });
+ var _op = tf._op_def_lib._apply_op_helper("Tile", name, new { input, multiples });
return _op.outputs[0];
}
public static Tensor transpose(T1 x, T2 perm, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Transpose", name, new { x, perm });
+ var _op = tf._op_def_lib._apply_op_helper("Transpose", name, new { x, perm });
return _op.outputs[0];
}
public static Tensor zeros_like(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ZerosLike", name, new { x });
+ var _op = tf._op_def_lib._apply_op_helper("ZerosLike", name, new { x });
return _op.outputs[0];
}
public static Tensor stop_gradient(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("StopGradient", name, args: new { input = x, name });
+ var _op = tf._op_def_lib._apply_op_helper("StopGradient", name, args: new { input = x, name });
return _op.output;
}
@@ -563,31 +514,20 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- var attrs = new object[]
- {
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "StridedSlice", name,
+ null,
+ input, begin, end, strides,
"begin_mask", begin_mask,
"end_mask", end_mask,
"ellipsis_mask", ellipsis_mask,
"new_axis_mask", new_axis_mask,
- "shrink_axis_mask", shrink_axis_mask
- };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "StridedSlice", name, new IntPtr[]
- {
- input as EagerTensor,
- begin as EagerTensor,
- end as EagerTensor,
- strides as EagerTensor,
- }, 4,
- wrap_tfe_src.SetOpAttrs2(attrs),
- op => wrap_tfe_src.SetOpAttrs(op, attrs),
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ "shrink_axis_mask", shrink_axis_mask);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("StridedSlice", name, new
+ var _op = tf._op_def_lib._apply_op_helper("StridedSlice", name, new
{
input,
begin,
@@ -611,7 +551,7 @@ namespace Tensorflow
int shrink_axis_mask = 0,
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("StridedSlice", name, new
+ var _op = tf._op_def_lib._apply_op_helper("StridedSlice", name, new
{
input,
begin,
@@ -651,7 +591,7 @@ namespace Tensorflow
int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0,
int shrink_axis_mask = 0, string name = null)
{
- var op = _op_def_lib._apply_op_helper("StridedSliceGrad", name: name, args: new
+ var op = tf._op_def_lib._apply_op_helper("StridedSliceGrad", name: name, args: new
{
shape,
begin,
@@ -670,7 +610,7 @@ namespace Tensorflow
public static Tensor slice(Tensor input, Tb begin, Ts size, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Slice", name, new { input, begin, size });
+ var _op = tf._op_def_lib._apply_op_helper("Slice", name, new { input, begin, size });
return _op.outputs[0];
}
@@ -689,21 +629,17 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Squeeze", name, new IntPtr[]
- {
- input as EagerTensor
- }, 1,
- wrap_tfe_src.SetOpAttrs2("squeeze_dims", axis),
- op => wrap_tfe_src.SetOpAttrs(op, "squeeze_dims", axis),
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Squeeze", name,
+ null,
+ input,
+ "squeeze_dims", axis);
+
+ return results[0];
}
if (axis == null) axis = new int[0];
- var _op = _op_def_lib._apply_op_helper("Squeeze", name, args: new { input, squeeze_dims = axis });
+ var _op = tf._op_def_lib._apply_op_helper("Squeeze", name, args: new { input, squeeze_dims = axis });
return _op.outputs[0];
}
@@ -719,7 +655,7 @@ namespace Tensorflow
/// `Tensor`. Has the same type as `s0`.
public static Tensor broadcast_args(Tensor s0, Tensor s1, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("BroadcastArgs", name, args: new { s0, s1, name });
+ var _op = tf._op_def_lib._apply_op_helper("BroadcastArgs", name, args: new { s0, s1, name });
return _op.outputs[0];
}
@@ -735,19 +671,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(input, shape);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"BroadcastTo", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ input, shape);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("BroadcastTo", name, args: new { input, shape, name });
+ var _op = tf._op_def_lib._apply_op_helper("BroadcastTo", name, args: new { input, shape, name });
return _op.outputs[0];
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_control_flow_ops.cs b/src/TensorFlowNET.Core/Operations/gen_control_flow_ops.cs
index 0a2d82d7..e9801a12 100644
--- a/src/TensorFlowNET.Core/Operations/gen_control_flow_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_control_flow_ops.cs
@@ -15,16 +15,15 @@
******************************************************************************/
using Tensorflow.Operations;
+using static Tensorflow.Binding;
namespace Tensorflow
{
public class gen_control_flow_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
-
public static Operation control_trigger(string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ControlTrigger", name, new
+ var _op = tf._op_def_lib._apply_op_helper("ControlTrigger", name, new
{
});
@@ -42,7 +41,7 @@ namespace Tensorflow
///
public static Tensor enter(Tensor data, string frame_name = "frame_name", bool is_constant = false, int parallel_iterations = 10, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Enter", name, new
+ var _op = tf._op_def_lib._apply_op_helper("Enter", name, new
{
data,
frame_name,
@@ -61,7 +60,7 @@ namespace Tensorflow
///
public static Tensor loop_cond(Tensor input, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("LoopCond", name, new { input });
+ var _op = tf._op_def_lib._apply_op_helper("LoopCond", name, new { input });
return _op.output;
}
@@ -74,7 +73,7 @@ namespace Tensorflow
///
public static Tensor ref_next_iteration(Tensor data, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("RefNextIteration", name, new { data });
+ var _op = tf._op_def_lib._apply_op_helper("RefNextIteration", name, new { data });
return _op;
}
@@ -87,7 +86,7 @@ namespace Tensorflow
///
public static Tensor next_iteration(Tensor data, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("NextIteration", name, new { data });
+ var _op = tf._op_def_lib._apply_op_helper("NextIteration", name, new { data });
return _op;
}
@@ -100,7 +99,7 @@ namespace Tensorflow
///
public static Tensor ref_exit(Tensor data, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("RefExit", name, new { data });
+ var _op = tf._op_def_lib._apply_op_helper("RefExit", name, new { data });
return _op;
}
@@ -113,21 +112,21 @@ namespace Tensorflow
///
public static Tensor _exit(Tensor data, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Exit", name, new { data });
+ var _op = tf._op_def_lib._apply_op_helper("Exit", name, new { data });
return _op;
}
public static Operation no_op(string name = null)
{
- var _op = _op_def_lib._apply_op_helper("NoOp", name, null);
+ var _op = tf._op_def_lib._apply_op_helper("NoOp", name, null);
return _op;
}
public static Tensor[] ref_switch(Tensor data, Tensor pred, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("RefSwitch", name, new { data, pred });
+ var _op = tf._op_def_lib._apply_op_helper("RefSwitch", name, new { data, pred });
return _op.outputs;
}
@@ -151,7 +150,7 @@ namespace Tensorflow
///
public static Tensor[] @switch(Tensor data, Tensor pred, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Switch", name, new { data, pred });
+ var _op = tf._op_def_lib._apply_op_helper("Switch", name, new { data, pred });
var _inputs_flat = _op.inputs;
var _attrs = ("T", _op.get_attr("T"));
// TODO: missing original code
@@ -161,14 +160,14 @@ namespace Tensorflow
public static MergeOutput ref_merge(Tensor[] inputs, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("RefMerge", name, new { inputs });
+ var _op = tf._op_def_lib._apply_op_helper("RefMerge", name, new { inputs });
return new MergeOutput(_op.outputs);
}
public static MergeOutput merge(Tensor[] inputs, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Merge", name, new { inputs });
+ var _op = tf._op_def_lib._apply_op_helper("Merge", name, new { inputs });
return new MergeOutput(_op.outputs);
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs
index 018a56bb..69f0ac04 100644
--- a/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_ctc_ops.cs
@@ -14,15 +14,15 @@
limitations under the License.
******************************************************************************/
+using static Tensorflow.Binding;
+
namespace Tensorflow
{
public class gen_ctc_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
-
public static Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = "CTCGreedyDecoder")
{
- var op = _op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, args: new
+ var op = tf._op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, args: new
{
inputs,
sequence_length,
diff --git a/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs b/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs
index 37ae486e..e96c8a95 100644
--- a/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs
@@ -14,15 +14,15 @@
limitations under the License.
******************************************************************************/
+using static Tensorflow.Binding;
+
namespace Tensorflow
{
public class gen_data_flow_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
-
public static Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("DynamicStitch", name, new { indices, data });
+ var _op = tf._op_def_lib._apply_op_helper("DynamicStitch", name, new { indices, data });
return _op.output;
}
@@ -30,7 +30,7 @@ namespace Tensorflow
public static Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions,
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("DynamicPartition", name, new
+ var _op = tf._op_def_lib._apply_op_helper("DynamicPartition", name, new
{
data,
partitions,
@@ -44,7 +44,7 @@ namespace Tensorflow
TensorShape element_shape = null, bool dynamic_size = false, bool clear_after_read = true,
bool identical_element_shapes = false, string tensor_array_name = "", string name = null)
{
- var _op = _op_def_lib._apply_op_helper("TensorArrayV3", name, new
+ var _op = tf._op_def_lib._apply_op_helper("TensorArrayV3", name, new
{
size,
dtype,
@@ -61,7 +61,7 @@ namespace Tensorflow
public static Tensor tensor_array_scatter_v3(Tensor handle, Tensor indices, Tensor value,
Tensor flow_in, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("TensorArrayScatterV3", name, new
+ var _op = tf._op_def_lib._apply_op_helper("TensorArrayScatterV3", name, new
{
handle,
indices,
@@ -76,7 +76,7 @@ namespace Tensorflow
int capacity = -1, string container = "", string shared_name = "",
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("PaddingFIFOQueueV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("PaddingFIFOQueueV2", name, new
{
component_types,
shapes,
@@ -92,7 +92,7 @@ namespace Tensorflow
int capacity = -1, string container = "", string shared_name = "",
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("FIFOQueueV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("FIFOQueueV2", name, new
{
component_types,
shapes,
@@ -108,7 +108,7 @@ namespace Tensorflow
int capacity = -1, string container = "", string shared_name = "",
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("PriorityQueueV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("PriorityQueueV2", name, new
{
component_types,
shapes,
@@ -124,7 +124,7 @@ namespace Tensorflow
int capacity = -1, int min_after_dequeue = 0, int seed = 0, int seed2 = 0,
string container = "", string shared_name = "", string name = null)
{
- var _op = _op_def_lib._apply_op_helper("RandomShuffleQueueV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("RandomShuffleQueueV2", name, new
{
component_types,
shapes,
@@ -141,7 +141,7 @@ namespace Tensorflow
public static Operation queue_enqueue(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("QueueEnqueue", name, new
+ var _op = tf._op_def_lib._apply_op_helper("QueueEnqueue", name, new
{
handle,
components,
@@ -153,7 +153,7 @@ namespace Tensorflow
public static Operation queue_enqueue_v2(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("QueueEnqueueV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("QueueEnqueueV2", name, new
{
handle,
components,
@@ -165,7 +165,7 @@ namespace Tensorflow
public static Tensor[] queue_dequeue_v2(Tensor handle, TF_DataType[] component_types, int timeout_ms = -1, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("QueueDequeueV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("QueueDequeueV2", name, new
{
handle,
component_types,
@@ -177,7 +177,7 @@ namespace Tensorflow
public static Tensor[] queue_dequeue(Tensor handle, TF_DataType[] component_types, int timeout_ms = -1, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("QueueDequeue", name, new
+ var _op = tf._op_def_lib._apply_op_helper("QueueDequeue", name, new
{
handle,
component_types,
@@ -189,7 +189,7 @@ namespace Tensorflow
public static Operation queue_enqueue_many_v2(Tensor handle, Tensor[] components, int timeout_ms = -1, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("QueueEnqueueManyV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("QueueEnqueueManyV2", name, new
{
handle,
components,
@@ -201,7 +201,7 @@ namespace Tensorflow
public static Tensor[] queue_dequeue_many_v2(Tensor handle, int n, TF_DataType[] component_types, int timeout_ms = -1, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("QueueDequeueManyV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("QueueDequeueManyV2", name, new
{
handle,
n,
@@ -223,7 +223,7 @@ namespace Tensorflow
///
public static Tensor tensor_array_read_v3(Tensor handle, Tensor index, Tensor flow_in, TF_DataType dtype, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("TensorArrayReadV3", name, new
+ var _op = tf._op_def_lib._apply_op_helper("TensorArrayReadV3", name, new
{
handle,
index,
@@ -236,7 +236,7 @@ namespace Tensorflow
public static Tensor tensor_array_write_v3(Tensor handle, Tensor index, Tensor value, Tensor flow_in, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("TensorArrayWriteV3", name, new
+ var _op = tf._op_def_lib._apply_op_helper("TensorArrayWriteV3", name, new
{
handle,
index,
@@ -249,7 +249,7 @@ namespace Tensorflow
public static Tensor tensor_array_size_v3(Tensor handle, Tensor flow_in, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("TensorArraySizeV3", name, new
+ var _op = tf._op_def_lib._apply_op_helper("TensorArraySizeV3", name, new
{
handle,
flow_in
@@ -261,7 +261,7 @@ namespace Tensorflow
public static Tensor tensor_array_gather_v3(Tensor handle, Tensor indices, Tensor flow_in,
TF_DataType dtype, TensorShape element_shape = null, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("TensorArrayGatherV3", name, new
+ var _op = tf._op_def_lib._apply_op_helper("TensorArrayGatherV3", name, new
{
handle,
indices,
@@ -276,7 +276,7 @@ namespace Tensorflow
public static Tensor stack_v2(Tensor max_size, TF_DataType elem_type, string stack_name = "",
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("StackV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("StackV2", name, new
{
max_size,
elem_type,
@@ -289,7 +289,7 @@ namespace Tensorflow
public static Tensor stack_push_v2(Tensor handle, Tensor elem, bool swap_memory = false,
string name = null)
{
- var _op = _op_def_lib._apply_op_helper("StackPushV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("StackPushV2", name, new
{
handle,
elem,
@@ -301,7 +301,7 @@ namespace Tensorflow
public static Tensor stack_pop_v2(Tensor handle, TF_DataType elem_type, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("StackPopV2", name, new
+ var _op = tf._op_def_lib._apply_op_helper("StackPopV2", name, new
{
handle,
elem_type
diff --git a/src/TensorFlowNET.Core/Operations/gen_image_ops.cs b/src/TensorFlowNET.Core/Operations/gen_image_ops.cs
index 143d4fe8..173b8e3a 100644
--- a/src/TensorFlowNET.Core/Operations/gen_image_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_image_ops.cs
@@ -21,8 +21,6 @@ namespace Tensorflow
{
public class gen_image_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
-
public static Tensor convert_image_dtype(Tensor image, TF_DataType dtype, bool saturate = false, string name= null)
{
if (dtype == image.dtype)
@@ -73,7 +71,7 @@ namespace Tensorflow
}
else
{
- var _op = _op_def_lib._apply_op_helper("DecodeJpeg", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("DecodeJpeg", name: name, args: new
{
contents,
channels,
@@ -98,7 +96,7 @@ namespace Tensorflow
}
else
{
- var _op = _op_def_lib._apply_op_helper("DecodeGif", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("DecodeGif", name: name, args: new
{
contents
});
@@ -119,7 +117,7 @@ namespace Tensorflow
}
else
{
- var _op = _op_def_lib._apply_op_helper("DecodePng", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("DecodePng", name: name, args: new
{
contents,
channels,
@@ -141,7 +139,7 @@ namespace Tensorflow
}
else
{
- var _op = _op_def_lib._apply_op_helper("DecodeBmp", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("DecodeBmp", name: name, args: new
{
contents,
channels
@@ -159,7 +157,7 @@ namespace Tensorflow
}
else
{
- var _op = _op_def_lib._apply_op_helper("ResizeBilinear", name: name, args: new
+ var _op = tf._op_def_lib._apply_op_helper("ResizeBilinear", name: name, args: new
{
images,
size,
@@ -173,7 +171,7 @@ namespace Tensorflow
public static Tensor resize_nearest_neighbor(Tensor images, Tsize size, bool align_corners = false,
bool half_pixel_centers = false, string name = null)
{
- var op = _op_def_lib._apply_op_helper("ResizeNearestNeighbor", name: name, args: new
+ var op = tf._op_def_lib._apply_op_helper("ResizeNearestNeighbor", name: name, args: new
{
images,
size,
@@ -187,7 +185,7 @@ namespace Tensorflow
public static Tensor resize_nearest_neighbor_grad(Tensor grads, Tsize size, bool align_corners = false,
bool half_pixel_centers = false, string name = null)
{
- var op = _op_def_lib._apply_op_helper("ResizeNearestNeighborGrad", name: name, args: new
+ var op = tf._op_def_lib._apply_op_helper("ResizeNearestNeighborGrad", name: name, args: new
{
grads,
size,
diff --git a/src/TensorFlowNET.Core/Operations/gen_io_ops.cs b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs
index 13408452..d7462116 100644
--- a/src/TensorFlowNET.Core/Operations/gen_io_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs
@@ -14,29 +14,29 @@
limitations under the License.
******************************************************************************/
+using static Tensorflow.Binding;
+
namespace Tensorflow
{
public class gen_io_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
-
public static Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, Tensor[] tensors, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("SaveV2", name: name, args: new { prefix, tensor_names, shape_and_slices, tensors });
+ var _op = tf._op_def_lib._apply_op_helper("SaveV2", name: name, args: new { prefix, tensor_names, shape_and_slices, tensors });
return _op;
}
public static Tensor[] restore_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, TF_DataType[] dtypes, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("RestoreV2", name: name, args: new { prefix, tensor_names, shape_and_slices, dtypes });
+ var _op = tf._op_def_lib._apply_op_helper("RestoreV2", name: name, args: new { prefix, tensor_names, shape_and_slices, dtypes });
return _op.outputs;
}
public static Tensor read_file(T filename, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ReadFile", name: name, args: new { filename });
+ var _op = tf._op_def_lib._apply_op_helper("ReadFile", name: name, args: new { filename });
return _op.outputs[0];
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs b/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs
index c076ab3e..dc2853cb 100644
--- a/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs
@@ -15,19 +15,18 @@
******************************************************************************/
using System.Collections.Generic;
+using static Tensorflow.Binding;
namespace Tensorflow
{
public class gen_logging_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
-
public static Operation _assert(Tensor condition, object[] data, int? summarize = 3, string name = null)
{
if (!summarize.HasValue)
summarize = 3;
- var _op = _op_def_lib._apply_op_helper("Assert", name, args: new { condition, data, summarize });
+ var _op = tf._op_def_lib._apply_op_helper("Assert", name, args: new { condition, data, summarize });
return _op;
}
@@ -35,7 +34,7 @@ namespace Tensorflow
public static Tensor histogram_summary(string tag, Tensor values, string name = null)
{
var dict = new Dictionary();
- var op = _op_def_lib._apply_op_helper("HistogramSummary", name: name, args: new { tag, values });
+ var op = tf._op_def_lib._apply_op_helper("HistogramSummary", name: name, args: new { tag, values });
return op.output;
}
@@ -64,7 +63,7 @@ namespace Tensorflow
var dict = new Dictionary();
dict["tags"] = tags;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("ScalarSummary", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ScalarSummary", name: name, keywords: dict);
return op.output;
}
@@ -95,7 +94,7 @@ namespace Tensorflow
{
var dict = new Dictionary();
dict["inputs"] = inputs;
- var op = _op_def_lib._apply_op_helper("MergeSummary", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MergeSummary", name: name, keywords: dict);
return op.output;
}
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
index 5b4a6819..b057cd15 100644
--- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
@@ -24,12 +24,9 @@ namespace Tensorflow
{
public static partial class gen_math_ops
{
- public static OpDefLibrary _op_def_lib = new OpDefLibrary();
- public static Execute _execute = new Execute();
-
public static Tensor _all(Tensor input, Tensor axis, bool keep_dims = false, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
return _op.outputs[0];
}
@@ -44,33 +41,18 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AddN", name,
- inputs.Select(x => (x as EagerTensor).EagerTensorHandle).ToArray(), inputs.Length,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ new[] { inputs });
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("AddN", name, args: new { inputs });
+ var _op = tf._op_def_lib._apply_op_helper("AddN", name, args: new { inputs });
return _op.outputs[0];
}
- public static IntPtr add_n(IntPtr[] inputs, string name = null)
- {
- var results = new[] { c_api.TFE_NewEagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "AddN", name,
- inputs, inputs.Length,
- null, null,
- results, results.Length);
- status.Check(true);
- return results[0];
- }
-
///
/// Returns the index with the largest value across dimensions of a tensor.
///
@@ -83,19 +65,16 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(input, dimension);
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"ArgMax", name,
- inputs.Points, inputs.Length,
- wrap_tfe_src.SetOpAttrs2("output_type", output_type),
- op => wrap_tfe_src.SetOpAttrs(op, "output_type", output_type),
- results.Points, results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ input, dimension,
+ "output_type", output_type);
+
+ return results[0];
}
- return _op_def_lib._apply_op_helper("ArgMax", name, args: new { input, dimension, output_type }).output;
+ return tf._op_def_lib._apply_op_helper("ArgMax", name, args: new { input, dimension, output_type }).output;
}
///
@@ -107,7 +86,7 @@ namespace Tensorflow
///
///
public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type= TF_DataType.TF_INT64, string name= null)
- =>_op_def_lib._apply_op_helper("ArgMin", name, args: new { input, dimension, output_type }).outputs[0];
+ => tf._op_def_lib._apply_op_helper("ArgMin", name, args: new { input, dimension, output_type }).outputs[0];
///
/// Computes Psi, the derivative of Lgamma (the log of the absolute value of
@@ -117,7 +96,7 @@ namespace Tensorflow
///
///
public static Tensor digamma(Tensor x, string name = null)
- => _op_def_lib._apply_op_helper("Digamma", name, args: new { x }).output;
+ => tf._op_def_lib._apply_op_helper("Digamma", name, args: new { x }).output;
///
/// Returns 0 if the denominator is zero.
@@ -139,7 +118,7 @@ namespace Tensorflow
///
public static Tensor div_no_nan(Tensor x, Tensor y, string name = null)
{
- var op = _op_def_lib._apply_op_helper("DivNoNan", name: name, args: new { x, y });
+ var op = tf._op_def_lib._apply_op_helper("DivNoNan", name: name, args: new { x, y });
return op.output;
}
@@ -160,22 +139,16 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Mean", name,
- new IntPtr[]
- {
- input as EagerTensor,
- axis as EagerTensor
- }, 2,
- wrap_tfe_src.SetOpAttrs2("keep_dims", keep_dims),
- op => wrap_tfe_src.SetOpAttrs(op, "keep_dims", keep_dims),
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ input, axis,
+ "keep_dims", keep_dims);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
return _op.output;
}
@@ -187,19 +160,19 @@ namespace Tensorflow
return mean_eager_fallback(inputs, axis, keep_dims: keep_dims, name: name, ctx: tf.context);
}
- var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { inputs, reduction_indices = axis, keep_dims = keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("Mean", name, args: new { inputs, reduction_indices = axis, keep_dims = keep_dims });
return _op.output;
}
private static Tensor mean_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null)
{
- var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { inputs });
- var (_attr_Tidx, axis1) = _execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new[] { axis });
+ var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: new[] { inputs });
+ var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new[] { axis });
var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };
- return _execute.execute(ctx, "Mean", 1, _inputs_flat, _attrs, name: name)[0];
+ return tf._execute.execute(ctx, "Mean", 1, _inputs_flat, _attrs, name: name)[0];
}
public static Tensor prod(T1 input, T2 axis, bool keep_dims = false, string name = null)
@@ -208,18 +181,13 @@ namespace Tensorflow
{
try
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Prod", name, new IntPtr[]
- {
- input as EagerTensor,
- axis as EagerTensor
- }, 2,
- wrap_tfe_src.SetOpAttrs2("keep_dims", keep_dims),
- op => wrap_tfe_src.SetOpAttrs(op, "keep_dims", keep_dims),
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Prod", name,
+ null,
+ input, axis,
+ "keep_dims", keep_dims);
+
+ return results[0];
}
catch (Exception)
{
@@ -227,30 +195,30 @@ namespace Tensorflow
}
}
- var _op = _op_def_lib._apply_op_helper("Prod", name, args: new { input, reduction_indices = axis, keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("Prod", name, args: new { input, reduction_indices = axis, keep_dims });
return _op.output;
}
private static Tensor prod_eager_fallback(Tensor input_t, int[] axis, bool keep_dims, string name, Context ctx = null)
{
- var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { input_t });
- var (_attr_Tidx, axis1) = _execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new[] { axis });
+ var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: new[] { input_t });
+ var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, default_dtype: tf.int32, args: new[] { axis });
var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };
- return _execute.execute(ctx, "Prod", 1, _inputs_flat, _attrs, name: name)[0];
+ return tf._execute.execute(ctx, "Prod", 1, _inputs_flat, _attrs, name: name)[0];
}
public static Tensor acos(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Acos", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Acos", name, args: new { x });
return _op.outputs[0];
}
public static Tensor asin(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Asin", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Asin", name, args: new { x });
return _op.outputs[0];
}
@@ -259,14 +227,13 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Add", name, null,
- x as EagerTensor,
- y as EagerTensor);
+ x, y);
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Add", name, args: new { x, y });
return _op.output;
}
@@ -275,20 +242,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Add", name, new IntPtr[]
- {
- x as EagerTensor,
- y as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Add", name,
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Add", name, args: new { x, y });
return _op.output;
}
@@ -298,28 +260,28 @@ namespace Tensorflow
// forward_compatible(2019, 6, 25):
if (tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"AddV2", name,
null,
x, y);
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("AddV2", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("AddV2", name, args: new { x, y });
return _op.output;
}
public static Tensor atan(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Atan", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Atan", name, args: new { x });
return _op.outputs[0];
}
public static Tensor ceil(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Ceil", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Ceil", name, args: new { x });
return _op.outputs[0];
}
@@ -328,19 +290,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Sin", name, new IntPtr[]
- {
- x as EagerTensor,
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Sin", name,
+ null,
+ x);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Sin", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Sin", name, args: new { x });
return _op.outputs[0];
}
@@ -363,19 +321,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Sigmoid", name, new IntPtr[]
- {
- x as EagerTensor,
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Sigmoid", name,
+ null,
+ x);
+
+ return results[0];
}
- var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, new { x });
+ var op = tf._op_def_lib._apply_op_helper("Sigmoid", name: name, new { x });
return op.output;
}
@@ -399,42 +353,42 @@ namespace Tensorflow
///
public static Tensor sigmoid_grad(Tensor y, Tensor dy, string name = "SigmoidGrad")
{
- var op = _op_def_lib._apply_op_helper("SigmoidGrad", name: name, args: new { y, dy });
+ var op = tf._op_def_lib._apply_op_helper("SigmoidGrad", name: name, args: new { y, dy });
return op.outputs[0];
}
public static Tensor sign(T x, string name = "Sign")
{
- var op = _op_def_lib._apply_op_helper("Sign", name: name, args: new {x});
+ var op = tf._op_def_lib._apply_op_helper("Sign", name: name, args: new {x});
return op.outputs[0];
}
public static Tensor sinh(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Sinh", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Sinh", name, args: new { x });
return _op.outputs[0];
}
public static Tensor cos(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Cos", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Cos", name, args: new { x });
return _op.outputs[0];
}
public static Tensor cosh(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Cosh", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Cosh", name, args: new { x });
return _op.outputs[0];
}
public static Tensor cumsum(Tensor x, T axis, bool exclusive = false, bool reverse = false, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Cumsum", name, args: new { x, axis, exclusive, reverse });
+ var _op = tf._op_def_lib._apply_op_helper("Cumsum", name, args: new { x, axis, exclusive, reverse });
return _op.outputs[0];
}
@@ -449,7 +403,7 @@ namespace Tensorflow
///
public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments });
+ var _op = tf._op_def_lib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments });
return _op.outputs[0];
}
@@ -457,19 +411,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Tan", name, new IntPtr[]
- {
- x as EagerTensor,
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Tan", name,
+ null,
+ x);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Tan", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Tan", name, args: new { x });
return _op.outputs[0];
}
@@ -478,19 +428,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Tanh", name, new IntPtr[]
- {
- x as EagerTensor,
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Tanh", name,
+ null,
+ x);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Tanh", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Tanh", name, args: new { x });
return _op.outputs[0];
}
@@ -506,33 +452,28 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "TanhGrad", name, new IntPtr[]
- {
- y as EagerTensor,
- dy as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "TanhGrad", name,
+ null,
+ y, dy);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("TanhGrad", name: name, args: new { y, dy }).output;
+ var _op = tf._op_def_lib._apply_op_helper("TanhGrad", name: name, args: new { y, dy }).output;
return _op.outputs[0];
}
public static Tensor floor(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Floor", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Floor", name, args: new { x });
return _op.outputs[0];
}
public static Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max });
+ var _op = tf._op_def_lib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max });
return _op.outputs[0];
}
@@ -541,18 +482,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x, y);
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Greater", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });
return _op.outputs[0];
}
@@ -570,7 +508,7 @@ namespace Tensorflow
///
public static Tensor lgamma(Tensor x, string name = null)
{
- var op = _op_def_lib._apply_op_helper("Lgamma", name: name, args: new { x });
+ var op = tf._op_def_lib._apply_op_helper("Lgamma", name: name, args: new { x });
return op.output;
}
@@ -579,20 +517,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x, y);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"GreaterEqual", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
+ null,
+ x, y);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("GreaterEqual", name: name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("GreaterEqual", name: name, args: new { x, y });
return _op.outputs[0];
}
@@ -601,18 +534,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x, y);
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Less", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Less", name: name, args: new { x, y });
return _op.outputs[0];
}
@@ -621,46 +551,43 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x, y);
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"LessEqual", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
- return results[0].Resolve();
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("LessEqual", name: name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("LessEqual", name: name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor log1p(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Log1p", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Log1p", name, args: new { x });
return _op.outputs[0];
}
public static Tensor logical_and(Tensor x, Tensor y, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor logical_not(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("LogicalNot", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("LogicalNot", name, args: new { x });
return _op.outputs[0];
}
public static Tensor logical_or(Tensor x, Tensor y, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("LogicalOr", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("LogicalOr", name, args: new { x, y });
return _op.outputs[0];
}
@@ -675,7 +602,7 @@ namespace Tensorflow
public static Tensor squared_difference(Tensor x, Tensor y, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name });
+ var _op = tf._op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name });
return _op.outputs[0];
}
@@ -690,19 +617,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Square", name, new IntPtr[]
- {
- x as EagerTensor,
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Square", name,
+ null,
+ x);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Square", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Square", name, args: new { x });
return _op.outputs[0];
}
@@ -715,14 +638,14 @@ namespace Tensorflow
/// A `Tensor` of type `bool`.
public static Tensor is_finite(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("IsFinite", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("IsFinite", name, args: new { x });
return _op.outputs[0];
}
public static Tensor is_nan(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("IsNan", name: name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("IsNan", name: name, args: new { x });
return _op.outputs[0];
}
@@ -735,7 +658,7 @@ namespace Tensorflow
/// A `Tensor`. Has the same type as `x`.
public static Tensor exp(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Exp", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Exp", name, args: new { x });
return _op.outputs[0];
}
@@ -750,20 +673,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Log", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
+ null,
+ x);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Log", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Log", name, args: new { x });
return _op.outputs[0];
}
@@ -772,7 +690,7 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Cast", name,
null,
x,
@@ -781,7 +699,7 @@ namespace Tensorflow
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
+ var _op = tf._op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate });
return _op.outputs[0];
}
@@ -790,20 +708,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Neg", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
+ null,
+ x);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Neg", name, args: new { x });
return _op.outputs[0];
}
@@ -812,19 +725,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Sqrt", name, new IntPtr[]
- {
- x as EagerTensor,
- }, 1,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Sqrt", name,
+ null,
+ x);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Sqrt", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Sqrt", name, args: new { x });
return _op.outputs[0];
}
@@ -833,14 +742,14 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sub", name,
null,
x, y);
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
return _op.output;
}
@@ -849,20 +758,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Sub", name, new IntPtr[]
- {
- x as EagerTensor,
- y as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Sub", name,
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Sub", name, args: new { x, y });
return _op.outputs[0];
}
@@ -878,20 +782,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Equal", name, new IntPtr[]
- {
- x as EagerTensor,
- y as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Equal", name,
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Equal", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Equal", name, args: new { x, y });
return _op.output;
}
@@ -908,20 +807,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "NotEqual", name, new IntPtr[]
- {
- x as EagerTensor,
- y as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "NotEqual", name,
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y });
return _op.output;
}
@@ -930,20 +824,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Atan2", name, new IntPtr[]
- {
- y as EagerTensor,
- x as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Atan2", name,
+ null,
+ y, x);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Atan2", name, args: new { y, x });
+ var _op = tf._op_def_lib._apply_op_helper("Atan2", name, args: new { y, x });
return _op.output;
}
@@ -951,14 +840,14 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Mul", name,
null,
x, y);
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
return _op.output;
}
@@ -967,27 +856,22 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Mul", name, new IntPtr[]
- {
- x as EagerTensor,
- y as EagerTensor,
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Mul", name,
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Mul", name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor mul_no_nan(Tx x, Ty y, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("MulNoNan", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("MulNoNan", name, args: new { x, y });
return _op.outputs[0];
}
@@ -996,14 +880,14 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"RealDiv", name,
null,
x, y);
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y });
return _op.outputs[0];
}
@@ -1012,20 +896,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Reciprocal", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
+ null,
+ x);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Reciprocal", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Reciprocal", name, args: new { x });
return _op.outputs[0];
}
@@ -1034,20 +913,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "FloorMod", name, new IntPtr[]
- {
- x as EagerTensor,
- y as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "FloorMod", name,
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y });
return _op.outputs[0];
}
@@ -1056,20 +930,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "FloorDiv", name, new IntPtr[]
- {
- x as EagerTensor,
- y as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "FloorDiv", name,
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y });
return _op.outputs[0];
}
@@ -1087,7 +956,7 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"MatMul", name,
null,
a, b,
@@ -1095,7 +964,7 @@ namespace Tensorflow
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
+ var _op = tf._op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b });
return _op.output;
}
@@ -1127,7 +996,7 @@ namespace Tensorflow
///
public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null)
{
- var _op = _op_def_lib._apply_op_helper(
+ var _op = tf._op_def_lib._apply_op_helper(
"BatchMatMul",
name,
args: new { x, y, adj_x, adj_y });
@@ -1146,20 +1015,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x, y);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Maximum", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
+ null,
+ x, y);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Maximum", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Maximum", name, args: new { x, y });
return _op.outputs[0];
}
@@ -1168,48 +1032,43 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(x, y);
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Minimum", name,
- inputs.Points, inputs.Length,
- null, null,
- results.Points, results.Length);
- status.Check(true);
+ null,
+ x, y);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Minimum", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Minimum", name, args: new { x, y });
return _op.outputs[0];
}
public static Tensor _abs(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Abs", name, args: new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Abs", name, args: new { x });
return _op.output;
}
public static Tensor _any(Tx input, Ty axis, bool keep_dims = false, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
public static Tensor _max(Tx input, Ty axis, bool keep_dims=false, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Max", name, new { input, reduction_indices = axis, keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("Max", name, new { input, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
public static Tensor _min(Tx input, Ty axis, bool keep_dims = false, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Min", name, new { input, reduction_indices = axis, keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("Min", name, new { input, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
@@ -1218,20 +1077,15 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = new[] { new EagerTensor() };
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Pow", name, new IntPtr[]
- {
- x as EagerTensor,
- y as EagerTensor
- }, 2,
- null, null,
- results.Select(x => x.EagerTensorHandle).ToArray(), results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Pow", name,
+ null,
+ x, y);
+
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y });
+ var _op = tf._op_def_lib._apply_op_helper("Pow", name, args: new { x, y });
return _op.outputs[0];
}
@@ -1240,22 +1094,16 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = EagerTensorPass.Create();
- var inputs = EagerTensorPass.From(input, axis);
- var attrs = new object[] { "keep_dims", keep_dims };
-
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Sum", name,
- inputs.Points, inputs.Length,
- wrap_tfe_src.SetOpAttrs2(attrs),
- op => wrap_tfe_src.SetOpAttrs(op, attrs),
- results.Points, results.Length);
- status.Check(true);
+ null,
+ input, axis,
+ "keep_dims", keep_dims);
- return results[0].Resolve();
+ return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
@@ -1268,19 +1116,19 @@ namespace Tensorflow
keep_dims: keep_dims, name: name, ctx: tf.context);
}
- var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { inputs, reduction_indices = axis, keep_dims });
+ var _op = tf._op_def_lib._apply_op_helper("Sum", name, args: new { inputs, reduction_indices = axis, keep_dims });
return _op.outputs[0];
}
private static Tensor _sum_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null)
{
- var (_attr_T, input) = _execute.args_to_matching_eager(ctx, args: new[] { inputs });
- var (_attr_Tidx, axis1) = _execute.args_to_matching_eager(ctx, tf.int32, new[] { axis });
+ var (_attr_T, input) = tf._execute.args_to_matching_eager(ctx, args: new[] { inputs });
+ var (_attr_Tidx, axis1) = tf._execute.args_to_matching_eager(ctx, tf.int32, new[] { axis });
var _inputs_flat = input.concat(axis1);
var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx };
- return _execute.execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0];
+ return tf._execute.execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0];
}
///
@@ -1295,14 +1143,14 @@ namespace Tensorflow
{
if (tf.context.executing_eagerly())
{
- var results = wrap_tfe_src.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Range", name,
null,
start, limit, delta);
return results[0];
}
- var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta });
+ var _op = tf._op_def_lib._apply_op_helper("Range", name, new { start, limit, delta });
return _op.outputs[0];
}
@@ -1324,7 +1172,7 @@ namespace Tensorflow
///
public static Tensor round(Tensor x, string name = "Round")
{
- var op = _op_def_lib._apply_op_helper("Round", name: name, new { x });
+ var op = tf._op_def_lib._apply_op_helper("Round", name: name, new { x });
return op.output;
}
@@ -1337,7 +1185,7 @@ namespace Tensorflow
///
public static Tensor rsqrt(Tensor x, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("Rsqrt", name, new { x });
+ var _op = tf._op_def_lib._apply_op_helper("Rsqrt", name, new { x });
return _op.outputs[0];
}
@@ -1350,7 +1198,7 @@ namespace Tensorflow
/// The fraction of zeros in value, with type float32.
public static Tensor zero_fraction(Tensor value, string name = null)
{
- var _op = _op_def_lib._apply_op_helper("zero_fraction", name, new { value, name });
+ var _op = tf._op_def_lib._apply_op_helper("zero_fraction", name, new { value, name });
return _op.outputs[0];
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs
index 973acfa5..1991c4b1 100644
--- a/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs
@@ -9,19 +9,14 @@ namespace Tensorflow
{
public static partial class gen_math_ops
{
- public static EagerTensor mul(IntPtr x, IntPtr y, string name = null)
+ public static Tensor mul(IntPtr x, IntPtr y, string name = null)
{
- var results = EagerTensorPass.Create();
- Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
- "Mul", name, new IntPtr[]
- {
- x,
- y,
- }, 2,
- null, null,
- results.Points, results.Length);
- status.Check(true);
- return results[0].Resolve();
+ var results = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
+ "Mul", name,
+ null,
+ x, y);
+
+ return results[0];
}
}
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ops.cs
index 6e91be02..14b5700b 100644
--- a/src/TensorFlowNET.Core/Operations/gen_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_ops.cs
@@ -1,13 +1,11 @@
using System.Linq;
using System.Collections.Generic;
+using static Tensorflow.Binding;
namespace Tensorflow.Operations
{
public class gen_ops
{
- static readonly OpDefLibrary _op_def_lib;
- static gen_ops() { _op_def_lib = new OpDefLibrary(); }
-
///
/// Raise a exception to abort the process when called.
///
@@ -35,7 +33,7 @@ namespace Tensorflow.Operations
dict["error_msg"] = error_msg;
if (exit_without_error.HasValue)
dict["exit_without_error"] = exit_without_error.Value;
- var op = _op_def_lib._apply_op_helper("Abort", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Abort", name: name, keywords: dict);
return op;
}
@@ -59,7 +57,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Abs", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Abs", name: name, keywords: dict);
return op.output;
}
@@ -94,7 +92,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["inputs"] = inputs;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("AccumulateNV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AccumulateNV2", name: name, keywords: dict);
return op.output;
}
@@ -125,7 +123,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
dict["local_step"] = local_step;
dict["gradient"] = gradient;
- var op = _op_def_lib._apply_op_helper("AccumulatorApplyGradient", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AccumulatorApplyGradient", name: name, keywords: dict);
return op;
}
@@ -146,7 +144,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["handle"] = handle;
- var op = _op_def_lib._apply_op_helper("AccumulatorNumAccumulated", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AccumulatorNumAccumulated", name: name, keywords: dict);
return op.output;
}
@@ -174,7 +172,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["handle"] = handle;
dict["new_global_step"] = new_global_step;
- var op = _op_def_lib._apply_op_helper("AccumulatorSetGlobalStep", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AccumulatorSetGlobalStep", name: name, keywords: dict);
return op;
}
@@ -212,7 +210,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
dict["num_required"] = num_required;
dict["dtype"] = dtype;
- var op = _op_def_lib._apply_op_helper("AccumulatorTakeGradient", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AccumulatorTakeGradient", name: name, keywords: dict);
return op.output;
}
@@ -231,7 +229,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Acos", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Acos", name: name, keywords: dict);
return op.output;
}
@@ -250,7 +248,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Acosh", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Acosh", name: name, keywords: dict);
return op.output;
}
@@ -276,7 +274,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Add", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Add", name: name, keywords: dict);
return op.output;
}
@@ -345,7 +343,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("AddManySparseToTensorsMap", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AddManySparseToTensorsMap", name: name, keywords: dict);
return op.output;
}
@@ -365,7 +363,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["inputs"] = inputs;
- var op = _op_def_lib._apply_op_helper("AddN", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AddN", name: name, keywords: dict);
return op.output;
}
@@ -422,7 +420,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("AddSparseToTensorsMap", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AddSparseToTensorsMap", name: name, keywords: dict);
return op.output;
}
@@ -448,7 +446,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("AddV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AddV2", name: name, keywords: dict);
return op.output;
}
@@ -476,7 +474,7 @@ namespace Tensorflow.Operations
dict["contrast_factor"] = contrast_factor;
dict["min_value"] = min_value;
dict["max_value"] = max_value;
- var op = _op_def_lib._apply_op_helper("AdjustContrast", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AdjustContrast", name: name, keywords: dict);
return op.output;
}
@@ -512,7 +510,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["images"] = images;
dict["contrast_factor"] = contrast_factor;
- var op = _op_def_lib._apply_op_helper("AdjustContrastv2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AdjustContrastv2", name: name, keywords: dict);
return op.output;
}
@@ -545,7 +543,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["images"] = images;
dict["delta"] = delta;
- var op = _op_def_lib._apply_op_helper("AdjustHue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AdjustHue", name: name, keywords: dict);
return op.output;
}
@@ -578,7 +576,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["images"] = images;
dict["scale"] = scale;
- var op = _op_def_lib._apply_op_helper("AdjustSaturation", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AdjustSaturation", name: name, keywords: dict);
return op.output;
}
@@ -615,7 +613,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = _op_def_lib._apply_op_helper("All", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("All", name: name, keywords: dict);
return op.output;
}
@@ -686,7 +684,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("AllCandidateSampler", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AllCandidateSampler", name: name, keywords: dict);
int _idx = 0;
var sampled_candidates = op.outputs[_idx++];
var true_expected_count = op.outputs[_idx++];
@@ -732,7 +730,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (Tout.HasValue)
dict["Tout"] = Tout.Value;
- var op = _op_def_lib._apply_op_helper("Angle", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Angle", name: name, keywords: dict);
return op.output;
}
@@ -760,7 +758,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("AnonymousIterator", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AnonymousIterator", name: name, keywords: dict);
return op.output;
}
@@ -797,7 +795,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = _op_def_lib._apply_op_helper("Any", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Any", name: name, keywords: dict);
return op.output;
}
@@ -862,7 +860,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyAdaMax", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyAdaMax", name: name, keywords: dict);
return op.output;
}
@@ -919,7 +917,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyAdadelta", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyAdadelta", name: name, keywords: dict);
return op.output;
}
@@ -967,7 +965,7 @@ namespace Tensorflow.Operations
dict["use_locking"] = use_locking.Value;
if (update_slots.HasValue)
dict["update_slots"] = update_slots.Value;
- var op = _op_def_lib._apply_op_helper("ApplyAdagrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyAdagrad", name: name, keywords: dict);
return op.output;
}
@@ -1022,7 +1020,7 @@ namespace Tensorflow.Operations
dict["global_step"] = global_step;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyAdagradDA", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyAdagradDA", name: name, keywords: dict);
return op.output;
}
@@ -1097,7 +1095,7 @@ namespace Tensorflow.Operations
dict["use_locking"] = use_locking.Value;
if (use_nesterov.HasValue)
dict["use_nesterov"] = use_nesterov.Value;
- var op = _op_def_lib._apply_op_helper("ApplyAdam", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyAdam", name: name, keywords: dict);
return op.output;
}
@@ -1154,7 +1152,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyAddSign", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyAddSign", name: name, keywords: dict);
return op.output;
}
@@ -1233,7 +1231,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyCenteredRMSProp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyCenteredRMSProp", name: name, keywords: dict);
return op.output;
}
@@ -1296,7 +1294,7 @@ namespace Tensorflow.Operations
dict["lr_power"] = lr_power;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyFtrl", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyFtrl", name: name, keywords: dict);
return op.output;
}
@@ -1364,7 +1362,7 @@ namespace Tensorflow.Operations
dict["lr_power"] = lr_power;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyFtrlV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyFtrlV2", name: name, keywords: dict);
return op.output;
}
@@ -1399,7 +1397,7 @@ namespace Tensorflow.Operations
dict["delta"] = delta;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyGradientDescent", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyGradientDescent", name: name, keywords: dict);
return op.output;
}
@@ -1456,7 +1454,7 @@ namespace Tensorflow.Operations
dict["use_locking"] = use_locking.Value;
if (use_nesterov.HasValue)
dict["use_nesterov"] = use_nesterov.Value;
- var op = _op_def_lib._apply_op_helper("ApplyMomentum", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyMomentum", name: name, keywords: dict);
return op.output;
}
@@ -1513,7 +1511,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyPowerSign", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyPowerSign", name: name, keywords: dict);
return op.output;
}
@@ -1565,7 +1563,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyProximalAdagrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyProximalAdagrad", name: name, keywords: dict);
return op.output;
}
@@ -1612,7 +1610,7 @@ namespace Tensorflow.Operations
dict["delta"] = delta;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyProximalGradientDescent", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyProximalGradientDescent", name: name, keywords: dict);
return op.output;
}
@@ -1679,7 +1677,7 @@ namespace Tensorflow.Operations
dict["grad"] = grad;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("ApplyRMSProp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApplyRMSProp", name: name, keywords: dict);
return op.output;
}
@@ -1705,7 +1703,7 @@ namespace Tensorflow.Operations
dict["y"] = y;
if (tolerance.HasValue)
dict["tolerance"] = tolerance.Value;
- var op = _op_def_lib._apply_op_helper("ApproximateEqual", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ApproximateEqual", name: name, keywords: dict);
return op.output;
}
@@ -1737,7 +1735,7 @@ namespace Tensorflow.Operations
dict["dimension"] = dimension;
if (output_type.HasValue)
dict["output_type"] = output_type.Value;
- var op = _op_def_lib._apply_op_helper("ArgMax", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ArgMax", name: name, keywords: dict);
return op.output;
}
@@ -1769,7 +1767,7 @@ namespace Tensorflow.Operations
dict["dimension"] = dimension;
if (output_type.HasValue)
dict["output_type"] = output_type.Value;
- var op = _op_def_lib._apply_op_helper("ArgMin", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ArgMin", name: name, keywords: dict);
return op.output;
}
@@ -1821,7 +1819,7 @@ namespace Tensorflow.Operations
dict["width"] = width.Value;
if (fill != null)
dict["fill"] = fill;
- var op = _op_def_lib._apply_op_helper("AsString", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AsString", name: name, keywords: dict);
return op.output;
}
@@ -1840,7 +1838,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Asin", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Asin", name: name, keywords: dict);
return op.output;
}
@@ -1859,7 +1857,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Asinh", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Asinh", name: name, keywords: dict);
return op.output;
}
@@ -1892,7 +1890,7 @@ namespace Tensorflow.Operations
dict["data"] = data;
if (summarize.HasValue)
dict["summarize"] = summarize.Value;
- var op = _op_def_lib._apply_op_helper("Assert", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Assert", name: name, keywords: dict);
return op;
}
@@ -1935,7 +1933,7 @@ namespace Tensorflow.Operations
dict["validate_shape"] = validate_shape.Value;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("Assign", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Assign", name: name, keywords: dict);
return op.output;
}
@@ -1971,7 +1969,7 @@ namespace Tensorflow.Operations
dict["value"] = value;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("AssignAdd", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AssignAdd", name: name, keywords: dict);
return op.output;
}
@@ -1999,7 +1997,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["resource"] = resource;
dict["value"] = value;
- var op = _op_def_lib._apply_op_helper("AssignAddVariableOp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AssignAddVariableOp", name: name, keywords: dict);
return op;
}
@@ -2035,7 +2033,7 @@ namespace Tensorflow.Operations
dict["value"] = value;
if (use_locking.HasValue)
dict["use_locking"] = use_locking.Value;
- var op = _op_def_lib._apply_op_helper("AssignSub", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AssignSub", name: name, keywords: dict);
return op.output;
}
@@ -2063,7 +2061,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["resource"] = resource;
dict["value"] = value;
- var op = _op_def_lib._apply_op_helper("AssignSubVariableOp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AssignSubVariableOp", name: name, keywords: dict);
return op;
}
@@ -2091,7 +2089,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["resource"] = resource;
dict["value"] = value;
- var op = _op_def_lib._apply_op_helper("AssignVariableOp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AssignVariableOp", name: name, keywords: dict);
return op;
}
@@ -2110,7 +2108,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Atan", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Atan", name: name, keywords: dict);
return op.output;
}
@@ -2139,7 +2137,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["y"] = y;
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Atan2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Atan2", name: name, keywords: dict);
return op.output;
}
@@ -2158,7 +2156,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Atanh", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Atanh", name: name, keywords: dict);
return op.output;
}
@@ -2223,7 +2221,7 @@ namespace Tensorflow.Operations
dict["stride"] = stride;
if (magnitude_squared.HasValue)
dict["magnitude_squared"] = magnitude_squared.Value;
- var op = _op_def_lib._apply_op_helper("AudioSpectrogram", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AudioSpectrogram", name: name, keywords: dict);
return op.output;
}
@@ -2271,7 +2269,7 @@ namespace Tensorflow.Operations
dict["sample_rate"] = sample_rate;
if (max_outputs.HasValue)
dict["max_outputs"] = max_outputs.Value;
- var op = _op_def_lib._apply_op_helper("AudioSummary", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AudioSummary", name: name, keywords: dict);
return op.output;
}
@@ -2318,7 +2316,7 @@ namespace Tensorflow.Operations
dict["sample_rate"] = sample_rate;
if (max_outputs.HasValue)
dict["max_outputs"] = max_outputs.Value;
- var op = _op_def_lib._apply_op_helper("AudioSummaryV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AudioSummaryV2", name: name, keywords: dict);
return op.output;
}
@@ -2367,7 +2365,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("AvgPool", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AvgPool", name: name, keywords: dict);
return op.output;
}
@@ -2414,7 +2412,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("AvgPool3D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AvgPool3D", name: name, keywords: dict);
return op.output;
}
@@ -2465,7 +2463,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("AvgPool3DGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AvgPool3DGrad", name: name, keywords: dict);
return op.output;
}
@@ -2515,7 +2513,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("AvgPoolGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("AvgPoolGrad", name: name, keywords: dict);
return op.output;
}
@@ -2572,7 +2570,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("Barrier", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Barrier", name: name, keywords: dict);
return op.output;
}
@@ -2607,7 +2605,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
if (cancel_pending_enqueues.HasValue)
dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value;
- var op = _op_def_lib._apply_op_helper("BarrierClose", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BarrierClose", name: name, keywords: dict);
return op;
}
@@ -2629,7 +2627,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["handle"] = handle;
- var op = _op_def_lib._apply_op_helper("BarrierIncompleteSize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BarrierIncompleteSize", name: name, keywords: dict);
return op.output;
}
@@ -2669,7 +2667,7 @@ namespace Tensorflow.Operations
dict["keys"] = keys;
dict["values"] = values;
dict["component_index"] = component_index;
- var op = _op_def_lib._apply_op_helper("BarrierInsertMany", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BarrierInsertMany", name: name, keywords: dict);
return op;
}
@@ -2691,7 +2689,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["handle"] = handle;
- var op = _op_def_lib._apply_op_helper("BarrierReadySize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BarrierReadySize", name: name, keywords: dict);
return op.output;
}
@@ -2754,7 +2752,7 @@ namespace Tensorflow.Operations
dict["wait_for_incomplete"] = wait_for_incomplete.Value;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("BarrierTakeMany", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BarrierTakeMany", name: name, keywords: dict);
int _idx = 0;
var indices = op.outputs[_idx++];
var keys = op.outputs[_idx++];
@@ -2855,7 +2853,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (batching_queue != null)
dict["batching_queue"] = batching_queue;
- var op = _op_def_lib._apply_op_helper("Batch", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Batch", name: name, keywords: dict);
int _idx = 0;
var batched_tensors = Enumerable.Range(0, op.OutputListLength("batched_tensors")).Select(_ => op.outputs[_idx++]).ToArray();
var batch_index = op.outputs[_idx++];
@@ -2891,7 +2889,7 @@ namespace Tensorflow.Operations
dict["batch_size"] = batch_size;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("BatchDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BatchDataset", name: name, keywords: dict);
return op.output;
}
@@ -2927,7 +2925,7 @@ namespace Tensorflow.Operations
dict["drop_remainder"] = drop_remainder;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("BatchDatasetV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BatchDatasetV2", name: name, keywords: dict);
return op.output;
}
@@ -2982,7 +2980,7 @@ namespace Tensorflow.Operations
dict["adj_x"] = adj_x.Value;
if (adj_y.HasValue)
dict["adj_y"] = adj_y.Value;
- var op = _op_def_lib._apply_op_helper("BatchMatMul", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BatchMatMul", name: name, keywords: dict);
return op.output;
}
@@ -3039,7 +3037,7 @@ namespace Tensorflow.Operations
dict["gamma"] = gamma;
dict["variance_epsilon"] = variance_epsilon;
dict["scale_after_normalization"] = scale_after_normalization;
- var op = _op_def_lib._apply_op_helper("BatchNormWithGlobalNormalization", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BatchNormWithGlobalNormalization", name: name, keywords: dict);
return op.output;
}
@@ -3101,7 +3099,7 @@ namespace Tensorflow.Operations
dict["backprop"] = backprop;
dict["variance_epsilon"] = variance_epsilon;
dict["scale_after_normalization"] = scale_after_normalization;
- var op = _op_def_lib._apply_op_helper("BatchNormWithGlobalNormalizationGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BatchNormWithGlobalNormalizationGrad", name: name, keywords: dict);
int _idx = 0;
var dx = op.outputs[_idx++];
var dm = op.outputs[_idx++];
@@ -3218,7 +3216,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["crops"] = crops;
dict["block_size"] = block_size;
- var op = _op_def_lib._apply_op_helper("BatchToSpace", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BatchToSpace", name: name, keywords: dict);
return op.output;
}
@@ -3363,7 +3361,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["block_shape"] = block_shape;
dict["crops"] = crops;
- var op = _op_def_lib._apply_op_helper("BatchToSpaceND", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BatchToSpaceND", name: name, keywords: dict);
return op.output;
}
@@ -3388,7 +3386,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("BesselI0e", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BesselI0e", name: name, keywords: dict);
return op.output;
}
@@ -3413,7 +3411,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("BesselI1e", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BesselI1e", name: name, keywords: dict);
return op.output;
}
@@ -3453,7 +3451,7 @@ namespace Tensorflow.Operations
dict["a"] = a;
dict["b"] = b;
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Betainc", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Betainc", name: name, keywords: dict);
return op.output;
}
@@ -3493,7 +3491,7 @@ namespace Tensorflow.Operations
dict["bias"] = bias;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("BiasAdd", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BiasAdd", name: name, keywords: dict);
return op.output;
}
@@ -3530,7 +3528,7 @@ namespace Tensorflow.Operations
dict["out_backprop"] = out_backprop;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("BiasAddGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BiasAddGrad", name: name, keywords: dict);
return op.output;
}
@@ -3561,7 +3559,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["value"] = value;
dict["bias"] = bias;
- var op = _op_def_lib._apply_op_helper("BiasAddV1", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BiasAddV1", name: name, keywords: dict);
return op.output;
}
@@ -3602,7 +3600,7 @@ namespace Tensorflow.Operations
dict["arr"] = arr;
dict["size"] = size;
dict["weights"] = weights;
- var op = _op_def_lib._apply_op_helper("Bincount", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Bincount", name: name, keywords: dict);
return op.output;
}
@@ -3639,7 +3637,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["type"] = type;
- var op = _op_def_lib._apply_op_helper("Bitcast", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Bitcast", name: name, keywords: dict);
return op.output;
}
@@ -3665,7 +3663,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("BitwiseAnd", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BitwiseAnd", name: name, keywords: dict);
return op.output;
}
@@ -3691,7 +3689,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("BitwiseOr", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BitwiseOr", name: name, keywords: dict);
return op.output;
}
@@ -3717,7 +3715,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("BitwiseXor", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BitwiseXor", name: name, keywords: dict);
return op.output;
}
@@ -3778,7 +3776,7 @@ namespace Tensorflow.Operations
dict["tree_complexity"] = tree_complexity;
dict["min_node_weight"] = min_node_weight;
dict["max_splits"] = max_splits;
- var op = _op_def_lib._apply_op_helper("BoostedTreesCalculateBestGainsPerFeature", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesCalculateBestGainsPerFeature", name: name, keywords: dict);
int _idx = 0;
var node_ids_list = Enumerable.Range(0, op.OutputListLength("node_ids_list")).Select(_ => op.outputs[_idx++]).ToArray();
var gains_list = Enumerable.Range(0, op.OutputListLength("gains_list")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -3821,7 +3819,7 @@ namespace Tensorflow.Operations
dict["mean_hessians"] = mean_hessians;
dict["l1"] = l1;
dict["l2"] = l2;
- var op = _op_def_lib._apply_op_helper("BoostedTreesCenterBias", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesCenterBias", name: name, keywords: dict);
return op.output;
}
@@ -3849,7 +3847,7 @@ namespace Tensorflow.Operations
dict["tree_ensemble_handle"] = tree_ensemble_handle;
dict["stamp_token"] = stamp_token;
dict["tree_ensemble_serialized"] = tree_ensemble_serialized;
- var op = _op_def_lib._apply_op_helper("BoostedTreesCreateEnsemble", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesCreateEnsemble", name: name, keywords: dict);
return op;
}
@@ -3880,7 +3878,7 @@ namespace Tensorflow.Operations
dict["tree_ensemble_handle"] = tree_ensemble_handle;
dict["stamp_token"] = stamp_token;
dict["tree_ensemble_serialized"] = tree_ensemble_serialized;
- var op = _op_def_lib._apply_op_helper("BoostedTreesDeserializeEnsemble", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesDeserializeEnsemble", name: name, keywords: dict);
return op;
}
@@ -3904,7 +3902,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("BoostedTreesEnsembleResourceHandleOp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesEnsembleResourceHandleOp", name: name, keywords: dict);
return op.output;
}
@@ -3940,7 +3938,7 @@ namespace Tensorflow.Operations
dict["tree_ensemble_handle"] = tree_ensemble_handle;
dict["bucketized_features"] = bucketized_features;
dict["logits_dimension"] = logits_dimension;
- var op = _op_def_lib._apply_op_helper("BoostedTreesExampleDebugOutputs", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesExampleDebugOutputs", name: name, keywords: dict);
return op.output;
}
@@ -3967,7 +3965,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["tree_ensemble_handle"] = tree_ensemble_handle;
- var op = _op_def_lib._apply_op_helper("BoostedTreesGetEnsembleStates", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesGetEnsembleStates", name: name, keywords: dict);
int _idx = 0;
var stamp_token = op.outputs[_idx++];
var num_trees = op.outputs[_idx++];
@@ -4019,7 +4017,7 @@ namespace Tensorflow.Operations
dict["bucketized_features_list"] = bucketized_features_list;
dict["max_splits"] = max_splits;
dict["num_buckets"] = num_buckets;
- var op = _op_def_lib._apply_op_helper("BoostedTreesMakeStatsSummary", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesMakeStatsSummary", name: name, keywords: dict);
return op.output;
}
@@ -4054,7 +4052,7 @@ namespace Tensorflow.Operations
dict["tree_ensemble_handle"] = tree_ensemble_handle;
dict["bucketized_features"] = bucketized_features;
dict["logits_dimension"] = logits_dimension;
- var op = _op_def_lib._apply_op_helper("BoostedTreesPredict", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesPredict", name: name, keywords: dict);
return op.output;
}
@@ -4077,7 +4075,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["tree_ensemble_handle"] = tree_ensemble_handle;
- var op = _op_def_lib._apply_op_helper("BoostedTreesSerializeEnsemble", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesSerializeEnsemble", name: name, keywords: dict);
int _idx = 0;
var stamp_token = op.outputs[_idx++];
var tree_ensemble_serialized = op.outputs[_idx++];
@@ -4130,7 +4128,7 @@ namespace Tensorflow.Operations
dict["cached_node_ids"] = cached_node_ids;
dict["bucketized_features"] = bucketized_features;
dict["logits_dimension"] = logits_dimension;
- var op = _op_def_lib._apply_op_helper("BoostedTreesTrainingPredict", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesTrainingPredict", name: name, keywords: dict);
int _idx = 0;
var partial_logits = op.outputs[_idx++];
var tree_ids = op.outputs[_idx++];
@@ -4202,7 +4200,7 @@ namespace Tensorflow.Operations
dict["max_depth"] = max_depth;
dict["learning_rate"] = learning_rate;
dict["pruning_mode"] = pruning_mode;
- var op = _op_def_lib._apply_op_helper("BoostedTreesUpdateEnsemble", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BoostedTreesUpdateEnsemble", name: name, keywords: dict);
return op;
}
@@ -4228,7 +4226,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["s0"] = s0;
dict["s1"] = s1;
- var op = _op_def_lib._apply_op_helper("BroadcastArgs", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BroadcastArgs", name: name, keywords: dict);
return op.output;
}
@@ -4256,7 +4254,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["s0"] = s0;
dict["s1"] = s1;
- var op = _op_def_lib._apply_op_helper("BroadcastGradientArgs", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BroadcastGradientArgs", name: name, keywords: dict);
int _idx = 0;
var r0 = op.outputs[_idx++];
var r1 = op.outputs[_idx++];
@@ -4303,7 +4301,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("BroadcastTo", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BroadcastTo", name: name, keywords: dict);
return op.output;
}
@@ -4345,7 +4343,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["boundaries"] = boundaries;
- var op = _op_def_lib._apply_op_helper("Bucketize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Bucketize", name: name, keywords: dict);
return op.output;
}
@@ -4375,7 +4373,7 @@ namespace Tensorflow.Operations
dict["tag"] = tag;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("BytesProducedStatsDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("BytesProducedStatsDataset", name: name, keywords: dict);
return op.output;
}
@@ -4433,7 +4431,7 @@ namespace Tensorflow.Operations
dict["top_paths"] = top_paths;
if (merge_repeated.HasValue)
dict["merge_repeated"] = merge_repeated.Value;
- var op = _op_def_lib._apply_op_helper("CTCBeamSearchDecoder", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CTCBeamSearchDecoder", name: name, keywords: dict);
int _idx = 0;
var decoded_indices = Enumerable.Range(0, op.OutputListLength("decoded_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var decoded_values = Enumerable.Range(0, op.OutputListLength("decoded_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -4487,7 +4485,7 @@ namespace Tensorflow.Operations
dict["sequence_length"] = sequence_length;
if (merge_repeated.HasValue)
dict["merge_repeated"] = merge_repeated.Value;
- var op = _op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CTCGreedyDecoder", name: name, keywords: dict);
int _idx = 0;
var decoded_indices = op.outputs[_idx++];
var decoded_values = op.outputs[_idx++];
@@ -4554,7 +4552,7 @@ namespace Tensorflow.Operations
dict["ctc_merge_repeated"] = ctc_merge_repeated.Value;
if (ignore_longer_outputs_than_inputs.HasValue)
dict["ignore_longer_outputs_than_inputs"] = ignore_longer_outputs_than_inputs.Value;
- var op = _op_def_lib._apply_op_helper("CTCLoss", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CTCLoss", name: name, keywords: dict);
int _idx = 0;
var loss = op.outputs[_idx++];
var gradient = op.outputs[_idx++];
@@ -4595,7 +4593,7 @@ namespace Tensorflow.Operations
dict["filename"] = filename;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("CacheDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CacheDataset", name: name, keywords: dict);
return op.output;
}
@@ -4622,7 +4620,7 @@ namespace Tensorflow.Operations
dict["DstT"] = DstT;
if (Truncate.HasValue)
dict["Truncate"] = Truncate.Value;
- var op = _op_def_lib._apply_op_helper("Cast", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Cast", name: name, keywords: dict);
return op.output;
}
@@ -4641,7 +4639,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Ceil", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Ceil", name: name, keywords: dict);
return op.output;
}
@@ -4669,7 +4667,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["tensor"] = tensor;
dict["message"] = message;
- var op = _op_def_lib._apply_op_helper("CheckNumerics", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CheckNumerics", name: name, keywords: dict);
return op.output;
}
@@ -4705,7 +4703,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("Cholesky", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Cholesky", name: name, keywords: dict);
return op.output;
}
@@ -4738,7 +4736,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["l"] = l;
dict["grad"] = grad;
- var op = _op_def_lib._apply_op_helper("CholeskyGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CholeskyGrad", name: name, keywords: dict);
return op.output;
}
@@ -4775,7 +4773,7 @@ namespace Tensorflow.Operations
dict["t"] = t;
dict["clip_value_min"] = clip_value_min;
dict["clip_value_max"] = clip_value_max;
- var op = _op_def_lib._apply_op_helper("ClipByValue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ClipByValue", name: name, keywords: dict);
return op.output;
}
@@ -4811,7 +4809,7 @@ namespace Tensorflow.Operations
dict["group_key"] = group_key;
dict["instance_key"] = instance_key;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("CollectiveBcastRecv", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CollectiveBcastRecv", name: name, keywords: dict);
return op.output;
}
@@ -4846,7 +4844,7 @@ namespace Tensorflow.Operations
dict["group_key"] = group_key;
dict["instance_key"] = instance_key;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("CollectiveBcastSend", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CollectiveBcastSend", name: name, keywords: dict);
return op.output;
}
@@ -4889,7 +4887,7 @@ namespace Tensorflow.Operations
dict["merge_op"] = merge_op;
dict["final_op"] = final_op;
dict["subdiv_offsets"] = subdiv_offsets;
- var op = _op_def_lib._apply_op_helper("CollectiveReduce", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CollectiveReduce", name: name, keywords: dict);
return op.output;
}
@@ -4939,7 +4937,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["threshold"] = threshold;
- var op = _op_def_lib._apply_op_helper("CompareAndBitpack", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CompareAndBitpack", name: name, keywords: dict);
return op.output;
}
@@ -4981,7 +4979,7 @@ namespace Tensorflow.Operations
dict["imag"] = imag;
if (Tout.HasValue)
dict["Tout"] = Tout.Value;
- var op = _op_def_lib._apply_op_helper("Complex", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Complex", name: name, keywords: dict);
return op.output;
}
@@ -5010,7 +5008,7 @@ namespace Tensorflow.Operations
dict["x"] = x;
if (Tout.HasValue)
dict["Tout"] = Tout.Value;
- var op = _op_def_lib._apply_op_helper("ComplexAbs", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ComplexAbs", name: name, keywords: dict);
return op.output;
}
@@ -5063,7 +5061,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("ComputeAccidentalHits", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ComputeAccidentalHits", name: name, keywords: dict);
int _idx = 0;
var indices = op.outputs[_idx++];
var ids = op.outputs[_idx++];
@@ -5096,7 +5094,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["concat_dim"] = concat_dim;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("Concat", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Concat", name: name, keywords: dict);
return op.output;
}
@@ -5134,7 +5132,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["concat_dim"] = concat_dim;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("ConcatOffset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ConcatOffset", name: name, keywords: dict);
int _idx = 0;
var offset = Enumerable.Range(0, op.OutputListLength("offset")).Select(_ => op.outputs[_idx++]).ToArray();
return (offset);
@@ -5165,7 +5163,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["values"] = values;
dict["axis"] = axis;
- var op = _op_def_lib._apply_op_helper("ConcatV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ConcatV2", name: name, keywords: dict);
return op.output;
}
@@ -5195,7 +5193,7 @@ namespace Tensorflow.Operations
dict["another_dataset"] = another_dataset;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("ConcatenateDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ConcatenateDataset", name: name, keywords: dict);
return op.output;
}
@@ -5242,7 +5240,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("ConditionalAccumulator", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ConditionalAccumulator", name: name, keywords: dict);
return op.output;
}
@@ -5279,7 +5277,7 @@ namespace Tensorflow.Operations
dict["tpu_embedding_config"] = tpu_embedding_config;
if (is_global_init.HasValue)
dict["is_global_init"] = is_global_init.Value;
- var op = _op_def_lib._apply_op_helper("ConfigureDistributedTPU", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ConfigureDistributedTPU", name: name, keywords: dict);
return op.output;
}
@@ -5313,7 +5311,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("Conj", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conj", name: name, keywords: dict);
return op.output;
}
@@ -5340,7 +5338,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["perm"] = perm;
- var op = _op_def_lib._apply_op_helper("ConjugateTranspose", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ConjugateTranspose", name: name, keywords: dict);
return op.output;
}
@@ -5365,7 +5363,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["value"] = value;
dict["dtype"] = dtype;
- var op = _op_def_lib._apply_op_helper("Const", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Const", name: name, keywords: dict);
return op.output;
}
@@ -5394,7 +5392,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["mutex_lock"] = mutex_lock;
- var op = _op_def_lib._apply_op_helper("ConsumeMutexLock", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ConsumeMutexLock", name: name, keywords: dict);
return op;
}
@@ -5413,7 +5411,7 @@ namespace Tensorflow.Operations
public static Operation control_trigger (string name = "ControlTrigger")
{
var dict = new Dictionary();
- var op = _op_def_lib._apply_op_helper("ControlTrigger", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ControlTrigger", name: name, keywords: dict);
return op;
}
@@ -5498,7 +5496,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("Conv2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conv2D", name: name, keywords: dict);
return op.output;
}
@@ -5566,7 +5564,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: name, keywords: dict);
return op.output;
}
@@ -5633,7 +5631,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("Conv2DBackpropInput", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conv2DBackpropInput", name: name, keywords: dict);
return op.output;
}
@@ -5694,7 +5692,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("Conv3D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conv3D", name: name, keywords: dict);
return op.output;
}
@@ -5739,7 +5737,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("Conv3DBackpropFilter", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conv3DBackpropFilter", name: name, keywords: dict);
return op.output;
}
@@ -5800,7 +5798,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("Conv3DBackpropFilterV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conv3DBackpropFilterV2", name: name, keywords: dict);
return op.output;
}
@@ -5845,7 +5843,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("Conv3DBackpropInput", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conv3DBackpropInput", name: name, keywords: dict);
return op.output;
}
@@ -5906,7 +5904,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("Conv3DBackpropInputV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Conv3DBackpropInputV2", name: name, keywords: dict);
return op.output;
}
@@ -5951,7 +5949,7 @@ namespace Tensorflow.Operations
dict["tensor_name"] = tensor_name;
if (debug_ops_spec != null)
dict["debug_ops_spec"] = debug_ops_spec;
- var op = _op_def_lib._apply_op_helper("Copy", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Copy", name: name, keywords: dict);
return op.output;
}
@@ -5994,7 +5992,7 @@ namespace Tensorflow.Operations
dict["tensor_name"] = tensor_name;
if (debug_ops_spec != null)
dict["debug_ops_spec"] = debug_ops_spec;
- var op = _op_def_lib._apply_op_helper("CopyHost", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CopyHost", name: name, keywords: dict);
return op.output;
}
@@ -6013,7 +6011,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Cos", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Cos", name: name, keywords: dict);
return op.output;
}
@@ -6032,7 +6030,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Cosh", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Cosh", name: name, keywords: dict);
return op.output;
}
@@ -6060,7 +6058,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["ref"] = referecne;
dict["limit"] = limit;
- var op = _op_def_lib._apply_op_helper("CountUpTo", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CountUpTo", name: name, keywords: dict);
return op.output;
}
@@ -6136,7 +6134,7 @@ namespace Tensorflow.Operations
dict["method"] = method;
if (extrapolation_value.HasValue)
dict["extrapolation_value"] = extrapolation_value.Value;
- var op = _op_def_lib._apply_op_helper("CropAndResize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CropAndResize", name: name, keywords: dict);
return op.output;
}
@@ -6186,7 +6184,7 @@ namespace Tensorflow.Operations
dict["box_ind"] = box_ind;
if (method != null)
dict["method"] = method;
- var op = _op_def_lib._apply_op_helper("CropAndResizeGradBoxes", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CropAndResizeGradBoxes", name: name, keywords: dict);
return op.output;
}
@@ -6241,7 +6239,7 @@ namespace Tensorflow.Operations
dict["T"] = T;
if (method != null)
dict["method"] = method;
- var op = _op_def_lib._apply_op_helper("CropAndResizeGradImage", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CropAndResizeGradImage", name: name, keywords: dict);
return op.output;
}
@@ -6271,7 +6269,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["a"] = a;
dict["b"] = b;
- var op = _op_def_lib._apply_op_helper("Cross", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Cross", name: name, keywords: dict);
return op.output;
}
@@ -6308,7 +6306,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["group_assignment"] = group_assignment;
- var op = _op_def_lib._apply_op_helper("CrossReplicaSum", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CrossReplicaSum", name: name, keywords: dict);
return op.output;
}
@@ -6401,7 +6399,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2.Value;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = _op_def_lib._apply_op_helper("CudnnRNN", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CudnnRNN", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_h = op.outputs[_idx++];
@@ -6525,7 +6523,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("CudnnRNNBackprop", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CudnnRNNBackprop", name: name, keywords: dict);
int _idx = 0;
var input_backprop = op.outputs[_idx++];
var input_h_backprop = op.outputs[_idx++];
@@ -6655,7 +6653,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("CudnnRNNBackpropV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CudnnRNNBackpropV2", name: name, keywords: dict);
int _idx = 0;
var input_backprop = op.outputs[_idx++];
var input_h_backprop = op.outputs[_idx++];
@@ -6746,7 +6744,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("CudnnRNNCanonicalToParams", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CudnnRNNCanonicalToParams", name: name, keywords: dict);
return op.output;
}
@@ -6826,7 +6824,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("CudnnRNNParamsSize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CudnnRNNParamsSize", name: name, keywords: dict);
return op.output;
}
@@ -6916,7 +6914,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("CudnnRNNParamsToCanonical", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CudnnRNNParamsToCanonical", name: name, keywords: dict);
int _idx = 0;
var weights = Enumerable.Range(0, op.OutputListLength("weights")).Select(_ => op.outputs[_idx++]).ToArray();
var biases = Enumerable.Range(0, op.OutputListLength("biases")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -7016,7 +7014,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2.Value;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = _op_def_lib._apply_op_helper("CudnnRNNV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("CudnnRNNV2", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_h = op.outputs[_idx++];
@@ -7089,7 +7087,7 @@ namespace Tensorflow.Operations
dict["exclusive"] = exclusive.Value;
if (reverse.HasValue)
dict["reverse"] = reverse.Value;
- var op = _op_def_lib._apply_op_helper("Cumprod", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Cumprod", name: name, keywords: dict);
return op.output;
}
@@ -7156,7 +7154,7 @@ namespace Tensorflow.Operations
dict["exclusive"] = exclusive.Value;
if (reverse.HasValue)
dict["reverse"] = reverse.Value;
- var op = _op_def_lib._apply_op_helper("Cumsum", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Cumsum", name: name, keywords: dict);
return op.output;
}
@@ -7191,7 +7189,7 @@ namespace Tensorflow.Operations
dict["src_format"] = src_format;
if (dst_format != null)
dict["dst_format"] = dst_format;
- var op = _op_def_lib._apply_op_helper("DataFormatDimMap", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DataFormatDimMap", name: name, keywords: dict);
return op.output;
}
@@ -7225,7 +7223,7 @@ namespace Tensorflow.Operations
dict["src_format"] = src_format;
if (dst_format != null)
dict["dst_format"] = dst_format;
- var op = _op_def_lib._apply_op_helper("DataFormatVecPermute", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DataFormatVecPermute", name: name, keywords: dict);
return op.output;
}
@@ -7249,7 +7247,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input_dataset"] = input_dataset;
- var op = _op_def_lib._apply_op_helper("DatasetToGraph", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DatasetToGraph", name: name, keywords: dict);
return op.output;
}
@@ -7278,7 +7276,7 @@ namespace Tensorflow.Operations
dict["dataset"] = dataset;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("DatasetToSingleElement", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DatasetToSingleElement", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -7309,7 +7307,7 @@ namespace Tensorflow.Operations
dict["input_dataset"] = input_dataset;
dict["filename"] = filename;
dict["compression_type"] = compression_type;
- var op = _op_def_lib._apply_op_helper("DatasetToTFRecord", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DatasetToTFRecord", name: name, keywords: dict);
return op;
}
@@ -7333,7 +7331,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("DebugGradientIdentity", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DebugGradientIdentity", name: name, keywords: dict);
return op.output;
}
@@ -7357,7 +7355,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("DebugGradientRefIdentity", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DebugGradientRefIdentity", name: name, keywords: dict);
return op.output;
}
@@ -7406,7 +7404,7 @@ namespace Tensorflow.Operations
dict["debug_urls"] = debug_urls;
if (gated_grpc.HasValue)
dict["gated_grpc"] = gated_grpc.Value;
- var op = _op_def_lib._apply_op_helper("DebugIdentity", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DebugIdentity", name: name, keywords: dict);
return op.output;
}
@@ -7455,7 +7453,7 @@ namespace Tensorflow.Operations
dict["debug_urls"] = debug_urls;
if (gated_grpc.HasValue)
dict["gated_grpc"] = gated_grpc.Value;
- var op = _op_def_lib._apply_op_helper("DebugNanCount", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DebugNanCount", name: name, keywords: dict);
return op.output;
}
@@ -7549,7 +7547,7 @@ namespace Tensorflow.Operations
dict["mute_if_healthy"] = mute_if_healthy.Value;
if (gated_grpc.HasValue)
dict["gated_grpc"] = gated_grpc.Value;
- var op = _op_def_lib._apply_op_helper("DebugNumericSummary", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DebugNumericSummary", name: name, keywords: dict);
return op.output;
}
@@ -7632,7 +7630,7 @@ namespace Tensorflow.Operations
dict["acceptable_fraction"] = acceptable_fraction.Value;
if (dct_method != null)
dict["dct_method"] = dct_method;
- var op = _op_def_lib._apply_op_helper("DecodeAndCropJpeg", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeAndCropJpeg", name: name, keywords: dict);
return op.output;
}
@@ -7657,7 +7655,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("DecodeBase64", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeBase64", name: name, keywords: dict);
return op.output;
}
@@ -7692,7 +7690,7 @@ namespace Tensorflow.Operations
dict["contents"] = contents;
if (channels.HasValue)
dict["channels"] = channels.Value;
- var op = _op_def_lib._apply_op_helper("DecodeBmp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeBmp", name: name, keywords: dict);
return op.output;
}
@@ -7746,7 +7744,7 @@ namespace Tensorflow.Operations
dict["na_value"] = na_value;
if (select_cols != null)
dict["select_cols"] = select_cols;
- var op = _op_def_lib._apply_op_helper("DecodeCSV", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeCSV", name: name, keywords: dict);
int _idx = 0;
var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray();
return (output);
@@ -7784,7 +7782,7 @@ namespace Tensorflow.Operations
dict["bytes"] = bytes;
if (compression_type != null)
dict["compression_type"] = compression_type;
- var op = _op_def_lib._apply_op_helper("DecodeCompressed", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeCompressed", name: name, keywords: dict);
return op.output;
}
@@ -7814,7 +7812,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["contents"] = contents;
- var op = _op_def_lib._apply_op_helper("DecodeGif", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeGif", name: name, keywords: dict);
return op.output;
}
@@ -7845,7 +7843,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["json_examples"] = json_examples;
- var op = _op_def_lib._apply_op_helper("DecodeJSONExample", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeJSONExample", name: name, keywords: dict);
return op.output;
}
@@ -7924,7 +7922,7 @@ namespace Tensorflow.Operations
dict["acceptable_fraction"] = acceptable_fraction.Value;
if (dct_method != null)
dict["dct_method"] = dct_method;
- var op = _op_def_lib._apply_op_helper("DecodeJpeg", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeJpeg", name: name, keywords: dict);
return op.output;
}
@@ -7971,7 +7969,7 @@ namespace Tensorflow.Operations
dict["channels"] = channels.Value;
if (dtype.HasValue)
dict["dtype"] = dtype.Value;
- var op = _op_def_lib._apply_op_helper("DecodePng", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodePng", name: name, keywords: dict);
return op.output;
}
@@ -8080,7 +8078,7 @@ namespace Tensorflow.Operations
dict["message_format"] = message_format;
if (sanitize.HasValue)
dict["sanitize"] = sanitize.Value;
- var op = _op_def_lib._apply_op_helper("DecodeProtoV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeProtoV2", name: name, keywords: dict);
int _idx = 0;
var sizes = op.outputs[_idx++];
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -8117,7 +8115,7 @@ namespace Tensorflow.Operations
dict["out_type"] = out_type;
if (little_endian.HasValue)
dict["little_endian"] = little_endian.Value;
- var op = _op_def_lib._apply_op_helper("DecodeRaw", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeRaw", name: name, keywords: dict);
return op.output;
}
@@ -8166,7 +8164,7 @@ namespace Tensorflow.Operations
dict["desired_channels"] = desired_channels.Value;
if (desired_samples.HasValue)
dict["desired_samples"] = desired_samples.Value;
- var op = _op_def_lib._apply_op_helper("DecodeWav", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DecodeWav", name: name, keywords: dict);
int _idx = 0;
var audio = op.outputs[_idx++];
var sample_rate = op.outputs[_idx++];
@@ -8191,7 +8189,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("DeepCopy", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DeepCopy", name: name, keywords: dict);
return op.output;
}
@@ -8211,7 +8209,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["handle"] = handle;
- var op = _op_def_lib._apply_op_helper("DeleteSessionTensor", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DeleteSessionTensor", name: name, keywords: dict);
return op;
}
@@ -8260,7 +8258,7 @@ namespace Tensorflow.Operations
dict["set_operation"] = set_operation;
if (validate_indices.HasValue)
dict["validate_indices"] = validate_indices.Value;
- var op = _op_def_lib._apply_op_helper("DenseToDenseSetOperation", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DenseToDenseSetOperation", name: name, keywords: dict);
int _idx = 0;
var result_indices = op.outputs[_idx++];
var result_values = op.outputs[_idx++];
@@ -8303,7 +8301,7 @@ namespace Tensorflow.Operations
dict["row_shape"] = row_shape;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("DenseToSparseBatchDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DenseToSparseBatchDataset", name: name, keywords: dict);
return op.output;
}
@@ -8371,7 +8369,7 @@ namespace Tensorflow.Operations
dict["set_operation"] = set_operation;
if (validate_indices.HasValue)
dict["validate_indices"] = validate_indices.Value;
- var op = _op_def_lib._apply_op_helper("DenseToSparseSetOperation", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DenseToSparseSetOperation", name: name, keywords: dict);
int _idx = 0;
var result_indices = op.outputs[_idx++];
var result_values = op.outputs[_idx++];
@@ -8494,7 +8492,7 @@ namespace Tensorflow.Operations
dict["block_size"] = block_size;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("DepthToSpace", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DepthToSpace", name: name, keywords: dict);
return op.output;
}
@@ -8565,7 +8563,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("DepthwiseConv2dNative", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DepthwiseConv2dNative", name: name, keywords: dict);
return op.output;
}
@@ -8632,7 +8630,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("DepthwiseConv2dNativeBackpropFilter", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DepthwiseConv2dNativeBackpropFilter", name: name, keywords: dict);
return op.output;
}
@@ -8699,7 +8697,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("DepthwiseConv2dNativeBackpropInput", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DepthwiseConv2dNativeBackpropInput", name: name, keywords: dict);
return op.output;
}
@@ -8805,7 +8803,7 @@ namespace Tensorflow.Operations
dict["max_range"] = max_range;
if (mode != null)
dict["mode"] = mode;
- var op = _op_def_lib._apply_op_helper("Dequantize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Dequantize", name: name, keywords: dict);
return op.output;
}
@@ -8830,7 +8828,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["resource_handle"] = resource_handle;
dict["serialized"] = serialized;
- var op = _op_def_lib._apply_op_helper("DeserializeIterator", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DeserializeIterator", name: name, keywords: dict);
return op;
}
@@ -8903,7 +8901,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["serialized_sparse"] = serialized_sparse;
dict["dtype"] = dtype;
- var op = _op_def_lib._apply_op_helper("DeserializeManySparse", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DeserializeManySparse", name: name, keywords: dict);
int _idx = 0;
var sparse_indices = op.outputs[_idx++];
var sparse_values = op.outputs[_idx++];
@@ -8980,7 +8978,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["serialized_sparse"] = serialized_sparse;
dict["dtype"] = dtype;
- var op = _op_def_lib._apply_op_helper("DeserializeSparse", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DeserializeSparse", name: name, keywords: dict);
int _idx = 0;
var sparse_indices = op.outputs[_idx++];
var sparse_values = op.outputs[_idx++];
@@ -9014,7 +9012,7 @@ namespace Tensorflow.Operations
dict["resource"] = resource;
if (ignore_lookup_error.HasValue)
dict["ignore_lookup_error"] = ignore_lookup_error.Value;
- var op = _op_def_lib._apply_op_helper("DestroyResourceOp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DestroyResourceOp", name: name, keywords: dict);
return op;
}
@@ -9049,7 +9047,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["ref"] = referecne;
dict["var_name"] = var_name;
- var op = _op_def_lib._apply_op_helper("DestroyTemporaryVariable", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DestroyTemporaryVariable", name: name, keywords: dict);
return op.output;
}
@@ -9088,7 +9086,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["diagonal"] = diagonal;
- var op = _op_def_lib._apply_op_helper("Diag", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Diag", name: name, keywords: dict);
return op.output;
}
@@ -9129,7 +9127,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("DiagPart", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DiagPart", name: name, keywords: dict);
return op.output;
}
@@ -9151,7 +9149,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Digamma", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Digamma", name: name, keywords: dict);
return op.output;
}
@@ -9218,7 +9216,7 @@ namespace Tensorflow.Operations
dict["strides"] = strides;
dict["rates"] = rates;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("Dilation2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Dilation2D", name: name, keywords: dict);
return op.output;
}
@@ -9264,7 +9262,7 @@ namespace Tensorflow.Operations
dict["strides"] = strides;
dict["rates"] = rates;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("Dilation2DBackpropFilter", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Dilation2DBackpropFilter", name: name, keywords: dict);
return op.output;
}
@@ -9310,7 +9308,7 @@ namespace Tensorflow.Operations
dict["strides"] = strides;
dict["rates"] = rates;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("Dilation2DBackpropInput", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Dilation2DBackpropInput", name: name, keywords: dict);
return op.output;
}
@@ -9336,7 +9334,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Div", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Div", name: name, keywords: dict);
return op.output;
}
@@ -9363,7 +9361,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("DivNoNan", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DivNoNan", name: name, keywords: dict);
return op.output;
}
@@ -9403,7 +9401,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["images"] = images;
dict["boxes"] = boxes;
- var op = _op_def_lib._apply_op_helper("DrawBoundingBoxes", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DrawBoundingBoxes", name: name, keywords: dict);
return op.output;
}
@@ -9470,7 +9468,7 @@ namespace Tensorflow.Operations
dict["data"] = data;
dict["partitions"] = partitions;
dict["num_partitions"] = num_partitions;
- var op = _op_def_lib._apply_op_helper("DynamicPartition", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DynamicPartition", name: name, keywords: dict);
int _idx = 0;
var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray();
return (outputs);
@@ -9558,7 +9556,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["indices"] = indices;
dict["data"] = data;
- var op = _op_def_lib._apply_op_helper("DynamicStitch", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("DynamicStitch", name: name, keywords: dict);
return op.output;
}
@@ -9648,7 +9646,7 @@ namespace Tensorflow.Operations
dict["truth_shape"] = truth_shape;
if (normalize.HasValue)
dict["normalize"] = normalize.Value;
- var op = _op_def_lib._apply_op_helper("EditDistance", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EditDistance", name: name, keywords: dict);
return op.output;
}
@@ -9671,7 +9669,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["features"] = features;
- var op = _op_def_lib._apply_op_helper("Elu", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Elu", name: name, keywords: dict);
return op.output;
}
@@ -9697,7 +9695,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["gradients"] = gradients;
dict["outputs"] = outputs;
- var op = _op_def_lib._apply_op_helper("EluGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EluGrad", name: name, keywords: dict);
return op.output;
}
@@ -9729,7 +9727,7 @@ namespace Tensorflow.Operations
dict["dtype"] = dtype;
if (init.HasValue)
dict["init"] = init.Value;
- var op = _op_def_lib._apply_op_helper("Empty", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Empty", name: name, keywords: dict);
return op.output;
}
@@ -9760,7 +9758,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["element_shape"] = element_shape;
dict["element_dtype"] = element_dtype;
- var op = _op_def_lib._apply_op_helper("EmptyTensorList", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EmptyTensorList", name: name, keywords: dict);
return op.output;
}
@@ -9794,7 +9792,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (pad.HasValue)
dict["pad"] = pad.Value;
- var op = _op_def_lib._apply_op_helper("EncodeBase64", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EncodeBase64", name: name, keywords: dict);
return op.output;
}
@@ -9879,7 +9877,7 @@ namespace Tensorflow.Operations
dict["y_density"] = y_density.Value;
if (xmp_metadata != null)
dict["xmp_metadata"] = xmp_metadata;
- var op = _op_def_lib._apply_op_helper("EncodeJpeg", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EncodeJpeg", name: name, keywords: dict);
return op.output;
}
@@ -9918,7 +9916,7 @@ namespace Tensorflow.Operations
dict["image"] = image;
if (compression.HasValue)
dict["compression"] = compression.Value;
- var op = _op_def_lib._apply_op_helper("EncodePng", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EncodePng", name: name, keywords: dict);
return op.output;
}
@@ -9996,7 +9994,7 @@ namespace Tensorflow.Operations
dict["message_type"] = message_type;
if (descriptor_source != null)
dict["descriptor_source"] = descriptor_source;
- var op = _op_def_lib._apply_op_helper("EncodeProto", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EncodeProto", name: name, keywords: dict);
return op.output;
}
@@ -10030,7 +10028,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["audio"] = audio;
dict["sample_rate"] = sample_rate;
- var op = _op_def_lib._apply_op_helper("EncodeWav", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EncodeWav", name: name, keywords: dict);
return op.output;
}
@@ -10060,7 +10058,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("EnsureShape", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("EnsureShape", name: name, keywords: dict);
return op.output;
}
@@ -10103,7 +10101,7 @@ namespace Tensorflow.Operations
dict["is_constant"] = is_constant.Value;
if (parallel_iterations.HasValue)
dict["parallel_iterations"] = parallel_iterations.Value;
- var op = _op_def_lib._apply_op_helper("Enter", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Enter", name: name, keywords: dict);
return op.output;
}
@@ -10129,7 +10127,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Equal", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Equal", name: name, keywords: dict);
return op.output;
}
@@ -10148,7 +10146,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Erf", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Erf", name: name, keywords: dict);
return op.output;
}
@@ -10167,7 +10165,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Erfc", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Erfc", name: name, keywords: dict);
return op.output;
}
@@ -10191,7 +10189,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["data"] = data;
- var op = _op_def_lib._apply_op_helper("Exit", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Exit", name: name, keywords: dict);
return op.output;
}
@@ -10210,7 +10208,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Exp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Exp", name: name, keywords: dict);
return op.output;
}
@@ -10269,7 +10267,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["dim"] = dim;
- var op = _op_def_lib._apply_op_helper("ExpandDims", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ExpandDims", name: name, keywords: dict);
return op.output;
}
@@ -10291,7 +10289,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Expm1", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Expm1", name: name, keywords: dict);
return op.output;
}
@@ -10366,7 +10364,7 @@ namespace Tensorflow.Operations
dict["normalized"] = normalized.Value;
if (uniform_noise.HasValue)
dict["uniform_noise"] = uniform_noise.Value;
- var op = _op_def_lib._apply_op_helper("ExtractGlimpse", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ExtractGlimpse", name: name, keywords: dict);
return op.output;
}
@@ -10424,7 +10422,7 @@ namespace Tensorflow.Operations
dict["strides"] = strides;
dict["rates"] = rates;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("ExtractImagePatches", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ExtractImagePatches", name: name, keywords: dict);
return op.output;
}
@@ -10454,7 +10452,7 @@ namespace Tensorflow.Operations
dict["contents"] = contents;
if (output_type.HasValue)
dict["output_type"] = output_type.Value;
- var op = _op_def_lib._apply_op_helper("ExtractJpegShape", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ExtractJpegShape", name: name, keywords: dict);
return op.output;
}
@@ -10484,7 +10482,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("FFT", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FFT", name: name, keywords: dict);
return op.output;
}
@@ -10514,7 +10512,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("FFT2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FFT2D", name: name, keywords: dict);
return op.output;
}
@@ -10544,7 +10542,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("FFT3D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FFT3D", name: name, keywords: dict);
return op.output;
}
@@ -10592,7 +10590,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("FIFOQueue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FIFOQueue", name: name, keywords: dict);
return op.output;
}
@@ -10640,7 +10638,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("FIFOQueueV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FIFOQueueV2", name: name, keywords: dict);
return op.output;
}
@@ -10671,7 +10669,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["dtype"] = dtype;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("FakeParam", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FakeParam", name: name, keywords: dict);
return op.output;
}
@@ -10715,7 +10713,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxArgs", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxArgs", name: name, keywords: dict);
return op.output;
}
@@ -10757,7 +10755,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxArgsGradient", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxArgsGradient", name: name, keywords: dict);
return op.output;
}
@@ -10802,7 +10800,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxVars", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxVars", name: name, keywords: dict);
return op.output;
}
@@ -10850,7 +10848,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsGradient", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsGradient", name: name, keywords: dict);
int _idx = 0;
var backprops_wrt_input = op.outputs[_idx++];
var backprop_wrt_min = op.outputs[_idx++];
@@ -10900,7 +10898,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannel", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannel", name: name, keywords: dict);
return op.output;
}
@@ -10951,7 +10949,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (narrow_range.HasValue)
dict["narrow_range"] = narrow_range.Value;
- var op = _op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannelGradient", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannelGradient", name: name, keywords: dict);
int _idx = 0;
var backprops_wrt_input = op.outputs[_idx++];
var backprop_wrt_min = op.outputs[_idx++];
@@ -10974,7 +10972,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["resource"] = resource;
- var op = _op_def_lib._apply_op_helper("FakeQueue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FakeQueue", name: name, keywords: dict);
return op.output;
}
@@ -11023,7 +11021,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["dims"] = dims;
dict["value"] = value;
- var op = _op_def_lib._apply_op_helper("Fill", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Fill", name: name, keywords: dict);
return op.output;
}
@@ -11050,7 +11048,7 @@ namespace Tensorflow.Operations
dict["input_dataset"] = input_dataset;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("FilterByLastComponentDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FilterByLastComponentDataset", name: name, keywords: dict);
return op.output;
}
@@ -11089,7 +11087,7 @@ namespace Tensorflow.Operations
dict["record_bytes"] = record_bytes;
dict["footer_bytes"] = footer_bytes;
dict["buffer_size"] = buffer_size;
- var op = _op_def_lib._apply_op_helper("FixedLengthRecordDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FixedLengthRecordDataset", name: name, keywords: dict);
return op.output;
}
@@ -11139,7 +11137,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("FixedLengthRecordReader", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FixedLengthRecordReader", name: name, keywords: dict);
return op.output;
}
@@ -11195,7 +11193,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (encoding != null)
dict["encoding"] = encoding;
- var op = _op_def_lib._apply_op_helper("FixedLengthRecordReaderV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FixedLengthRecordReaderV2", name: name, keywords: dict);
return op.output;
}
@@ -11323,7 +11321,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("FixedUnigramCandidateSampler", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FixedUnigramCandidateSampler", name: name, keywords: dict);
int _idx = 0;
var sampled_candidates = op.outputs[_idx++];
var true_expected_count = op.outputs[_idx++];
@@ -11346,7 +11344,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Floor", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Floor", name: name, keywords: dict);
return op.output;
}
@@ -11372,7 +11370,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("FloorDiv", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FloorDiv", name: name, keywords: dict);
return op.output;
}
@@ -11401,7 +11399,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("FloorMod", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FloorMod", name: name, keywords: dict);
return op.output;
}
@@ -11481,7 +11479,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("FractionalAvgPool", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FractionalAvgPool", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var row_pooling_sequence = op.outputs[_idx++];
@@ -11541,7 +11539,7 @@ namespace Tensorflow.Operations
dict["col_pooling_sequence"] = col_pooling_sequence;
if (overlapping.HasValue)
dict["overlapping"] = overlapping.Value;
- var op = _op_def_lib._apply_op_helper("FractionalAvgPoolGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FractionalAvgPoolGrad", name: name, keywords: dict);
return op.output;
}
@@ -11645,7 +11643,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("FractionalMaxPool", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FractionalMaxPool", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var row_pooling_sequence = op.outputs[_idx++];
@@ -11702,7 +11700,7 @@ namespace Tensorflow.Operations
dict["col_pooling_sequence"] = col_pooling_sequence;
if (overlapping.HasValue)
dict["overlapping"] = overlapping.Value;
- var op = _op_def_lib._apply_op_helper("FractionalMaxPoolGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FractionalMaxPoolGrad", name: name, keywords: dict);
return op.output;
}
@@ -11770,7 +11768,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = _op_def_lib._apply_op_helper("FusedBatchNorm", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FusedBatchNorm", name: name, keywords: dict);
int _idx = 0;
var y = op.outputs[_idx++];
var batch_mean = op.outputs[_idx++];
@@ -11847,7 +11845,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = _op_def_lib._apply_op_helper("FusedBatchNormGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGrad", name: name, keywords: dict);
int _idx = 0;
var x_backprop = op.outputs[_idx++];
var scale_backprop = op.outputs[_idx++];
@@ -11924,7 +11922,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = _op_def_lib._apply_op_helper("FusedBatchNormGradV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FusedBatchNormGradV2", name: name, keywords: dict);
int _idx = 0;
var x_backprop = op.outputs[_idx++];
var scale_backprop = op.outputs[_idx++];
@@ -11998,7 +11996,7 @@ namespace Tensorflow.Operations
dict["data_format"] = data_format;
if (is_training.HasValue)
dict["is_training"] = is_training.Value;
- var op = _op_def_lib._apply_op_helper("FusedBatchNormV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FusedBatchNormV2", name: name, keywords: dict);
int _idx = 0;
var y = op.outputs[_idx++];
var batch_mean = op.outputs[_idx++];
@@ -12062,7 +12060,7 @@ namespace Tensorflow.Operations
dict["mode"] = mode;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("FusedPadConv2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FusedPadConv2D", name: name, keywords: dict);
return op.output;
}
@@ -12130,7 +12128,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (resize_align_corners.HasValue)
dict["resize_align_corners"] = resize_align_corners.Value;
- var op = _op_def_lib._apply_op_helper("FusedResizeAndPadConv2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("FusedResizeAndPadConv2D", name: name, keywords: dict);
return op.output;
}
@@ -12183,7 +12181,7 @@ namespace Tensorflow.Operations
dict["indices"] = indices;
if (validate_indices.HasValue)
dict["validate_indices"] = validate_indices.Value;
- var op = _op_def_lib._apply_op_helper("Gather", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Gather", name: name, keywords: dict);
return op.output;
}
@@ -12315,7 +12313,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["params"] = parameters;
dict["indices"] = indices;
- var op = _op_def_lib._apply_op_helper("GatherNd", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("GatherNd", name: name, keywords: dict);
return op.output;
}
@@ -12376,7 +12374,7 @@ namespace Tensorflow.Operations
dict["params"] = parameters;
dict["indices"] = indices;
dict["axis"] = axis;
- var op = _op_def_lib._apply_op_helper("GatherV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("GatherV2", name: name, keywords: dict);
return op.output;
}
@@ -12451,7 +12449,7 @@ namespace Tensorflow.Operations
dict["num_new_vocab"] = num_new_vocab;
if (old_vocab_size.HasValue)
dict["old_vocab_size"] = old_vocab_size.Value;
- var op = _op_def_lib._apply_op_helper("GenerateVocabRemapping", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("GenerateVocabRemapping", name: name, keywords: dict);
int _idx = 0;
var remapping = op.outputs[_idx++];
var num_present = op.outputs[_idx++];
@@ -12476,7 +12474,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["value"] = value;
- var op = _op_def_lib._apply_op_helper("GetSessionHandle", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("GetSessionHandle", name: name, keywords: dict);
return op.output;
}
@@ -12498,7 +12496,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["value"] = value;
- var op = _op_def_lib._apply_op_helper("GetSessionHandleV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("GetSessionHandleV2", name: name, keywords: dict);
return op.output;
}
@@ -12524,7 +12522,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["handle"] = handle;
dict["dtype"] = dtype;
- var op = _op_def_lib._apply_op_helper("GetSessionTensor", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("GetSessionTensor", name: name, keywords: dict);
return op.output;
}
@@ -12550,7 +12548,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Greater", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Greater", name: name, keywords: dict);
return op.output;
}
@@ -12576,7 +12574,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("GreaterEqual", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("GreaterEqual", name: name, keywords: dict);
return op.output;
}
@@ -12603,7 +12601,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("GuaranteeConst", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("GuaranteeConst", name: name, keywords: dict);
return op.output;
}
@@ -12631,7 +12629,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["images"] = images;
- var op = _op_def_lib._apply_op_helper("HSVToRGB", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("HSVToRGB", name: name, keywords: dict);
return op.output;
}
@@ -12681,7 +12679,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (use_node_name_sharing.HasValue)
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
- var op = _op_def_lib._apply_op_helper("HashTable", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("HashTable", name: name, keywords: dict);
return op.output;
}
@@ -12731,7 +12729,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (use_node_name_sharing.HasValue)
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
- var op = _op_def_lib._apply_op_helper("HashTableV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("HashTableV2", name: name, keywords: dict);
return op.output;
}
@@ -12783,7 +12781,7 @@ namespace Tensorflow.Operations
dict["nbins"] = nbins;
if (dtype.HasValue)
dict["dtype"] = dtype.Value;
- var op = _op_def_lib._apply_op_helper("HistogramFixedWidth", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("HistogramFixedWidth", name: name, keywords: dict);
return op.output;
}
@@ -12815,7 +12813,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["tag"] = tag;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("HistogramSummary", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("HistogramSummary", name: name, keywords: dict);
return op.output;
}
@@ -12840,7 +12838,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["value"] = value;
dict["dtype"] = dtype;
- var op = _op_def_lib._apply_op_helper("HostConst", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("HostConst", name: name, keywords: dict);
return op.output;
}
@@ -12870,7 +12868,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("IFFT", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IFFT", name: name, keywords: dict);
return op.output;
}
@@ -12900,7 +12898,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("IFFT2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IFFT2D", name: name, keywords: dict);
return op.output;
}
@@ -12930,7 +12928,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("IFFT3D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IFFT3D", name: name, keywords: dict);
return op.output;
}
@@ -12976,7 +12974,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = _op_def_lib._apply_op_helper("IRFFT", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IRFFT", name: name, keywords: dict);
return op.output;
}
@@ -13023,7 +13021,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = _op_def_lib._apply_op_helper("IRFFT2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IRFFT2D", name: name, keywords: dict);
return op.output;
}
@@ -13070,7 +13068,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = _op_def_lib._apply_op_helper("IRFFT3D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IRFFT3D", name: name, keywords: dict);
return op.output;
}
@@ -13089,7 +13087,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("Identity", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Identity", name: name, keywords: dict);
return op.output;
}
@@ -13125,7 +13123,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("IdentityN", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IdentityN", name: name, keywords: dict);
int _idx = 0;
var output = Enumerable.Range(0, op.OutputListLength("output")).Select(_ => op.outputs[_idx++]).ToArray();
return (output);
@@ -13160,7 +13158,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("IdentityReader", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IdentityReader", name: name, keywords: dict);
return op.output;
}
@@ -13193,7 +13191,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("IdentityReaderV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IdentityReaderV2", name: name, keywords: dict);
return op.output;
}
@@ -13230,7 +13228,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["a"] = a;
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Igamma", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Igamma", name: name, keywords: dict);
return op.output;
}
@@ -13252,7 +13250,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["a"] = a;
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("IgammaGradA", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IgammaGradA", name: name, keywords: dict);
return op.output;
}
@@ -13288,7 +13286,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["a"] = a;
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Igammac", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Igammac", name: name, keywords: dict);
return op.output;
}
@@ -13324,7 +13322,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (Tout.HasValue)
dict["Tout"] = Tout.Value;
- var op = _op_def_lib._apply_op_helper("Imag", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Imag", name: name, keywords: dict);
return op.output;
}
@@ -13395,7 +13393,7 @@ namespace Tensorflow.Operations
dict["max_images"] = max_images.Value;
if (bad_color != null)
dict["bad_color"] = bad_color;
- var op = _op_def_lib._apply_op_helper("ImageSummary", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ImageSummary", name: name, keywords: dict);
return op.output;
}
@@ -13430,7 +13428,7 @@ namespace Tensorflow.Operations
dict["dtype"] = dtype;
dict["shape"] = shape;
dict["memory_region_name"] = memory_region_name;
- var op = _op_def_lib._apply_op_helper("ImmutableConst", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ImmutableConst", name: name, keywords: dict);
return op.output;
}
@@ -13476,7 +13474,7 @@ namespace Tensorflow.Operations
dict["predictions"] = predictions;
dict["targets"] = targets;
dict["k"] = k;
- var op = _op_def_lib._apply_op_helper("InTopK", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InTopK", name: name, keywords: dict);
return op.output;
}
@@ -13521,7 +13519,7 @@ namespace Tensorflow.Operations
dict["predictions"] = predictions;
dict["targets"] = targets;
dict["k"] = k;
- var op = _op_def_lib._apply_op_helper("InTopKV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InTopKV2", name: name, keywords: dict);
return op.output;
}
@@ -13548,7 +13546,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["dtype"] = dtype;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("InfeedDequeue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InfeedDequeue", name: name, keywords: dict);
return op.output;
}
@@ -13578,7 +13576,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["dtypes"] = dtypes;
dict["shapes"] = shapes;
- var op = _op_def_lib._apply_op_helper("InfeedDequeueTuple", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InfeedDequeueTuple", name: name, keywords: dict);
int _idx = 0;
var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray();
return (outputs);
@@ -13612,7 +13610,7 @@ namespace Tensorflow.Operations
dict["shape"] = shape;
if (device_ordinal.HasValue)
dict["device_ordinal"] = device_ordinal.Value;
- var op = _op_def_lib._apply_op_helper("InfeedEnqueue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InfeedEnqueue", name: name, keywords: dict);
return op;
}
@@ -13644,7 +13642,7 @@ namespace Tensorflow.Operations
dict["shapes"] = shapes;
if (device_ordinal.HasValue)
dict["device_ordinal"] = device_ordinal.Value;
- var op = _op_def_lib._apply_op_helper("InfeedEnqueueTuple", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InfeedEnqueueTuple", name: name, keywords: dict);
return op;
}
@@ -13672,7 +13670,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("InitializeTable", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InitializeTable", name: name, keywords: dict);
return op;
}
@@ -13729,7 +13727,7 @@ namespace Tensorflow.Operations
dict["vocab_size"] = vocab_size.Value;
if (delimiter != null)
dict["delimiter"] = delimiter;
- var op = _op_def_lib._apply_op_helper("InitializeTableFromTextFile", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InitializeTableFromTextFile", name: name, keywords: dict);
return op;
}
@@ -13786,7 +13784,7 @@ namespace Tensorflow.Operations
dict["vocab_size"] = vocab_size.Value;
if (delimiter != null)
dict["delimiter"] = delimiter;
- var op = _op_def_lib._apply_op_helper("InitializeTableFromTextFileV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InitializeTableFromTextFileV2", name: name, keywords: dict);
return op;
}
@@ -13814,7 +13812,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("InitializeTableV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InitializeTableV2", name: name, keywords: dict);
return op;
}
@@ -13845,7 +13843,7 @@ namespace Tensorflow.Operations
dict["x"] = x;
dict["i"] = i;
dict["v"] = v;
- var op = _op_def_lib._apply_op_helper("InplaceAdd", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InplaceAdd", name: name, keywords: dict);
return op.output;
}
@@ -13876,7 +13874,7 @@ namespace Tensorflow.Operations
dict["x"] = x;
dict["i"] = i;
dict["v"] = v;
- var op = _op_def_lib._apply_op_helper("InplaceSub", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InplaceSub", name: name, keywords: dict);
return op.output;
}
@@ -13907,7 +13905,7 @@ namespace Tensorflow.Operations
dict["x"] = x;
dict["i"] = i;
dict["v"] = v;
- var op = _op_def_lib._apply_op_helper("InplaceUpdate", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InplaceUpdate", name: name, keywords: dict);
return op.output;
}
@@ -13929,7 +13927,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Inv", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Inv", name: name, keywords: dict);
return op.output;
}
@@ -13955,7 +13953,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["y"] = y;
dict["dy"] = dy;
- var op = _op_def_lib._apply_op_helper("InvGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InvGrad", name: name, keywords: dict);
return op.output;
}
@@ -13978,7 +13976,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Invert", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Invert", name: name, keywords: dict);
return op.output;
}
@@ -14016,7 +14014,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("InvertPermutation", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("InvertPermutation", name: name, keywords: dict);
return op.output;
}
@@ -14037,7 +14035,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["tree_ensemble_handle"] = tree_ensemble_handle;
- var op = _op_def_lib._apply_op_helper("IsBoostedTreesEnsembleInitialized", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IsBoostedTreesEnsembleInitialized", name: name, keywords: dict);
return op.output;
}
@@ -14061,7 +14059,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("IsFinite", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IsFinite", name: name, keywords: dict);
return op.output;
}
@@ -14085,7 +14083,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("IsInf", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IsInf", name: name, keywords: dict);
return op.output;
}
@@ -14109,7 +14107,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("IsNan", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IsNan", name: name, keywords: dict);
return op.output;
}
@@ -14132,7 +14130,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["ref"] = referecne;
- var op = _op_def_lib._apply_op_helper("IsVariableInitialized", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IsVariableInitialized", name: name, keywords: dict);
return op.output;
}
@@ -14166,7 +14164,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("Iterator", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Iterator", name: name, keywords: dict);
return op.output;
}
@@ -14199,7 +14197,7 @@ namespace Tensorflow.Operations
dict["output_types"] = output_types;
if (output_shapes != null)
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("IteratorFromStringHandle", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IteratorFromStringHandle", name: name, keywords: dict);
return op.output;
}
@@ -14226,7 +14224,7 @@ namespace Tensorflow.Operations
dict["iterator"] = iterator;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("IteratorGetNext", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IteratorGetNext", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -14255,7 +14253,7 @@ namespace Tensorflow.Operations
dict["iterator"] = iterator;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("IteratorGetNextAsOptional", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IteratorGetNextAsOptional", name: name, keywords: dict);
return op.output;
}
@@ -14288,7 +14286,7 @@ namespace Tensorflow.Operations
dict["iterator"] = iterator;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("IteratorGetNextSync", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IteratorGetNextSync", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -14311,7 +14309,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["resource_handle"] = resource_handle;
- var op = _op_def_lib._apply_op_helper("IteratorToStringHandle", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("IteratorToStringHandle", name: name, keywords: dict);
return op.output;
}
@@ -14337,7 +14335,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["t"] = t;
- var op = _op_def_lib._apply_op_helper("L2Loss", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("L2Loss", name: name, keywords: dict);
return op.output;
}
@@ -14366,7 +14364,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("LMDBReader", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LMDBReader", name: name, keywords: dict);
return op.output;
}
@@ -14419,7 +14417,7 @@ namespace Tensorflow.Operations
dict["alpha"] = alpha.Value;
if (beta.HasValue)
dict["beta"] = beta.Value;
- var op = _op_def_lib._apply_op_helper("LRN", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LRN", name: name, keywords: dict);
return op.output;
}
@@ -14468,7 +14466,7 @@ namespace Tensorflow.Operations
dict["alpha"] = alpha.Value;
if (beta.HasValue)
dict["beta"] = beta.Value;
- var op = _op_def_lib._apply_op_helper("LRNGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LRNGrad", name: name, keywords: dict);
return op.output;
}
@@ -14498,7 +14496,7 @@ namespace Tensorflow.Operations
dict["tag"] = tag;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("LatencyStatsDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LatencyStatsDataset", name: name, keywords: dict);
return op.output;
}
@@ -14574,7 +14572,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("LearnedUnigramCandidateSampler", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LearnedUnigramCandidateSampler", name: name, keywords: dict);
int _idx = 0;
var sampled_candidates = op.outputs[_idx++];
var true_expected_count = op.outputs[_idx++];
@@ -14604,7 +14602,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("LeftShift", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LeftShift", name: name, keywords: dict);
return op.output;
}
@@ -14630,7 +14628,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Less", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Less", name: name, keywords: dict);
return op.output;
}
@@ -14656,7 +14654,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("LessEqual", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LessEqual", name: name, keywords: dict);
return op.output;
}
@@ -14675,7 +14673,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Lgamma", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Lgamma", name: name, keywords: dict);
return op.output;
}
@@ -14715,7 +14713,7 @@ namespace Tensorflow.Operations
dict["start"] = start;
dict["stop"] = stop;
dict["num"] = num;
- var op = _op_def_lib._apply_op_helper("LinSpace", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LinSpace", name: name, keywords: dict);
return op.output;
}
@@ -14769,7 +14767,7 @@ namespace Tensorflow.Operations
dict["y"] = y;
if (out_idx.HasValue)
dict["out_idx"] = out_idx.Value;
- var op = _op_def_lib._apply_op_helper("ListDiff", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ListDiff", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var idx = op.outputs[_idx++];
@@ -14871,7 +14869,7 @@ namespace Tensorflow.Operations
dict["num_cols"] = num_cols;
if (max_rows_in_memory.HasValue)
dict["max_rows_in_memory"] = max_rows_in_memory.Value;
- var op = _op_def_lib._apply_op_helper("LoadAndRemapMatrix", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LoadAndRemapMatrix", name: name, keywords: dict);
return op.output;
}
@@ -14893,7 +14891,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Log", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Log", name: name, keywords: dict);
return op.output;
}
@@ -14915,7 +14913,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Log1p", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Log1p", name: name, keywords: dict);
return op.output;
}
@@ -14950,7 +14948,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("LogMatrixDeterminant", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LogMatrixDeterminant", name: name, keywords: dict);
int _idx = 0;
var sign = op.outputs[_idx++];
var log_abs_determinant = op.outputs[_idx++];
@@ -14979,7 +14977,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["logits"] = logits;
- var op = _op_def_lib._apply_op_helper("LogSoftmax", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LogSoftmax", name: name, keywords: dict);
return op.output;
}
@@ -15055,7 +15053,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("LogUniformCandidateSampler", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LogUniformCandidateSampler", name: name, keywords: dict);
int _idx = 0;
var sampled_candidates = op.outputs[_idx++];
var true_expected_count = op.outputs[_idx++];
@@ -15085,7 +15083,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("LogicalAnd", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LogicalAnd", name: name, keywords: dict);
return op.output;
}
@@ -15104,7 +15102,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("LogicalNot", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LogicalNot", name: name, keywords: dict);
return op.output;
}
@@ -15130,7 +15128,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("LogicalOr", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LogicalOr", name: name, keywords: dict);
return op.output;
}
@@ -15161,7 +15159,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["Tkeys"] = Tkeys;
dict["Tvalues"] = Tvalues;
- var op = _op_def_lib._apply_op_helper("LookupTableExport", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableExport", name: name, keywords: dict);
int _idx = 0;
var keys = op.outputs[_idx++];
var values = op.outputs[_idx++];
@@ -15195,7 +15193,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["Tkeys"] = Tkeys;
dict["Tvalues"] = Tvalues;
- var op = _op_def_lib._apply_op_helper("LookupTableExportV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableExportV2", name: name, keywords: dict);
int _idx = 0;
var keys = op.outputs[_idx++];
var values = op.outputs[_idx++];
@@ -15234,7 +15232,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["default_value"] = default_value;
- var op = _op_def_lib._apply_op_helper("LookupTableFind", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableFind", name: name, keywords: dict);
return op.output;
}
@@ -15270,7 +15268,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["default_value"] = default_value;
- var op = _op_def_lib._apply_op_helper("LookupTableFindV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableFindV2", name: name, keywords: dict);
return op.output;
}
@@ -15302,7 +15300,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("LookupTableImport", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableImport", name: name, keywords: dict);
return op;
}
@@ -15334,7 +15332,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("LookupTableImportV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableImportV2", name: name, keywords: dict);
return op;
}
@@ -15366,7 +15364,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("LookupTableInsert", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableInsert", name: name, keywords: dict);
return op;
}
@@ -15398,7 +15396,7 @@ namespace Tensorflow.Operations
dict["table_handle"] = table_handle;
dict["keys"] = keys;
dict["values"] = values;
- var op = _op_def_lib._apply_op_helper("LookupTableInsertV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableInsertV2", name: name, keywords: dict);
return op;
}
@@ -15419,7 +15417,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["table_handle"] = table_handle;
- var op = _op_def_lib._apply_op_helper("LookupTableSize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableSize", name: name, keywords: dict);
return op.output;
}
@@ -15440,7 +15438,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["table_handle"] = table_handle;
- var op = _op_def_lib._apply_op_helper("LookupTableSizeV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LookupTableSizeV2", name: name, keywords: dict);
return op.output;
}
@@ -15465,7 +15463,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("LoopCond", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("LoopCond", name: name, keywords: dict);
return op.output;
}
@@ -15491,7 +15489,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["dataset"] = dataset;
dict["iterator"] = iterator;
- var op = _op_def_lib._apply_op_helper("MakeIterator", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MakeIterator", name: name, keywords: dict);
return op;
}
@@ -15527,7 +15525,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("MapClear", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MapClear", name: name, keywords: dict);
return op;
}
@@ -15563,7 +15561,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("MapIncompleteSize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MapIncompleteSize", name: name, keywords: dict);
return op.output;
}
@@ -15609,7 +15607,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("MapPeek", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MapPeek", name: name, keywords: dict);
int _idx = 0;
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
return (values);
@@ -15647,7 +15645,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("MapSize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MapSize", name: name, keywords: dict);
return op.output;
}
@@ -15700,7 +15698,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("MapStage", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MapStage", name: name, keywords: dict);
return op;
}
@@ -15746,7 +15744,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("MapUnstage", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MapUnstage", name: name, keywords: dict);
int _idx = 0;
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
return (values);
@@ -15794,7 +15792,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("MapUnstageNoKey", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MapUnstageNoKey", name: name, keywords: dict);
int _idx = 0;
var key = op.outputs[_idx++];
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -15838,7 +15836,7 @@ namespace Tensorflow.Operations
dict["transpose_a"] = transpose_a.Value;
if (transpose_b.HasValue)
dict["transpose_b"] = transpose_b.Value;
- var op = _op_def_lib._apply_op_helper("MatMul", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatMul", name: name, keywords: dict);
return op.output;
}
@@ -15864,7 +15862,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["pattern"] = pattern;
- var op = _op_def_lib._apply_op_helper("MatchingFiles", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatchingFiles", name: name, keywords: dict);
return op.output;
}
@@ -15936,7 +15934,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["num_lower"] = num_lower;
dict["num_upper"] = num_upper;
- var op = _op_def_lib._apply_op_helper("MatrixBandPart", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixBandPart", name: name, keywords: dict);
return op.output;
}
@@ -15962,7 +15960,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("MatrixDeterminant", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixDeterminant", name: name, keywords: dict);
return op.output;
}
@@ -16011,7 +16009,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["diagonal"] = diagonal;
- var op = _op_def_lib._apply_op_helper("MatrixDiag", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixDiag", name: name, keywords: dict);
return op.output;
}
@@ -16063,7 +16061,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("MatrixDiagPart", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixDiagPart", name: name, keywords: dict);
return op.output;
}
@@ -16082,7 +16080,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("MatrixExponential", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixExponential", name: name, keywords: dict);
return op.output;
}
@@ -16124,7 +16122,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (adjoint.HasValue)
dict["adjoint"] = adjoint.Value;
- var op = _op_def_lib._apply_op_helper("MatrixInverse", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixInverse", name: name, keywords: dict);
return op.output;
}
@@ -16166,7 +16164,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("MatrixLogarithm", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixLogarithm", name: name, keywords: dict);
return op.output;
}
@@ -16205,7 +16203,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["diagonal"] = diagonal;
- var op = _op_def_lib._apply_op_helper("MatrixSetDiag", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixSetDiag", name: name, keywords: dict);
return op.output;
}
@@ -16244,7 +16242,7 @@ namespace Tensorflow.Operations
dict["rhs"] = rhs;
if (adjoint.HasValue)
dict["adjoint"] = adjoint.Value;
- var op = _op_def_lib._apply_op_helper("MatrixSolve", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixSolve", name: name, keywords: dict);
return op.output;
}
@@ -16317,7 +16315,7 @@ namespace Tensorflow.Operations
dict["l2_regularizer"] = l2_regularizer;
if (fast.HasValue)
dict["fast"] = fast.Value;
- var op = _op_def_lib._apply_op_helper("MatrixSolveLs", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixSolveLs", name: name, keywords: dict);
return op.output;
}
@@ -16375,7 +16373,7 @@ namespace Tensorflow.Operations
dict["lower"] = lower.Value;
if (adjoint.HasValue)
dict["adjoint"] = adjoint.Value;
- var op = _op_def_lib._apply_op_helper("MatrixTriangularSolve", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MatrixTriangularSolve", name: name, keywords: dict);
return op.output;
}
@@ -16412,7 +16410,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = _op_def_lib._apply_op_helper("Max", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Max", name: name, keywords: dict);
return op.output;
}
@@ -16458,7 +16456,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPool", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPool", name: name, keywords: dict);
return op.output;
}
@@ -16505,7 +16503,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPool3D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPool3D", name: name, keywords: dict);
return op.output;
}
@@ -16559,7 +16557,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPool3DGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPool3DGrad", name: name, keywords: dict);
return op.output;
}
@@ -16614,7 +16612,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPool3DGradGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPool3DGradGrad", name: name, keywords: dict);
return op.output;
}
@@ -16668,7 +16666,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPoolGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPoolGrad", name: name, keywords: dict);
return op.output;
}
@@ -16722,7 +16720,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPoolGradGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPoolGradGrad", name: name, keywords: dict);
return op.output;
}
@@ -16774,7 +16772,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPoolGradGradV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPoolGradGradV2", name: name, keywords: dict);
return op.output;
}
@@ -16820,7 +16818,7 @@ namespace Tensorflow.Operations
dict["ksize"] = ksize;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("MaxPoolGradGradWithArgmax", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPoolGradGradWithArgmax", name: name, keywords: dict);
return op.output;
}
@@ -16872,7 +16870,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPoolGradV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPoolGradV2", name: name, keywords: dict);
return op.output;
}
@@ -16918,7 +16916,7 @@ namespace Tensorflow.Operations
dict["ksize"] = ksize;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("MaxPoolGradWithArgmax", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPoolGradWithArgmax", name: name, keywords: dict);
return op.output;
}
@@ -16962,7 +16960,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (data_format != null)
dict["data_format"] = data_format;
- var op = _op_def_lib._apply_op_helper("MaxPoolV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPoolV2", name: name, keywords: dict);
return op.output;
}
@@ -17015,7 +17013,7 @@ namespace Tensorflow.Operations
dict["padding"] = padding;
if (Targmax.HasValue)
dict["Targmax"] = Targmax.Value;
- var op = _op_def_lib._apply_op_helper("MaxPoolWithArgmax", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MaxPoolWithArgmax", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var argmax = op.outputs[_idx++];
@@ -17044,7 +17042,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Maximum", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Maximum", name: name, keywords: dict);
return op.output;
}
@@ -17081,7 +17079,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = _op_def_lib._apply_op_helper("Mean", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Mean", name: name, keywords: dict);
return op.output;
}
@@ -17111,7 +17109,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["inputs"] = inputs;
- var op = _op_def_lib._apply_op_helper("Merge", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Merge", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var value_index = op.outputs[_idx++];
@@ -17145,7 +17143,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["inputs"] = inputs;
- var op = _op_def_lib._apply_op_helper("MergeSummary", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MergeSummary", name: name, keywords: dict);
return op.output;
}
@@ -17185,7 +17183,7 @@ namespace Tensorflow.Operations
dict["destination_prefix"] = destination_prefix;
if (delete_old_dirs.HasValue)
dict["delete_old_dirs"] = delete_old_dirs.Value;
- var op = _op_def_lib._apply_op_helper("MergeV2Checkpoints", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MergeV2Checkpoints", name: name, keywords: dict);
return op;
}
@@ -17240,7 +17238,7 @@ namespace Tensorflow.Operations
dict["filterbank_channel_count"] = filterbank_channel_count.Value;
if (dct_coefficient_count.HasValue)
dict["dct_coefficient_count"] = dct_coefficient_count.Value;
- var op = _op_def_lib._apply_op_helper("Mfcc", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Mfcc", name: name, keywords: dict);
return op.output;
}
@@ -17277,7 +17275,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = _op_def_lib._apply_op_helper("Min", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Min", name: name, keywords: dict);
return op.output;
}
@@ -17303,7 +17301,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Minimum", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Minimum", name: name, keywords: dict);
return op.output;
}
@@ -17365,7 +17363,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["paddings"] = paddings;
dict["mode"] = mode;
- var op = _op_def_lib._apply_op_helper("MirrorPad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MirrorPad", name: name, keywords: dict);
return op.output;
}
@@ -17416,7 +17414,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["paddings"] = paddings;
dict["mode"] = mode;
- var op = _op_def_lib._apply_op_helper("MirrorPadGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MirrorPadGrad", name: name, keywords: dict);
return op.output;
}
@@ -17445,7 +17443,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Mod", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Mod", name: name, keywords: dict);
return op.output;
}
@@ -17471,7 +17469,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Mul", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Mul", name: name, keywords: dict);
return op.output;
}
@@ -17513,7 +17511,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2.Value;
if (output_dtype.HasValue)
dict["output_dtype"] = output_dtype.Value;
- var op = _op_def_lib._apply_op_helper("Multinomial", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Multinomial", name: name, keywords: dict);
return op.output;
}
@@ -17581,7 +17579,7 @@ namespace Tensorflow.Operations
dict["initial_num_buckets"] = initial_num_buckets.Value;
if (max_load_factor.HasValue)
dict["max_load_factor"] = max_load_factor.Value;
- var op = _op_def_lib._apply_op_helper("MutableDenseHashTable", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MutableDenseHashTable", name: name, keywords: dict);
return op.output;
}
@@ -17649,7 +17647,7 @@ namespace Tensorflow.Operations
dict["initial_num_buckets"] = initial_num_buckets.Value;
if (max_load_factor.HasValue)
dict["max_load_factor"] = max_load_factor.Value;
- var op = _op_def_lib._apply_op_helper("MutableDenseHashTableV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MutableDenseHashTableV2", name: name, keywords: dict);
return op.output;
}
@@ -17699,7 +17697,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (use_node_name_sharing.HasValue)
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
- var op = _op_def_lib._apply_op_helper("MutableHashTable", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MutableHashTable", name: name, keywords: dict);
return op.output;
}
@@ -17751,7 +17749,7 @@ namespace Tensorflow.Operations
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
if (value_shape != null)
dict["value_shape"] = value_shape;
- var op = _op_def_lib._apply_op_helper("MutableHashTableOfTensors", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MutableHashTableOfTensors", name: name, keywords: dict);
return op.output;
}
@@ -17803,7 +17801,7 @@ namespace Tensorflow.Operations
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
if (value_shape != null)
dict["value_shape"] = value_shape;
- var op = _op_def_lib._apply_op_helper("MutableHashTableOfTensorsV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MutableHashTableOfTensorsV2", name: name, keywords: dict);
return op.output;
}
@@ -17853,7 +17851,7 @@ namespace Tensorflow.Operations
dict["shared_name"] = shared_name;
if (use_node_name_sharing.HasValue)
dict["use_node_name_sharing"] = use_node_name_sharing.Value;
- var op = _op_def_lib._apply_op_helper("MutableHashTableV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MutableHashTableV2", name: name, keywords: dict);
return op.output;
}
@@ -17916,7 +17914,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["mutex"] = mutex;
- var op = _op_def_lib._apply_op_helper("MutexLock", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MutexLock", name: name, keywords: dict);
return op.output;
}
@@ -17945,7 +17943,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("MutexV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("MutexV2", name: name, keywords: dict);
return op.output;
}
@@ -17967,7 +17965,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Neg", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Neg", name: name, keywords: dict);
return op.output;
}
@@ -18012,7 +18010,7 @@ namespace Tensorflow.Operations
dict["lr"] = lr;
dict["vocab_count"] = vocab_count;
dict["num_negative_samples"] = num_negative_samples;
- var op = _op_def_lib._apply_op_helper("NegTrain", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NegTrain", name: name, keywords: dict);
return op;
}
@@ -18033,7 +18031,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["data"] = data;
- var op = _op_def_lib._apply_op_helper("NextIteration", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NextIteration", name: name, keywords: dict);
return op.output;
}
@@ -18049,7 +18047,7 @@ namespace Tensorflow.Operations
public static Operation no_op (string name = "NoOp")
{
var dict = new Dictionary();
- var op = _op_def_lib._apply_op_helper("NoOp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NoOp", name: name, keywords: dict);
return op;
}
@@ -18105,7 +18103,7 @@ namespace Tensorflow.Operations
dict["max_output_size"] = max_output_size;
if (iou_threshold.HasValue)
dict["iou_threshold"] = iou_threshold.Value;
- var op = _op_def_lib._apply_op_helper("NonMaxSuppression", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NonMaxSuppression", name: name, keywords: dict);
return op.output;
}
@@ -18162,7 +18160,7 @@ namespace Tensorflow.Operations
dict["scores"] = scores;
dict["max_output_size"] = max_output_size;
dict["iou_threshold"] = iou_threshold;
- var op = _op_def_lib._apply_op_helper("NonMaxSuppressionV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NonMaxSuppressionV2", name: name, keywords: dict);
return op.output;
}
@@ -18223,7 +18221,7 @@ namespace Tensorflow.Operations
dict["max_output_size"] = max_output_size;
dict["iou_threshold"] = iou_threshold;
dict["score_threshold"] = score_threshold;
- var op = _op_def_lib._apply_op_helper("NonMaxSuppressionV3", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NonMaxSuppressionV3", name: name, keywords: dict);
return op.output;
}
@@ -18293,7 +18291,7 @@ namespace Tensorflow.Operations
dict["score_threshold"] = score_threshold;
if (pad_to_max_output_size.HasValue)
dict["pad_to_max_output_size"] = pad_to_max_output_size.Value;
- var op = _op_def_lib._apply_op_helper("NonMaxSuppressionV4", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NonMaxSuppressionV4", name: name, keywords: dict);
int _idx = 0;
var selected_indices = op.outputs[_idx++];
var valid_outputs = op.outputs[_idx++];
@@ -18355,7 +18353,7 @@ namespace Tensorflow.Operations
dict["max_output_size"] = max_output_size;
dict["overlap_threshold"] = overlap_threshold;
dict["score_threshold"] = score_threshold;
- var op = _op_def_lib._apply_op_helper("NonMaxSuppressionWithOverlaps", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NonMaxSuppressionWithOverlaps", name: name, keywords: dict);
return op.output;
}
@@ -18381,7 +18379,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("NotEqual", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NotEqual", name: name, keywords: dict);
return op.output;
}
@@ -18422,7 +18420,7 @@ namespace Tensorflow.Operations
dict["n"] = n;
if (reverse.HasValue)
dict["reverse"] = reverse.Value;
- var op = _op_def_lib._apply_op_helper("NthElement", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("NthElement", name: name, keywords: dict);
return op.output;
}
@@ -18553,7 +18551,7 @@ namespace Tensorflow.Operations
dict["off_value"] = off_value;
if (axis.HasValue)
dict["axis"] = axis.Value;
- var op = _op_def_lib._apply_op_helper("OneHot", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OneHot", name: name, keywords: dict);
return op.output;
}
@@ -18574,7 +18572,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("OnesLike", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OnesLike", name: name, keywords: dict);
return op.output;
}
@@ -18609,7 +18607,7 @@ namespace Tensorflow.Operations
dict["optimizations"] = optimizations;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("OptimizeDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OptimizeDataset", name: name, keywords: dict);
return op.output;
}
@@ -18628,7 +18626,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["components"] = components;
- var op = _op_def_lib._apply_op_helper("OptionalFromValue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OptionalFromValue", name: name, keywords: dict);
return op.output;
}
@@ -18655,7 +18653,7 @@ namespace Tensorflow.Operations
dict["optional"] = optional;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("OptionalGetValue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OptionalGetValue", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -18676,7 +18674,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["optional"] = optional;
- var op = _op_def_lib._apply_op_helper("OptionalHasValue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OptionalHasValue", name: name, keywords: dict);
return op.output;
}
@@ -18692,7 +18690,7 @@ namespace Tensorflow.Operations
public static Tensor optional_none (string name = "OptionalNone")
{
var dict = new Dictionary();
- var op = _op_def_lib._apply_op_helper("OptionalNone", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OptionalNone", name: name, keywords: dict);
return op.output;
}
@@ -18728,7 +18726,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("OrderedMapClear", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OrderedMapClear", name: name, keywords: dict);
return op;
}
@@ -18764,7 +18762,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("OrderedMapIncompleteSize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OrderedMapIncompleteSize", name: name, keywords: dict);
return op.output;
}
@@ -18811,7 +18809,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("OrderedMapPeek", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OrderedMapPeek", name: name, keywords: dict);
int _idx = 0;
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
return (values);
@@ -18849,7 +18847,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("OrderedMapSize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OrderedMapSize", name: name, keywords: dict);
return op.output;
}
@@ -18905,7 +18903,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("OrderedMapStage", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OrderedMapStage", name: name, keywords: dict);
return op;
}
@@ -18951,7 +18949,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("OrderedMapUnstage", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OrderedMapUnstage", name: name, keywords: dict);
int _idx = 0;
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
return (values);
@@ -18999,7 +18997,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("OrderedMapUnstageNoKey", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OrderedMapUnstageNoKey", name: name, keywords: dict);
int _idx = 0;
var key = op.outputs[_idx++];
var values = Enumerable.Range(0, op.OutputListLength("values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -19039,7 +19037,7 @@ namespace Tensorflow.Operations
dict["shape"] = shape;
if (device_ordinal.HasValue)
dict["device_ordinal"] = device_ordinal.Value;
- var op = _op_def_lib._apply_op_helper("OutfeedDequeue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OutfeedDequeue", name: name, keywords: dict);
return op.output;
}
@@ -19077,7 +19075,7 @@ namespace Tensorflow.Operations
dict["shapes"] = shapes;
if (device_ordinal.HasValue)
dict["device_ordinal"] = device_ordinal.Value;
- var op = _op_def_lib._apply_op_helper("OutfeedDequeueTuple", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OutfeedDequeueTuple", name: name, keywords: dict);
int _idx = 0;
var outputs = Enumerable.Range(0, op.OutputListLength("outputs")).Select(_ => op.outputs[_idx++]).ToArray();
return (outputs);
@@ -19099,7 +19097,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("OutfeedEnqueue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OutfeedEnqueue", name: name, keywords: dict);
return op;
}
@@ -19120,7 +19118,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary();
dict["inputs"] = inputs;
- var op = _op_def_lib._apply_op_helper("OutfeedEnqueueTuple", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("OutfeedEnqueueTuple", name: name, keywords: dict);
return op;
}
@@ -19168,7 +19166,7 @@ namespace Tensorflow.Operations
dict["values"] = values;
if (axis.HasValue)
dict["axis"] = axis.Value;
- var op = _op_def_lib._apply_op_helper("Pack", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Pack", name: name, keywords: dict);
return op.output;
}
@@ -19215,7 +19213,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["input"] = input;
dict["paddings"] = paddings;
- var op = _op_def_lib._apply_op_helper("Pad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Pad", name: name, keywords: dict);
return op.output;
}
@@ -19266,7 +19264,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
dict["paddings"] = paddings;
dict["constant_values"] = constant_values;
- var op = _op_def_lib._apply_op_helper("PadV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PadV2", name: name, keywords: dict);
return op.output;
}
@@ -19306,7 +19304,7 @@ namespace Tensorflow.Operations
dict["padded_shapes"] = padded_shapes;
dict["padding_values"] = padding_values;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("PaddedBatchDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PaddedBatchDataset", name: name, keywords: dict);
return op.output;
}
@@ -19351,7 +19349,7 @@ namespace Tensorflow.Operations
dict["padding_values"] = padding_values;
dict["drop_remainder"] = drop_remainder;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("PaddedBatchDatasetV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PaddedBatchDatasetV2", name: name, keywords: dict);
return op.output;
}
@@ -19408,7 +19406,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("PaddingFIFOQueue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PaddingFIFOQueue", name: name, keywords: dict);
return op.output;
}
@@ -19465,7 +19463,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("PaddingFIFOQueueV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PaddingFIFOQueueV2", name: name, keywords: dict);
return op.output;
}
@@ -19511,7 +19509,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["values"] = values;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("ParallelConcat", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParallelConcat", name: name, keywords: dict);
return op.output;
}
@@ -19596,7 +19594,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary();
dict["indices"] = indices;
dict["data"] = data;
- var op = _op_def_lib._apply_op_helper("ParallelDynamicStitch", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParallelDynamicStitch", name: name, keywords: dict);
return op.output;
}
@@ -19651,7 +19649,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("ParameterizedTruncatedNormal", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParameterizedTruncatedNormal", name: name, keywords: dict);
return op.output;
}
@@ -19734,7 +19732,7 @@ namespace Tensorflow.Operations
dict["dense_defaults"] = dense_defaults;
dict["sparse_types"] = sparse_types;
dict["dense_shapes"] = dense_shapes;
- var op = _op_def_lib._apply_op_helper("ParseExample", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParseExample", name: name, keywords: dict);
int _idx = 0;
var sparse_indices = Enumerable.Range(0, op.OutputListLength("sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var sparse_values = Enumerable.Range(0, op.OutputListLength("sparse_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -19808,7 +19806,7 @@ namespace Tensorflow.Operations
dict["dense_shapes"] = dense_shapes;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("ParseExampleDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParseExampleDataset", name: name, keywords: dict);
return op.output;
}
@@ -19947,7 +19945,7 @@ namespace Tensorflow.Operations
dict["feature_list_sparse_types"] = feature_list_sparse_types;
if (feature_list_dense_shapes != null)
dict["feature_list_dense_shapes"] = feature_list_dense_shapes;
- var op = _op_def_lib._apply_op_helper("ParseSequenceExample", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParseSequenceExample", name: name, keywords: dict);
int _idx = 0;
var context_sparse_indices = Enumerable.Range(0, op.OutputListLength("context_sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var context_sparse_values = Enumerable.Range(0, op.OutputListLength("context_sparse_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -20034,7 +20032,7 @@ namespace Tensorflow.Operations
dict["dense_keys"] = dense_keys;
dict["sparse_types"] = sparse_types;
dict["dense_shapes"] = dense_shapes;
- var op = _op_def_lib._apply_op_helper("ParseSingleExample", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParseSingleExample", name: name, keywords: dict);
int _idx = 0;
var sparse_indices = Enumerable.Range(0, op.OutputListLength("sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var sparse_values = Enumerable.Range(0, op.OutputListLength("sparse_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -20156,7 +20154,7 @@ namespace Tensorflow.Operations
dict["feature_list_sparse_types"] = feature_list_sparse_types;
if (feature_list_dense_shapes != null)
dict["feature_list_dense_shapes"] = feature_list_dense_shapes;
- var op = _op_def_lib._apply_op_helper("ParseSingleSequenceExample", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParseSingleSequenceExample", name: name, keywords: dict);
int _idx = 0;
var context_sparse_indices = Enumerable.Range(0, op.OutputListLength("context_sparse_indices")).Select(_ => op.outputs[_idx++]).ToArray();
var context_sparse_values = Enumerable.Range(0, op.OutputListLength("context_sparse_values")).Select(_ => op.outputs[_idx++]).ToArray();
@@ -20192,7 +20190,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["serialized"] = serialized;
dict["out_type"] = out_type;
- var op = _op_def_lib._apply_op_helper("ParseTensor", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ParseTensor", name: name, keywords: dict);
return op.output;
}
@@ -20225,7 +20223,7 @@ namespace Tensorflow.Operations
dict["dtype"] = dtype;
if (shape != null)
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("Placeholder", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Placeholder", name: name, keywords: dict);
return op.output;
}
@@ -20258,7 +20256,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["dtype"] = dtype;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("PlaceholderV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PlaceholderV2", name: name, keywords: dict);
return op.output;
}
@@ -20284,7 +20282,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["shape"] = shape;
- var op = _op_def_lib._apply_op_helper("PlaceholderWithDefault", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PlaceholderWithDefault", name: name, keywords: dict);
return op.output;
}
@@ -20314,7 +20312,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["a"] = a;
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("Polygamma", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Polygamma", name: name, keywords: dict);
return op.output;
}
@@ -20341,7 +20339,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["x"] = x;
- var op = _op_def_lib._apply_op_helper("PopulationCount", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PopulationCount", name: name, keywords: dict);
return op.output;
}
@@ -20373,7 +20371,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["x"] = x;
dict["y"] = y;
- var op = _op_def_lib._apply_op_helper("Pow", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Pow", name: name, keywords: dict);
return op.output;
}
@@ -20405,7 +20403,7 @@ namespace Tensorflow.Operations
dict["buffer_size"] = buffer_size;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("PrefetchDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PrefetchDataset", name: name, keywords: dict);
return op.output;
}
@@ -20441,7 +20439,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (message != null)
dict["message"] = message;
- var op = _op_def_lib._apply_op_helper("PreventGradient", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PreventGradient", name: name, keywords: dict);
return op.output;
}
@@ -20484,7 +20482,7 @@ namespace Tensorflow.Operations
dict["first_n"] = first_n.Value;
if (summarize.HasValue)
dict["summarize"] = summarize.Value;
- var op = _op_def_lib._apply_op_helper("Print", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Print", name: name, keywords: dict);
return op.output;
}
@@ -20539,7 +20537,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("PriorityQueue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PriorityQueue", name: name, keywords: dict);
return op.output;
}
@@ -20594,7 +20592,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("PriorityQueueV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("PriorityQueueV2", name: name, keywords: dict);
return op.output;
}
@@ -20631,7 +20629,7 @@ namespace Tensorflow.Operations
dict["reduction_indices"] = reduction_indices;
if (keep_dims.HasValue)
dict["keep_dims"] = keep_dims.Value;
- var op = _op_def_lib._apply_op_helper("Prod", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Prod", name: name, keywords: dict);
return op.output;
}
@@ -20676,7 +20674,7 @@ namespace Tensorflow.Operations
dict["input"] = input;
if (full_matrices.HasValue)
dict["full_matrices"] = full_matrices.Value;
- var op = _op_def_lib._apply_op_helper("Qr", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Qr", name: name, keywords: dict);
int _idx = 0;
var q = op.outputs[_idx++];
var r = op.outputs[_idx++];
@@ -20718,7 +20716,7 @@ namespace Tensorflow.Operations
dict["input_min"] = input_min.Value;
if (input_max.HasValue)
dict["input_max"] = input_max.Value;
- var op = _op_def_lib._apply_op_helper("QuantizeAndDequantize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizeAndDequantize", name: name, keywords: dict);
return op.output;
}
@@ -20818,7 +20816,7 @@ namespace Tensorflow.Operations
dict["num_bits"] = num_bits.Value;
if (range_given.HasValue)
dict["range_given"] = range_given.Value;
- var op = _op_def_lib._apply_op_helper("QuantizeAndDequantizeV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizeAndDequantizeV2", name: name, keywords: dict);
return op.output;
}
@@ -20858,7 +20856,7 @@ namespace Tensorflow.Operations
dict["signed_input"] = signed_input.Value;
if (range_given.HasValue)
dict["range_given"] = range_given.Value;
- var op = _op_def_lib._apply_op_helper("QuantizeAndDequantizeV3", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizeAndDequantizeV3", name: name, keywords: dict);
return op.output;
}
@@ -20918,7 +20916,7 @@ namespace Tensorflow.Operations
dict["input_min"] = input_min;
dict["input_max"] = input_max;
dict["out_type"] = out_type;
- var op = _op_def_lib._apply_op_helper("QuantizeDownAndShrinkRange", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizeDownAndShrinkRange", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_min = op.outputs[_idx++];
@@ -21065,7 +21063,7 @@ namespace Tensorflow.Operations
dict["mode"] = mode;
if (round_mode != null)
dict["round_mode"] = round_mode;
- var op = _op_def_lib._apply_op_helper("QuantizeV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizeV2", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_min = op.outputs[_idx++];
@@ -21118,7 +21116,7 @@ namespace Tensorflow.Operations
dict["max_y"] = max_y;
if (Toutput.HasValue)
dict["Toutput"] = Toutput.Value;
- var op = _op_def_lib._apply_op_helper("QuantizedAdd", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedAdd", name: name, keywords: dict);
int _idx = 0;
var z = op.outputs[_idx++];
var min_z = op.outputs[_idx++];
@@ -21171,7 +21169,7 @@ namespace Tensorflow.Operations
dict["ksize"] = ksize;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("QuantizedAvgPool", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedAvgPool", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_output = op.outputs[_idx++];
@@ -21281,7 +21279,7 @@ namespace Tensorflow.Operations
dict["out_type"] = out_type;
dict["variance_epsilon"] = variance_epsilon;
dict["scale_after_normalization"] = scale_after_normalization;
- var op = _op_def_lib._apply_op_helper("QuantizedBatchNormWithGlobalNormalization", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedBatchNormWithGlobalNormalization", name: name, keywords: dict);
int _idx = 0;
var result = op.outputs[_idx++];
var result_min = op.outputs[_idx++];
@@ -21335,7 +21333,7 @@ namespace Tensorflow.Operations
dict["min_bias"] = min_bias;
dict["max_bias"] = max_bias;
dict["out_type"] = out_type;
- var op = _op_def_lib._apply_op_helper("QuantizedBiasAdd", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedBiasAdd", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_out = op.outputs[_idx++];
@@ -21379,7 +21377,7 @@ namespace Tensorflow.Operations
dict["values"] = values;
dict["input_mins"] = input_mins;
dict["input_maxes"] = input_maxes;
- var op = _op_def_lib._apply_op_helper("QuantizedConcat", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedConcat", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_min = op.outputs[_idx++];
@@ -21456,7 +21454,7 @@ namespace Tensorflow.Operations
dict["out_type"] = out_type.Value;
if (dilations != null)
dict["dilations"] = dilations;
- var op = _op_def_lib._apply_op_helper("QuantizedConv2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedConv2D", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_output = op.outputs[_idx++];
@@ -21519,7 +21517,7 @@ namespace Tensorflow.Operations
dict["variance_epsilon"] = variance_epsilon.Value;
if (min_separation.HasValue)
dict["min_separation"] = min_separation.Value;
- var op = _op_def_lib._apply_op_helper("QuantizedInstanceNorm", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedInstanceNorm", name: name, keywords: dict);
int _idx = 0;
var y = op.outputs[_idx++];
var y_min = op.outputs[_idx++];
@@ -21593,7 +21591,7 @@ namespace Tensorflow.Operations
dict["transpose_b"] = transpose_b.Value;
if (Tactivation.HasValue)
dict["Tactivation"] = Tactivation.Value;
- var op = _op_def_lib._apply_op_helper("QuantizedMatMul", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedMatMul", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_out = op.outputs[_idx++];
@@ -21646,7 +21644,7 @@ namespace Tensorflow.Operations
dict["ksize"] = ksize;
dict["strides"] = strides;
dict["padding"] = padding;
- var op = _op_def_lib._apply_op_helper("QuantizedMaxPool", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedMaxPool", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var min_output = op.outputs[_idx++];
@@ -21699,7 +21697,7 @@ namespace Tensorflow.Operations
dict["max_y"] = max_y;
if (Toutput.HasValue)
dict["Toutput"] = Toutput.Value;
- var op = _op_def_lib._apply_op_helper("QuantizedMul", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedMul", name: name, keywords: dict);
int _idx = 0;
var z = op.outputs[_idx++];
var min_z = op.outputs[_idx++];
@@ -21738,7 +21736,7 @@ namespace Tensorflow.Operations
dict["max_features"] = max_features;
if (out_type.HasValue)
dict["out_type"] = out_type.Value;
- var op = _op_def_lib._apply_op_helper("QuantizedRelu", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedRelu", name: name, keywords: dict);
int _idx = 0;
var activations = op.outputs[_idx++];
var min_activations = op.outputs[_idx++];
@@ -21777,7 +21775,7 @@ namespace Tensorflow.Operations
dict["max_features"] = max_features;
if (out_type.HasValue)
dict["out_type"] = out_type.Value;
- var op = _op_def_lib._apply_op_helper("QuantizedRelu6", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedRelu6", name: name, keywords: dict);
int _idx = 0;
var activations = op.outputs[_idx++];
var min_activations = op.outputs[_idx++];
@@ -21819,7 +21817,7 @@ namespace Tensorflow.Operations
dict["max_features"] = max_features;
if (out_type.HasValue)
dict["out_type"] = out_type.Value;
- var op = _op_def_lib._apply_op_helper("QuantizedReluX", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedReluX", name: name, keywords: dict);
int _idx = 0;
var activations = op.outputs[_idx++];
var min_activations = op.outputs[_idx++];
@@ -21861,7 +21859,7 @@ namespace Tensorflow.Operations
dict["shape"] = shape;
dict["input_min"] = input_min;
dict["input_max"] = input_max;
- var op = _op_def_lib._apply_op_helper("QuantizedReshape", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedReshape", name: name, keywords: dict);
int _idx = 0;
var output = op.outputs[_idx++];
var output_min = op.outputs[_idx++];
@@ -21910,7 +21908,7 @@ namespace Tensorflow.Operations
dict["max"] = max;
if (align_corners.HasValue)
dict["align_corners"] = align_corners.Value;
- var op = _op_def_lib._apply_op_helper("QuantizedResizeBilinear", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QuantizedResizeBilinear", name: name, keywords: dict);
int _idx = 0;
var resized_images = op.outputs[_idx++];
var out_min = op.outputs[_idx++];
@@ -21947,7 +21945,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
if (cancel_pending_enqueues.HasValue)
dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value;
- var op = _op_def_lib._apply_op_helper("QueueClose", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueClose", name: name, keywords: dict);
return op;
}
@@ -21980,7 +21978,7 @@ namespace Tensorflow.Operations
dict["handle"] = handle;
if (cancel_pending_enqueues.HasValue)
dict["cancel_pending_enqueues"] = cancel_pending_enqueues.Value;
- var op = _op_def_lib._apply_op_helper("QueueCloseV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueCloseV2", name: name, keywords: dict);
return op;
}
@@ -22021,7 +22019,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueDequeue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueDequeue", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22075,7 +22073,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueDequeueMany", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueDequeueMany", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22129,7 +22127,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueDequeueManyV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueDequeueManyV2", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22187,7 +22185,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueDequeueUpTo", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueDequeueUpTo", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22245,7 +22243,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueDequeueUpToV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueDequeueUpToV2", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22288,7 +22286,7 @@ namespace Tensorflow.Operations
dict["component_types"] = component_types;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueDequeueV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueDequeueV2", name: name, keywords: dict);
int _idx = 0;
var components = Enumerable.Range(0, op.OutputListLength("components")).Select(_ => op.outputs[_idx++]).ToArray();
return (components);
@@ -22328,7 +22326,7 @@ namespace Tensorflow.Operations
dict["components"] = components;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueEnqueue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueEnqueue", name: name, keywords: dict);
return op;
}
@@ -22371,7 +22369,7 @@ namespace Tensorflow.Operations
dict["components"] = components;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueEnqueueMany", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueEnqueueMany", name: name, keywords: dict);
return op;
}
@@ -22414,7 +22412,7 @@ namespace Tensorflow.Operations
dict["components"] = components;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueEnqueueManyV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueEnqueueManyV2", name: name, keywords: dict);
return op;
}
@@ -22452,7 +22450,7 @@ namespace Tensorflow.Operations
dict["components"] = components;
if (timeout_ms.HasValue)
dict["timeout_ms"] = timeout_ms.Value;
- var op = _op_def_lib._apply_op_helper("QueueEnqueueV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueEnqueueV2", name: name, keywords: dict);
return op;
}
@@ -22476,7 +22474,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = _op_def_lib._apply_op_helper("QueueIsClosed", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueIsClosed", name: name, keywords: dict);
return op.output;
}
@@ -22500,7 +22498,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = _op_def_lib._apply_op_helper("QueueIsClosedV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueIsClosedV2", name: name, keywords: dict);
return op.output;
}
@@ -22521,7 +22519,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = _op_def_lib._apply_op_helper("QueueSize", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueSize", name: name, keywords: dict);
return op.output;
}
@@ -22542,7 +22540,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["handle"] = handle;
- var op = _op_def_lib._apply_op_helper("QueueSizeV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("QueueSizeV2", name: name, keywords: dict);
return op.output;
}
@@ -22585,7 +22583,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = _op_def_lib._apply_op_helper("RFFT", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RFFT", name: name, keywords: dict);
return op.output;
}
@@ -22630,7 +22628,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = _op_def_lib._apply_op_helper("RFFT2D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RFFT2D", name: name, keywords: dict);
return op.output;
}
@@ -22675,7 +22673,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["input"] = input;
dict["fft_length"] = fft_length;
- var op = _op_def_lib._apply_op_helper("RFFT3D", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RFFT3D", name: name, keywords: dict);
return op.output;
}
@@ -22705,7 +22703,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["images"] = images;
- var op = _op_def_lib._apply_op_helper("RGBToHSV", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RGBToHSV", name: name, keywords: dict);
return op.output;
}
@@ -22750,7 +22748,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("RandomCrop", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomCrop", name: name, keywords: dict);
return op.output;
}
@@ -22784,7 +22782,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("RandomDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomDataset", name: name, keywords: dict);
return op.output;
}
@@ -22830,7 +22828,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("RandomGamma", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomGamma", name: name, keywords: dict);
return op.output;
}
@@ -22852,7 +22850,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["alpha"] = alpha;
dict["sample"] = sample;
- var op = _op_def_lib._apply_op_helper("RandomGammaGrad", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomGammaGrad", name: name, keywords: dict);
return op.output;
}
@@ -22882,7 +22880,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("RandomPoisson", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomPoisson", name: name, keywords: dict);
return op.output;
}
@@ -22938,7 +22936,7 @@ namespace Tensorflow.Operations
dict["seed2"] = seed2.Value;
if (dtype.HasValue)
dict["dtype"] = dtype.Value;
- var op = _op_def_lib._apply_op_helper("RandomPoissonV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomPoissonV2", name: name, keywords: dict);
return op.output;
}
@@ -22983,7 +22981,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("RandomShuffle", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomShuffle", name: name, keywords: dict);
return op.output;
}
@@ -23049,7 +23047,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("RandomShuffleQueue", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomShuffleQueue", name: name, keywords: dict);
return op.output;
}
@@ -23115,7 +23113,7 @@ namespace Tensorflow.Operations
dict["container"] = container;
if (shared_name != null)
dict["shared_name"] = shared_name;
- var op = _op_def_lib._apply_op_helper("RandomShuffleQueueV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomShuffleQueueV2", name: name, keywords: dict);
return op.output;
}
@@ -23156,7 +23154,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("RandomStandardNormal", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomStandardNormal", name: name, keywords: dict);
return op.output;
}
@@ -23198,7 +23196,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("RandomUniform", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomUniform", name: name, keywords: dict);
return op.output;
}
@@ -23248,7 +23246,7 @@ namespace Tensorflow.Operations
dict["seed"] = seed.Value;
if (seed2.HasValue)
dict["seed2"] = seed2.Value;
- var op = _op_def_lib._apply_op_helper("RandomUniformInt", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RandomUniformInt", name: name, keywords: dict);
return op.output;
}
@@ -23290,7 +23288,7 @@ namespace Tensorflow.Operations
dict["start"] = start;
dict["limit"] = limit;
dict["delta"] = delta;
- var op = _op_def_lib._apply_op_helper("Range", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Range", name: name, keywords: dict);
return op.output;
}
@@ -23326,7 +23324,7 @@ namespace Tensorflow.Operations
dict["step"] = step;
dict["output_types"] = output_types;
dict["output_shapes"] = output_shapes;
- var op = _op_def_lib._apply_op_helper("RangeDataset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("RangeDataset", name: name, keywords: dict);
return op.output;
}
@@ -23360,7 +23358,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["input"] = input;
- var op = _op_def_lib._apply_op_helper("Rank", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("Rank", name: name, keywords: dict);
return op.output;
}
@@ -23379,7 +23377,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["filename"] = filename;
- var op = _op_def_lib._apply_op_helper("ReadFile", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReadFile", name: name, keywords: dict);
return op.output;
}
@@ -23412,7 +23410,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["resource"] = resource;
dict["dtype"] = dtype;
- var op = _op_def_lib._apply_op_helper("ReadVariableOp", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReadVariableOp", name: name, keywords: dict);
return op.output;
}
@@ -23436,7 +23434,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = _op_def_lib._apply_op_helper("ReaderNumRecordsProduced", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderNumRecordsProduced", name: name, keywords: dict);
return op.output;
}
@@ -23460,7 +23458,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = _op_def_lib._apply_op_helper("ReaderNumRecordsProducedV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderNumRecordsProducedV2", name: name, keywords: dict);
return op.output;
}
@@ -23480,7 +23478,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = _op_def_lib._apply_op_helper("ReaderNumWorkUnitsCompleted", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderNumWorkUnitsCompleted", name: name, keywords: dict);
return op.output;
}
@@ -23500,7 +23498,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = _op_def_lib._apply_op_helper("ReaderNumWorkUnitsCompletedV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderNumWorkUnitsCompletedV2", name: name, keywords: dict);
return op.output;
}
@@ -23532,7 +23530,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
dict["queue_handle"] = queue_handle;
- var op = _op_def_lib._apply_op_helper("ReaderRead", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderRead", name: name, keywords: dict);
int _idx = 0;
var key = op.outputs[_idx++];
var value = op.outputs[_idx++];
@@ -23572,7 +23570,7 @@ namespace Tensorflow.Operations
dict["reader_handle"] = reader_handle;
dict["queue_handle"] = queue_handle;
dict["num_records"] = num_records;
- var op = _op_def_lib._apply_op_helper("ReaderReadUpTo", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderReadUpTo", name: name, keywords: dict);
int _idx = 0;
var keys = op.outputs[_idx++];
var values = op.outputs[_idx++];
@@ -23612,7 +23610,7 @@ namespace Tensorflow.Operations
dict["reader_handle"] = reader_handle;
dict["queue_handle"] = queue_handle;
dict["num_records"] = num_records;
- var op = _op_def_lib._apply_op_helper("ReaderReadUpToV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderReadUpToV2", name: name, keywords: dict);
int _idx = 0;
var keys = op.outputs[_idx++];
var values = op.outputs[_idx++];
@@ -23647,7 +23645,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
dict["queue_handle"] = queue_handle;
- var op = _op_def_lib._apply_op_helper("ReaderReadV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderReadV2", name: name, keywords: dict);
int _idx = 0;
var key = op.outputs[_idx++];
var value = op.outputs[_idx++];
@@ -23670,7 +23668,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = _op_def_lib._apply_op_helper("ReaderReset", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderReset", name: name, keywords: dict);
return op;
}
@@ -23690,7 +23688,7 @@ namespace Tensorflow.Operations
{
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
- var op = _op_def_lib._apply_op_helper("ReaderResetV2", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderResetV2", name: name, keywords: dict);
return op;
}
@@ -23719,7 +23717,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary<string, object>();
dict["reader_handle"] = reader_handle;
dict["state"] = state;
- var op = _op_def_lib._apply_op_helper("ReaderRestoreState", name: name, keywords: dict);
+ var op = tf._op_def_lib._apply_op_helper("ReaderRestoreState", name: name, keywords: dict);
return op;
}
@@ -23748,7 +23746,7 @@ namespace Tensorflow.Operations
var dict = new Dictionary