diff --git a/src/TensorFlowNET.Console/MemoryBasicTest.cs b/src/TensorFlowNET.Console/MemoryBasicTest.cs
index 3b0deeab..2bb11a02 100644
--- a/src/TensorFlowNET.Console/MemoryBasicTest.cs
+++ b/src/TensorFlowNET.Console/MemoryBasicTest.cs
@@ -112,7 +112,7 @@ namespace Tensorflow
var strides = new[] { 1, 1, 1, 1 };
var dilations = new[] { 1, 1, 1, 1 };
- var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Conv2D", null, input, filter)
+ var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "Conv2D", null, input, filter)
{
attrs = ConvertToDict(new
{
@@ -134,7 +134,7 @@ namespace Tensorflow
var strides = new[] { 1, 1, 1, 1 };
var dilations = new[] { 1, 1, 1, 1 };
- var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Conv2D", null, input, filter)
+ var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "Conv2D", null, input, filter)
{
attrs = ConvertToDict(new
{
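
For reference, a minimal sketch of the updated fast-path call pattern, reusing the `input`, `filter`, `strides` and `dilations` values built above (`padding` is a placeholder attribute; the Context is now passed explicitly as the first constructor argument):

    var info = new FastPathOpExecInfo(tf.Context, "Conv2D", null, input, filter)
    {
        attrs = ConvertToDict(new { strides, padding, dilations })
    };
    var results = tf.Runner.TFE_FastPathExecute(info);
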
diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs
index a2c91983..6a646512 100644
--- a/src/TensorFlowNET.Core/APIs/tf.array.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.array.cs
@@ -44,7 +44,8 @@ namespace Tensorflow
///
///
public Tensor batch_to_space_nd<T>(T input, int[] block_shape, int[,] crops, string name = null)
- => gen_array_ops.batch_to_space_nd(input, block_shape, crops, name: name);
+ => gen_array_ops.batch_to_space_nd(ops.convert_to_tensor(input), ops.convert_to_tensor(block_shape),
+ ops.convert_to_tensor(crops), name: name);
///
/// Apply boolean mask to tensor.
@@ -91,7 +92,7 @@ namespace Tensorflow
});
}
- return gen_array_ops.concat_v2(values.ToArray(), axis, name: name);
+ return gen_array_ops.concat_v2(values.ToArray(), ops.convert_to_tensor(axis), name: name);
}
///
@@ -115,7 +116,7 @@ namespace Tensorflow
///
///
public Tensor fill<T>(Tensor dims, T value, string name = null)
- => gen_array_ops.fill(dims, value, name: name);
+ => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name);
public Tensor fill<T>(Shape dims, T value, string name = null)
=> array_ops.fill(dims, value, name: name);
@@ -138,7 +139,7 @@ namespace Tensorflow
///
///
public Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0)
- => array_ops.gather(@params, indices, name: name, axis: axis);
+ => array_ops.gather(@params, indices, name: name, axis: ops.convert_to_tensor(axis));
///
/// Return the elements, either from `x` or `y`, depending on the `condition`.
@@ -166,7 +167,7 @@ namespace Tensorflow
///
///
public Tensor reverse(Tensor tensor, int[] axis, string name = null)
- => gen_array_ops.reverse(tensor, axis, name: name);
+ => gen_array_ops.reverse(tensor, ops.convert_to_tensor(axis), name: name);
public Tensor reverse(Tensor tensor, Tensor axis, string name = null)
=> gen_array_ops.reverse(tensor, axis, name: name);
@@ -189,7 +190,8 @@ namespace Tensorflow
/// A name for the operation (optional).
/// A `Tensor` the same type as `input`.
public Tensor slice<Tb, Ts>(Tensor input, Tb[] begin, Ts[] size, string name = null)
- => array_ops.slice(input, begin, size, name: name);
+ => array_ops.slice(input, begin.Select(x => ops.convert_to_tensor(x)).ToArray(),
+ size.Select(x => ops.convert_to_tensor(x)).ToArray(), name: name);
public Tensor squeeze(Tensor input, int axis, string name = null, int squeeze_dims = -1)
=> array_ops.squeeze(input, new[] { axis }, name);
@@ -255,7 +257,7 @@ namespace Tensorflow
/// A name for the operation (optional).
/// A `Tensor`. Has the same type as `input`.
public Tensor placeholder_with_default<T>(T input, int[] shape, string name = null)
- => gen_array_ops.placeholder_with_default(input, shape, name: name);
+ => gen_array_ops.placeholder_with_default(ops.convert_to_tensor(input), shape, name: name);
///
/// Returns the shape of a tensor.
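
The pattern throughout tf.array.cs: the public wrappers now normalize scalar and array arguments with ops.convert_to_tensor before calling the generated bindings, which accept Tensor arguments only. A representative sketch, mirroring the reverse overload above:

    public Tensor reverse(Tensor tensor, int[] axis, string name = null)
        => gen_array_ops.reverse(tensor, ops.convert_to_tensor(axis), name: name);
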
diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs
index 83653c8b..75253700 100644
--- a/src/TensorFlowNET.Core/APIs/tf.math.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.math.cs
@@ -130,7 +130,7 @@ namespace Tensorflow
=> gen_math_ops.add(a, b, name: name);
public Tensor add<Tx, Ty>(Tx a, Ty b, string name = null)
- => gen_math_ops.add(a, b, name: name);
+ => gen_math_ops.add(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name);
///
/// Adds all input tensors element-wise.
@@ -151,10 +151,10 @@ namespace Tensorflow
=> gen_math_ops.atan(x, name);
public Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
- => gen_math_ops.arg_max(input, dimension, output_type: output_type, name: name);
+ => gen_math_ops.arg_max(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name);
public Tensor arg_min(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
- => gen_math_ops.arg_min(input, dimension, output_type: output_type, name: name);
+ => gen_math_ops.arg_min(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name);
public Tensor is_finite(Tensor input, string name = null)
=> gen_math_ops.is_finite(input, name);
@@ -199,7 +199,7 @@ namespace Tensorflow
=> gen_math_ops.cos(x, name);
public Tensor cos(float x, string name = null)
- => gen_math_ops.cos(x, name);
+ => gen_math_ops.cos(ops.convert_to_tensor(x), name);
///
/// Computes hyperbolic cosine of x element-wise.
@@ -235,7 +235,7 @@ namespace Tensorflow
///
///
public Tensor greater<Tx, Ty>(Tx x, Ty y, string name = null)
- => gen_math_ops.greater(x, y, name);
+ => gen_math_ops.greater(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
///
/// Returns the truth value of (x >= y) element-wise.
@@ -247,7 +247,7 @@ namespace Tensorflow
///
///
public Tensor greater_equal<Tx, Ty>(Tx x, Ty y, string name = null)
- => gen_math_ops.greater_equal(x, y, name);
+ => gen_math_ops.greater_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
///
/// Returns the truth value of (x < y) element-wise.
@@ -259,7 +259,7 @@ namespace Tensorflow
///
///
public Tensor less<Tx, Ty>(Tx x, Ty y, string name = null)
- => gen_math_ops.less(x, y, name);
+ => gen_math_ops.less(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
///
/// Computes the log of the absolute value of `Gamma(x)` element-wise.
@@ -280,7 +280,7 @@ namespace Tensorflow
///
///
public Tensor less_equal<Tx, Ty>(Tx x, Ty y, string name = null)
- => gen_math_ops.less_equal(x, y, name);
+ => gen_math_ops.less_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
///
/// Computes natural logarithm of (1 + x) element-wise.
@@ -292,7 +292,7 @@ namespace Tensorflow
=> gen_math_ops.log1p(x, name);
public Tensor logical_and<T>(T x, T y, string name = null)
- => gen_math_ops.logical_and(x, y, name);
+ => gen_math_ops.logical_and(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
public Tensor logical_not(Tensor x, string name = null)
=> gen_math_ops.logical_not(x, name);
@@ -301,7 +301,10 @@ namespace Tensorflow
=> gen_math_ops.logical_or(x, y, name);
public Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor")
- => gen_math_ops.logical_xor(x, y, name);
+ {
+ return gen_math_ops.logical_and(gen_math_ops.logical_or(x, y),
+ gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)), name);
+ }
///
/// Clips tensor values to a specified min and max.
@@ -312,7 +315,7 @@ namespace Tensorflow
///
///
public Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
- => gen_math_ops._clip_by_value(t, clip_value_min, clip_value_max);
+ => gen_math_ops.clip_by_value(t, clip_value_min, clip_value_max);
///
/// Clips tensor values to a specified min and max.
@@ -345,7 +348,7 @@ namespace Tensorflow
=> clip_ops.clip_by_value(t, clip_value_min, clip_value_max, name);
public Tensor sub<Tx, Ty>(Tx a, Ty b, string name = null)
- => gen_math_ops.sub(a, b, name: name);
+ => gen_math_ops.sub(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name);
public Tensor divide(Tensor a, Tensor b)
=> a / b;
@@ -396,7 +399,7 @@ namespace Tensorflow
///
///
public Tensor max<Tx, Ty>(Tx input, Ty axis, bool keep_dims = false, string name = null)
- => gen_math_ops._max(input, axis, keep_dims: keep_dims, name: name);
+ => gen_math_ops.max(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
///
/// Computes the minimum of elements across dimensions of a tensor.
@@ -409,7 +412,7 @@ namespace Tensorflow
///
///
public Tensor min<Tx, Ty>(Tx input, Ty axis, bool keep_dims = false, string name = null)
- => gen_math_ops._min(input, axis, keep_dims: keep_dims, name: name);
+ => gen_math_ops.min(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
///
/// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
@@ -421,7 +424,7 @@ namespace Tensorflow
///
///
public Tensor maximum<T1, T2>(T1 x, T2 y, string name = null)
- => gen_math_ops.maximum(x, y, name: name);
+ => gen_math_ops.maximum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
///
/// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
@@ -433,7 +436,7 @@ namespace Tensorflow
///
///
public Tensor minimum<T1, T2>(T1 x, T2 y, string name = null)
- => gen_math_ops.minimum(x, y, name: name);
+ => gen_math_ops.minimum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
public Tensor multiply(Tensor x, Tensor y, string name = null)
=> gen_math_ops.mul(x, y, name: name);
@@ -448,7 +451,7 @@ namespace Tensorflow
///
///
public Tensor multiply<Tx, Ty>(Tx x, Ty y, string name = null)
- => gen_math_ops.mul(x, y, name: name);
+ => gen_math_ops.mul(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
public Tensor negative(Tensor x, string name = null)
=> gen_math_ops.neg(x, name);
@@ -577,7 +580,7 @@ namespace Tensorflow
=> math_ops.sigmoid(x, name: name);
public Tensor sum(Tensor input, int axis, bool keep_dims = false, string name = null)
- => gen_math_ops._sum(input, axis, keep_dims: keep_dims, name: name);
+ => gen_math_ops.sum(input, ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
public Tensor reduce_mean(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null, int? reduction_indices = null)
=> math_ops.reduce_mean(input_tensor, axis: axis, keepdims: keepdims, name: name, reduction_indices: reduction_indices);
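
logical_xor is now composed from logical_or / logical_and / logical_not instead of delegating to a dedicated kernel. A small illustrative call, assuming two boolean constant tensors:

    var a = tf.constant(new[] { true, true, false });
    var b = tf.constant(new[] { true, false, false });
    var c = tf.logical_xor(a, b);   // expected: [false, true, false]
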
diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs
index 1595e52f..e0c29bfa 100644
--- a/src/TensorFlowNET.Core/APIs/tf.nn.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs
@@ -29,21 +29,8 @@ namespace Tensorflow
public Tensor conv2d(Tensor input, Tensor filter, int[] strides, string padding, bool use_cudnn_on_gpu = true,
string data_format = "NHWC", int[] dilations = null, string name = null)
{
- var parameters = new Conv2dParams
- {
- Input = input,
- Filter = filter,
- Strides = strides,
- Padding = padding,
- UseCudnnOnGpu = use_cudnn_on_gpu,
- DataFormat = data_format,
- Name = name
- };
-
- if (dilations != null)
- parameters.Dilations = dilations;
-
- return gen_nn_ops.conv2d(parameters);
+ return gen_nn_ops.conv2d(input, filter, strides, padding, use_cudnn_on_gpu,
+ data_format: data_format, dilations: dilations, name: name);
}
public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null)
@@ -118,7 +105,7 @@ namespace Tensorflow
public IActivation softmax() => new softmax();
public Tensor tanh(Tensor x, string name = null)
- => gen_nn_ops.tanh(x, name);
+ => gen_math_ops.tanh(x, name);
public Tensor relu(Tensor features, string name = null)
=> gen_nn_ops.relu(features, name);
@@ -146,14 +133,14 @@ namespace Tensorflow
=> nn_ops.in_top_k(predictions, targets, k, name);
public Tensor[] top_k(Tensor input, int k = 1, bool sorted = true, string name = null)
- => gen_nn_ops.top_kv2(input, k: k, sorted: sorted, name: name);
+ => gen_nn_ops.top_kv2(input, k: ops.convert_to_tensor(k), sorted: sorted, name: name);
public Tensor bias_add(Tensor value, IVariableV1 bias, string data_format = null, string name = null)
{
return tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
{
name = scope;
- return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name);
+ return gen_nn_ops.bias_add(value, ops.convert_to_tensor(bias), data_format: data_format, name: name);
});
}
@@ -172,7 +159,7 @@ namespace Tensorflow
///
public Tensor lrn(Tensor input, int depth_radius = 5, int bias = 1,
int alpha = 1, float beta = 0.5f, string name = null)
- => gen_nn_ops.local_response_normalization(input, depth_radius: depth_radius, bias: bias,
+ => gen_nn_ops.lrn(input, depth_radius: depth_radius, bias: bias,
alpha: alpha, beta: beta, name: name);
public Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null)
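
tf.nn.conv2d now forwards directly to gen_nn_ops.conv2d with a flat argument list instead of building a Conv2dParams object. A sketch of a call through the public API, with `x` and `kernel` as placeholder tensors:

    var y = tf.nn.conv2d(x, kernel, strides: new[] { 1, 1, 1, 1 }, padding: "SAME");
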
diff --git a/src/TensorFlowNET.Core/APIs/tf.reshape.cs b/src/TensorFlowNET.Core/APIs/tf.reshape.cs
index cdd5194a..5da7b795 100644
--- a/src/TensorFlowNET.Core/APIs/tf.reshape.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.reshape.cs
@@ -31,6 +31,6 @@ namespace Tensorflow
public Tensor reshape(Tensor tensor,
object[] shape,
string name = null)
- => gen_array_ops.reshape(tensor, shape, name);
+ => gen_array_ops.reshape(tensor, ops.convert_to_tensor(shape), name);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.tensor.cs b/src/TensorFlowNET.Core/APIs/tf.tensor.cs
index 35efde06..be8c2ab2 100644
--- a/src/TensorFlowNET.Core/APIs/tf.tensor.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.tensor.cs
@@ -46,10 +46,10 @@ namespace Tensorflow
int ellipsis_mask = 0,
int new_axis_mask = 0,
int shrink_axis_mask = 0,
- string name = null) => gen_array_ops.strided_slice(input: input,
- begin: begin,
- end: end,
- strides: strides,
+ string name = null) => array_ops.strided_slice(input,
+ begin: ops.convert_to_tensor(begin),
+ end: ops.convert_to_tensor(end),
+ strides: ops.convert_to_tensor(strides),
begin_mask: begin_mask,
end_mask: end_mask,
ellipsis_mask: ellipsis_mask,
diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs
index be03e453..65975ac8 100644
--- a/src/TensorFlowNET.Core/APIs/tf.tile.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs
@@ -23,7 +23,7 @@ namespace Tensorflow
=> gen_array_ops.tile(input, multiples, name);
public Tensor tile(Tensor input, object[] multiples, string name = null)
- => gen_array_ops.tile(input, multiples, name);
+ => gen_array_ops.tile(input, ops.convert_to_tensor(multiples), name);
public Tensor tile(Tensor input, Shape multiples, string name = null)
{
diff --git a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs
index 2a22413b..ba6f653a 100644
--- a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs
+++ b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs
@@ -57,6 +57,21 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)]
public static extern int TF_OperationGetAttrValueProto(IntPtr oper, string attr_name, SafeBufferHandle output_attr_value, SafeStatusHandle status);
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrType(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrInt(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrFloat(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrBool(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrShape(IntPtr oper, string attr_name, long[] value, int num_dims, SafeStatusHandle status);
+
[DllImport(TensorFlowLibName)]
public static extern void TF_SetAttrBool(IntPtr desc, string attr_name, bool value);
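
These bindings back the typed attribute readers added to Operation later in this patch. A sketch of reading a single int attribute, assuming `opHandle` is a valid TF_Operation handle:

    unsafe
    {
        int value;
        var status = new Status();
        c_api.TF_OperationGetAttrInt(opHandle, "N", new IntPtr(&value), status);
        status.Check(true);
    }
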
diff --git a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs
index adb26ef2..1b295fcf 100644
--- a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs
+++ b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs
@@ -88,7 +88,7 @@ namespace Tensorflow.Clustering
public Tensor op()
{
- var x = control_flow_ops.cond(gen_math_ops.equal(_num_remaining, 0),
+ var x = control_flow_ops.cond(gen_math_ops.equal(_num_remaining, ops.convert_to_tensor(0)),
() =>
{
return check_ops.assert_equal(_cluster_centers_initialized, true);
diff --git a/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs b/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs
index ac1cd866..f6e0911c 100644
--- a/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs
+++ b/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs
@@ -49,7 +49,7 @@ namespace Tensorflow.Contexts
Tensors ExecEagerAction(string OpType, string Name, ExecuteOpArgs args)
{
- var opExecInfo = new FastPathOpExecInfo(OpType, Name, args.OpInputArgs)
+ var opExecInfo = new FastPathOpExecInfo(tf.Context, OpType, Name, args.OpInputArgs)
{
attrs = args.OpAttrs
};
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
index fedc02cb..f1a09ed7 100644
--- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
@@ -68,7 +68,8 @@ namespace Tensorflow.Eager
var input_arg = op_def.InputArg[i];
if (!string.IsNullOrEmpty(input_arg.NumberAttr))
{
- int len = (input as object[]).Length;
+ var fast_input_array = input is Tensors tensors ? (object[])tensors : (object[])input;
+ int len = fast_input_array.Length;
c_api.TFE_OpSetAttrInt(op, input_arg.NumberAttr, len);
if (op_exec_info.run_callbacks)
{
@@ -79,7 +80,6 @@ namespace Tensorflow.Eager
if (len > 0)
{
- var fast_input_array = (object[])op_exec_info.args[i];
// First item adds the type attr.
if (!AddInputToOp(fast_input_array[i], true, input_arg, flattened_attrs, flattened_inputs, op, status))
return null;
diff --git a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs
index 2cdf025a..307ca2ce 100644
--- a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs
+++ b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs
@@ -17,8 +17,9 @@ namespace Tensorflow
public bool run_callbacks { get; set; }
public Action callbacks { get; set; }
- public FastPathOpExecInfo(string opName, string name, params object[] inputArgs)
+ public FastPathOpExecInfo(Context ctx, string opName, string name, params object[] inputArgs)
{
+ this.ctx = ctx;
this.op_name = opName;
this.name = name;
this.args = inputArgs;
diff --git a/src/TensorFlowNET.Core/Eager/execute.cs b/src/TensorFlowNET.Core/Eager/execute.cs
index 1804992a..e981c6c5 100644
--- a/src/TensorFlowNET.Core/Eager/execute.cs
+++ b/src/TensorFlowNET.Core/Eager/execute.cs
@@ -7,10 +7,11 @@ using Tensorflow.Contexts;
using static Tensorflow.ApiDef.Types;
using static Tensorflow.CostGraphDef.Types;
using static Tensorflow.Binding;
+using Tensorflow.Gradients;
namespace Tensorflow.Eager
{
- internal static class execute
+ internal static class _execute
{
public static (DataType[], Tensor[]) onvert_to_mixed_eager_tensors(Tensor[] values, Context ctx)
{
@@ -18,7 +19,7 @@ namespace Tensorflow.Eager
var types = v.Select(t => t.dtype.as_datatype_enum());
return (types.ToArray(), v.ToArray());
}
- public static Tensor[] executes(string op_name, int num_outputs, Tensor[] inputs, object[] attrs, Context ctx, string name = null)
+ public static Tensor[] execute(string op_name, int num_outputs, Tensor[] inputs, object[] attrs, Context ctx, string name = null)
{
return quick_execute(op_name, num_outputs, inputs, attrs, ctx, name);
}
@@ -33,7 +34,12 @@ namespace Tensorflow.Eager
}
public static bool must_record_gradient()
{
- return false;
+ return tf.GetTapeSet().Count != 0;
+ }
+
+ public static bool record_gradient(string op_name, Tensor[] inputs, object[] attrs, Tensor[] results)
+ {
+ return tf.Runner.RecordGradient(op_name, inputs, attrs, results);
}
}
}
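
A sketch of how the regenerated op wrappers later in this patch use the renamed helpers:

    var result = _execute.execute("BatchMatrixDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
    if (_execute.must_record_gradient())
        _execute.record_gradient("BatchMatrixDiag", _inputs_flat, _attrs, result);
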
diff --git a/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs b/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs
index cc38683d..d547b612 100644
--- a/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs
+++ b/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs
@@ -147,7 +147,7 @@ namespace Tensorflow.Functions
Tensor[] outputs;
if (executing_eagerly)
{
- outputs = execute.executes(
+ outputs = _execute.execute(
Signature.Name,
_num_outputs,
args,
diff --git a/src/TensorFlowNET.Core/Gradients/GradientTape.cs b/src/TensorFlowNET.Core/Gradients/GradientTape.cs
index b5fd373e..a714436a 100644
--- a/src/TensorFlowNET.Core/Gradients/GradientTape.cs
+++ b/src/TensorFlowNET.Core/Gradients/GradientTape.cs
@@ -44,6 +44,15 @@ namespace Tensorflow.Gradients
return tape;
}
+ public void PushTape(ITape tape)
+ {
+ // Enters a context inside which operations are recorded on this tape.
+ if (tf.Context.executing_eagerly())
+ tf.Context.ensure_initialized();
+
+ _tapeSet.Push(tape);
+ }
+
ITape PopTape()
{
_tape.StopRecord();
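
The new public PushTape pairs with stop_recording/StartRecord; a sketch mirroring the stop_gradient rewrite in array_ops.cs later in this patch:

    var tape = tf.GradientTape().stop_recording();
    var result = gen_array_ops.stop_gradient(input, name);
    tape.StartRecord();
    tf.GradientTape().PushTape(tape);
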
diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs
index c4cb9fbd..f939f7b6 100644
--- a/src/TensorFlowNET.Core/Gradients/array_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs
@@ -36,8 +36,7 @@ namespace Tensorflow.Gradients
var input_value = op.inputs[0];
var broadcast_shape = op.inputs[1];
var input_value_shape = array_ops.shape(input_value);
- var (_, reduction_axes) = gen_array_ops.broadcast_gradient_args(broadcast_shape,
- input_value_shape);
+ var reduction_axes = gen_array_ops.broadcast_gradient_args(broadcast_shape, input_value_shape)[1];
var updates_grad_reshaped = math_ops.reduce_sum(grad,
axis: reduction_axes,
keepdims: true);
@@ -351,16 +350,16 @@ namespace Tensorflow.Gradients
null,
null,
null,
- gen_array_ops.strided_slice(
+ array_ops.strided_slice(
grad,
begin,
end,
strides,
- begin_mask: op.get_attr("begin_mask"),
- end_mask: op.get_attr("end_mask"),
- ellipsis_mask: op.get_attr("ellipsis_mask"),
- new_axis_mask: op.get_attr("new_axis_mask"),
- shrink_axis_mask: op.get_attr("shrink_axis_mask"))
+ begin_mask: (int)op.get_attr("begin_mask"),
+ end_mask: (int)op.get_attr("end_mask"),
+ ellipsis_mask: (int)op.get_attr("ellipsis_mask"),
+ new_axis_mask: (int)op.get_attr("new_axis_mask"),
+ shrink_axis_mask: (int)op.get_attr("shrink_axis_mask"))
};
}
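
broadcast_gradient_args now returns a Tensor[] rather than a tuple, so gradient functions index or destructure the result. A representative sketch:

    var args = gen_array_ops.broadcast_gradient_args(sx, sy);
    var (rx, ry) = (args[0], args[1]);
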
diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs
index 89699d6b..be1fbbba 100644
--- a/src/TensorFlowNET.Core/Gradients/math_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs
@@ -53,7 +53,8 @@ namespace Tensorflow.Gradients
var sx = array_ops.shape(x);
var sy = array_ops.shape(y);
- var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var args = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var (rx, ry) = (args[0], args[1]);
var sum1 = math_ops.reduce_sum(grad, rx);
var r1 = gen_array_ops.reshape(sum1, sx);
@@ -101,7 +102,8 @@ namespace Tensorflow.Gradients
var y = op.inputs[1];
var sx = array_ops.shape(x);
var sy = array_ops.shape(y);
- var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var args = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var (rx, ry) = (args[0], args[1]);
x = math_ops.conj(x);
y = math_ops.conj(y);
@@ -427,7 +429,8 @@ namespace Tensorflow.Gradients
isMaximum
? gen_math_ops.greater_equal(x, y)
: gen_math_ops.less_equal(x, y);
- var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var args = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var (rx, ry) = (args[0], args[1]);
var xgrad = array_ops.where(xmask, grad, zeros);
var gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx);
var ygrad = array_ops.where(xmask, zeros, grad);
@@ -458,7 +461,7 @@ namespace Tensorflow.Gradients
private static Tensor _safe_shape_div(Tensor x, Tensor y)
{
- return math_ops.floordiv(x, gen_math_ops.maximum(y, 1));
+ return math_ops.floordiv(x, gen_math_ops.maximum(y, ops.convert_to_tensor(1)));
}
[RegisterGradient("Sub")]
@@ -573,7 +576,8 @@ namespace Tensorflow.Gradients
var sx = array_ops.shape(x);
var sy = array_ops.shape(y);
- var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var args = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var (rx, ry) = (args[0], args[1]);
x = math_ops.conj(x);
y = math_ops.conj(y);
@@ -824,7 +828,7 @@ namespace Tensorflow.Gradients
mask = x > 0.0f;
var ones = array_ops.ones_like(x);
var safe_x = array_ops.where(mask, x, ones);
- var x1 = gen_array_ops.log(safe_x);
+ var x1 = math_ops.log(safe_x);
var y1 = array_ops.zeros_like(x);
var log_x = array_ops.where(mask, x1, y1);
var mul1 = grad * z * log_x;
@@ -855,7 +859,8 @@ namespace Tensorflow.Gradients
sy = array_ops.shape_internal(y, optimize: false);
}
- var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var args = gen_array_ops.broadcast_gradient_args(sx, sy);
+ var (rx, ry) = (args[0], args[1]);
return new[]
{
(sx, rx, !x.shape.Equals(grad.shape)),
diff --git a/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs b/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs
index 530bb6c0..f8b16090 100644
--- a/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs
+++ b/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs
@@ -47,8 +47,8 @@ namespace Tensorflow.Gradients
{
return new Tensor[]
{
- gen_math_ops.mul(grad, y),
- gen_math_ops.mul(grad, x)
+ math_ops.multiply(grad, y),
+ math_ops.multiply(grad, x)
};
}
diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs
index e9516393..a1ac97a9 100644
--- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs
@@ -192,17 +192,8 @@ namespace Tensorflow.Gradients
explicit_paddings: explicit_paddings,
dilations: dilations,
data_format: data_format),
- gen_nn_ops.conv2d(new Conv2dParams
- {
- Input = grad,
- Filter = op.inputs[1],
- Strides = strides,
- Padding = padding,
- DataFormat = data_format,
- Dilations = dilations,
- ExplicitPaddings = explicit_paddings,
- UseCudnnOnGpu = use_cudnn_on_gpu
- })
+ gen_nn_ops.conv2d(grad, op.inputs[1], strides, padding,
+ use_cudnn_on_gpu, explicit_paddings, data_format, dilations)
};
}
@@ -265,20 +256,27 @@ namespace Tensorflow.Gradients
var epsilon = op.get_attr<float>("epsilon");
var data_format = op.get_attr<string>("data_format");
var is_training = op.get_attr<bool>("is_training");
- Func<FusedBatchNormParams, Tensor[]> grad_fun = null;
-
- switch (version)
+ Func<FusedBatchNormParams, Tensor[]> grad_fun = (p) =>
{
- case 2:
- grad_fun = gen_nn_ops.fused_batch_norm_grad_v3;
- break;
- case 1:
- // grad_fun = gen_nn_ops.fused_batch_norm_grad_v2;
- throw new NotImplementedException("");
- default:
- grad_fun = gen_nn_ops.fused_batch_norm_grad;
- break;
- }
+ if(version == 2)
+ {
+ return gen_nn_ops.fused_batch_norm_grad_v3(p.YBackprop, p.X, p.Scale,
+ p.ReserveSpace1, p.ReserveSpace2, p.ReserveSpace3, p.Epsilon,
+ p.DataFormat, p.IsTraining, p.Name);
+ }
+ else if(version == 1)
+ {
+ return gen_nn_ops.fused_batch_norm_grad_v2(p.YBackprop, p.X, p.Scale,
+ p.ReserveSpace1, p.ReserveSpace2, p.Epsilon, p.DataFormat,
+ p.IsTraining, p.Name);
+ }
+ else
+ {
+ return gen_nn_ops.fused_batch_norm_grad(p.YBackprop, p.X, p.Scale,
+ p.ReserveSpace1, p.ReserveSpace2, p.Epsilon, p.DataFormat,
+ p.IsTraining, p.Name);
+ }
+ };
if (is_training)
{
@@ -406,7 +404,7 @@ namespace Tensorflow.Gradients
// finally reshaping it to the original input shape.
var scatter = gen_array_ops.scatter_nd(array_ops.expand_dims(ind, -1),
array_ops.reshape(grad, new int[] { -1 }),
- new Tensor[] { math_ops.reduce_prod(in_shape) });
+ math_ops.reduce_prod(in_shape));
return new Tensor[]
{
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs b/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs
index d43f8a0c..84ce56a4 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs
@@ -34,7 +34,7 @@ namespace Tensorflow.Operations
{
name = scope;
value = ops.convert_to_tensor(value, name: "input");
- return gen_nn_ops.average_pool(
+ return gen_nn_ops.avg_pool(
value,
ksize: ksize,
strides: strides,
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs
index 958d79f4..ec70b185 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs
@@ -67,16 +67,15 @@ namespace Tensorflow.Operations
var dilations = _get_sequence(args.DilationRate, num_spatial_dims, channel_index).ToArray();
var strides = _get_sequence(args.Strides, num_spatial_dims, channel_index).ToArray();
- result = gen_nn_ops.conv2d(new Conv2dParams
- {
- Input = input,
- Filter = filters,
- Strides = strides,
- Padding = padding,
- DataFormat = data_format,
- Dilations = dilations,
- Name = name
- });
+ result = gen_nn_ops.conv2d(
+ input,
+ filters,
+ strides,
+ padding,
+ data_format: data_format,
+ dilations: dilations,
+ name: name
+ );
}
else
{
@@ -93,16 +92,15 @@ namespace Tensorflow.Operations
input = array_ops.expand_dims(input, spatial_start_dim);
filters = array_ops.expand_dims(filters, 0);
- result = gen_nn_ops.conv2d(new Conv2dParams
- {
- Input = input,
- Filter = filters,
- Strides = strides.ToArray(),
- Padding = padding,
- DataFormat = channel_first ? "NCHW" : "NHWC",
- Dilations = dilations.ToArray(),
- Name = name
- });
+ result = gen_nn_ops.conv2d(
+ input,
+ filters,
+ strides.ToArray(),
+ padding,
+ data_format: channel_first ? "NCHW" : "NHWC",
+ dilations: dilations.ToArray(),
+ name: name
+ );
result = array_ops.squeeze(result, new[] { spatial_start_dim });
}
});
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
deleted file mode 100644
index 408d06eb..00000000
--- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs
+++ /dev/null
@@ -1,373 +0,0 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System.Linq;
-using static Tensorflow.Binding;
-
-namespace Tensorflow.Operations
-{
- public class gen_nn_ops
- {
- ///
- /// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
- ///
- /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
- /// and a filter / kernel tensor of shape
- /// `[filter_height, filter_width, in_channels, out_channels]`, this op
- /// performs the following:
- ///
- /// 1. Flattens the filter to a 2-D matrix with shape
- /// `[filter_height * filter_width * in_channels, output_channels]`.
- /// 2. Extracts image patches from the input tensor to form a *virtual*
- /// tensor of shape `[batch, out_height, out_width,
- /// filter_height * filter_width * in_channels]`.
- /// 3. For each patch, right-multiplies the filter matrix and the image patch
- /// vector.
- ///
- ///
- ///
- public static Tensor conv2d(Conv2dParams parameters)
- => tf.Context.ExecuteOp("Conv2D", parameters.Name, new ExecuteOpArgs(parameters.Input, parameters.Filter)
- .SetAttributes(new
- {
- strides = parameters.Strides,
- padding = parameters.Padding,
- use_cudnn_on_gpu = parameters.UseCudnnOnGpu,
- explicit_paddings = parameters.ExplicitPaddings,
- data_format = parameters.DataFormat,
- dilations = parameters.Dilations
- }));
-
- ///
- /// Computes the gradients of convolution with respect to the filter.
- ///
- ///
- ///
- public static Tensor conv2d_backprop_filter(Tensor input, Tensor filter_sizes, Tensor out_backprop,
- int[] strides, string padding, bool use_cudnn_on_gpu = true,
- int[] explicit_paddings = null,
- string data_format = "NHWC",
- int[] dilations = null,
- string name = null)
- => tf.Context.ExecuteOp("Conv2DBackpropFilter", name, new ExecuteOpArgs(input, filter_sizes, out_backprop)
- .SetAttributes(new
- {
- strides,
- padding,
- use_cudnn_on_gpu,
- explicit_paddings = explicit_paddings ?? new int[0],
- data_format,
- dilations = dilations ?? new int[] { 1, 1, 1, 1 }
- }));
-
- ///
- /// Computes the gradients of convolution with respect to the input.
- ///
- ///
- ///
- public static Tensor conv2d_backprop_input(Tensor input_sizes, Tensor filter, Tensor out_backprop,
- int[] strides, string padding, bool use_cudnn_on_gpu = true,
- int[] explicit_paddings = null,
- string data_format = "NHWC",
- int[] dilations = null,
- string name = null)
- => tf.Context.ExecuteOp("Conv2DBackpropInput", name, new ExecuteOpArgs(input_sizes, filter, out_backprop)
- .SetAttributes(new
- {
- strides,
- padding,
- use_cudnn_on_gpu,
- explicit_paddings = explicit_paddings ?? new int[0],
- data_format,
- dilations = dilations ?? new int[] { 1, 1, 1, 1 }
- }));
-
- public static Tensor bias_add(Tensor value,
- IVariableV1 bias,
- string data_format = null,
- string name = null)
- => tf.Context.ExecuteOp("BiasAdd", name, new ExecuteOpArgs(value, bias)
- .SetAttributes(new { data_format = data_format ?? "NHWC" }));
-
- public static Tensor bias_add_grad(Tensor out_backprop,
- string data_format = "NHWC",
- string name = null)
- => tf.Context.ExecuteOp("BiasAddGrad", name, new ExecuteOpArgs(out_backprop)
- .SetAttributes(new { data_format = data_format ?? "NHWC" }));
-
- ///
- /// Computes exponential linear: exp(features) - 1 if < 0, features otherwise.
- ///
- ///
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'.
- ///
- ///
- /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
- ///
- ///
- /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
- /// ](http://arxiv.org/abs/1511.07289)
- ///
- public static Tensor elu(Tensor features, string name = "Elu")
- {
- var op = tf.OpDefLib._apply_op_helper("Elu", name: name, args: new { features });
- return op.output;
- }
-
- ///
- /// Gradient for batch normalization.
- ///
- ///
- ///
- public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params)
- {
- var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new
- {
- y_backprop = @params.YBackprop,
- x = @params.X,
- scale = @params.Scale,
- reserve_space_1 = @params.ReserveSpace1,
- reserve_space_2 = @params.ReserveSpace2,
- epsilon = @params.Epsilon,
- data_format = @params.DataFormat,
- is_training = @params.IsTraining
- });
- return op.outputs;
- }
-
- public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params)
- => tf.Context.ExecuteOp("FusedBatchNormGradV3", @params.Name,
- new ExecuteOpArgs(@params.YBackprop,
- @params.X,
- @params.Scale,
- @params.ReserveSpace1,
- @params.ReserveSpace2,
- @params.ReserveSpace3)
- .SetAttributes(new
- {
- epsilon = @params.Epsilon,
- data_format = @params.DataFormat,
- is_training = @params.IsTraining
- }));
-
- public static Tensor[] fused_batch_norm(Tensor x,
- Tensor scale,
- Tensor offset,
- Tensor mean,
- Tensor variance,
- float epsilon = 0.0001f,
- string data_format = "NHWC",
- bool is_training = true,
- string name = null)
- {
- var _op = tf.OpDefLib._apply_op_helper("FusedBatchNorm", name: name, args: new
- {
- x,
- scale,
- offset,
- mean,
- variance,
- epsilon,
- data_format,
- is_training
- });
-
- return _op.outputs;
- }
-
- public static Tensors fused_batch_norm_v3(Tensor x,
- Tensor scale,
- Tensor offset,
- Tensor mean,
- Tensor variance,
- float epsilon = 0.0001f,
- float exponential_avg_factor = 1.0f,
- string data_format = "NHWC",
- bool is_training = true,
- string name = null)
- => tf.Context.ExecuteOp("FusedBatchNormV3", name, new ExecuteOpArgs(x, scale, offset, mean, variance)
- .SetAttributes(new { epsilon, data_format, is_training }));
-
- ///
- /// Local Response Normalization.
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1,
- int alpha = 1, float beta = 0.5f, string name = null)
- {
- var _op = tf.OpDefLib._apply_op_helper("LRN", name: name, args: new
- {
- input,
- depth_radius,
- bias,
- alpha,
- beta
- });
-
- return _op.output;
- }
-
- public static Tensor log_softmax(Tensor logits, string name = null)
- => tf.Context.ExecuteOp("LogSoftmax", name, new ExecuteOpArgs(logits));
-
- ///
- /// Says whether the targets are in the top `K` predictions.
- ///
- ///
- ///
- ///
- ///
- /// A `Tensor` of type `bool`.
- public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null)
- => tf.Context.ExecuteOp("InTopKV2", name,
- new ExecuteOpArgs(predictions, targets, k));
-
- public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null)
- => tf.Context.ExecuteOp("LeakyRelu", name,
- new ExecuteOpArgs(features).SetAttributes(new { alpha }));
-
- public static Tensor average_pool(Tensor input,
- int[] ksize,
- int[] strides,
- string padding,
- string data_format = "NHWC",
- string name = null)
- => tf.Context.ExecuteOp("AvgPool", name, new ExecuteOpArgs(input)
- .SetAttributes(new
- {
- ksize,
- strides,
- padding,
- data_format
- }));
-
- public static Tensor max_pool(Tensor input,
- int[] ksize,
- int[] strides,
- string padding,
- string data_format = "NHWC",
- string name = null)
- => tf.Context.ExecuteOp("MaxPool", name, new ExecuteOpArgs(input)
- .SetAttributes(new
- {
- ksize,
- strides,
- padding,
- data_format
- }));
-
- public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding,
- string data_format = "NHWC", string name = null)
- => tf.Context.ExecuteOp("MaxPoolGrad", name, new ExecuteOpArgs(orig_input, orig_output, grad)
- .SetAttributes(new
- {
- ksize,
- strides,
- padding,
- data_format
- }));
-
- public static Tensor[] top_kv2<T>(Tensor input, T k, bool sorted = true, string name = null)
- {
- var _op = tf.OpDefLib._apply_op_helper("TopKV2", name: name, args: new
- {
- input,
- k,
- sorted
- });
-
- return _op.outputs;
- }
-
- public static Tensor relu_grad(Tensor gradients, Tensor features, string name = null)
- => tf.Context.ExecuteOp("ReluGrad", name, new ExecuteOpArgs(gradients, features));
-
- public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null)
- => tf.Context.ExecuteOp("LeakyReluGrad", name, new ExecuteOpArgs(gradients, features)
- .SetAttributes(new { alpha }));
-
- public static Tensor softmax(Tensor logits, string name = null)
- => tf.Context.ExecuteOp("Softmax", name, new ExecuteOpArgs(logits));
-
- ///
- /// Computes softmax cross entropy cost and gradients to backpropagate.
- ///
- ///
- ///
- ///
- ///
- public static (Tensor, Tensor) softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = null)
- {
- var results = tf.Context.ExecuteOp("SoftmaxCrossEntropyWithLogits", name, new ExecuteOpArgs(features, labels));
-
- return (results[0], results[1]);
- }
-
- ///
- /// Computes softmax cross entropy cost and gradients to backpropagate.
- ///
- ///
- /// batch_size x num_classes matrix
- ///
- ///
- /// batch_size vector with values in [0, num_classes).
- /// This is the label for the given minibatch entry.
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'.
- ///
- ///
- /// Returns a tuple with multiple values, as follows:
- /// loss : Per example loss (batch_size vector).
- /// backprop : backpropagated gradients (batch_size x num_classes matrix).
- /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property.
- ///
- ///
- /// Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept
- /// a matrix of label probabilities, but rather a single label per row
- /// of features. This label is considered to have probability 1.0 for the
- /// given row.
- ///
- /// Inputs are the logits, not probabilities.
- ///
- public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits")
- {
- var results = tf.Context.ExecuteOp("SparseSoftmaxCrossEntropyWithLogits", name, new ExecuteOpArgs(features, labels));
-
- return (results[0], results[1]);
- }
-
- ///
- /// Computes rectified linear: `max(features, 0)`.
- ///
- /// A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`.
- /// A name for the operation (optional).
- /// A `Tensor`. Has the same type as `features`.
- public static Tensor relu(Tensor features, string name = null)
- => tf.Context.ExecuteOp("Relu", name, new ExecuteOpArgs(features));
-
- public static Tensor tanh(Tensor x, string name = null)
- => tf.Context.ExecuteOp("Tanh", name, new ExecuteOpArgs(x));
- }
-}
diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
index 3ccf0c19..76a222ba 100644
--- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
+++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
@@ -103,6 +103,11 @@ namespace Tensorflow
DataType dtype = DataType.DtInvalid;
DataType default_dtype = DataType.DtInvalid;
+ if (values is Tensors tensors)
+ {
+ values = (Tensor[])tensors;
+ }
+
if (_IsListParameter(input_arg))
{
if (!_IsListValue(values))
diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs
index 311f2184..a789c5f4 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.cs
@@ -187,6 +187,33 @@ namespace Tensorflow
public virtual T get_attr<T>(string name)
=> (T)get_attr(name);
+ internal unsafe TF_DataType _get_attr_type(string name)
+ {
+ Status status = new();
+ TF_DataType result;
+ c_api.TF_OperationGetAttrType(_handle, name, new IntPtr(&result), status);
+ status.Check(true);
+ return result;
+ }
+
+ internal unsafe int _get_attr_int(string name)
+ {
+ Status status = new();
+ int result;
+ c_api.TF_OperationGetAttrInt(_handle, name, new IntPtr(&result), status);
+ status.Check(true);
+ return result;
+ }
+
+ internal unsafe bool _get_attr_bool(string name)
+ {
+ Status status = new();
+ bool result;
+ c_api.TF_OperationGetAttrBool(_handle, name, new IntPtr(&result), status);
+ status.Check(true);
+ return result;
+ }
+
public virtual T[] get_attr_list<T>(string name)
{
if (tf.executing_eagerly())
@@ -229,7 +256,42 @@ namespace Tensorflow
if(oneof_value == AttrValue.ValueOneofCase.List)
{
- throw new NotImplementedException($"Unsupported field type in {oneof_value}");
+ if (x.List.S is not null && x.List.S.Count > 0)
+ {
+ return x.List.S.Select(x => x.ToStringUtf8()).ToArray();
+ }
+ else if (x.List.I is not null && x.List.I.Count > 0)
+ {
+ return x.List.I.ToArray();
+ }
+ else if (x.List.F is not null && x.List.F.Count > 0)
+ {
+ return x.List.F.ToArray();
+ }
+ else if (x.List.B is not null && x.List.B.Count > 0)
+ {
+ return x.List.B.ToArray();
+ }
+ else if (x.List.Shape is not null && x.List.Shape.Count > 0)
+ {
+ return x.List.Shape.ToArray();
+ }
+ else if (x.List.Tensor is not null && x.List.Tensor.Count > 0)
+ {
+ return x.List.Tensor.ToArray();
+ }
+ else if (x.List.Func is not null && x.List.Func.Count > 0)
+ {
+ return x.List.Func.ToArray();
+ }
+ else if (x.List.Type is not null && x.List.Type.Count > 0)
+ {
+ return x.List.Type.Select(x => x.as_tf_dtype()).ToArray();
+ }
+ else
+ {
+ return null;
+ }
}
if(oneof_value == AttrValue.ValueOneofCase.Type)
{
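
A sketch of the new typed attribute readers in use, matching the gradient-recording path of the regenerated wrappers below:

    object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
    _execute.record_gradient("BatchMatrixBandPart", _op.inputs, _attrs, _result);
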
diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs
index 2767e821..a0b47aac 100644
--- a/src/TensorFlowNET.Core/Operations/array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/array_ops.cs
@@ -22,12 +22,13 @@ using Tensorflow.Contexts;
using Tensorflow.Eager;
using Tensorflow.Framework;
using static Tensorflow.Binding;
+using System.Diagnostics;
namespace Tensorflow
{
public class array_ops
{
- public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null)
+ public static Tensor placeholder_with_default(Tensor input, int[] shape, string name = null)
=> gen_array_ops.placeholder_with_default(input, shape, name);
///
@@ -132,7 +133,7 @@ namespace Tensorflow
if (ndims_mask < 1)
throw new ValueError("mask cannot be scalar.");
- var leading_size = gen_math_ops.prod(shape(tensor_tensor)[$"{axis}:{axis + ndims_mask}"], new[] { 0 });
+ var leading_size = gen_math_ops.prod(shape(tensor_tensor)[$"{axis}:{axis + ndims_mask}"], ops.convert_to_tensor(new[] { 0 }));
var shape1 = concat(new[]
{
shape(tensor_tensor)[$":{axis}"],
@@ -153,7 +154,7 @@ namespace Tensorflow
private static Tensor _apply_mask_1d(Tensor reshaped_tensor, Tensor mask, int axis = 0)
{
var indices = squeeze(where(mask), axis: new[] { 1 });
- return gather(reshaped_tensor, indices, axis: axis);
+ return gather(reshaped_tensor, indices, axis: ops.convert_to_tensor(axis));
}
public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
@@ -293,7 +294,7 @@ namespace Tensorflow
}
public static Tensor expand_dims(Tensor input, int axis = -1, string name = null)
- => gen_array_ops.expand_dims(input, axis, name);
+ => gen_array_ops.expand_dims(input, ops.convert_to_tensor(axis), name);
///
/// Creates a tensor filled with a scalar value.
@@ -304,7 +305,7 @@ namespace Tensorflow
/// Optional string. The name of the output `tf.Tensor`.
/// A `tf.Tensor` with shape `dims` and the same dtype as `value`.
public static Tensor fill<T>(Shape dims, T value, string name = null)
- => gen_array_ops.fill(dims, value, name: name);
+ => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name);
///
/// Returns the rank of a tensor.
@@ -368,7 +369,7 @@ namespace Tensorflow
=> gen_array_ops.reshape(tensor, shape, name: name);
public static Tensor reshape(Tensor tensor, object[] shape, string name = null)
- => gen_array_ops.reshape(tensor, shape, name: name);
+ => gen_array_ops.reshape(tensor, ops.convert_to_tensor(shape), name: name);
private static Tensor ones_like_impl<T>(T tensor, TF_DataType dtype, string name, bool optimize = true)
{
@@ -466,7 +467,11 @@ namespace Tensorflow
}
public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null)
- => gen_array_ops.unique(x, out_idx: out_idx, name: name);
+ {
+ var res = gen_array_ops.unique(x, out_idx: out_idx, name: name);
+ Debug.Assert(res.Length == 2);
+ return (res[0], res[1]);
+ }
public static Tensor stack(Tensor[] values, int axis = 0, string name = "stack")
{
@@ -492,12 +497,12 @@ namespace Tensorflow
{
name = scope;
condition = ops.convert_to_tensor(condition, preferred_dtype: dtypes.@bool, name: "condition");
- return gen_array_ops.where(condition: condition, name: name);
+ return gen_array_ops.where(condition, name: name);
});
}
else if (x != null && y != null)
{
- return gen_array_ops.select(condition, x, y, name);
+ return gen_math_ops.select(condition, ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
}
else
{
@@ -505,7 +510,6 @@ namespace Tensorflow
}
}
-
public static Tensor where_v2(Tensor condition, object x = null, object y = null, string name = null)
{
if (x == null && y == null)
@@ -514,18 +518,19 @@ namespace Tensorflow
{
name = scope;
condition = ops.convert_to_tensor(condition, preferred_dtype: dtypes.@bool, name: "condition");
- return gen_array_ops.where(condition: condition, name: name);
+ return gen_array_ops.where(condition, name: name);
});
}
else if (x != null && y != null)
{
- return gen_array_ops.select_v2(condition, x, y, name);
+ return gen_math_ops.select_v2(condition, ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
}
else
{
throw new ValueError("x and y must both be non-None or both be None.");
}
}
+
///
/// Returns the shape of a tensor.
///
@@ -634,7 +639,13 @@ namespace Tensorflow
///
///
public static Tensor stop_gradient(Tensor input, string name = null)
- => tf.Context.ExecuteOp("StopGradient", name, new ExecuteOpArgs(input));
+ {
+ var tape = tf.GradientTape().stop_recording();
+ var result = gen_array_ops.stop_gradient(input, name);
+ tape.StartRecord();
+ tf.GradientTape().PushTape(tape);
+ return result;
+ }
///
/// Extracts a strided slice of a tensor (generalized python array indexing).
@@ -858,7 +869,7 @@ namespace Tensorflow
});
}
- return gen_array_ops.concat_v2(values, axis, name: name);
+ return gen_array_ops.concat_v2(values, ops.convert_to_tensor(axis), name: name);
}
public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat")
@@ -868,7 +879,7 @@ namespace Tensorflow
public static Tensor concat(object[] values, int axis, string name = "concat")
{
- return gen_array_ops.concat_v2(values, axis, name: name);
+ return tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis));
}
///
@@ -886,18 +897,33 @@ namespace Tensorflow
///
/// An integer. The number of batch dimensions. Must be less than or equal to rank(indices).
///
- public static Tensor gather<T1, T2>(T1 @params, T2 indices, string name = null, int axis = 0, int batch_dims = 0)
+ public static Tensor gather(Tensor @params, Tensor indices, string name = null, Tensor axis = null, int batch_dims = 0)
{
- if (axis != 0)
- return gen_array_ops.gather_v2(@params, indices, axis, name: name);
-
- if (@params is ResourceVariable variable &&
- indices is Tensor indices_tensor)
- return variable.sparse_read(indices_tensor, name);
+ if (axis is null)
+ axis = tf.convert_to_tensor(batch_dims);
+ if(tensor_util.constant_value(axis) != 0)
+ {
+ return gen_array_ops.gather_v2(@params, indices, axis, batch_dims: batch_dims, name: name);
+ }
return gen_array_ops.gather_v2(@params, indices, axis, name: name);
}
+ public static Tensor gather(Tensor @params, Tensor indices, int axis, string name = null, int batch_dims = 0)
+ => gather(@params, indices, name, ops.convert_to_tensor(axis), batch_dims);
+
+ public static Tensor gather(ResourceVariable @params, Tensor indices, string name = null, Tensor axis = null, int batch_dims = 0)
+ {
+ if (axis is null)
+ axis = tf.convert_to_tensor(batch_dims);
+ if (tensor_util.constant_value(axis) != 0)
+ {
+ throw new NotImplementedException();
+ }
+
+ return @params.sparse_read(indices, name);
+ }
+
public static Tensor transpose<T1>(T1 a, Axis perm, string name = "transpose", bool conjugate = false)
{
return tf_with(ops.name_scope(name, "transpose", new { a }), scope =>
@@ -927,7 +953,7 @@ namespace Tensorflow
if (num == -1)
num = (int)size_splits.shape[0];
- return gen_array_ops.split_v(value, size_splits, axis, num, name: name);
+ return gen_array_ops.split_v(value, size_splits, tf.convert_to_tensor(axis), num, name: name);
}
public static Tensor[] split<T>(Tensor value, int num_split, T axis,
@@ -956,20 +982,10 @@ namespace Tensorflow
}
public static Tensor slice(Tensor input, Tensor[] begin, Tensor[] size, string name = null)
- => gen_array_ops.slice(input, begin, size, name: name);
-
- public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null)
- => gen_array_ops.slice(input, begin, size, name: name);
+ => gen_array_ops.slice(input, ops.convert_to_tensor(begin), ops.convert_to_tensor(size), name: name);
public static Tensor slice(Tensor input, Tensor begin, Tensor size, string name = null)
- => tf.Context.ExecuteOp("Slice", name, new ExecuteOpArgs(input, begin, size)
- {
- GetGradientAttrs = (op) => new
- {
- T = op.get_attr("T"),
- Index = op.get_attr("Index")
- }
- });
+ => gen_array_ops.slice(input, begin, size, name: name);
public static Tensor stack(object values, int axis = 0, string name = "stack")
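
gather is now split into explicit overloads for Tensor, int and ResourceVariable arguments rather than a single generic method. A small illustrative call through the int-axis overload, with `p` and `idx` as placeholder tensors:

    var taken = array_ops.gather(p, idx, axis: 1);
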
diff --git a/src/TensorFlowNET.Core/Operations/dataset_ops.cs b/src/TensorFlowNET.Core/Operations/dataset_ops.cs
index c7e62777..061fb95e 100644
--- a/src/TensorFlowNET.Core/Operations/dataset_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/dataset_ops.cs
@@ -233,7 +233,7 @@ namespace Tensorflow
{
try
{
- var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("AnonymousIteratorV3", name)
+ var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AnonymousIteratorV3", name)
{
attrs = attrs
});
@@ -250,7 +250,7 @@ namespace Tensorflow
public Tensor anonymous_iterator_v3_eager_fallback(TF_DataType[] output_types, Shape[] output_shapes, string name, Context ctx)
{
object[] attrs = new object[] { output_types, output_shapes };
- var result = execute.quick_execute("AnonymousIteratorV3", 1, new Tensor[] { }, attrs, ctx, name);
+ var result = _execute.quick_execute("AnonymousIteratorV3", 1, new Tensor[] { }, attrs, ctx, name);
return result[0];
}
diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
index 1dc6504a..9810d32f 100644
--- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
@@ -1,543 +1,10327 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System;
-using System.Linq;
-using Tensorflow.Contexts;
using Tensorflow.Eager;
+using Tensorflow.Contexts;
using static Tensorflow.Binding;
-namespace Tensorflow
+namespace Tensorflow;
+
+public static class gen_array_ops
{
- public static class gen_array_ops
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor batch_matrix_band_part(Tensor input, Tensor num_lower, Tensor num_upper, string? name = null)
{
- public static Tensor batch_to_space_nd<T>(T input, int[] block_shape, int[,] crops, string name = null)
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
{
- var _op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops });
-
- return _op.output;
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return batch_matrix_band_part_eager_fallback(input, num_lower, num_upper, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
}
-
- public static Tensor check_numerics(Tensor tensor, string message, string name = null)
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["num_lower"] = num_lower;
+ keywords["num_upper"] = num_upper;
+ var _op = tf.OpDefLib._apply_op_helper("BatchMatrixBandPart", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message });
-
- return _op.output;
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("BatchMatrixBandPart", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- ///
- /// Concatenates tensors along one dimension.
- ///
- ///
- ///
- ///
- ///
- public static Tensor concat_v2<T, Ta>(T[] values, Ta axis, string name = null)
- => tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis));
-
- public static Tensor concat_v2(Tensor[] values, Tensor axis, string name = null)
+ public static Tensor batch_matrix_band_part_eager_fallback(Tensor input, Tensor num_lower, Tensor num_upper, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, num_lower, num_upper };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("BatchMatrixBandPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("BatchMatrixBandPart", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor batch_matrix_diag(Tensor diagonal, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
{
- if (tf.Context.executing_eagerly())
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return batch_matrix_diag_eager_fallback(diagonal, name: name, ctx: _ctx);
+ }
+ catch (Exception)
{
- return concat_v2_eager_fallback(values, axis, name, tf.Context);
}
-
- var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name: name, args: new { values, axis });
- return _op.output;
}
-
- public static Tensor concat_v2(Tensor[] values, int axis, string name = null)
- => tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis));
-
- private static Tensor concat_v2_eager_fallback<T1, T2>(T1[] values, T2 axis, string name, Context ctx)
+ Dictionary<string, object> keywords = new();
+ keywords["diagonal"] = diagonal;
+ var _op = tf.OpDefLib._apply_op_helper("BatchMatrixDiag", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var _attr_N = len(values);
- var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: values.Select(x => (object)x).ToArray());
- var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new object[] { axis });
- var _inputs_flat = input.concat(axis1);
- var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx };
-
- return tf.Runner.Execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0];
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("BatchMatrixDiag", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null)
+ public static Tensor batch_matrix_diag_eager_fallback(Tensor diagonal, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { diagonal };
+ object[] _attrs = new object[] { "T", diagonal.dtype };
+ var _result = _execute.execute("BatchMatrixDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape });
-
- return _op.outputs;
+ _execute.record_gradient("BatchMatrixDiag", _inputs_flat, _attrs, _result);
}
-
- ///
- /// Returns a diagonal tensor with a given diagonal values.
- ///
- ///
- /// Rank k tensor where k is at most 1.
- ///
- ///
- /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Diag'.
- ///
- ///
- /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
- ///
- ///
- /// Given a diagonal, this operation returns a tensor with the diagonal and
- /// everything else padded with zeros. The diagonal is computed as follows:
- ///
- /// Assume diagonal has dimensions [D1,..., Dk], then the output is a tensor of
- /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
- ///
- /// output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik] and 0 everywhere else.
- ///
- /// For example:
- ///
- ///
- /// # 'diagonal' is [1, 2, 3, 4]
- /// tf.diag(diagonal) ==> [[1, 0, 0, 0]
- /// [0, 2, 0, 0]
- /// [0, 0, 3, 0]
- /// [0, 0, 0, 4]]
- ///
- ///
- public static Tensor diag(Tensor diagonal, string name = null)
- => tf.Context.ExecuteOp("Diag", name, new ExecuteOpArgs(diagonal));
-
- public static Tensor diag_part(Tensor diagonal, string name = null)
- => tf.Context.ExecuteOp("DiagPart", name, new ExecuteOpArgs(diagonal));
-
- public static Tensor expand_dims(Tensor input, int axis, string name = null)
- => tf.Context.ExecuteOp("ExpandDims", name, new ExecuteOpArgs(input, axis)
- .SetAttributes(new { dim = axis }));
-
- public static Tensor gather_v2<T1, T2>(T1 @params, T2 indices, int axis, int batch_dims = 0, string name = null)
+ return _result[0];
+ }
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor batch_matrix_diag_part(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return batch_matrix_diag_part_eager_fallback(input, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("BatchMatrixDiagPart", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var result = tf.Context.ExecuteOp("GatherV2", name, new ExecuteOpArgs(
- @params,
- indices,
- axis).SetAttributes(new { batch_dims }));
- return result [0];
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("BatchMatrixDiagPart", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- private static Tensor gather_v2_eager_fallback(object @params, object indices, int axis, string name, Context ctx)
+ public static Tensor batch_matrix_diag_part_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("BatchMatrixDiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
{
- var (_attr_T, param) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { @params });
- var (_attr_Tindice, indice) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { indices });
- var (_attr_Taxis, axiss) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new object[] { axis });
- var _inputs_flat = param.concat(indice).concat(axiss);
- var _attrs = new object[] { "batch_dims", 0, "Tparams", _attr_T, "Tindices", _attr_Tindice, "Taxis", _attr_Taxis };
-
- var results = tf.Runner.Execute(ctx, "GatherV2", 1, _inputs_flat, _attrs, name: name);
- if (tf.Runner.MustRecordGradient())
- tf.Runner.RecordGradient("GatherV2", _inputs_flat, _attrs, results);
- return results[0];
+ _execute.record_gradient("BatchMatrixDiagPart", _inputs_flat, _attrs, _result);
}
-
-
- public static Tensor pad(Tensor input, Tensor paddings, string name = null)
+ return _result[0];
+ }
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor batch_matrix_set_diag(Tensor input, Tensor diagonal, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
{
- if (tf.Context.executing_eagerly())
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return batch_matrix_set_diag_eager_fallback(input, diagonal, name: name, ctx: _ctx);
+ }
+ catch (Exception)
{
- /*var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
- "Pad", name,
- null,
- input, paddings);
- return results[0];*/
- return pad_eager_fallback(input, paddings, name: name, ctx: tf.Context);
}
-
- var _op = tf.OpDefLib._apply_op_helper("Pad", name: name, args: new { input, paddings });
-
- return _op.output;
}
-
- private static Tensor pad_eager_fallback(Tensor inputs, Tensor padding, string name = null, Context ctx = null)
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["diagonal"] = diagonal;
+ var _op = tf.OpDefLib._apply_op_helper("BatchMatrixSetDiag", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs });
- var (_attr_Tpaddings, paddings) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { padding });
- var _inputs_flat = input.concat(paddings);
- var _attrs = new object[] { "T", _attr_T, "Tpaddings", _attr_Tpaddings };
-
- var results = tf.Runner.Execute(ctx, "Pad", 1, _inputs_flat, _attrs, name: name);
- if (tf.Runner.MustRecordGradient())
- tf.Runner.RecordGradient("Pad", _inputs_flat, _attrs, results);
- return results[0];
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("BatchMatrixSetDiag", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- public static Tensor pack(Tensor[] values, int axis = 0, string name = null)
- => tf.Context.ExecuteOp("Pack", name, new ExecuteOpArgs()
+ public static Tensor batch_matrix_set_diag_eager_fallback(Tensor input, Tensor diagonal, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, diagonal };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("BatchMatrixSetDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("BatchMatrixSetDiag", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// BatchToSpace for 4-D tensors of type T.
+ ///
+ ///
+ ///
+ /// This is a legacy version of the more general BatchToSpaceND.
+ ///
+ /// Rearranges (permutes) data from batch into blocks of spatial data, followed by
+ /// cropping. This is the reverse transformation of SpaceToBatch. More specifically,
+ /// this op outputs a copy of the input tensor where values from the `batch`
+ /// dimension are moved in spatial blocks to the `height` and `width` dimensions,
+ /// followed by cropping along the `height` and `width` dimensions.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor batch_to_space(Tensor input, Tensor crops, int block_size = 0, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
{
- OpInputArgs = new object[] { values }
- }.SetAttributes(new { axis }));
-
- ///
- /// Return a tensor with the same shape and contents as the input tensor or value.
- ///
- ///
- ///
- public static Tensor identity(Tensor input, string name = null)
- => tf.Context.ExecuteOp("Identity", name, new ExecuteOpArgs(input));
-
- public static Tensor invert_permutation(Tensor x, string name = null)
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpace", name) { args = new object[] { input, crops }, attrs = new Dictionary() { ["block_size"] = block_size } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return batch_to_space_eager_fallback(input, crops, block_size: block_size, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["crops"] = crops;
+ keywords["block_size"] = block_size;
+ var _op = tf.OpDefLib._apply_op_helper("BatchToSpace", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("InvertPermutation", name, new { x });
-
- return _op.outputs[0];
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "block_size", _op._get_attr_int("block_size"), "Tidx", _op._get_attr_type("Tidx") };
+ _execute.record_gradient("BatchToSpace", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- public static Tensor log(Tensor x, string name = null)
- => tf.Context.ExecuteOp("Log", name, new ExecuteOpArgs(x));
-
-
- public static Tensor rank(Tensor input, string name = null)
- => tf.Context.ExecuteOp("Rank", name, new ExecuteOpArgs(input));
-
- ///
- /// Creates a tensor filled with a scalar value.
- ///
- /// A `Tensor`.
- /// A `Tensor`. 0-D (scalar). Value to fill the returned tensor.
- /// A name for the operation (optional).
- /// A `Tensor`. Has the same type as `value`.
- public static Tensor fill<T>(Tensor dims, T value, string name = null)
+ public static Tensor batch_to_space_eager_fallback(Tensor input, Tensor crops, int block_size, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, crops };
+ object[] _attrs = new object[] { "T", input.dtype, "block_size", block_size, "Tidx", crops.dtype };
+ var _result = _execute.execute("BatchToSpace", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("BatchToSpace", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// BatchToSpace for N-D tensors of type T.
+ ///
+ ///
+ ///
+ /// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
+ /// `block_shape + [batch]`, interleaves these blocks back into the grid defined by
+ /// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
+ /// the input. The spatial dimensions of this intermediate result are then
+ /// optionally cropped according to `crops` to produce the output. This is the
+ /// reverse of SpaceToBatch. See below for a precise description.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor batch_to_space_nd(Tensor input, Tensor block_shape, Tensor crops, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
{
- var ctx = tf.Context;
- if (ctx.executing_eagerly())
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpaceND", name) { args = new object[] { input, block_shape, crops }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
{
- try
- {
- var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Fill", name, dims, value));
- return _result[0];
- }
- catch (Exception)
- {
-
- }
- try
- {
- return fill_eager_fallback(dims, value as Tensor, name, ctx);
- }
- catch (Exception)
- {
-
- }
}
- Dictionary<string, object> attrs = new Dictionary<string, object>();
- attrs["dims"] = dims;
- attrs["value"] = value;
- var result = tf.OpDefLib._apply_op_helper("Fill", name, attrs);
- if (execute.must_record_gradient())
+ try
+ {
+ return batch_to_space_nd_eager_fallback(input, block_shape, crops, name: name, ctx: _ctx);
+ }
+ catch (Exception)
{
- throw new NotImplementedException();
}
- return result.output;
}
-
- public static Tensor fill_eager_fallback(Tensor dims, Tensor value, string name, Context ctx)
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["block_shape"] = block_shape;
+ keywords["crops"] = crops;
+ var _op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- object[] attrs = new object[] { "T", dims.dtype.as_datatype_enum(), "index_type", dims.dtype.as_datatype_enum() };
- var _result = execute.executes("Fill", 1, new Tensor[] { dims, value }, attrs, ctx, name);
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tblock_shape", _op._get_attr_type("Tblock_shape"), "Tcrops", _op._get_attr_type("Tcrops") };
+ _execute.record_gradient("BatchToSpaceND", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
- if (execute.must_record_gradient())
+ public static Tensor batch_to_space_nd_eager_fallback(Tensor input, Tensor block_shape, Tensor crops, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, block_shape, crops };
+ object[] _attrs = new object[] { "T", input.dtype, "Tblock_shape", block_shape.dtype, "Tcrops", crops.dtype };
+ var _result = _execute.execute("BatchToSpaceND", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("BatchToSpaceND", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Bitcasts a tensor from one type to another without copying data.
+ ///
+ ///
+ ///
+ /// Given a tensor `input`, this operation returns a tensor that has the same buffer
+ /// data as `input` with datatype `type`.
+ ///
+ /// If the input datatype `T` is larger than the output datatype `type` then the
+ /// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
+ ///
+ /// If `T` is smaller than `type`, the operator requires that the rightmost
+ /// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
+ /// [..., sizeof(`type`)/sizeof(`T`)] to [...].
+ ///
+ /// tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype
+ /// (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast()
+ /// gives module error.
+ /// For example,
+ ///
+ /// Example 1:
+ ///
+ /// >>> a = [1., 2., 3.]
+ /// >>> equality_bitcast = tf.bitcast(a, tf.complex128)
+ /// Traceback (most recent call last):
+ /// ...
+ /// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
+ /// >>> equality_cast = tf.cast(a, tf.complex128)
+ /// >>> print(equality_cast)
+ /// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
+ ///
+ /// Example 2:
+ ///
+ /// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
+ ///
+ ///
+ /// Example 3:
+ ///
+ /// >>> x = [1., 2., 3.]
+ /// >>> y = [0., 2., 3.]
+ /// >>> equality= tf.equal(x,y)
+ /// >>> equality_cast = tf.cast(equality,tf.float32)
+ /// >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8)
+ /// >>> print(equality)
+ /// tf.Tensor([False True True], shape=(3,), dtype=bool)
+ /// >>> print(equality_cast)
+ /// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
+ /// >>> print(equality_bitcast)
+ /// tf.Tensor(
+ /// [[ 0 0 0 0]
+ /// [ 0 0 128 63]
+ /// [ 0 0 128 63]], shape=(3, 4), dtype=uint8)
+ ///
+ /// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
+ /// endian orderings will give different results.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor bitcast(Tensor input, TF_DataType type, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bitcast", name) { args = new object[] { input }, attrs = new Dictionary() { ["type"] = type } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return bitcast_eager_fallback(input, type: type, name: name, ctx: _ctx);
+ }
+ catch (Exception)
{
- throw new NotImplementedException();
}
- return _result[0];
}
- //=> tf.Context.ExecuteOp("Fill", name, new ExecuteOpArgs(dims, value));
-
- ///
- /// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
- ///
- /// A `Tensor`. Must be one of the following types: `int32`, `int64`.
- /// A `Tensor`. Must have the same type as `s0`.
- /// A name for the operation (optional).
- /// A tuple of `Tensor` objects (r0, r1).
- public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "")
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["type"] = type;
+ var _op = tf.OpDefLib._apply_op_helper("Bitcast", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var results = tf.Context.ExecuteOp("BroadcastGradientArgs", name, new ExecuteOpArgs(s0, s1));
- return (results[0], results[1]);
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "type", _op._get_attr_type("type") };
+ _execute.record_gradient("Bitcast", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- public static Tensor reverse<T>(Tensor tensor, T axis, string name = null)
+ public static Tensor bitcast_eager_fallback(Tensor input, TF_DataType type, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype, "type", type };
+ var _result = _execute.execute("Bitcast", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("ReverseV2", name, new { tensor, axis });
- return _op.output;
+ _execute.record_gradient("Bitcast", _inputs_flat, _attrs, _result);
}
-
- public static Tensor reshape<T>(Tensor tensor, T shape, string name = null)
- => tf.Context.ExecuteOp("Reshape", name, new ExecuteOpArgs(tensor, shape));
-
- public static Tensor reshape(Tensor tensor, object[] shape, string name = null)
- => tf.Context.ExecuteOp("Reshape", name, new ExecuteOpArgs(tensor, shape));
-
- private static Tensor reshape_eager_fallback(Tensor tensor, object[] shape, string name, Context ctx)
+ return _result[0];
+ }
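A minimal usage sketch of the generated bitcast wrapper above, mirroring Example 2 from its summary; it assumes the usual `using static Tensorflow.Binding;` setup and that `tf.constant` accepts a uint scalar with an explicit dtype (illustrative only, not part of the generated file):

    // Sketch: reinterpret a uint32 scalar as four uint8 bytes (255, 255, 255, 255).
    var x = tf.constant(0xffffffff, dtype: TF_DataType.TF_UINT32);
    var y = gen_array_ops.bitcast(x, TF_DataType.TF_UINT8);   // shape (4,)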
+ ///
+ /// Return the shape of s0 op s1 with broadcast.
+ ///
+ ///
+ ///
+ /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
+ /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor broadcast_args(Tensor s0, Tensor s1, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
{
- var (_attr_T, _input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { tensor });
- var (_attr_Tshape, _input_shape) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { shape }, default_dtype: TF_DataType.TF_INT32);
- var _inputs_flat = new[] { _input[0], _input_shape[0] };
- var _attrs = new object[] { "T", _attr_T, "Tshape", _attr_Tshape };
-
- var results = tf.Runner.Execute(ctx, "Reshape", 1, _inputs_flat, _attrs, name: name);
- if (tf.Runner.MustRecordGradient())
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return broadcast_args_eager_fallback(s0, s1, name: name, ctx: _ctx);
+ }
+ catch (Exception)
{
- tf.Runner.RecordGradient("Reshape", _inputs_flat, _attrs, results);
}
- return results[0];
}
-
- ///
- /// Finds unique elements in a 1-D tensor.
- ///
- ///
- ///
- ///
- ///
- public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null)
+ Dictionary<string, object> keywords = new();
+ keywords["s0"] = s0;
+ keywords["s1"] = s1;
+ var _op = tf.OpDefLib._apply_op_helper("BroadcastArgs", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("Unique", name, new { x, out_idx });
- // TODO
- //var _result = _UniqueOutput._make(_op.outputs);
- return (_op.outputs[0], _op.outputs[1]);
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("BroadcastArgs", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- public static Tensor[] unpack(Tensor value, int num, int axis = 0, string name = null)
- => tf.Context.ExecuteOp("Unpack", name, new ExecuteOpArgs(value, num)
- .SetAttributes(new { axis, num }));
-
- public static Tensor where(Tensor condition, string name = null)
+ public static Tensor broadcast_args_eager_fallback(Tensor s0, Tensor s1, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { s0, s1 };
+ object[] _attrs = new object[] { "T", s0.dtype };
+ var _result = _execute.execute("BroadcastArgs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("Where", name, new { input = condition });
- return _op.output;
+ _execute.record_gradient("BroadcastArgs", _inputs_flat, _attrs, _result);
}
-
- public static Tensor one_hot(Tensor indices, Tensor depth,
- Tensor on_value = null,
- Tensor off_value = null,
- TF_DataType dtype = TF_DataType.DtInvalid,
- int axis = -1,
- string name = null)
- => tf.Context.ExecuteOp("OneHot", name, new ExecuteOpArgs(indices, depth, on_value, off_value)
- .SetAttributes(new { axis }));
-
- ///
- /// A placeholder op that passes through `input` when its output is not fed.
- ///
- /// The default value to produce when output is not fed.
- ///
- ///
- ///
- public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null)
+ return _result[0];
+ }
+ ///
+ /// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
+ ///
+ ///
+ ///
+ /// This is typically used by gradient computations for a broadcasting operation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor[] broadcast_gradient_args(Tensor s0, Tensor s1, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastGradientArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary() { } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return broadcast_gradient_args_eager_fallback(s0, s1, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["s0"] = s0;
+ keywords["s1"] = s1;
+ var _op = tf.OpDefLib._apply_op_helper("BroadcastGradientArgs", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name });
- return _op.outputs[0];
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("BroadcastGradientArgs", _op.inputs, _attrs, _result);
}
+ return _result;
+ }
- public static Tensor select<Tx, Ty>(Tensor condition, Tx x, Ty y, string name = null)
- => tf.Context.ExecuteOp("Select", name, new ExecuteOpArgs(condition, x, y));
-
- public static Tensor select_v2<Tx, Ty>(Tensor condition, Tx x, Ty y, string name = null)
- => tf.Context.ExecuteOp("SelectV2", name, new ExecuteOpArgs(condition, x, y));
-
- public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor[] shape, string name = null)
+ public static Tensor[] broadcast_gradient_args_eager_fallback(Tensor s0, Tensor s1, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { s0, s1 };
+ object[] _attrs = new object[] { "T", s0.dtype };
+ var _result = _execute.execute("BroadcastGradientArgs", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("ScatterNd", name, new { indices, updates, shape });
- return _op.outputs[0];
+ _execute.record_gradient("BroadcastGradientArgs", _inputs_flat, _attrs, _result);
}
-
- public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null)
- => tf.Context.ExecuteOp("Shape", name, new ExecuteOpArgs(input)
- .SetAttributes(new { out_type }));
-
- ///
- /// Returns shape of tensors.
- ///
- ///
- ///
- ///
- ///
- public static Tensor[] shape_n(Tensor[] input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null)
- => tf.Context.ExecuteOp("ShapeN", name, new ExecuteOpArgs()
+ return _result;
+ }
+ ///
+ /// Broadcast an array for a compatible shape.
+ ///
+ ///
+ ///
+ /// Broadcasting is the process of making arrays to have compatible shapes
+ /// for arithmetic operations. Two shapes are compatible if for each
+ /// dimension pair they are either equal or one of them is one.
+ ///
+ /// For example:
+ ///
+ /// >>> x = tf.constant([[1, 2, 3]]) # Shape (1, 3,)
+ /// >>> y = tf.broadcast_to(x, [2, 3])
+ /// >>> print(y)
+ /// tf.Tensor(
+ /// [[1 2 3]
+ /// [1 2 3]], shape=(2, 3), dtype=int32)
+ ///
+ /// In the above example, the input Tensor with the shape of `[1, 3]`
+ /// is broadcasted to output Tensor with shape of `[2, 3]`.
+ ///
+ /// When broadcasting, if a tensor has fewer axes than necessary its shape is
+ /// padded on the left with ones. So this gives the same result as the previous
+ /// example:
+ ///
+ /// >>> x = tf.constant([1, 2, 3]) # Shape (3,)
+ /// >>> y = tf.broadcast_to(x, [2, 3])
+ ///
+ ///
+ /// When doing broadcasted operations such as multiplying a tensor
+ /// by a scalar, broadcasting (usually) confers some time or space
+ /// benefit, as the broadcasted tensor is never materialized.
+ ///
+ /// However, `broadcast_to` does not carry with it any such benefits.
+ /// The newly-created tensor takes the full memory of the broadcasted
+ /// shape. (In a graph context, `broadcast_to` might be fused to
+ /// subsequent operation and then be optimized away, however.)
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor broadcast_to(Tensor input, Tensor shape, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
{
- OpInputArgs = new object[] { input }
- }.SetAttributes(new { out_type }));
-
- public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null)
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastTo", name) { args = new object[] { input, shape }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return broadcast_to_eager_fallback(input, shape, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["shape"] = shape;
+ var _op = tf.OpDefLib._apply_op_helper("BroadcastTo", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("Size", name, new { input, out_type });
- return _op.outputs[0];
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") };
+ _execute.record_gradient("BroadcastTo", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- public static Tensor slice(Tensor input, Tensor[] begin, Tensor[] size, string name = null)
+ public static Tensor broadcast_to_eager_fallback(Tensor input, Tensor shape, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, shape };
+ object[] _attrs = new object[] { "T", input.dtype, "Tidx", shape.dtype };
+ var _result = _execute.execute("BroadcastTo", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("BroadcastTo", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
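A short, hedged sketch of calling the generated broadcast_to wrapper above, matching the (1, 3) to (2, 3) example in its summary; the use of `tf.constant` to build the inputs is an assumption and the snippet is illustrative rather than part of the patch:

    // Sketch: broadcast a (1, 3) tensor to (2, 3); the rows are repeated and the
    // broadcasted result takes the full memory of the (2, 3) shape.
    var x = tf.constant(new[,] { { 1, 2, 3 } });
    var shape = tf.constant(new[] { 2, 3 });
    var y = gen_array_ops.broadcast_to(x, shape);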
+ ///
+ /// Checks a tensor for NaN and Inf values.
+ ///
+ ///
+ ///
+ /// When run, reports an `InvalidArgument` error if `tensor` has any values
+ /// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input
+ /// tensor.
+ ///
+ /// Example usage:
+ ///
+ /// ``` python
+ /// a = tf.Variable(1.0)
+ /// tf.debugging.check_numerics(a, message='')
+ ///
+ /// b = tf.Variable(np.nan)
+ /// try:
+ /// tf.debugging.check_numerics(b, message='Checking b')
+ /// except Exception as e:
+ /// assert "Checking b : Tensor had NaN values" in e.message
+ ///
+ /// c = tf.Variable(np.inf)
+ /// try:
+ /// tf.debugging.check_numerics(c, message='Checking c')
+ /// except Exception as e:
+ /// assert "Checking c : Tensor had Inf values" in e.message
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Prefix of the error message.
+ ///
+ ///
+ ///
+ public static Tensor check_numerics(Tensor tensor, string message, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
{
- if (tf.executing_eagerly())
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumerics", name) { args = new object[] { tensor }, attrs = new Dictionary() { ["message"] = message } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return check_numerics_eager_fallback(tensor, message: message, name: name, ctx: _ctx);
+ }
+ catch (Exception)
{
- var result = slice_eager_fallback(input, begin, size, name, tf.Context);
- return result;
}
-
- var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size });
- return _op.outputs[0];
}
-
- private static Tensor slice_eager_fallback(Tensor inputs, Tensor[] begin, Tensor[] size, string name, Context ctx)
+ Dictionary<string, object> keywords = new();
+ keywords["tensor"] = tensor;
+ keywords["message"] = message;
+ var _op = tf.OpDefLib._apply_op_helper("CheckNumerics", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs });
- var (_attr_Tidx, _inputs_Index) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { begin, size });
- var _inputs_flat = input.concat(_inputs_Index);
- var _attrs = new object[] { "T", _attr_T, "Index", _attr_Tidx };
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") };
+ _execute.record_gradient("CheckNumerics", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
- var results = tf.Runner.Execute(ctx, "Slice", 1, _inputs_flat, _attrs, name: name);
- if (tf.Runner.MustRecordGradient())
+ public static Tensor check_numerics_eager_fallback(Tensor tensor, string message, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { tensor };
+ object[] _attrs = new object[] { "T", tensor.dtype, "message", message };
+ var _result = _execute.execute("CheckNumerics", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("CheckNumerics", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
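A minimal sketch of the generated check_numerics wrapper above, the C# counterpart of the Python snippet in its summary; the constant value and message are illustrative assumptions:

    // Sketch: the tensor passes through unchanged when all values are finite; a NaN or
    // Inf value makes the runtime report an InvalidArgument error prefixed with the message.
    var a = tf.constant(new[] { 1.0f, 2.0f, 3.0f });
    var checked_a = gen_array_ops.check_numerics(a, message: "Checking a");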
+ ///
+ /// Checks a tensor for NaN, -Inf and +Inf values.
+ ///
+ ///
+ ///
+ /// When run, reports an `InvalidArgument` error if `tensor` has any values
+ /// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input
+ /// tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf
+ /// in the errors it throws.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Prefix of the error message.
+ ///
+ ///
+ ///
+ public static Tensor check_numerics_v2(Tensor tensor, string message, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumericsV2", name) { args = new object[] { tensor }, attrs = new Dictionary() { ["message"] = message } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return check_numerics_v2_eager_fallback(tensor, message: message, name: name, ctx: _ctx);
+ }
+ catch (Exception)
{
- tf.Runner.RecordGradient("Slice", _inputs_flat, _attrs, results);
}
- return results[0];
}
+ Dictionary<string, object> keywords = new();
+ keywords["tensor"] = tensor;
+ keywords["message"] = message;
+ var _op = tf.OpDefLib._apply_op_helper("CheckNumericsV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") };
+ _execute.record_gradient("CheckNumericsV2", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
- public static Tensor slice<Tb, Ts>(Tensor input, Tb begin, Ts size, string name = null)
+ public static Tensor check_numerics_v2_eager_fallback(Tensor tensor, string message, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { tensor };
+ object[] _attrs = new object[] { "T", tensor.dtype, "message", message };
+ var _result = _execute.execute("CheckNumericsV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
{
- if (tf.executing_eagerly())
+ _execute.record_gradient("CheckNumericsV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Concatenates tensors along one dimension.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor concat(Tensor concat_dim, Tensors values, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Concat", name) { args = new object[] { concat_dim, values }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return concat_eager_fallback(concat_dim, values, name: name, ctx: _ctx);
+ }
+ catch (Exception)
{
- var outputs = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Slice", name, input, begin, size));
- return outputs[0];
}
-
- var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size });
- return _op.outputs[0];
}
+ Dictionary<string, object> keywords = new();
+ keywords["concat_dim"] = concat_dim;
+ keywords["values"] = values;
+ var _op = tf.OpDefLib._apply_op_helper("Concat", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T") };
+ _execute.record_gradient("Concat", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
- public static Tensor[] split_v(Tensor value, Tensor size_splits,
- int axis, int num_split, string name = null)
- => tf.Context.ExecuteOp("SplitV", name, new ExecuteOpArgs(value, size_splits, axis)
- .SetAttributes(new { num_split }));
-
- public static Tensor tile(Tensor input, Tensor multiples, string name = null)
- => tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, multiples));
-
- public static Tensor tile(Tensor input, object[] multiples, string name = null)
- => tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, multiples));
-
- public static Tensor transpose<T1>(Tensor x, T1 perm, string name = null)
- => tf.Context.ExecuteOp("Transpose", name, new ExecuteOpArgs(x, perm));
-
- public static Tensor ones_like(Tensor x, string name = null)
- => tf.Context.ExecuteOp("OnesLike", name, new ExecuteOpArgs(x));
-
- public static Tensor zeros_like(Tensor x, string name = null)
- => tf.Context.ExecuteOp("ZerosLike", name, new ExecuteOpArgs(x));
-
- public static Tensor stop_gradient(Tensor x, string name = null)
+ public static Tensor concat_eager_fallback(Tensor concat_dim, Tensors values, string name, Context ctx)
+ {
+ List<Tensor> _inputs_flat_list = new();
+ _inputs_flat_list.Add(concat_dim);
+ _inputs_flat_list.AddRange(values);
+ var _inputs_flat = _inputs_flat_list.ToArray();
+ object[] _attrs = new object[] { "N", values.Length, "T", values.dtype };
+ var _result = _execute.execute("Concat", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Concat", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Computes offsets of concat inputs within its output.
+ ///
+ ///
+ ///
+ /// For example:
+ ///
+ /// >>> x = [2, 2, 7]
+ /// >>> y = [2, 3, 7]
+ /// >>> z = [2, 9, 7]
+ /// >>> offsets = concat_offset(1, [x, y, z])
+ /// >>> [list(off.numpy()) for off in offsets]
+ /// [[0, 0, 0], [0, 2, 0], [0, 5, 0]]
+ ///
+ /// This is typically used by gradient computations for a concat operation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor[] concat_offset(Tensor concat_dim, Tensors shape, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatOffset", name) { args = new object[] { concat_dim, shape }, attrs = new Dictionary() { } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return concat_offset_eager_fallback(concat_dim, shape, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["concat_dim"] = concat_dim;
+ keywords["shape"] = shape;
+ var _op = tf.OpDefLib._apply_op_helper("ConcatOffset", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
{
- var _op = tf.OpDefLib._apply_op_helper("StopGradient", name, args: new { input = x, name });
+ object[] _attrs = new object[] { "N", _op._get_attr_int("N") };
+ _execute.record_gradient("ConcatOffset", _op.inputs, _attrs, _result);
+ }
+ return _result;
+ }
- return _op.output;
+ public static Tensor[] concat_offset_eager_fallback(Tensor concat_dim, Tensors shape, string name, Context ctx)
+ {
+ List<Tensor> _inputs_flat_list = new();
+ _inputs_flat_list.Add(concat_dim);
+ _inputs_flat_list.AddRange(shape);
+ var _inputs_flat = _inputs_flat_list.ToArray();
+ object[] _attrs = new object[] { "N", shape.Length };
+ var _result = _execute.execute("ConcatOffset", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ConcatOffset", _inputs_flat, _attrs, _result);
+ }
+ return _result;
+ }
+ ///
+ /// Concatenates tensors along one dimension.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor concat_v2(Tensors values, Tensor axis, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatV2", name) { args = new object[] { values, axis }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return concat_v2_eager_fallback(values, axis, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["values"] = values;
+ keywords["axis"] = axis;
+ var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") };
+ _execute.record_gradient("ConcatV2", _op.inputs, _attrs, _result);
}
+ return _result[0];
+ }
- public static Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides,
- long begin_mask = 0,
- long end_mask = 0,
- long ellipsis_mask = 0,
- long new_axis_mask = 0,
- long shrink_axis_mask = 0,
- string name = null)
- => tf.Context.ExecuteOp("StridedSlice", name, new ExecuteOpArgs(input, begin, end, strides)
- .SetAttributes(new
- {
- begin_mask,
- end_mask,
- ellipsis_mask,
- new_axis_mask,
- shrink_axis_mask
- }));
-
- public static Tensor resource_strided_slice_assign(Tensor input, Tensor begin, Tensor end, Tensor strides, Tensor value,
- int begin_mask = 0,
- int end_mask = 0,
- int ellipsis_mask = 0,
- int new_axis_mask = 0,
- int shrink_axis_mask = 0,
- string name = null)
- => tf.Context.ExecuteOp("ResourceStridedSliceAssign", name, new ExecuteOpArgs(input, begin, end, strides, value)
- .SetAttributes(new
- {
- begin_mask,
- end_mask,
- ellipsis_mask,
- new_axis_mask,
- shrink_axis_mask
- }));
-
- public static Tensor strided_slice<T>(Tensor input, T[] begin, T[] end, T[] strides,
- int begin_mask = 0,
- int end_mask = 0,
- int ellipsis_mask = 0,
- int new_axis_mask = 0,
- int shrink_axis_mask = 0,
- string name = null)
- {
- var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, new
- {
- input,
- begin,
- end,
- strides,
- begin_mask,
- end_mask,
- ellipsis_mask,
- new_axis_mask,
- shrink_axis_mask
- });
-
- return _op.outputs[0];
- }
-
- ///
- /// Removes dimensions of size 1 from the shape of a tensor.
- /// Given a tensor `input`, this operation returns a tensor of the same type with
- /// all dimensions of size 1 removed.If you don't want to remove all size 1
- /// dimensions, you can remove specific size 1 dimensions by specifying
- /// `axis`.
- ///
- /// A `Tensor`. The `input` to squeeze.
- /// An optional list of `ints`. Defaults to `[]`. If specified, only squeezes the dimensions listed.
- /// A name for the operation (optional).
- /// A `Tensor`. Has the same type as `input`.
- public static Tensor squeeze(Tensor input, int[] axis = null, string name = null)
- => tf.Context.ExecuteOp("Squeeze", name, new ExecuteOpArgs(input)
- .SetAttributes(new { squeeze_dims = axis }));
-
- ///
- /// Return the shape of s0 op s1 with broadcast.
- /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
- /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
- ///
- /// A `Tensor`. Must be one of the following types: `int32`, `int64`.
- /// A `Tensor`. Must have the same type as `s0`.
- /// A name for the operation (optional).
- /// `Tensor`. Has the same type as `s0`.
- public static Tensor broadcast_args(Tensor s0, Tensor s1, string name = null)
- => tf.Context.ExecuteOp("BroadcastArgs", name, new ExecuteOpArgs(s0, s1));
-
- ///
- /// Broadcast an array for a compatible shape.
- ///
- ///
- ///
- ///
- ///
- public static Tensor broadcast_to<T>(Tensor input, T shape, string name = null)
- => tf.Context.ExecuteOp("BroadcastTo", name, new ExecuteOpArgs(input, shape));
+ public static Tensor concat_v2_eager_fallback(Tensors values, Tensor axis, string name, Context ctx)
+ {
+ List<Tensor> _inputs_flat_list = new();
+ _inputs_flat_list.AddRange(values);
+ _inputs_flat_list.Add(axis);
+ var _inputs_flat = _inputs_flat_list.ToArray();
+ object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "Tidx", axis.dtype };
+ var _result = _execute.execute("ConcatV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ConcatV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Shuffle dimensions of x according to a permutation and conjugate the result.
+ ///
+ ///
+ ///
+ /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
+ /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
+ /// `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor conjugate_transpose(Tensor x, Tensor perm, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConjugateTranspose", name) { args = new object[] { x, perm }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return conjugate_transpose_eager_fallback(x, perm, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ keywords["perm"] = perm;
+ var _op = tf.OpDefLib._apply_op_helper("ConjugateTranspose", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tperm", _op._get_attr_type("Tperm") };
+ _execute.record_gradient("ConjugateTranspose", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor conjugate_transpose_eager_fallback(Tensor x, Tensor perm, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x, perm };
+ object[] _attrs = new object[] { "T", x.dtype, "Tperm", perm.dtype };
+ var _result = _execute.execute("ConjugateTranspose", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ConjugateTranspose", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns a constant tensor.
+ ///
+ ///
+ ///
+ /// Attr `value` is the tensor to return.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor _const(TensorProto value, TF_DataType dtype, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Const", name) { args = new object[] { }, attrs = new Dictionary() { ["value"] = value, ["dtype"] = dtype } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return const_eager_fallback(value: value, dtype: dtype, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["value"] = value;
+ keywords["dtype"] = dtype;
+ var _op = tf.OpDefLib._apply_op_helper("Const", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "value", _op.get_attr("value"), "dtype", _op._get_attr_type("dtype") };
+ _execute.record_gradient("Const", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor const_eager_fallback(TensorProto value, TF_DataType dtype, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { };
+ object[] _attrs = new object[] { "value", value, "dtype", dtype };
+ var _result = _execute.execute("Const", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Const", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Identity op for gradient debugging.
+ ///
+ ///
+ ///
+ /// This op is hidden from public in Python. It is used by TensorFlow Debugger to
+ /// register gradient tensors for gradient debugging.
+ /// This op operates on non-reference-type tensors.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor debug_gradient_identity(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DebugGradientIdentity", name) { args = new object[] { input }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return debug_gradient_identity_eager_fallback(input, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("DebugGradientIdentity", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("DebugGradientIdentity", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor debug_gradient_identity_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("DebugGradientIdentity", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("DebugGradientIdentity", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Identity op for gradient debugging.
+ ///
+ ///
+ ///
+ /// This op is hidden from public in Python. It is used by TensorFlow Debugger to
+ /// register gradient tensors for gradient debugging.
+ /// This op operates on reference-type tensors.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor debug_gradient_ref_identity(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ throw new RuntimeError("debug_gradient_ref_identity op does not support eager execution. Arg input is a ref.");
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("DebugGradientRefIdentity", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("DebugGradientRefIdentity", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor debug_gradient_ref_identity_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ throw new RuntimeError($"debug_gradient_ref_identity op does not support eager execution. Arg 'input' is a ref.");
+ }
+ ///
+ /// Makes a copy of `x`.
+ ///
+ ///
+ ///
+ public static Tensor deep_copy(Tensor x, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DeepCopy", name) { args = new object[] { x }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return deep_copy_eager_fallback(x, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ var _op = tf.OpDefLib._apply_op_helper("DeepCopy", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("DeepCopy", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor deep_copy_eager_fallback(Tensor x, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x };
+ object[] _attrs = new object[] { "T", x.dtype };
+ var _result = _execute.execute("DeepCopy", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("DeepCopy", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// DepthToSpace for tensors of type T.
+ ///
+ ///
+ ///
+ /// Rearranges data from depth into blocks of spatial data.
+ /// This is the reverse transformation of SpaceToDepth. More specifically,
+ /// this op outputs a copy of the input tensor where values from the `depth`
+ /// dimension are moved in spatial blocks to the `height` and `width` dimensions.
+ /// The attr `block_size` indicates the input block size and how the data is moved.
+ ///
+ /// * Chunks of data of size `block_size * block_size` from depth are rearranged
+ /// into non-overlapping blocks of size `block_size x block_size`
+ /// * The width of the output tensor is `input_depth * block_size`, whereas the
+ /// height is `input_height * block_size`.
+ /// * The Y, X coordinates within each block of the output image are determined
+ /// by the high order component of the input channel index.
+ /// * The depth of the input tensor must be divisible by
+ /// `block_size * block_size`.
+ ///
+ /// The `data_format` attr specifies the layout of the input and output tensors
+ /// with the following options:
+ /// "NHWC": `[ batch, height, width, channels ]`
+ /// "NCHW": `[ batch, channels, height, width ]`
+ /// "NCHW_VECT_C":
+ /// `qint8 [ batch, channels / 4, height, width, 4 ]`
+ ///
+ /// It is useful to consider the operation as transforming a 6-D Tensor.
+ /// e.g. for data_format = NHWC,
+ /// Each element in the input tensor can be specified via 6 coordinates,
+ /// ordered by decreasing memory layout significance as:
+ /// n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates
+ /// within the input image, bX, bY means coordinates
+ /// within the output block, oC means output channels).
+ /// The output would be the input transposed to the following layout:
+ /// n,iY,bY,iX,bX,oC
+ ///
+ /// This operation is useful for resizing the activations between convolutions
+ /// (but keeping all data), e.g. instead of pooling. It is also useful for training
+ /// purely convolutional models.
+ ///
+ /// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
+ /// block_size = 2:
+ ///
+ /// ```
+ /// x = [[[[1, 2, 3, 4]]]]
+ ///
+ /// ```
+ ///
+ /// This operation will output a tensor of shape `[1, 2, 2, 1]`:
+ ///
+ /// ```
+ /// [[[[1], [2]],
+ /// [[3], [4]]]]
+ /// ```
+ ///
+ /// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
+ /// the corresponding output will have 2x2 elements and will have a depth of
+ /// 1 channel (1 = `4 / (block_size * block_size)`).
+ /// The output element shape is `[2, 2, 1]`.
+ ///
+ /// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
+ ///
+ /// ```
+ /// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
+ /// ```
+ ///
+ /// This operation, for block size of 2, will return the following tensor of shape
+ /// `[1, 2, 2, 3]`
+ ///
+ /// ```
+ /// [[[[1, 2, 3], [4, 5, 6]],
+ /// [[7, 8, 9], [10, 11, 12]]]]
+ ///
+ /// ```
+ ///
+ /// Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
+ ///
+ /// ```
+ /// x = [[[[1, 2, 3, 4],
+ /// [5, 6, 7, 8]],
+ /// [[9, 10, 11, 12],
+ /// [13, 14, 15, 16]]]]
+ /// ```
+ ///
+ /// the operator will return the following tensor of shape `[1 4 4 1]`:
+ ///
+ /// ```
+ /// x = [[[ [1], [2], [5], [6]],
+ /// [ [3], [4], [7], [8]],
+ /// [ [9], [10], [13], [14]],
+ /// [ [11], [12], [15], [16]]]]
+ ///
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The size of the spatial block, same as in Space2Depth.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor depth_to_space(Tensor input, int block_size = 0, string data_format = "NHWC", string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthToSpace", name) { args = new object[] { input }, attrs = new Dictionary() { ["block_size"] = block_size, ["data_format"] = data_format } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return depth_to_space_eager_fallback(input, block_size: block_size, data_format: data_format, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (data_format is null)
+ {
+ data_format = "NHWC";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["block_size"] = block_size;
+ keywords["data_format"] = data_format;
+ var _op = tf.OpDefLib._apply_op_helper("DepthToSpace", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "block_size", _op._get_attr_int("block_size"), "data_format", _op.get_attr("data_format") };
+ _execute.record_gradient("DepthToSpace", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor depth_to_space_eager_fallback(Tensor input, int block_size, string data_format, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype, "block_size", block_size, "data_format", data_format };
+ var _result = _execute.execute("DepthToSpace", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("DepthToSpace", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
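+
+ // Usage sketch (comment only, not generated code): using the [1, 1, 1, 4] example from
+ // the summary above with block_size = 2; `x` is a hypothetical eager tensor.
+ //
+ //   var x = tf.constant(new[,,,] { { { { 1, 2, 3, 4 } } } });   // shape [1, 1, 1, 4]
+ //   var y = depth_to_space(x, block_size: 2);                   // shape [1, 2, 2, 1]
+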
+ ///
+ /// Dequantize the 'input' tensor into a float or bfloat16 Tensor.
+ ///
+ ///
+ ///
+ /// [min_range, max_range] are scalar floats that specify the range for
+ /// the output. The 'mode' attribute controls exactly which calculations are
+ /// used to convert the float values to their quantized equivalents.
+ ///
+ /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+ ///
+ /// ```
+ /// if T == qint8: in[i] += (range(T) + 1)/ 2.0
+ /// out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
+ /// ```
+ /// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
+ ///
+ /// *MIN_COMBINED Mode Example*
+ ///
+ /// If the input comes from a QuantizedRelu6, the output type is
+ /// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
+ /// 0-6. The min_range and max_range values are therefore 0.0 and 6.0.
+ /// Dequantize on quint8 will take each value, cast to float, and multiply
+ /// by 6 / 255.
+ /// Note that if the quantized type is qint8, the operation will additionally add
+ /// 128 to each value prior to casting.
+ ///
+ /// If the mode is 'MIN_FIRST', then this approach is used:
+ ///
+ /// ```c++
+ /// num_discrete_values = 1 << (# of bits in T)
+ /// range_adjust = num_discrete_values / (num_discrete_values - 1)
+ /// range = (range_max - range_min) * range_adjust
+ /// range_scale = range / num_discrete_values
+ /// const double offset_input = static_cast<double>(input) - lowest_quantized;
+ /// result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
+ /// ```
+ ///
+ /// If the mode is `SCALED`, dequantization is performed by multiplying each
+ /// input value by a scaling_factor. (Thus an input of 0 always maps to 0.0).
+ ///
+ /// The scaling_factor is determined from `min_range`, `max_range`, and
+ /// `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`
+ /// and `QuantizeV2`, using the following algorithm:
+ ///
+ /// ```c++
+ ///
+ /// const int min_expected_T = std::numeric_limits<T>::min() +
+ /// (narrow_range ? 1 : 0);
+ /// const int max_expected_T = std::numeric_limits<T>::max();
+ /// const float max_expected_T = std::numeric_limits<float>::max();
+ ///
+ /// const float scale_factor =
+ /// (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
+ /// : std::max(min_range / min_expected_T,
+ /// max_range / max_expected_T);
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Type of the output tensor. Currently Dequantize supports float and bfloat16.
+ /// If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode.
+ ///
+ ///
+ ///
+ public static Tensor dequantize(Tensor input, Tensor min_range, Tensor max_range, string mode = "MIN_COMBINED", bool narrow_range = false, int axis = -1, TF_DataType dtype = TF_DataType.TF_FLOAT, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dequantize", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary() { ["mode"] = mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["dtype"] = dtype } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return dequantize_eager_fallback(input, min_range, max_range, mode: mode, narrow_range: narrow_range, axis: axis, dtype: dtype, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (mode is null)
+ {
+ mode = "MIN_COMBINED";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["min_range"] = min_range;
+ keywords["max_range"] = max_range;
+ keywords["mode"] = mode;
+ keywords["narrow_range"] = narrow_range;
+ keywords["axis"] = axis;
+ keywords["dtype"] = dtype;
+ var _op = tf.OpDefLib._apply_op_helper("Dequantize", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis"), "dtype", _op._get_attr_type("dtype") };
+ _execute.record_gradient("Dequantize", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor dequantize_eager_fallback(Tensor input, Tensor min_range, Tensor max_range, string mode, bool narrow_range, int axis, TF_DataType dtype, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, min_range, max_range };
+ object[] _attrs = new object[] { "T", input.dtype, "mode", mode, "narrow_range", narrow_range, "axis", axis, "dtype", dtype };
+ var _result = _execute.execute("Dequantize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Dequantize", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
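+
+ // Usage sketch (comment only): dequantizing a hypothetical quint8 tensor `q` whose values
+ // were quantized over the range [0, 6], back to float in MIN_COMBINED mode.
+ //
+ //   var deq = dequantize(q, tf.constant(0.0f), tf.constant(6.0f), mode: "MIN_COMBINED");
+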
+ ///
+ /// Returns a diagonal tensor with the given diagonal values.
+ ///
+ ///
+ ///
+ /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+ /// everything else padded with zeros. The diagonal is computed as follows:
+ ///
+ /// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
+ /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
+ ///
+ /// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 'diagonal' is [1, 2, 3, 4]
+ /// tf.diag(diagonal) ==> [[1, 0, 0, 0]
+ /// [0, 2, 0, 0]
+ /// [0, 0, 3, 0]
+ /// [0, 0, 0, 4]]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ public static Tensor diag(Tensor diagonal, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Diag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return diag_eager_fallback(diagonal, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["diagonal"] = diagonal;
+ var _op = tf.OpDefLib._apply_op_helper("Diag", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("Diag", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor diag_eager_fallback(Tensor diagonal, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { diagonal };
+ object[] _attrs = new object[] { "T", diagonal.dtype };
+ var _result = _execute.execute("Diag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Diag", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
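+
+ // Usage sketch (comment only): mirrors the summary example; the result is a 4x4 matrix
+ // with 1, 2, 3, 4 on the main diagonal and zeros elsewhere.
+ //
+ //   var d = diag(tf.constant(new[] { 1, 2, 3, 4 }));
+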
+ ///
+ /// Returns the diagonal part of the tensor.
+ ///
+ ///
+ ///
+ /// This operation returns a tensor with the `diagonal` part
+ /// of the `input`. The `diagonal` part is computed as follows:
+ ///
+ /// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
+ /// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
+ ///
+ /// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 'input' is [[1, 0, 0, 0]
+ /// [0, 2, 0, 0]
+ /// [0, 0, 3, 0]
+ /// [0, 0, 0, 4]]
+ ///
+ /// tf.diag_part(input) ==> [1, 2, 3, 4]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ public static Tensor diag_part(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return diag_part_eager_fallback(input, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("DiagPart", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("DiagPart", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor diag_part_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("DiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("DiagPart", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Computes the (possibly normalized) Levenshtein Edit Distance.
+ ///
+ ///
+ ///
+ /// The inputs are variable-length sequences provided by SparseTensors
+ /// (hypothesis_indices, hypothesis_values, hypothesis_shape)
+ /// and
+ /// (truth_indices, truth_values, truth_shape).
+ ///
+ /// The inputs are:
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// boolean (if true, edit distances are normalized by length of truth).
+ ///
+ /// The output is:
+ ///
+ ///
+ ///
+ public static Tensor edit_distance(Tensor hypothesis_indices, Tensor hypothesis_values, Tensor hypothesis_shape, Tensor truth_indices, Tensor truth_values, Tensor truth_shape, bool normalize = true, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EditDistance", name) { args = new object[] { hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape }, attrs = new Dictionary() { ["normalize"] = normalize } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return edit_distance_eager_fallback(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize: normalize, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["hypothesis_indices"] = hypothesis_indices;
+ keywords["hypothesis_values"] = hypothesis_values;
+ keywords["hypothesis_shape"] = hypothesis_shape;
+ keywords["truth_indices"] = truth_indices;
+ keywords["truth_values"] = truth_values;
+ keywords["truth_shape"] = truth_shape;
+ keywords["normalize"] = normalize;
+ var _op = tf.OpDefLib._apply_op_helper("EditDistance", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "normalize", _op._get_attr_bool("normalize"), "T", _op._get_attr_type("T") };
+ _execute.record_gradient("EditDistance", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor edit_distance_eager_fallback(Tensor hypothesis_indices, Tensor hypothesis_values, Tensor hypothesis_shape, Tensor truth_indices, Tensor truth_values, Tensor truth_shape, bool normalize, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape };
+ object[] _attrs = new object[] { "normalize", normalize, "T", hypothesis_values.dtype };
+ var _result = _execute.execute("EditDistance", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("EditDistance", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor empty(Tensor shape, TF_DataType dtype, bool init = false, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Empty", name) { args = new object[] { shape }, attrs = new Dictionary() { ["dtype"] = dtype, ["init"] = init } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return empty_eager_fallback(shape, dtype: dtype, init: init, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["shape"] = shape;
+ keywords["dtype"] = dtype;
+ keywords["init"] = init;
+ var _op = tf.OpDefLib._apply_op_helper("Empty", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "init", _op._get_attr_bool("init") };
+ _execute.record_gradient("Empty", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor empty_eager_fallback(Tensor shape, TF_DataType dtype, bool init, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { shape };
+ object[] _attrs = new object[] { "dtype", dtype, "init", init };
+ var _result = _execute.execute("Empty", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Empty", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Ensures that the tensor's shape matches the expected shape.
+ ///
+ ///
+ ///
+ /// Raises an error if the input tensor's shape does not match the specified shape.
+ /// Returns the input tensor otherwise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The expected (possibly partially specified) shape of the input tensor.
+ ///
+ ///
+ ///
+ public static Tensor ensure_shape(Tensor input, Shape shape, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EnsureShape", name) { args = new object[] { input }, attrs = new Dictionary() { ["shape"] = shape } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return ensure_shape_eager_fallback(input, shape: shape, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["shape"] = shape;
+ var _op = tf.OpDefLib._apply_op_helper("EnsureShape", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "shape", _op.get_attr("shape"), "T", _op._get_attr_type("T") };
+ _execute.record_gradient("EnsureShape", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor ensure_shape_eager_fallback(Tensor input, Shape shape, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "shape", shape, "T", input.dtype };
+ var _result = _execute.execute("EnsureShape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("EnsureShape", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
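+
+ // Usage sketch (comment only): asserts at runtime that a hypothetical tensor `t` has shape
+ // [?, 3] (unknown leading dimension) and returns it unchanged when the check passes.
+ //
+ //   var checked_t = ensure_shape(t, new Shape(-1, 3));
+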
+ ///
+ /// Inserts a dimension of 1 into a tensor's shape.
+ ///
+ ///
+ ///
+ /// Given a tensor `input`, this operation inserts a dimension of 1 at the
+ /// dimension index `dim` of `input`'s shape. The dimension index `dim` starts at
+ /// zero; if you specify a negative number for `dim` it is counted backward from
+ /// the end.
+ ///
+ /// This operation is useful if you want to add a batch dimension to a single
+ /// element. For example, if you have a single image of shape `[height, width,
+ /// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
+ /// which will make the shape `[1, height, width, channels]`.
+ ///
+ /// Other examples:
+ ///
+ /// ```
+ /// # 't' is a tensor of shape [2]
+ /// shape(expand_dims(t, 0)) ==> [1, 2]
+ /// shape(expand_dims(t, 1)) ==> [2, 1]
+ /// shape(expand_dims(t, -1)) ==> [2, 1]
+ ///
+ /// # 't2' is a tensor of shape [2, 3, 5]
+ /// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
+ /// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
+ /// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
+ /// ```
+ ///
+ /// This operation requires that:
+ ///
+ /// `-1-input.dims() <= dim <= input.dims()`
+ ///
+ /// This operation is related to `squeeze()`, which removes dimensions of
+ /// size 1.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor expand_dims(Tensor input, Tensor dim, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExpandDims", name) { args = new object[] { input, dim }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return expand_dims_eager_fallback(input, dim, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["dim"] = dim;
+ var _op = tf.OpDefLib._apply_op_helper("ExpandDims", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tdim", _op._get_attr_type("Tdim") };
+ _execute.record_gradient("ExpandDims", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor expand_dims_eager_fallback(Tensor input, Tensor dim, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, dim };
+ object[] _attrs = new object[] { "T", input.dtype, "Tdim", dim.dtype };
+ var _result = _execute.execute("ExpandDims", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ExpandDims", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
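+
+ // Usage sketch (comment only): adds a leading batch dimension to a hypothetical image tensor,
+ // turning shape [height, width, channels] into [1, height, width, channels].
+ //
+ //   var batched = expand_dims(image, tf.constant(0));
+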
+ ///
+ /// Extract `patches` from `images` and put them in the "depth" output dimension.
+ ///
+ ///
+ ///
+ ///
+ /// The size of the sliding window for each dimension of `images`.
+ ///
+ ///
+ ///
+ ///
+ /// How far the centers of two consecutive patches are in
+ /// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
+ ///
+ ///
+ ///
+ ///
+ /// Must be: `[1, rate_rows, rate_cols, 1]`. This is the
+ /// input stride, specifying how far two consecutive patch samples are in the
+ /// input. Equivalent to extracting patches with
+ /// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
+ /// subsampling them spatially by a factor of `rates`. This is equivalent to
+ /// `rate` in dilated (a.k.a. Atrous) convolutions.
+ ///
+ ///
+ ///
+ ///
+ /// The type of padding algorithm to use.
+ ///
+ ///
+ ///
+ public static Tensor extract_image_patches(Tensor images, int[] ksizes, int[] strides, int[] rates, string padding, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractImagePatches", name) { args = new object[] { images }, attrs = new Dictionary() { ["ksizes"] = ksizes, ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return extract_image_patches_eager_fallback(images, ksizes: ksizes, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["images"] = images;
+ keywords["ksizes"] = ksizes;
+ keywords["strides"] = strides;
+ keywords["rates"] = rates;
+ keywords["padding"] = padding;
+ var _op = tf.OpDefLib._apply_op_helper("ExtractImagePatches", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "ksizes", _op.get_attr("ksizes"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding") };
+ _execute.record_gradient("ExtractImagePatches", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor extract_image_patches_eager_fallback(Tensor images, int[] ksizes, int[] strides, int[] rates, string padding, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { images };
+ object[] _attrs = new object[] { "ksizes", ksizes, "strides", strides, "rates", rates, "T", images.dtype, "padding", padding };
+ var _result = _execute.execute("ExtractImagePatches", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ExtractImagePatches", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension of `extract_image_patches`.
+ ///
+ ///
+ ///
+ ///
+ /// The size of the sliding window for each dimension of `input`.
+ ///
+ ///
+ ///
+ ///
+ /// 1-D of length 5. How far the centers of two consecutive patches are in
+ /// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
+ ///
+ ///
+ ///
+ ///
+ /// The type of padding algorithm to use.
+ ///
+ /// The size-related attributes are specified as follows:
+ ///
+ /// ```python
+ /// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
+ /// strides = [1, stride_planes, strides_rows, strides_cols, 1]
+ /// ```
+ ///
+ ///
+ ///
+ public static Tensor extract_volume_patches(Tensor input, int[] ksizes, int[] strides, string padding, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractVolumePatches", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksizes"] = ksizes, ["strides"] = strides, ["padding"] = padding } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return extract_volume_patches_eager_fallback(input, ksizes: ksizes, strides: strides, padding: padding, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["ksizes"] = ksizes;
+ keywords["strides"] = strides;
+ keywords["padding"] = padding;
+ var _op = tf.OpDefLib._apply_op_helper("ExtractVolumePatches", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "ksizes", _op.get_attr("ksizes"), "strides", _op.get_attr("strides"), "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding") };
+ _execute.record_gradient("ExtractVolumePatches", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor extract_volume_patches_eager_fallback(Tensor input, int[] ksizes, int[] strides, string padding, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "ksizes", ksizes, "strides", strides, "T", input.dtype, "padding", padding };
+ var _result = _execute.execute("ExtractVolumePatches", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ExtractVolumePatches", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Fake-quantize the 'inputs' tensor of type float to an 'outputs' tensor of the same type.
+ ///
+ ///
+ ///
+ /// Attributes
+ ///
+ /// * `[min; max]` define the clamping range for the `inputs` data.
+ /// * `inputs` values are quantized into the quantization range (
+ /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+ /// when it is true) and then de-quantized and output as floats in `[min; max]`
+ /// interval.
+ /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
+ ///
+ /// Before quantization, `min` and `max` values are adjusted with the following
+ /// logic.
+ /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
+ /// the behavior can be unexpected:
+ ///
+ /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
+ /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
+ /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+ /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
+ ///
+ /// Quantization is called fake since the output is still in floating point.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor fake_quant_with_min_max_args(Tensor inputs, float min = -6f, float max = 6f, int num_bits = 8, bool narrow_range = false, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgs", name) { args = new object[] { inputs }, attrs = new Dictionary() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return fake_quant_with_min_max_args_eager_fallback(inputs, min: min, max: max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["inputs"] = inputs;
+ keywords["min"] = min;
+ keywords["max"] = max;
+ keywords["num_bits"] = num_bits;
+ keywords["narrow_range"] = narrow_range;
+ var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxArgs", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") };
+ _execute.record_gradient("FakeQuantWithMinMaxArgs", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor fake_quant_with_min_max_args_eager_fallback(Tensor inputs, float min, float max, int num_bits, bool narrow_range, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { inputs };
+ object[] _attrs = new object[] { "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range };
+ var _result = _execute.execute("FakeQuantWithMinMaxArgs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
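+
+ // Usage sketch (comment only): clamps a hypothetical float tensor `acts` to [-6, 6],
+ // quantizes it to 8 bits, and de-quantizes back to float in a single op.
+ //
+ //   var fq = fake_quant_with_min_max_args(acts, min: -6f, max: 6f, num_bits: 8);
+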
+ ///
+ /// Compute gradients for a FakeQuantWithMinMaxArgs operation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor fake_quant_with_min_max_args_gradient(Tensor gradients, Tensor inputs, float min = -6f, float max = 6f, int num_bits = 8, bool narrow_range = false, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgsGradient", name) { args = new object[] { gradients, inputs }, attrs = new Dictionary() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return fake_quant_with_min_max_args_gradient_eager_fallback(gradients, inputs, min: min, max: max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["gradients"] = gradients;
+ keywords["inputs"] = inputs;
+ keywords["min"] = min;
+ keywords["max"] = max;
+ keywords["num_bits"] = num_bits;
+ keywords["narrow_range"] = narrow_range;
+ var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxArgsGradient", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") };
+ _execute.record_gradient("FakeQuantWithMinMaxArgsGradient", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor fake_quant_with_min_max_args_gradient_eager_fallback(Tensor gradients, Tensor inputs, float min, float max, int num_bits, bool narrow_range, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { gradients, inputs };
+ object[] _attrs = new object[] { "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range };
+ var _result = _execute.execute("FakeQuantWithMinMaxArgsGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Fake-quantize the 'inputs' tensor of type float via global float scalars
+ ///
+ ///
+ ///
+ /// Fake-quantize the `inputs` tensor of type float via global float scalars
+ /// `min` and `max` to `outputs` tensor of same shape as `inputs`.
+ ///
+ /// Attributes
+ ///
+ /// * `[min; max]` define the clamping range for the `inputs` data.
+ /// * `inputs` values are quantized into the quantization range (
+ /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+ /// when it is true) and then de-quantized and output as floats in `[min; max]`
+ /// interval.
+ /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
+ ///
+ /// Before quantization, `min` and `max` values are adjusted with the following
+ /// logic.
+ /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
+ /// the behavior can be unexpected:
+ ///
+ /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
+ /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
+ /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+ /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
+ ///
+ /// This operation has a gradient and thus allows for training `min` and `max`
+ /// values.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor fake_quant_with_min_max_vars(Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVars", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return fake_quant_with_min_max_vars_eager_fallback(inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["inputs"] = inputs;
+ keywords["min"] = min;
+ keywords["max"] = max;
+ keywords["num_bits"] = num_bits;
+ keywords["narrow_range"] = narrow_range;
+ var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVars", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") };
+ _execute.record_gradient("FakeQuantWithMinMaxVars", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor fake_quant_with_min_max_vars_eager_fallback(Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { inputs, min, max };
+ object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range };
+ var _result = _execute.execute("FakeQuantWithMinMaxVars", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Compute gradients for a FakeQuantWithMinMaxVars operation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The bitwidth of the quantization; between 2 and 8, inclusive.
+ ///
+ ///
+ ///
+ ///
+ /// Whether to quantize into 2^num_bits - 1 distinct values.
+ ///
+ ///
+ ///
+ public static Tensor[] fake_quant_with_min_max_vars_gradient(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return fake_quant_with_min_max_vars_gradient_eager_fallback(gradients, inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["gradients"] = gradients;
+ keywords["inputs"] = inputs;
+ keywords["min"] = min;
+ keywords["max"] = max;
+ keywords["num_bits"] = num_bits;
+ keywords["narrow_range"] = narrow_range;
+ var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsGradient", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") };
+ _execute.record_gradient("FakeQuantWithMinMaxVarsGradient", _op.inputs, _attrs, _result);
+ }
+ return _result;
+ }
+
+ public static Tensor[] fake_quant_with_min_max_vars_gradient_eager_fallback(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { gradients, inputs, min, max };
+ object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range };
+ var _result = _execute.execute("FakeQuantWithMinMaxVarsGradient", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result);
+ }
+ return _result;
+ }
+ ///
+ /// Fake-quantize the 'inputs' tensor of type float via per-channel floats
+ ///
+ ///
+ ///
+ /// Fake-quantize the `inputs` tensor of type float per-channel and one of the
+ /// shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max`
+ /// of shape `[d]` to `outputs` tensor of same shape as `inputs`.
+ ///
+ /// Attributes
+ ///
+ /// * `[min; max]` define the clamping range for the `inputs` data.
+ /// * `inputs` values are quantized into the quantization range (
+ /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+ /// when it is true) and then de-quantized and output as floats in `[min; max]`
+ /// interval.
+ /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
+ ///
+ /// Before quantization, `min` and `max` values are adjusted with the following
+ /// logic.
+ /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
+ /// the behavior can be unexpected:
+ ///
+ /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
+ /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
+ /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+ /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
+ ///
+ /// This operation has a gradient and thus allows for training `min` and `max`
+ /// values.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor fake_quant_with_min_max_vars_per_channel(Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannel", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return fake_quant_with_min_max_vars_per_channel_eager_fallback(inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["inputs"] = inputs;
+ keywords["min"] = min;
+ keywords["max"] = max;
+ keywords["num_bits"] = num_bits;
+ keywords["narrow_range"] = narrow_range;
+ var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannel", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") };
+ _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannel", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor fake_quant_with_min_max_vars_per_channel_eager_fallback(Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { inputs, min, max };
+ object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range };
+ var _result = _execute.execute("FakeQuantWithMinMaxVarsPerChannel", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The bitwidth of the quantization; between 2 and 16, inclusive.
+ ///
+ ///
+ ///
+ ///
+ /// Whether to quantize into 2^num_bits - 1 distinct values.
+ ///
+ ///
+ ///
+ public static Tensor[] fake_quant_with_min_max_vars_per_channel_gradient(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannelGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(gradients, inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["gradients"] = gradients;
+ keywords["inputs"] = inputs;
+ keywords["min"] = min;
+ keywords["max"] = max;
+ keywords["num_bits"] = num_bits;
+ keywords["narrow_range"] = narrow_range;
+ var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannelGradient", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") };
+ _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannelGradient", _op.inputs, _attrs, _result);
+ }
+ return _result;
+ }
+
+ public static Tensor[] fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { gradients, inputs, min, max };
+ object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range };
+ var _result = _execute.execute("FakeQuantWithMinMaxVarsPerChannelGradient", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs, _result);
+ }
+ return _result;
+ }
+ ///
+ /// Creates a tensor filled with a scalar value.
+ ///
+ ///
+ ///
+ /// This operation creates a tensor of shape `dims` and fills it with `value`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # Output tensor has shape [2, 3].
+ /// fill([2, 3], 9) ==> [[9, 9, 9]
+ /// [9, 9, 9]]
+ /// ```
+ ///
+ /// `tf.fill` differs from `tf.constant` in a few ways:
+ ///
+ /// * `tf.fill` only supports scalar contents, whereas `tf.constant` supports
+ /// Tensor values.
+ /// * `tf.fill` creates an Op in the computation graph that constructs the actual
+ /// Tensor value at runtime. This is in contrast to `tf.constant` which embeds
+ /// the entire Tensor into the graph with a `Const` node.
+ /// * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
+ /// based on other runtime Tensors, unlike `tf.constant`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor fill(Tensor dims, Tensor value, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fill", name) { args = new object[] { dims, value }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return fill_eager_fallback(dims, value, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["dims"] = dims;
+ keywords["value"] = value;
+ var _op = tf.OpDefLib._apply_op_helper("Fill", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "index_type", _op._get_attr_type("index_type") };
+ _execute.record_gradient("Fill", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor fill_eager_fallback(Tensor dims, Tensor value, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { dims, value };
+ object[] _attrs = new object[] { "T", value.dtype, "index_type", dims.dtype };
+ var _result = _execute.execute("Fill", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Fill", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
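+
+ // Usage sketch (comment only): reproduces the summary example, building a [2, 3] tensor
+ // filled with the scalar 9; the shape is supplied as a tensor of dims.
+ //
+ //   var filled = fill(ops.convert_to_tensor(new[] { 2, 3 }), tf.constant(9));
+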
+ ///
+ /// Generates fingerprint values.
+ ///
+ ///
+ ///
+ /// Generates fingerprint values of `data`.
+ ///
+ /// Fingerprint op considers the first dimension of `data` as the batch dimension,
+ /// and `output[i]` contains the fingerprint value generated from contents in
+ /// `data[i, ...]` for all `i`.
+ ///
+ /// Fingerprint op writes fingerprint values as byte arrays. For example, the
+ /// default method `farmhash64` generates a 64-bit fingerprint value at a time.
+ /// This 8-byte value is written out as a `uint8` array of size 8, in little-endian
+ /// order.
+ ///
+ /// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4),
+ /// and that the fingerprint method is `farmhash64`. In this case, the output shape
+ /// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of
+ /// each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in
+ /// `data[0, :, :]` and similarly `output[1, :]` is generated from the other 12 integers
+ /// in `data[1, :, :]`.
+ ///
+ /// Note that this op fingerprints the raw underlying buffer, and it does not
+ /// fingerprint Tensor's metadata such as data type and/or shape. For example, the
+ /// fingerprint values are invariant under reshapes and bitcasts as long as the
+ /// batch dimension remain the same:
+ ///
+ /// ```
+ /// Fingerprint(data) == Fingerprint(Reshape(data, ...))
+ /// Fingerprint(data) == Fingerprint(Bitcast(data, ...))
+ /// ```
+ ///
+ /// For string data, one should expect `Fingerprint(data) !=
+ /// Fingerprint(ReduceJoin(data))` in general.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor fingerprint(Tensor data, Tensor method, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fingerprint", name) { args = new object[] { data, method }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return fingerprint_eager_fallback(data, method, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["data"] = data;
+ keywords["method"] = method;
+ var _op = tf.OpDefLib._apply_op_helper("Fingerprint", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("Fingerprint", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor fingerprint_eager_fallback(Tensor data, Tensor method, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { data, method };
+ object[] _attrs = new object[] { "T", data.dtype };
+ var _result = _execute.execute("Fingerprint", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Fingerprint", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
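+
+ // Usage sketch (comment only): fingerprints each batch element of a hypothetical tensor
+ // `data` with the default farmhash64 method, producing a [batch_size, 8] uint8 tensor.
+ //
+ //   var fp = fingerprint(data, tf.constant("farmhash64"));
+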
+ ///
+ /// Gather slices from `params` according to `indices`.
+ ///
+ ///
+ ///
+ /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+ /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
+ ///
+ /// ```python
+ /// # Scalar indices
+ /// output[:, ..., :] = params[indices, :, ... :]
+ ///
+ /// # Vector indices
+ /// output[i, :, ..., :] = params[indices[i], :, ... :]
+ ///
+ /// # Higher rank indices
+ /// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
+ /// ```
+ ///
+ /// If `indices` is a permutation and `len(indices) == params.shape[0]` then
+ /// this operation will permute `params` accordingly.
+ ///
+ /// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
+ /// `indices` are always validated to be within range. If assigned to GPU,
+ /// out-of-bound indices result in safe but unspecified behavior, which may include
+ /// raising an error.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor gather(Tensor params_, Tensor indices, bool validate_indices = true, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Gather", name) { args = new object[] { params_, indices }, attrs = new Dictionary() { ["validate_indices"] = validate_indices } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return gather_eager_fallback(params_, indices, validate_indices: validate_indices, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["params"] = params_;
+ keywords["indices"] = indices;
+ keywords["validate_indices"] = validate_indices;
+ var _op = tf.OpDefLib._apply_op_helper("Gather", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "validate_indices", _op._get_attr_bool("validate_indices"), "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices") };
+ _execute.record_gradient("Gather", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor gather_eager_fallback(Tensor params_, Tensor indices, bool validate_indices, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { params_, indices };
+ object[] _attrs = new object[] { "validate_indices", validate_indices, "Tparams", params_.dtype, "Tindices", indices.dtype };
+ var _result = _execute.execute("Gather", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Gather", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Gather slices from `params` into a Tensor with shape specified by `indices`.
+ ///
+ ///
+ ///
+ /// `indices` is a K-dimensional integer tensor, best thought of as a
+ /// (K-1)-dimensional tensor of indices into `params`, where each element defines a
+ /// slice of `params`:
+ ///
+ /// output[\(i_0, ..., i_{K-2}\)] = params[indices[\(i_0, ..., i_{K-2}\)]]
+ ///
+ /// Whereas in `tf.gather` `indices` defines slices into the `axis`
+ /// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
+ /// first `N` dimensions of `params`, where `N = indices.shape[-1]`.
+ ///
+ /// The last dimension of `indices` can be at most the rank of
+ /// `params`:
+ ///
+ /// indices.shape[-1] <= params.rank
+ ///
+ /// The last dimension of `indices` corresponds to elements
+ /// (if `indices.shape[-1] == params.rank`) or slices
+ /// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
+ /// of `params`. The output tensor has shape
+ ///
+ /// indices.shape[:-1] + params.shape[indices.shape[-1]:]
+ ///
+ /// Note that on CPU, if an out of bound index is found, an error is returned.
+ /// On GPU, if an out of bound index is found, a 0 is stored in the
+ /// corresponding output value.
+ ///
+ /// Some examples below.
+ ///
+ /// Simple indexing into a matrix:
+ ///
+ /// ```python
+ /// indices = [[0, 0], [1, 1]]
+ /// params = [['a', 'b'], ['c', 'd']]
+ /// output = ['a', 'd']
+ /// ```
+ ///
+ /// Slice indexing into a matrix:
+ ///
+ /// ```python
+ /// indices = [[1], [0]]
+ /// params = [['a', 'b'], ['c', 'd']]
+ /// output = [['c', 'd'], ['a', 'b']]
+ /// ```
+ ///
+ /// Indexing into a 3-tensor:
+ ///
+ /// ```python
+ /// indices = [[1]]
+ /// params = [[['a0', 'b0'], ['c0', 'd0']],
+ /// [['a1', 'b1'], ['c1', 'd1']]]
+ /// output = [[['a1', 'b1'], ['c1', 'd1']]]
+ ///
+ ///
+ /// indices = [[0, 1], [1, 0]]
+ /// params = [[['a0', 'b0'], ['c0', 'd0']],
+ /// [['a1', 'b1'], ['c1', 'd1']]]
+ /// output = [['c0', 'd0'], ['a1', 'b1']]
+ ///
+ ///
+ /// indices = [[0, 0, 1], [1, 0, 1]]
+ /// params = [[['a0', 'b0'], ['c0', 'd0']],
+ /// [['a1', 'b1'], ['c1', 'd1']]]
+ /// output = ['b0', 'b1']
+ /// ```
+ ///
+ /// Batched indexing into a matrix:
+ ///
+ /// ```python
+ /// indices = [[[0, 0]], [[0, 1]]]
+ /// params = [['a', 'b'], ['c', 'd']]
+ /// output = [['a'], ['b']]
+ /// ```
+ ///
+ /// Batched slice indexing into a matrix:
+ ///
+ /// ```python
+ /// indices = [[[1]], [[0]]]
+ /// params = [['a', 'b'], ['c', 'd']]
+ /// output = [[['c', 'd']], [['a', 'b']]]
+ /// ```
+ ///
+ /// Batched indexing into a 3-tensor:
+ ///
+ /// ```python
+ /// indices = [[[1]], [[0]]]
+ /// params = [[['a0', 'b0'], ['c0', 'd0']],
+ /// [['a1', 'b1'], ['c1', 'd1']]]
+ /// output = [[[['a1', 'b1'], ['c1', 'd1']]],
+ /// [[['a0', 'b0'], ['c0', 'd0']]]]
+ ///
+ /// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
+ /// params = [[['a0', 'b0'], ['c0', 'd0']],
+ /// [['a1', 'b1'], ['c1', 'd1']]]
+ /// output = [[['c0', 'd0'], ['a1', 'b1']],
+ /// [['a0', 'b0'], ['c1', 'd1']]]
+ ///
+ ///
+ /// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
+ /// params = [[['a0', 'b0'], ['c0', 'd0']],
+ /// [['a1', 'b1'], ['c1', 'd1']]]
+ /// output = [['b0', 'b1'], ['d0', 'c1']]
+ /// ```
+ ///
+ /// See also `tf.gather` and `tf.batch_gather`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor gather_nd(Tensor params_, Tensor indices, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherNd", name) { args = new object[] { params_, indices }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return gather_nd_eager_fallback(params_, indices, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["params"] = params_;
+ keywords["indices"] = indices;
+ var _op = tf.OpDefLib._apply_op_helper("GatherNd", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices") };
+ _execute.record_gradient("GatherNd", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor gather_nd_eager_fallback(Tensor params_, Tensor indices, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { params_, indices };
+ object[] _attrs = new object[] { "Tparams", params_.dtype, "Tindices", indices.dtype };
+ var _result = _execute.execute("GatherNd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("GatherNd", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
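+
+ // Usage sketch (comment only): the "simple indexing into a matrix" case from the summary,
+ // gathering the elements at positions [0, 0] and [1, 1].
+ //
+ //   var params_ = tf.constant(new[,] { { 1, 2 }, { 3, 4 } });
+ //   var indices = tf.constant(new[,] { { 0, 0 }, { 1, 1 } });
+ //   var picked = gather_nd(params_, indices);   // => [1, 4]
+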
+ ///
+ /// Gather slices from `params` axis `axis` according to `indices`.
+ ///
+ ///
+ ///
+ /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+ /// Produces an output tensor with shape `params.shape[:axis] +
+ /// indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
+ ///
+ /// ```python
+ /// # Scalar indices (output is rank(params) - 1).
+ /// output[a_0, ..., a_n, b_0, ..., b_n] =
+ /// params[a_0, ..., a_n, indices, b_0, ..., b_n]
+ ///
+ /// # Vector indices (output is rank(params)).
+ /// output[a_0, ..., a_n, i, b_0, ..., b_n] =
+ /// params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
+ ///
+ /// # Higher rank indices (output is rank(params) + rank(indices) - 1).
+ /// output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
+ /// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Note that on CPU, if an out of bound index is found, an error is returned.
+ /// On GPU, if an out of bound index is found, a 0 is stored in the
+ /// corresponding output value.
+ ///
+ /// See also `tf.batch_gather` and `tf.gather_nd`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor gather_v2(Tensor params_, Tensor indices, Tensor axis, int batch_dims = 0, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherV2", name) { args = new object[] { params_, indices, axis }, attrs = new Dictionary() { ["batch_dims"] = batch_dims } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return gather_v2_eager_fallback(params_, indices, axis, batch_dims: batch_dims, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["params"] = params_;
+ keywords["indices"] = indices;
+ keywords["axis"] = axis;
+ keywords["batch_dims"] = batch_dims;
+ var _op = tf.OpDefLib._apply_op_helper("GatherV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "batch_dims", _op._get_attr_int("batch_dims"), "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices"), "Taxis", _op._get_attr_type("Taxis") };
+ _execute.record_gradient("GatherV2", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor gather_v2_eager_fallback(Tensor params_, Tensor indices, Tensor axis, int batch_dims, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { params_, indices, axis };
+ object[] _attrs = new object[] { "batch_dims", batch_dims, "Tparams", params_.dtype, "Tindices", indices.dtype, "Taxis", axis.dtype };
+ var _result = _execute.execute("GatherV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("GatherV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
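+
+ // Usage sketch for GatherV2 (assumes `gen_array_ops` and eager execution): gathering
+ // columns along axis 1.
+ //
+ //   var x = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });
+ //   var idx = tf.constant(new[] { 2, 0 });
+ //   var cols = gen_array_ops.gather_v2(x, idx, tf.constant(1));   // ==> [[3, 1], [6, 4]]
+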
+ ///
+ /// Gives a guarantee to the TF runtime that the input tensor is a constant.
+ ///
+ ///
+ ///
+ /// The runtime is then free to make optimizations based on this.
+ ///
+ /// Only accepts value typed tensors as inputs and rejects resource variable handles
+ /// as input.
+ ///
+ /// Returns the input tensor without modification.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor guarantee_const(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GuaranteeConst", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return guarantee_const_eager_fallback(input, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("GuaranteeConst", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("GuaranteeConst", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor guarantee_const_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("GuaranteeConst", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("GuaranteeConst", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
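+
+ // Usage sketch (assumption: the argument is a value tensor, never a resource variable
+ // handle, which the op rejects; `gen_array_ops` and eager mode are assumed):
+ //
+ //   var c = gen_array_ops.guarantee_const(tf.constant(42));   // returns the input unchanged
+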
+ ///
+ /// Return a tensor with the same shape and contents as the input tensor or value.
+ ///
+ ///
+ ///
+ public static Tensor identity(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Identity", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return identity_eager_fallback(input, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("Identity", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("Identity", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor identity_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("Identity", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Identity", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns a list of tensors with the same shapes and contents as the input
+ ///
+ ///
+ ///
+ /// tensors.
+ ///
+ /// This op can be used to override the gradient for complicated functions. For
+ /// example, suppose y = f(x) and we wish to apply a custom function g for backprop
+ /// such that dx = g(dy). In Python,
+ ///
+ /// ```python
+ /// with tf.get_default_graph().gradient_override_map(
+ /// {'IdentityN': 'OverrideGradientWithG'}):
+ /// y, _ = identity_n([f(x), x])
+ ///
+ /// @tf.RegisterGradient('OverrideGradientWithG')
+ /// def ApplyG(op, dy, _):
+ /// return [None, g(dy)] # Do not backprop to f(x).
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor identity_n(Tensor input, TF_DataType[] T, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityN", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["T"] = T } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return identity_n_eager_fallback(input, T: T, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["T"] = T;
+ var _op = tf.OpDefLib._apply_op_helper("IdentityN", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op.get_attr("T") };
+ _execute.record_gradient("IdentityN", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor identity_n_eager_fallback(Tensor input, TF_DataType[] T, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", T };
+ var _result = _execute.execute("IdentityN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("IdentityN", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns immutable tensor from memory region.
+ ///
+ ///
+ ///
+ /// The current implementation memmaps the tensor from a file.
+ ///
+ ///
+ ///
+ ///
+ /// Type of the returned tensor.
+ ///
+ ///
+ ///
+ ///
+ /// Shape of the returned tensor.
+ ///
+ ///
+ ///
+ ///
+ /// Name of readonly memory region used by the tensor, see
+ /// NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
+ ///
+ ///
+ ///
+ public static Tensor immutable_const(TF_DataType dtype, Shape shape, string memory_region_name, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ImmutableConst", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype, ["shape"] = shape, ["memory_region_name"] = memory_region_name } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return immutable_const_eager_fallback(dtype: dtype, shape: shape, memory_region_name: memory_region_name, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["dtype"] = dtype;
+ keywords["shape"] = shape;
+ keywords["memory_region_name"] = memory_region_name;
+ var _op = tf.OpDefLib._apply_op_helper("ImmutableConst", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape"), "memory_region_name", _op.get_attr("memory_region_name") };
+ _execute.record_gradient("ImmutableConst", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor immutable_const_eager_fallback(TF_DataType dtype, Shape shape, string memory_region_name, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { };
+ object[] _attrs = new object[] { "dtype", dtype, "shape", shape, "memory_region_name", memory_region_name };
+ var _result = _execute.execute("ImmutableConst", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ImmutableConst", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Adds `v` into specified rows of `x` (per the upstream TensorFlow `InplaceAdd` op
+ /// docs): computes `y = x; y[i, :] += v; return y`.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor inplace_add(Tensor x, Tensor i, Tensor v, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceAdd", name) { args = new object[] { x, i, v }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return inplace_add_eager_fallback(x, i, v, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ keywords["i"] = i;
+ keywords["v"] = v;
+ var _op = tf.OpDefLib._apply_op_helper("InplaceAdd", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("InplaceAdd", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor inplace_add_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x, i, v };
+ object[] _attrs = new object[] { "T", x.dtype };
+ var _result = _execute.execute("InplaceAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("InplaceAdd", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Subtracts `v` from specified rows of `x` (per the upstream TensorFlow `InplaceSub`
+ /// op docs): computes `y = x; y[i, :] -= v; return y`.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor inplace_sub(Tensor x, Tensor i, Tensor v, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceSub", name) { args = new object[] { x, i, v }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return inplace_sub_eager_fallback(x, i, v, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ keywords["i"] = i;
+ keywords["v"] = v;
+ var _op = tf.OpDefLib._apply_op_helper("InplaceSub", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("InplaceSub", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor inplace_sub_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x, i, v };
+ object[] _attrs = new object[] { "T", x.dtype };
+ var _result = _execute.execute("InplaceSub", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("InplaceSub", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Updates specified rows `i` of `x` with values `v` (per the upstream TensorFlow
+ /// `InplaceUpdate` op docs): computes `x[i, :] = v; return x`.
+ ///
+ ///
+ ///
+ ///
+ public static Tensor inplace_update(Tensor x, Tensor i, Tensor v, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceUpdate", name) { args = new object[] { x, i, v }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return inplace_update_eager_fallback(x, i, v, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ keywords["i"] = i;
+ keywords["v"] = v;
+ var _op = tf.OpDefLib._apply_op_helper("InplaceUpdate", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("InplaceUpdate", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor inplace_update_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x, i, v };
+ object[] _attrs = new object[] { "T", x.dtype };
+ var _result = _execute.execute("InplaceUpdate", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("InplaceUpdate", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
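+
+ // Usage sketch for the three Inplace* ops above (the descriptions follow the upstream
+ // TensorFlow op docs; `gen_array_ops` and eager mode are assumed):
+ //
+ //   var x = tf.constant(new[,] { { 1, 1 }, { 2, 2 }, { 3, 3 } });
+ //   var i = tf.constant(new[] { 1 });
+ //   var v = tf.constant(new[,] { { 10, 10 } });
+ //   var y = gen_array_ops.inplace_update(x, i, v);   // row 1 becomes [10, 10]
+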
+ ///
+ /// Computes the inverse permutation of a tensor.
+ ///
+ ///
+ ///
+ /// This operation computes the inverse of an index permutation. It takes a 1-D
+ /// integer tensor `x`, which represents the indices of a zero-based array, and
+ /// swaps each value with its index position. In other words, for an output tensor
+ /// `y` and an input tensor `x`, this operation computes the following:
+ ///
+ /// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
+ ///
+ /// The values must include 0. There can be no duplicate values or negative values.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # tensor `x` is [3, 4, 0, 2, 1]
+ /// invert_permutation(x) ==> [2, 4, 3, 0, 1]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ public static Tensor invert_permutation(Tensor x, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvertPermutation", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return invert_permutation_eager_fallback(x, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ var _op = tf.OpDefLib._apply_op_helper("InvertPermutation", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("InvertPermutation", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor invert_permutation_eager_fallback(Tensor x, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x };
+ object[] _attrs = new object[] { "T", x.dtype };
+ var _result = _execute.execute("InvertPermutation", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("InvertPermutation", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
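+
+ // Usage sketch mirroring the doc example above (assumes `gen_array_ops`, eager mode):
+ //
+ //   var p = tf.constant(new[] { 3, 4, 0, 2, 1 });
+ //   var inv = gen_array_ops.invert_permutation(p);   // ==> [2, 4, 3, 0, 1]
+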
+ ///
+ /// Computes the difference between two lists of numbers or strings.
+ ///
+ ///
+ ///
+ /// Given a list `x` and a list `y`, this operation returns a list `out` that
+ /// represents all values that are in `x` but not in `y`. The returned list `out`
+ /// is sorted in the same order that the numbers appear in `x` (duplicates are
+ /// preserved). This operation also returns a list `idx` that represents the
+ /// position of each `out` element in `x`. In other words:
+ ///
+ /// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
+ ///
+ /// For example, given this input:
+ ///
+ /// ```
+ /// x = [1, 2, 3, 4, 5, 6]
+ /// y = [1, 3, 5]
+ /// ```
+ ///
+ /// This operation would return:
+ ///
+ /// ```
+ /// out ==> [2, 4, 6]
+ /// idx ==> [1, 3, 5]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor[] list_diff(Tensor x, Tensor y, TF_DataType out_idx = TF_DataType.TF_INT32, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ListDiff", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["out_idx"] = out_idx } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return list_diff_eager_fallback(x, y, out_idx: out_idx, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ keywords["y"] = y;
+ keywords["out_idx"] = out_idx;
+ var _op = tf.OpDefLib._apply_op_helper("ListDiff", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_idx", _op._get_attr_type("out_idx") };
+ _execute.record_gradient("ListDiff", _op.inputs, _attrs, _result);
+ }
+ return _result;
+ }
+
+ public static Tensor[] list_diff_eager_fallback(Tensor x, Tensor y, TF_DataType out_idx, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x, y };
+ object[] _attrs = new object[] { "T", x.dtype, "out_idx", out_idx };
+ var _result = _execute.execute("ListDiff", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ListDiff", _inputs_flat, _attrs, _result);
+ }
+ return _result;
+ }
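+
+ // Usage sketch mirroring the doc example above (assumes `gen_array_ops`, eager mode);
+ // the op returns two tensors: the difference values and their indices in `x`.
+ //
+ //   var x = tf.constant(new[] { 1, 2, 3, 4, 5, 6 });
+ //   var y = tf.constant(new[] { 1, 3, 5 });
+ //   var results = gen_array_ops.list_diff(x, y);
+ //   var @out = results[0];   // ==> [2, 4, 6]
+ //   var idx = results[1];    // ==> [1, 3, 5]
+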
+ ///
+ /// Applies lower_bound(sorted_search_values, values) along each row.
+ ///
+ ///
+ ///
+ /// Each set of rows with the same index in (sorted_inputs, values) is treated
+ /// independently. The resulting row is the equivalent of calling
+ /// `np.searchsorted(sorted_inputs, values, side='left')`.
+ ///
+ /// The result is not a global index to the entire
+ /// `Tensor`, but rather just the index in the last dimension.
+ ///
+ /// A 2-D example:
+ /// sorted_sequence = [[0, 3, 9, 9, 10],
+ /// [1, 2, 3, 4, 5]]
+ /// values = [[2, 4, 9],
+ /// [0, 2, 6]]
+ ///
+ /// result = LowerBound(sorted_sequence, values)
+ ///
+ /// result == [[1, 2, 2],
+ /// [0, 1, 5]]
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor lower_bound(Tensor sorted_inputs, Tensor values, TF_DataType out_type = TF_DataType.TF_INT32, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LowerBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return lower_bound_eager_fallback(sorted_inputs, values, out_type: out_type, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["sorted_inputs"] = sorted_inputs;
+ keywords["values"] = values;
+ keywords["out_type"] = out_type;
+ var _op = tf.OpDefLib._apply_op_helper("LowerBound", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") };
+ _execute.record_gradient("LowerBound", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor lower_bound_eager_fallback(Tensor sorted_inputs, Tensor values, TF_DataType out_type, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { sorted_inputs, values };
+ object[] _attrs = new object[] { "T", sorted_inputs.dtype, "out_type", out_type };
+ var _result = _execute.execute("LowerBound", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("LowerBound", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
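+
+ // Usage sketch mirroring the 2-D doc example above (assumes `gen_array_ops`, eager mode):
+ //
+ //   var sorted_seq = tf.constant(new[,] { { 0, 3, 9, 9, 10 }, { 1, 2, 3, 4, 5 } });
+ //   var values = tf.constant(new[,] { { 2, 4, 9 }, { 0, 2, 6 } });
+ //   var result = gen_array_ops.lower_bound(sorted_seq, values);   // ==> [[1, 2, 2], [0, 1, 5]]
+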
+ ///
+ /// Copy a tensor setting everything outside a central band in each innermost matrix to zero.
+ ///
+ ///
+ ///
+ /// The `band` part is computed as follows:
+ /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+ /// tensor with the same shape where
+ ///
+ /// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
+ ///
+ /// The indicator function
+ ///
+ /// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
+ /// (num_upper < 0 || (n-m) <= num_upper)`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # if 'input' is [[ 0, 1, 2, 3]
+ /// # [-1, 0, 1, 2]
+ /// # [-2, -1, 0, 1]
+ /// # [-3, -2, -1, 0]],
+ ///
+ /// tf.linalg.band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]
+ /// [-1, 0, 1, 2]
+ /// [ 0, -1, 0, 1]
+ /// [ 0, 0, -1, 0]],
+ ///
+ /// tf.linalg.band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]
+ /// [-1, 0, 1, 0]
+ /// [-2, -1, 0, 1]
+ /// [ 0, -2, -1, 0]]
+ /// ```
+ ///
+ /// Useful special cases:
+ ///
+ /// ```
+ /// tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.
+ /// tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.
+ /// tf.linalg.band_part(input, 0, 0) ==> Diagonal.
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor matrix_band_part(Tensor input, Tensor num_lower, Tensor num_upper, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_band_part_eager_fallback(input, num_lower, num_upper, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["num_lower"] = num_lower;
+ keywords["num_upper"] = num_upper;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixBandPart", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindex", _op._get_attr_type("Tindex") };
+ _execute.record_gradient("MatrixBandPart", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_band_part_eager_fallback(Tensor input, Tensor num_lower, Tensor num_upper, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, num_lower, num_upper };
+ object[] _attrs = new object[] { "T", input.dtype, "Tindex", num_lower.dtype };
+ var _result = _execute.execute("MatrixBandPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixBandPart", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
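+
+ // Usage sketch for the "useful special cases" above (assumes `gen_array_ops`, eager mode):
+ //
+ //   var m = tf.constant(new[,] { { 0, 1, 2 }, { -1, 0, 1 }, { -2, -1, 0 } });
+ //   var upper = gen_array_ops.matrix_band_part(m, tf.constant(0), tf.constant(-1));   // upper triangular part
+ //   var diag = gen_array_ops.matrix_band_part(m, tf.constant(0), tf.constant(0));     // diagonal only
+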
+ ///
+ /// Returns a batched diagonal tensor with given batched diagonal values.
+ ///
+ ///
+ ///
+ /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+ /// everything else padded with zeros. The diagonal is computed as follows:
+ ///
+ /// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
+ /// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
+ ///
+ /// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
+ ///
+ /// and diagonal.shape = (2, 4)
+ ///
+ /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
+ /// [0, 2, 0, 0]
+ /// [0, 0, 3, 0]
+ /// [0, 0, 0, 4]],
+ /// [[5, 0, 0, 0]
+ /// [0, 6, 0, 0]
+ /// [0, 0, 7, 0]
+ /// [0, 0, 0, 8]]]
+ ///
+ /// which has shape (2, 4, 4)
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ public static Tensor matrix_diag(Tensor diagonal, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_diag_eager_fallback(diagonal, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["diagonal"] = diagonal;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixDiag", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("MatrixDiag", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_diag_eager_fallback(Tensor diagonal, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { diagonal };
+ object[] _attrs = new object[] { "T", diagonal.dtype };
+ var _result = _execute.execute("MatrixDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixDiag", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns the batched diagonal part of a batched tensor.
+ ///
+ ///
+ ///
+ /// This operation returns a tensor with the `diagonal` part
+ /// of the batched `input`. The `diagonal` part is computed as follows:
+ ///
+ /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+ /// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
+ ///
+ /// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
+ ///
+ /// The input must be at least a matrix.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 'input' is [[[1, 0, 0, 0]
+ /// [0, 2, 0, 0]
+ /// [0, 0, 3, 0]
+ /// [0, 0, 0, 4]],
+ /// [[5, 0, 0, 0]
+ /// [0, 6, 0, 0]
+ /// [0, 0, 7, 0]
+ /// [0, 0, 0, 8]]]
+ ///
+ /// and input.shape = (2, 4, 4)
+ ///
+ /// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
+ ///
+ /// which has shape (2, 4)
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ public static Tensor matrix_diag_part(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_diag_part_eager_fallback(input, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPart", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("MatrixDiagPart", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_diag_part_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("MatrixDiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixDiagPart", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns the batched diagonal part of a batched tensor.
+ ///
+ ///
+ ///
+ /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
+ /// `input`.
+ ///
+ /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
+ /// Let `max_diag_len` be the maximum length among all diagonals to be extracted,
+ /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
+ /// Let `num_diags` be the number of diagonals to extract,
+ /// `num_diags = k[1] - k[0] + 1`.
+ ///
+ /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
+ /// `[I, J, ..., L, max_diag_len]` and values:
+ ///
+ /// ```
+ /// diagonal[i, j, ..., l, n]
+ /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
+ /// padding_value ; otherwise.
+ /// ```
+ /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
+ ///
+ /// Otherwise, the output tensor has rank `r` with dimensions
+ /// `[I, J, ..., L, num_diags, max_diag_len]` with values:
+ ///
+ /// ```
+ /// diagonal[i, j, ..., l, m, n]
+ /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
+ /// padding_value ; otherwise.
+ /// ```
+ /// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.
+ ///
+ /// The input must be at least a matrix.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
+ /// [5, 6, 7, 8],
+ /// [9, 8, 7, 6]],
+ /// [[5, 4, 3, 2],
+ /// [1, 2, 3, 4],
+ /// [5, 6, 7, 8]]])
+ ///
+ /// # A main diagonal from each batch.
+ /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
+ /// [5, 2, 7]]
+ ///
+ /// # A superdiagonal from each batch.
+ /// tf.matrix_diag_part(input, k = 1)
+ /// ==> [[2, 7, 6], # Output shape: (2, 3)
+ /// [4, 3, 8]]
+ ///
+ /// # A tridiagonal band from each batch.
+ /// tf.matrix_diag_part(input, k = (-1, 1))
+ /// ==> [[[2, 7, 6], # Output shape: (2, 3, 3)
+ /// [1, 6, 7],
+ /// [5, 8, 0]],
+ /// [[4, 3, 8],
+ /// [5, 2, 7],
+ /// [1, 6, 0]]]
+ ///
+ /// # Padding value = 9
+ /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
+ /// ==> [[[4, 9, 9], # Output shape: (2, 3, 3)
+ /// [3, 8, 9],
+ /// [2, 7, 6]],
+ /// [[2, 9, 9],
+ /// [3, 4, 9],
+ /// [4, 3, 8]]]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor matrix_diag_part_v2(Tensor input, Tensor k, Tensor padding_value, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV2", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_diag_part_v2_eager_fallback(input, k, padding_value, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["k"] = k;
+ keywords["padding_value"] = padding_value;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPartV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("MatrixDiagPartV2", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_diag_part_v2_eager_fallback(Tensor input, Tensor k, Tensor padding_value, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, k, padding_value };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("MatrixDiagPartV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixDiagPartV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns the batched diagonal part of a batched tensor.
+ ///
+ ///
+ ///
+ /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
+ /// `input`.
+ ///
+ /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
+ /// Let `max_diag_len` be the maximum length among all diagonals to be extracted,
+ /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
+ /// Let `num_diags` be the number of diagonals to extract,
+ /// `num_diags = k[1] - k[0] + 1`.
+ ///
+ /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
+ /// `[I, J, ..., L, max_diag_len]` and values:
+ ///
+ /// ```
+ /// diagonal[i, j, ..., l, n]
+ /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
+ /// padding_value ; otherwise.
+ /// ```
+ /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
+ ///
+ /// Otherwise, the output tensor has rank `r` with dimensions
+ /// `[I, J, ..., L, num_diags, max_diag_len]` with values:
+ ///
+ /// ```
+ /// diagonal[i, j, ..., l, m, n]
+ /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
+ /// padding_value ; otherwise.
+ /// ```
+ /// where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
+ ///
+ /// `offset` is zero except when the alignment of the diagonal is to the right.
+ /// ```
+ /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
+ /// and `d >= 0`) or
+ /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
+ /// and `d <= 0`)
+ /// 0 ; otherwise
+ /// ```
+ /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
+ ///
+ /// The input must be at least a matrix.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
+ /// [5, 6, 7, 8],
+ /// [9, 8, 7, 6]],
+ /// [[5, 4, 3, 2],
+ /// [1, 2, 3, 4],
+ /// [5, 6, 7, 8]]])
+ ///
+ /// # A main diagonal from each batch.
+ /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
+ /// [5, 2, 7]]
+ ///
+ /// # A superdiagonal from each batch.
+ /// tf.matrix_diag_part(input, k = 1)
+ /// ==> [[2, 7, 6], # Output shape: (2, 3)
+ /// [4, 3, 8]]
+ ///
+ /// # A band from each batch.
+ /// tf.matrix_diag_part(input, k = (-1, 2))
+ /// ==> [[[0, 3, 8], # Output shape: (2, 4, 3)
+ /// [2, 7, 6],
+ /// [1, 6, 7],
+ /// [5, 8, 0]],
+ /// [[0, 3, 4],
+ /// [4, 3, 8],
+ /// [5, 2, 7],
+ /// [1, 6, 0]]]
+ ///
+ /// # LEFT_RIGHT alignment.
+ /// tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
+ /// ==> [[[3, 8, 0], # Output shape: (2, 4, 3)
+ /// [2, 7, 6],
+ /// [1, 6, 7],
+ /// [0, 5, 8]],
+ /// [[3, 4, 0],
+ /// [4, 3, 8],
+ /// [5, 2, 7],
+ /// [0, 1, 6]]]
+ ///
+ /// # max_diag_len can be shorter than the main diagonal.
+ /// tf.matrix_diag_part(input, k = (-2, -1))
+ /// ==> [[[5, 8],
+ /// [9, 0]],
+ /// [[1, 6],
+ /// [5, 0]]]
+ ///
+ /// # padding_value = 9
+ /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
+ /// ==> [[[9, 9, 4], # Output shape: (2, 3, 3)
+ /// [9, 3, 8],
+ /// [2, 7, 6]],
+ /// [[9, 9, 2],
+ /// [9, 3, 4],
+ /// [4, 3, 8]]]
+ ///
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
+ /// a string specifying how superdiagonals and subdiagonals should be aligned,
+ /// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
+ /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
+ /// to the right (left-pads the row) and subdiagonals to the left (right-pads the
+ /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
+ /// the opposite alignment.
+ ///
+ ///
+ ///
+ public static Tensor matrix_diag_part_v3(Tensor input, Tensor k, Tensor padding_value, string align = "RIGHT_LEFT", string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV3", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary<string, object>() { ["align"] = align } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_diag_part_v3_eager_fallback(input, k, padding_value, align: align, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (align is null)
+ {
+ align = "RIGHT_LEFT";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["k"] = k;
+ keywords["padding_value"] = padding_value;
+ keywords["align"] = align;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPartV3", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") };
+ _execute.record_gradient("MatrixDiagPartV3", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_diag_part_v3_eager_fallback(Tensor input, Tensor k, Tensor padding_value, string align, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, k, padding_value };
+ object[] _attrs = new object[] { "T", input.dtype, "align", align };
+ var _result = _execute.execute("MatrixDiagPartV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixDiagPartV3", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns a batched diagonal tensor with given batched diagonal values.
+ ///
+ ///
+ ///
+ /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
+ /// diagonals of a matrix, with everything else padded with `padding`. `num_rows`
+ /// and `num_cols` specify the dimension of the innermost matrix of the output. If
+ /// both are not specified, the op assumes the innermost matrix is square and infers
+ /// its size from `k` and the innermost dimension of `diagonal`. If only one of them
+ /// is specified, the op assumes the unspecified value is the smallest possible
+ /// based on other criteria.
+ ///
+ /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
+ /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
+ /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
+ /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
+ ///
+ /// The second innermost dimension of `diagonal` has double meaning.
+ /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
+ /// [I, J, ..., M], and the output tensor is:
+ ///
+ /// ```
+ /// output[i, j, ..., l, m, n]
+ /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
+ /// padding_value ; otherwise
+ /// ```
+ ///
+ /// Otherwise, `M` is treated as the number of diagonals for the matrix in the
+ /// same batch (`M = k[1]-k[0]+1`), and the output tensor is:
+ ///
+ /// ```
+ /// output[i, j, ..., l, m, n]
+ /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
+ /// padding_value ; otherwise
+ /// ```
+ /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # The main diagonal.
+ /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
+ /// [5, 6, 7, 8]])
+ /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
+ /// [0, 2, 0, 0],
+ /// [0, 0, 3, 0],
+ /// [0, 0, 0, 4]],
+ /// [[5, 0, 0, 0],
+ /// [0, 6, 0, 0],
+ /// [0, 0, 7, 0],
+ /// [0, 0, 0, 8]]]
+ ///
+ /// # A superdiagonal (per batch).
+ /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
+ /// [4, 5, 6]])
+ /// tf.matrix_diag(diagonal, k = 1)
+ /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
+ /// [0, 0, 2, 0],
+ /// [0, 0, 0, 3],
+ /// [0, 0, 0, 0]],
+ /// [[0, 4, 0, 0],
+ /// [0, 0, 5, 0],
+ /// [0, 0, 0, 6],
+ /// [0, 0, 0, 0]]]
+ ///
+ /// # A band of diagonals.
+ /// diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3)
+ /// [4, 5, 0]],
+ /// [[6, 7, 9],
+ /// [9, 1, 0]]])
+ /// tf.matrix_diag(diagonals, k = (-1, 0))
+ /// ==> [[[1, 0, 0], # Output shape: (2, 3, 3)
+ /// [4, 2, 0],
+ /// [0, 5, 3]],
+ /// [[6, 0, 0],
+ /// [9, 7, 0],
+ /// [0, 1, 9]]]
+ ///
+ /// # Rectangular matrix.
+ /// diagonal = np.array([1, 2]) # Input shape: (2)
+ /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
+ /// ==> [[0, 0, 0, 0], # Output shape: (3, 4)
+ /// [1, 0, 0, 0],
+ /// [0, 2, 0, 0]]
+ ///
+ /// # Rectangular matrix with inferred num_cols and padding_value = 9.
+ /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
+ /// ==> [[9, 9], # Output shape: (3, 2)
+ /// [1, 9],
+ /// [9, 2]]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor matrix_diag_v2(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV2", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_diag_v2_eager_fallback(diagonal, k, num_rows, num_cols, padding_value, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["diagonal"] = diagonal;
+ keywords["k"] = k;
+ keywords["num_rows"] = num_rows;
+ keywords["num_cols"] = num_cols;
+ keywords["padding_value"] = padding_value;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixDiagV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("MatrixDiagV2", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_diag_v2_eager_fallback(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { diagonal, k, num_rows, num_cols, padding_value };
+ object[] _attrs = new object[] { "T", diagonal.dtype };
+ var _result = _execute.execute("MatrixDiagV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixDiagV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns a batched diagonal tensor with given batched diagonal values.
+ ///
+ ///
+ ///
+ /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
+ /// diagonals of a matrix, with everything else padded with `padding`. `num_rows`
+ /// and `num_cols` specify the dimension of the innermost matrix of the output. If
+ /// both are not specified, the op assumes the innermost matrix is square and infers
+ /// its size from `k` and the innermost dimension of `diagonal`. If only one of them
+ /// is specified, the op assumes the unspecified value is the smallest possible
+ /// based on other criteria.
+ ///
+ /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
+ /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
+ /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
+ /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
+ ///
+ /// The second innermost dimension of `diagonal` has double meaning.
+ /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
+ /// [I, J, ..., M], and the output tensor is:
+ ///
+ /// ```
+ /// output[i, j, ..., l, m, n]
+ /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
+ /// padding_value ; otherwise
+ /// ```
+ ///
+ /// Otherwise, `M` is treated as the number of diagonals for the matrix in the
+ /// same batch (`M = k[1]-k[0]+1`), and the output tensor is:
+ ///
+ /// ```
+ /// output[i, j, ..., l, m, n]
+ /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
+ /// padding_value ; otherwise
+ /// ```
+ /// where `d = n - m`, `diag_index = k[1] - d`, and
+ /// `index_in_diag = n - max(d, 0) + offset`.
+ ///
+ /// `offset` is zero except when the alignment of the diagonal is to the right.
+ /// ```
+ /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
+ /// and `d >= 0`) or
+ /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
+ /// and `d <= 0`)
+ /// 0 ; otherwise
+ /// ```
+ /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # The main diagonal.
+ /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
+ /// [5, 6, 7, 8]])
+ /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
+ /// [0, 2, 0, 0],
+ /// [0, 0, 3, 0],
+ /// [0, 0, 0, 4]],
+ /// [[5, 0, 0, 0],
+ /// [0, 6, 0, 0],
+ /// [0, 0, 7, 0],
+ /// [0, 0, 0, 8]]]
+ ///
+ /// # A superdiagonal (per batch).
+ /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
+ /// [4, 5, 6]])
+ /// tf.matrix_diag(diagonal, k = 1)
+ /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
+ /// [0, 0, 2, 0],
+ /// [0, 0, 0, 3],
+ /// [0, 0, 0, 0]],
+ /// [[0, 4, 0, 0],
+ /// [0, 0, 5, 0],
+ /// [0, 0, 0, 6],
+ /// [0, 0, 0, 0]]]
+ ///
+ /// # A tridiagonal band (per batch).
+ /// diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)
+ /// [1, 2, 3],
+ /// [4, 5, 0]],
+ /// [[0, 2, 3],
+ /// [6, 7, 9],
+ /// [9, 1, 0]]])
+ /// tf.matrix_diag(diagonals, k = (-1, 1))
+ /// ==> [[[1, 8, 0], # Output shape: (2, 3, 3)
+ /// [4, 2, 9],
+ /// [0, 5, 3]],
+ /// [[6, 2, 0],
+ /// [9, 7, 3],
+ /// [0, 1, 9]]]
+ ///
+ /// # LEFT_RIGHT alignment.
+ /// diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)
+ /// [1, 2, 3],
+ /// [0, 4, 5]],
+ /// [[2, 3, 0],
+ /// [6, 7, 9],
+ /// [0, 9, 1]]])
+ /// tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
+ /// ==> [[[1, 8, 0], # Output shape: (2, 3, 3)
+ /// [4, 2, 9],
+ /// [0, 5, 3]],
+ /// [[6, 2, 0],
+ /// [9, 7, 3],
+ /// [0, 1, 9]]]
+ ///
+ /// # Rectangular matrix.
+ /// diagonal = np.array([1, 2]) # Input shape: (2)
+ /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
+ /// ==> [[0, 0, 0, 0], # Output shape: (3, 4)
+ /// [1, 0, 0, 0],
+ /// [0, 2, 0, 0]]
+ ///
+ /// # Rectangular matrix with inferred num_cols and padding_value = 9.
+ /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
+ /// ==> [[9, 9], # Output shape: (3, 2)
+ /// [1, 9],
+ /// [9, 2]]
+ ///
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
+ /// a string specifying how superdiagonals and subdiagonals should be aligned,
+ /// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
+ /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
+ /// to the right (left-pads the row) and subdiagonals to the left (right-pads the
+ /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
+ /// the opposite alignment.
+ ///
+ ///
+ ///
+ public static Tensor matrix_diag_v3(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string align = "RIGHT_LEFT", string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV3", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary<string, object>() { ["align"] = align } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_diag_v3_eager_fallback(diagonal, k, num_rows, num_cols, padding_value, align: align, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (align is null)
+ {
+ align = "RIGHT_LEFT";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["diagonal"] = diagonal;
+ keywords["k"] = k;
+ keywords["num_rows"] = num_rows;
+ keywords["num_cols"] = num_cols;
+ keywords["padding_value"] = padding_value;
+ keywords["align"] = align;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixDiagV3", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") };
+ _execute.record_gradient("MatrixDiagV3", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_diag_v3_eager_fallback(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string align, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { diagonal, k, num_rows, num_cols, padding_value };
+ object[] _attrs = new object[] { "T", diagonal.dtype, "align", align };
+ var _result = _execute.execute("MatrixDiagV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixDiagV3", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns a batched matrix tensor with new batched diagonal values.
+ ///
+ ///
+ ///
+ /// Given `input` and `diagonal`, this operation returns a tensor with the
+ /// same shape and values as `input`, except for the main diagonal of the
+ /// innermost matrices. These will be overwritten by the values in `diagonal`.
+ ///
+ /// The output is computed as follows:
+ ///
+ /// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
+ /// `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
+ /// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
+ ///
+ /// * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
+ /// * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor matrix_set_diag(Tensor input, Tensor diagonal, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_set_diag_eager_fallback(input, diagonal, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["diagonal"] = diagonal;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiag", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("MatrixSetDiag", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_set_diag_eager_fallback(Tensor input, Tensor diagonal, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, diagonal };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("MatrixSetDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixSetDiag", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
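+
+ // Usage sketch (assumes `gen_array_ops`, eager mode): overwrite the main diagonal of each
+ // innermost matrix.
+ //
+ //   var input = tf.constant(new[,] { { 7, 7, 7 }, { 7, 7, 7 }, { 7, 7, 7 } });
+ //   var diag = tf.constant(new[] { 1, 2, 3 });
+ //   var y = gen_array_ops.matrix_set_diag(input, diag);   // ==> [[1,7,7],[7,2,7],[7,7,3]]
+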
+ ///
+ /// Returns a batched matrix tensor with new batched diagonal values.
+ ///
+ ///
+ ///
+ /// Given `input` and `diagonal`, this operation returns a tensor with the
+ /// same shape and values as `input`, except for the specified diagonals of the
+ /// innermost matrices. These will be overwritten by the values in `diagonal`.
+ ///
+ /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
+ /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
+ /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
+ /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
+ /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
+ /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
+ ///
+ /// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
+ /// If `k` is scalar or `k[0] == k[1]`:
+ ///
+ /// ```
+ /// output[i, j, ..., l, m, n]
+ /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
+ /// input[i, j, ..., l, m, n] ; otherwise
+ /// ```
+ ///
+ /// Otherwise,
+ ///
+ /// ```
+ /// output[i, j, ..., l, m, n]
+ /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
+ /// input[i, j, ..., l, m, n] ; otherwise
+ /// ```
+ /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # The main diagonal.
+ /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
+ /// [7, 7, 7, 7],
+ /// [7, 7, 7, 7]],
+ /// [[7, 7, 7, 7],
+ /// [7, 7, 7, 7],
+ /// [7, 7, 7, 7]]])
+ /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
+ /// [4, 5, 6]])
+ /// tf.matrix_set_diag(diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
+ /// [7, 2, 7, 7],
+ /// [7, 7, 3, 7]],
+ /// [[4, 7, 7, 7],
+ /// [7, 5, 7, 7],
+ /// [7, 7, 6, 7]]]
+ ///
+ /// # A superdiagonal (per batch).
+ /// tf.matrix_set_diag(diagonal, k = 1)
+ /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
+ /// [7, 7, 2, 7],
+ /// [7, 7, 7, 3]],
+ /// [[7, 4, 7, 7],
+ /// [7, 7, 5, 7],
+ /// [7, 7, 7, 6]]]
+ ///
+ /// # A band of diagonals.
+ /// diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3)
+ /// [4, 5, 0]],
+ /// [[6, 1, 2],
+ /// [3, 4, 0]]])
+ /// tf.matrix_set_diag(diagonals, k = (-1, 0))
+ /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
+ /// [4, 2, 7, 7],
+ /// [0, 5, 3, 7]],
+ /// [[6, 7, 7, 7],
+ /// [3, 1, 7, 7],
+ /// [7, 4, 2, 7]]]
+ ///
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor matrix_set_diag_v2(Tensor input, Tensor diagonal, Tensor k, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV2", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_set_diag_v2_eager_fallback(input, diagonal, k, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["diagonal"] = diagonal;
+ keywords["k"] = k;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiagV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("MatrixSetDiagV2", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_set_diag_v2_eager_fallback(Tensor input, Tensor diagonal, Tensor k, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, diagonal, k };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("MatrixSetDiagV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixSetDiagV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns a batched matrix tensor with new batched diagonal values.
+ ///
+ ///
+ ///
+ /// Given `input` and `diagonal`, this operation returns a tensor with the
+ /// same shape and values as `input`, except for the specified diagonals of the
+ /// innermost matrices. These will be overwritten by the values in `diagonal`.
+ ///
+ /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
+ /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
+ /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
+ /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
+ /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
+ /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
+ ///
+ /// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
+ /// If `k` is scalar or `k[0] == k[1]`:
+ ///
+ /// ```
+ /// output[i, j, ..., l, m, n]
+ /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
+ /// input[i, j, ..., l, m, n] ; otherwise
+ /// ```
+ ///
+ /// Otherwise,
+ ///
+ /// ```
+ /// output[i, j, ..., l, m, n]
+ /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
+ /// input[i, j, ..., l, m, n] ; otherwise
+ /// ```
+ /// where `d = n - m`, `diag_index = k[1] - d`, and
+ /// `index_in_diag = n - max(d, 0) + offset`.
+ ///
+ /// `offset` is zero except when the alignment of the diagonal is to the right.
+ /// ```
+ /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
+ /// and `d >= 0`) or
+ /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
+ /// and `d <= 0`)
+ /// 0 ; otherwise
+ /// ```
+ /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # The main diagonal.
+ /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
+ /// [7, 7, 7, 7],
+ /// [7, 7, 7, 7]],
+ /// [[7, 7, 7, 7],
+ /// [7, 7, 7, 7],
+ /// [7, 7, 7, 7]]])
+ /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
+ /// [4, 5, 6]])
+ /// tf.matrix_set_diag(input, diagonal)
+ /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
+ /// [7, 2, 7, 7],
+ /// [7, 7, 3, 7]],
+ /// [[4, 7, 7, 7],
+ /// [7, 5, 7, 7],
+ /// [7, 7, 6, 7]]]
+ ///
+ /// # A superdiagonal (per batch).
+ /// tf.matrix_set_diag(input, diagonal, k = 1)
+ /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
+ /// [7, 7, 2, 7],
+ /// [7, 7, 7, 3]],
+ /// [[7, 4, 7, 7],
+ /// [7, 7, 5, 7],
+ /// [7, 7, 7, 6]]]
+ ///
+ /// # A band of diagonals.
+ /// diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)
+ /// [6, 5, 8],
+ /// [1, 2, 3],
+ /// [4, 5, 0]],
+ /// [[0, 1, 2],
+ /// [5, 6, 4],
+ /// [6, 1, 2],
+ /// [3, 4, 0]]])
+ /// tf.matrix_set_diag(input, diagonals, k = (-1, 2))
+ /// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
+ /// [4, 2, 5, 1],
+ /// [7, 5, 3, 8]],
+ /// [[6, 5, 1, 7],
+ /// [3, 1, 6, 2],
+ /// [7, 4, 2, 4]]]
+ ///
+ /// # LEFT_RIGHT alignment.
+ /// diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)
+ /// [6, 5, 8],
+ /// [1, 2, 3],
+ /// [0, 4, 5]],
+ /// [[1, 2, 0],
+ /// [5, 6, 4],
+ /// [6, 1, 2],
+ /// [0, 3, 4]]])
+ /// tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
+ /// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
+ /// [4, 2, 5, 1],
+ /// [7, 5, 3, 8]],
+ /// [[6, 5, 1, 7],
+ /// [3, 1, 6, 2],
+ /// [7, 4, 2, 4]]]
+ ///
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
+ /// a string specifying how superdiagonals and subdiagonals should be aligned,
+ /// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
+ /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
+ /// to the right (left-pads the row) and subdiagonals to the left (right-pads the
+ /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
+ /// the opposite alignment.
+ ///
+ ///
+ ///
+ public static Tensor matrix_set_diag_v3(Tensor input, Tensor diagonal, Tensor k, string align = "RIGHT_LEFT", string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV3", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary<string, object>() { ["align"] = align } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return matrix_set_diag_v3_eager_fallback(input, diagonal, k, align: align, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (align is null)
+ {
+ align = "RIGHT_LEFT";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["diagonal"] = diagonal;
+ keywords["k"] = k;
+ keywords["align"] = align;
+ var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiagV3", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") };
+ _execute.record_gradient("MatrixSetDiagV3", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor matrix_set_diag_v3_eager_fallback(Tensor input, Tensor diagonal, Tensor k, string align, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, diagonal, k };
+ object[] _attrs = new object[] { "T", input.dtype, "align", align };
+ var _result = _execute.execute("MatrixSetDiagV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MatrixSetDiagV3", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
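+
+ // Editor's usage sketch (not generated code): a minimal eager call of matrix_set_diag_v3 that
+ // overwrites the main diagonals of a batch of 7-filled matrices, mirroring the first example in
+ // the remarks above. The literal values and the use of tf.fill/tf.constant to build the inputs
+ // are illustrative assumptions only.
+ internal static Tensor matrix_set_diag_v3_usage_sketch()
+ {
+ var input = tf.fill(new Shape(2, 3, 4), 7); // input shape: (2, 3, 4), all sevens
+ var diagonal = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } }); // diagonal shape: (2, 3)
+ var k = tf.constant(0); // k = 0 selects the main diagonal
+ return matrix_set_diag_v3(input, diagonal, k, align: "RIGHT_LEFT");
+ }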
+ ///
+ /// Pads a tensor with mirrored values.
+ ///
+ ///
+ ///
+ /// This operation pads `input` with mirrored values according to the `paddings`
+ /// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
+ /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+ /// how many values to add before the contents of `input` in that dimension, and
+ /// `paddings[D, 1]` indicates how many values to add after the contents of `input`
+ /// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
+ /// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
+ /// (if false, respectively).
+ ///
+ /// The padded size of each dimension D of the output is:
+ ///
+ /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 't' is [[1, 2, 3], [4, 5, 6]].
+ /// # 'paddings' is [[1, 1], [2, 2]].
+ /// # 'mode' is SYMMETRIC.
+ /// # rank of 't' is 2.
+ /// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
+ /// [2, 1, 1, 2, 3, 3, 2]
+ /// [5, 4, 4, 5, 6, 6, 5]
+ /// [5, 4, 4, 5, 6, 6, 5]]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
+ /// do not include the borders, while in symmetric mode the padded regions
+ /// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
+ /// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
+ /// it is `[1, 2, 3, 3, 2]` in symmetric mode.
+ ///
+ ///
+ ///
+ public static Tensor mirror_pad(Tensor input, Tensor paddings, string mode, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPad", name) { args = new object[] { input, paddings }, attrs = new Dictionary<string, object>() { ["mode"] = mode } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return mirror_pad_eager_fallback(input, paddings, mode: mode, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["paddings"] = paddings;
+ keywords["mode"] = mode;
+ var _op = tf.OpDefLib._apply_op_helper("MirrorPad", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "mode", _op.get_attr("mode") };
+ _execute.record_gradient("MirrorPad", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor mirror_pad_eager_fallback(Tensor input, Tensor paddings, string mode, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, paddings };
+ object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype, "mode", mode };
+ var _result = _execute.execute("MirrorPad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MirrorPad", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
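+
+ // Editor's usage sketch (not generated code): symmetric-pads the 2x3 matrix from the remarks
+ // above. The literal values are illustrative assumptions; `mode` must be "REFLECT" or "SYMMETRIC".
+ internal static Tensor mirror_pad_usage_sketch()
+ {
+ var t = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });
+ var paddings = tf.constant(new[,] { { 1, 1 }, { 2, 2 } });
+ return mirror_pad(t, paddings, mode: "SYMMETRIC"); // output shape: (4, 7)
+ }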
+ ///
+ /// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
+ ///
+ ///
+ ///
+ /// This operation folds the padded areas of `input` by `MirrorPad` according to the
+ /// `paddings` you specify. `paddings` must be the same as `paddings` argument
+ /// given to the corresponding `MirrorPad` op.
+ ///
+ /// The folded size of each dimension D of the output is:
+ ///
+ /// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
+ /// # 'paddings' is [[0, 1], [0, 1]].
+ /// # 'mode' is SYMMETRIC.
+ /// # rank of 't' is 2.
+ /// pad(t, paddings) ==> [[ 1, 5]
+ /// [11, 28]]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The mode used in the `MirrorPad` op.
+ ///
+ ///
+ ///
+ public static Tensor mirror_pad_grad(Tensor input, Tensor paddings, string mode, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPadGrad", name) { args = new object[] { input, paddings }, attrs = new Dictionary<string, object>() { ["mode"] = mode } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return mirror_pad_grad_eager_fallback(input, paddings, mode: mode, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["paddings"] = paddings;
+ keywords["mode"] = mode;
+ var _op = tf.OpDefLib._apply_op_helper("MirrorPadGrad", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "mode", _op.get_attr("mode") };
+ _execute.record_gradient("MirrorPadGrad", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor mirror_pad_grad_eager_fallback(Tensor input, Tensor paddings, string mode, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, paddings };
+ object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype, "mode", mode };
+ var _result = _execute.execute("MirrorPadGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("MirrorPadGrad", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Returns a one-hot tensor.
+ ///
+ ///
+ ///
+ /// The locations represented by indices in `indices` take value `on_value`,
+ /// while all other locations take value `off_value`.
+ ///
+ /// If the input `indices` is rank `N`, the output will have rank `N+1`.
+ /// The new axis is created at dimension `axis` (default: the new axis is
+ /// appended at the end).
+ ///
+ /// If `indices` is a scalar the output shape will be a vector of length `depth`.
+ ///
+ /// If `indices` is a vector of length `features`, the output shape will be:
+ /// ```
+ /// features x depth if axis == -1
+ /// depth x features if axis == 0
+ /// ```
+ ///
+ /// If `indices` is a matrix (batch) with shape `[batch, features]`,
+ /// the output shape will be:
+ /// ```
+ /// batch x features x depth if axis == -1
+ /// batch x depth x features if axis == 1
+ /// depth x batch x features if axis == 0
+ /// ```
+ ///
+ ///
+ /// Examples
+ /// =========
+ ///
+ /// Suppose that
+ /// ```
+ /// indices = [0, 2, -1, 1]
+ /// depth = 3
+ /// on_value = 5.0
+ /// off_value = 0.0
+ /// axis = -1
+ /// ```
+ ///
+ /// Then output is `[4 x 3]`:
+ /// ```
+ /// output =
+ /// [5.0 0.0 0.0] // one_hot(0)
+ /// [0.0 0.0 5.0] // one_hot(2)
+ /// [0.0 0.0 0.0] // one_hot(-1)
+ /// [0.0 5.0 0.0] // one_hot(1)
+ /// ```
+ ///
+ /// Suppose that
+ /// ```
+ /// indices = [0, 2, -1, 1]
+ /// depth = 3
+ /// on_value = 0.0
+ /// off_value = 3.0
+ /// axis = 0
+ /// ```
+ ///
+ /// Then output is `[3 x 4]`:
+ /// ```
+ /// output =
+ /// [0.0 3.0 3.0 3.0]
+ /// [3.0 3.0 3.0 0.0]
+ /// [3.0 3.0 3.0 3.0]
+ /// [3.0 0.0 3.0 3.0]
+ /// // ^ one_hot(0)
+ /// // ^ one_hot(2)
+ /// // ^ one_hot(-1)
+ /// // ^ one_hot(1)
+ /// ```
+ ///
+ /// Suppose that
+ /// ```
+ /// indices = [[0, 2], [1, -1]]
+ /// depth = 3
+ /// on_value = 1.0
+ /// off_value = 0.0
+ /// axis = -1
+ /// ```
+ ///
+ /// Then output is `[2 x 2 x 3]`:
+ /// ```
+ /// output =
+ /// [
+ /// [1.0, 0.0, 0.0] // one_hot(0)
+ /// [0.0, 0.0, 1.0] // one_hot(2)
+ /// ][
+ /// [0.0, 1.0, 0.0] // one_hot(1)
+ /// [0.0, 0.0, 0.0] // one_hot(-1)
+ /// ]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The axis to fill (default: -1, a new inner-most axis).
+ ///
+ ///
+ ///
+ public static Tensor one_hot(Tensor indices, Tensor depth, Tensor on_value, Tensor off_value, int axis = -1, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OneHot", name) { args = new object[] { indices, depth, on_value, off_value }, attrs = new Dictionary<string, object>() { ["axis"] = axis } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return one_hot_eager_fallback(indices, depth, on_value, off_value, axis: axis, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["indices"] = indices;
+ keywords["depth"] = depth;
+ keywords["on_value"] = on_value;
+ keywords["off_value"] = off_value;
+ keywords["axis"] = axis;
+ var _op = tf.OpDefLib._apply_op_helper("OneHot", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "axis", _op._get_attr_int("axis"), "T", _op._get_attr_type("T"), "TI", _op._get_attr_type("TI") };
+ _execute.record_gradient("OneHot", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor one_hot_eager_fallback(Tensor indices, Tensor depth, Tensor on_value, Tensor off_value, int axis, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { indices, depth, on_value, off_value };
+ object[] _attrs = new object[] { "axis", axis, "T", on_value.dtype, "TI", indices.dtype };
+ var _result = _execute.execute("OneHot", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("OneHot", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
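+
+ // Editor's usage sketch (not generated code): reproduces the first one_hot example from the
+ // remarks above (a 4 x 3 output with on_value 5.0). The literal values are illustrative assumptions.
+ internal static Tensor one_hot_usage_sketch()
+ {
+ var indices = tf.constant(new[] { 0, 2, -1, 1 });
+ var depth = tf.constant(3);
+ var on_value = tf.constant(5.0f);
+ var off_value = tf.constant(0.0f);
+ return one_hot(indices, depth, on_value, off_value, axis: -1);
+ }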
+ ///
+ /// Returns a tensor of ones with the same shape and type as x.
+ ///
+ ///
+ ///
+ public static Tensor ones_like(Tensor x, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OnesLike", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return ones_like_eager_fallback(x, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ var _op = tf.OpDefLib._apply_op_helper("OnesLike", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("OnesLike", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor ones_like_eager_fallback(Tensor x, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x };
+ object[] _attrs = new object[] { "T", x.dtype };
+ var _result = _execute.execute("OnesLike", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("OnesLike", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
+ ///
+ ///
+ ///
+ /// Packs the `N` tensors in `values` into a tensor with rank one higher than each
+ /// tensor in `values`, by packing them along the `axis` dimension.
+ /// Given a list of tensors of shape `(A, B, C)`;
+ ///
+ /// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
+ /// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
+ /// Etc.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 'x' is [1, 4]
+ /// # 'y' is [2, 5]
+ /// # 'z' is [3, 6]
+ /// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+ /// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
+ /// ```
+ ///
+ /// This is the opposite of `unpack`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Dimension along which to pack. Negative values wrap around, so the
+ /// valid range is `[-(R+1), R+1)`.
+ ///
+ ///
+ ///
+ public static Tensor pack(Tensors values, int axis = 0, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pack", name) { args = new object[] { values }, attrs = new Dictionary<string, object>() { ["axis"] = axis } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return pack_eager_fallback(values, axis: axis, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["values"] = values;
+ keywords["axis"] = axis;
+ var _op = tf.OpDefLib._apply_op_helper("Pack", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "axis", _op._get_attr_int("axis") };
+ _execute.record_gradient("Pack", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor pack_eager_fallback(Tensors values, int axis, string name, Context ctx)
+ {
+ List<Tensor> _inputs_flat_list = new();
+ _inputs_flat_list.AddRange(values);
+ var _inputs_flat = _inputs_flat_list.ToArray();
+ object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "axis", axis };
+ var _result = _execute.execute("Pack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Pack", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
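+
+ // Editor's usage sketch (not generated code): packs three rank-1 tensors along axis 0, as in the
+ // remarks above. The literal values are illustrative assumptions, and the sketch assumes Tensors
+ // exposes a params Tensor[] constructor.
+ internal static Tensor pack_usage_sketch()
+ {
+ var x = tf.constant(new[] { 1, 4 });
+ var y = tf.constant(new[] { 2, 5 });
+ var z = tf.constant(new[] { 3, 6 });
+ return pack(new Tensors(x, y, z), axis: 0); // [[1, 4], [2, 5], [3, 6]]
+ }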
+ ///
+ /// Pads a tensor with zeros.
+ ///
+ ///
+ ///
+ /// This operation pads `input` with zeros according to the `paddings` you
+ /// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
+ /// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+ /// how many zeros to add before the contents of `input` in that dimension, and
+ /// `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
+ /// in that dimension.
+ ///
+ /// The padded size of each dimension D of the output is:
+ ///
+ /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 't' is [[1, 1], [2, 2]]
+ /// # 'paddings' is [[1, 1], [2, 2]]
+ /// # rank of 't' is 2
+ /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
+ /// [0, 0, 1, 1, 0, 0]
+ /// [0, 0, 2, 2, 0, 0]
+ /// [0, 0, 0, 0, 0, 0]]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor pad(Tensor input, Tensor paddings, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pad", name) { args = new object[] { input, paddings }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return pad_eager_fallback(input, paddings, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["paddings"] = paddings;
+ var _op = tf.OpDefLib._apply_op_helper("Pad", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings") };
+ _execute.record_gradient("Pad", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor pad_eager_fallback(Tensor input, Tensor paddings, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, paddings };
+ object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype };
+ var _result = _execute.execute("Pad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Pad", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
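+
+ // Editor's usage sketch (not generated code): zero-pads the 2x2 matrix from the remarks above
+ // into a 4x6 matrix. The literal values are illustrative assumptions.
+ internal static Tensor pad_usage_sketch()
+ {
+ var t = tf.constant(new[,] { { 1, 1 }, { 2, 2 } });
+ var paddings = tf.constant(new[,] { { 1, 1 }, { 2, 2 } });
+ return pad(t, paddings);
+ }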
+ ///
+ /// Pads a tensor.
+ ///
+ ///
+ ///
+ /// This operation pads `input` according to the `paddings` and `constant_values`
+ /// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
+ /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+ /// how many padding values to add before the contents of `input` in that dimension,
+ /// and `paddings[D, 1]` indicates how many padding values to add after the contents
+ /// of `input` in that dimension. `constant_values` is a scalar tensor of the same
+ /// type as `input` that indicates the value to use for padding `input`.
+ ///
+ /// The padded size of each dimension D of the output is:
+ ///
+ /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 't' is [[1, 1], [2, 2]]
+ /// # 'paddings' is [[1, 1], [2, 2]]
+ /// # 'constant_values' is 0
+ /// # rank of 't' is 2
+ /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
+ /// [0, 0, 1, 1, 0, 0]
+ /// [0, 0, 2, 2, 0, 0]
+ /// [0, 0, 0, 0, 0, 0]]
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor pad_v2(Tensor input, Tensor paddings, Tensor constant_values, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PadV2", name) { args = new object[] { input, paddings, constant_values }, attrs = new Dictionary<string, object>() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return pad_v2_eager_fallback(input, paddings, constant_values, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["paddings"] = paddings;
+ keywords["constant_values"] = constant_values;
+ var _op = tf.OpDefLib._apply_op_helper("PadV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings") };
+ _execute.record_gradient("PadV2", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor pad_v2_eager_fallback(Tensor input, Tensor paddings, Tensor constant_values, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, paddings, constant_values };
+ object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype };
+ var _result = _execute.execute("PadV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("PadV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
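+
+ // Editor's usage sketch (not generated code): the same padding as the pad sketch above, but
+ // filling with a caller-chosen constant instead of zeros. The literal values are illustrative
+ // assumptions.
+ internal static Tensor pad_v2_usage_sketch()
+ {
+ var t = tf.constant(new[,] { { 1, 1 }, { 2, 2 } });
+ var paddings = tf.constant(new[,] { { 1, 1 }, { 2, 2 } });
+ var constant_values = tf.constant(9); // scalar of the same dtype as `t`
+ return pad_v2(t, paddings, constant_values);
+ }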
+ ///
+ /// Concatenates a list of `N` tensors along the first dimension.
+ ///
+ ///
+ ///
+ /// The input tensors are all required to have size 1 in the first dimension.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 'x' is [[1, 4]]
+ /// # 'y' is [[2, 5]]
+ /// # 'z' is [[3, 6]]
+ /// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+ /// ```
+ ///
+ /// The difference between concat and parallel_concat is that concat requires all
+ /// of the inputs be computed before the operation will begin but doesn't require
+ /// that the input shapes be known during graph construction. Parallel concat
+ /// will copy pieces of the input into the output as they become available; in
+ /// some situations this can provide a performance benefit.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// the final shape of the result; should be equal to the shapes of any input
+ /// but with the number of input values in the first dimension.
+ ///
+ ///
+ ///
+ public static Tensor parallel_concat(Tensors values, Shape shape, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ParallelConcat", name) { args = new object[] { values }, attrs = new Dictionary<string, object>() { ["shape"] = shape } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return parallel_concat_eager_fallback(values, shape: shape, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["values"] = values;
+ keywords["shape"] = shape;
+ var _op = tf.OpDefLib._apply_op_helper("ParallelConcat", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "shape", _op.get_attr("shape") };
+ _execute.record_gradient("ParallelConcat", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor parallel_concat_eager_fallback(Tensors values, Shape shape, string name, Context ctx)
+ {
+ List<Tensor> _inputs_flat_list = new();
+ _inputs_flat_list.AddRange(values);
+ var _inputs_flat = _inputs_flat_list.ToArray();
+ object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "shape", shape };
+ var _result = _execute.execute("ParallelConcat", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ParallelConcat", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
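+
+ // Editor's usage sketch (not generated code): concatenates three 1x2 tensors along the first
+ // dimension, as in the remarks above. Each input must have size 1 in dimension 0, and `shape`
+ // must give the final result shape. The literal values are illustrative assumptions, and the
+ // sketch assumes Tensors exposes a params Tensor[] constructor.
+ internal static Tensor parallel_concat_usage_sketch()
+ {
+ var x = tf.constant(new[,] { { 1, 4 } });
+ var y = tf.constant(new[,] { { 2, 5 } });
+ var z = tf.constant(new[,] { { 3, 6 } });
+ return parallel_concat(new Tensors(x, y, z), shape: new Shape(3, 2));
+ }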
+ ///
+ /// A placeholder op for a value that will be fed into the computation.
+ ///
+ ///
+ ///
+ /// N.B. This operation will fail with an error if it is executed. It is
+ /// intended as a way to represent a value that will always be fed, and to
+ /// provide attrs that enable the fed value to be checked at runtime.
+ ///
+ ///
+ ///
+ ///
+ /// The type of elements in the tensor.
+ ///
+ ///
+ ///
+ ///
+ /// (Optional) The shape of the tensor. If the shape has 0 dimensions, the
+ /// shape is unconstrained.
+ ///
+ ///
+ ///
+ public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Placeholder", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype, ["shape"] = shape } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return placeholder_eager_fallback(dtype: dtype, shape: shape, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["dtype"] = dtype;
+ keywords["shape"] = shape;
+ var _op = tf.OpDefLib._apply_op_helper("Placeholder", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") };
+ _execute.record_gradient("Placeholder", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor placeholder_eager_fallback(TF_DataType dtype, Shape shape, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { };
+ object[] _attrs = new object[] { "dtype", dtype, "shape", shape };
+ var _result = _execute.execute("Placeholder", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Placeholder", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// A placeholder op for a value that will be fed into the computation.
+ ///
+ ///
+ ///
+ /// N.B. This operation will fail with an error if it is executed. It is
+ /// intended as a way to represent a value that will always be fed, and to
+ /// provide attrs that enable the fed value to be checked at runtime.
+ ///
+ ///
+ ///
+ ///
+ /// The type of elements in the tensor.
+ ///
+ ///
+ ///
+ ///
+ /// The shape of the tensor. The shape can be any partially-specified
+ /// shape. To be unconstrained, pass in a shape with unknown rank.
+ ///
+ ///
+ ///
+ public static Tensor placeholder_v2(TF_DataType dtype, Shape shape, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderV2", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype, ["shape"] = shape } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return placeholder_v2_eager_fallback(dtype: dtype, shape: shape, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["dtype"] = dtype;
+ keywords["shape"] = shape;
+ var _op = tf.OpDefLib._apply_op_helper("PlaceholderV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") };
+ _execute.record_gradient("PlaceholderV2", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor placeholder_v2_eager_fallback(TF_DataType dtype, Shape shape, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { };
+ object[] _attrs = new object[] { "dtype", dtype, "shape", shape };
+ var _result = _execute.execute("PlaceholderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("PlaceholderV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// A placeholder op that passes through `input` when its output is not fed.
+ ///
+ ///
+ ///
+ ///
+ /// The (possibly partial) shape of the tensor.
+ ///
+ ///
+ ///
+ public static Tensor placeholder_with_default(Tensor input, Shape shape, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderWithDefault", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["shape"] = shape } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return placeholder_with_default_eager_fallback(input, shape: shape, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["shape"] = shape;
+ var _op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") };
+ _execute.record_gradient("PlaceholderWithDefault", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor placeholder_with_default_eager_fallback(Tensor input, Shape shape, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "dtype", input.dtype, "shape", shape };
+ var _result = _execute.execute("PlaceholderWithDefault", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("PlaceholderWithDefault", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
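+
+ // Editor's usage sketch (not generated code): a placeholder that yields the given default tensor
+ // whenever nothing is fed. The literal values are illustrative assumptions.
+ internal static Tensor placeholder_with_default_usage_sketch()
+ {
+ var defaults = tf.constant(new[] { 1, 2, 3 });
+ return placeholder_with_default(defaults, new Shape(3));
+ }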
+ ///
+ /// An identity op that triggers an error if a gradient is requested.
+ ///
+ ///
+ ///
+ /// When executed in a graph, this op outputs its input tensor as-is.
+ ///
+ /// When building ops to compute gradients, the TensorFlow gradient system
+ /// will return an error when trying to lookup the gradient of this op,
+ /// because no gradient must ever be registered for this function. This
+ /// op exists to prevent subtle bugs from silently returning unimplemented
+ /// gradients in some corner cases.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Will be printed in the error when anyone tries to differentiate
+ /// this operation.
+ ///
+ ///
+ ///
+ public static Tensor prevent_gradient(Tensor input, string message = "", string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PreventGradient", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["message"] = message } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return prevent_gradient_eager_fallback(input, message: message, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (message is null)
+ {
+ message = "";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["message"] = message;
+ var _op = tf.OpDefLib._apply_op_helper("PreventGradient", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") };
+ _execute.record_gradient("PreventGradient", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor prevent_gradient_eager_fallback(Tensor input, string message, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype, "message", message };
+ var _result = _execute.execute("PreventGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("PreventGradient", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Use QuantizeAndDequantizeV2 instead.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor quantize_and_dequantize(Tensor input, bool signed_input = true, int num_bits = 8, bool range_given = false, float input_min = 0f, float input_max = 0f, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantize", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["input_min"] = input_min, ["input_max"] = input_max } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return quantize_and_dequantize_eager_fallback(input, signed_input: signed_input, num_bits: num_bits, range_given: range_given, input_min: input_min, input_max: input_max, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["signed_input"] = signed_input;
+ keywords["num_bits"] = num_bits;
+ keywords["range_given"] = range_given;
+ keywords["input_min"] = input_min;
+ keywords["input_max"] = input_max;
+ var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantize", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "input_min", _op.get_attr("input_min"), "input_max", _op.get_attr("input_max"), "T", _op._get_attr_type("T") };
+ _execute.record_gradient("QuantizeAndDequantize", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor quantize_and_dequantize_eager_fallback(Tensor input, bool signed_input, int num_bits, bool range_given, float input_min, float input_max, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "input_min", input_min, "input_max", input_max, "T", input.dtype };
+ var _result = _execute.execute("QuantizeAndDequantize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("QuantizeAndDequantize", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Quantizes then dequantizes a tensor.
+ ///
+ ///
+ ///
+ /// This op simulates the precision loss from the quantized forward pass by:
+ ///
+ /// 1. Quantizing the tensor to fixed point numbers, which should match the target
+ /// quantization method when it is used in inference.
+ /// 2. Dequantizing it back to floating point numbers for the following ops, most
+ /// likely matmul.
+ ///
+ /// There are different ways to quantize. This version uses only scaling, so 0.0
+ /// maps to 0.
+ ///
+ /// From the specified 'num_bits' in the quantized output type, it determines
+ /// minimum and maximum representable quantized values.
+ ///
+ /// e.g.
+ ///
+ /// * [-128, 127] for signed, num_bits = 8, or
+ /// * [0, 255] for unsigned, num_bits = 8.
+ ///
+ /// If range_given == False, the initial input_min, input_max will be determined
+ /// automatically as the minimum and maximum values in the input tensor, otherwise
+ /// the specified values of input_min, input_max are used.
+ ///
+ /// Note: If the input_min, input_max are specified, they do not need to equal the
+ /// actual minimum and maximum values in the tensor. e.g. in some cases it may be
+ /// beneficial to specify these values such that the low probability extremes of the
+ /// input distribution are clipped.
+ ///
+ /// This op determines the maximum scale_factor that would map the initial
+ /// [input_min, input_max] range to a range that lies within the representable
+ /// quantized range.
+ ///
+ /// It determines the scale from one of input_min and input_max, then updates the
+ /// other one to maximize the representable range.
+ ///
+ /// e.g.
+ ///
+ /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
+ /// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
+ /// would update input_max to be 127 / 12.8 = 9.921875.
+ /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
+ /// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
+ /// would update input_min to be -128.0 / 12.7 = -10.07874.
+ /// * if the output is unsigned, input_min is forced to be 0, and only the
+ /// specified input_max is used.
+ ///
+ /// After determining the scale_factor and updating the input range, it applies the
+ /// following to each value in the 'input' tensor.
+ ///
+ /// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
+ ///
+ /// The above round function rounds the value based on the given round_mode.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Whether the quantization is signed or unsigned. (actually this parameter should
+ /// have been called `signed_output`)
+ ///
+ ///
+ ///
+ ///
+ /// The bitwidth of the quantization.
+ ///
+ ///
+ ///
+ ///
+ /// Whether the range is given or should be determined from the `input` tensor.
+ ///
+ ///
+ ///
+ ///
+ /// The 'round_mode' attribute controls which rounding tie-breaking algorithm is
+ /// used when rounding float values to their quantized equivalents. The following
+ /// rounding modes are currently supported:
+ ///
+ /// * HALF_TO_EVEN: this is the default round_mode.
+ /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
+ /// rounds up to -7.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If True, then the absolute value of the quantized minimum value is the same as
+ /// the quantized maximum value, instead of 1 greater.
+ /// i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, this axis is treated as a channel or slice axis, and a separate
+ /// quantization range is used for each channel or slice along this axis.
+ ///
+ ///
+ ///
+ public static Tensor quantize_and_dequantize_v2(Tensor input, Tensor input_min, Tensor input_max, bool signed_input = true, int num_bits = 8, bool range_given = false, string round_mode = "HALF_TO_EVEN", bool narrow_range = false, int axis = -1, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV2", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary<string, object>() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return quantize_and_dequantize_v2_eager_fallback(input, input_min, input_max, signed_input: signed_input, num_bits: num_bits, range_given: range_given, round_mode: round_mode, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (round_mode is null)
+ {
+ round_mode = "HALF_TO_EVEN";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["input_min"] = input_min;
+ keywords["input_max"] = input_max;
+ keywords["signed_input"] = signed_input;
+ keywords["num_bits"] = num_bits;
+ keywords["range_given"] = range_given;
+ keywords["round_mode"] = round_mode;
+ keywords["narrow_range"] = narrow_range;
+ keywords["axis"] = axis;
+ var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") };
+ _execute.record_gradient("QuantizeAndDequantizeV2", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor quantize_and_dequantize_v2_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, bool signed_input, int num_bits, bool range_given, string round_mode, bool narrow_range, int axis, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max };
+ object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "T", input.dtype, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis };
+ var _result = _execute.execute("QuantizeAndDequantizeV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("QuantizeAndDequantizeV2", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
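+
+ // Editor's usage sketch (not generated code): fake-quantizes a float tensor with a caller-given
+ // range, as described in the remarks above (signed, 8-bit, scale chosen from the range endpoints).
+ // The literal values are illustrative assumptions.
+ internal static Tensor quantize_and_dequantize_v2_usage_sketch()
+ {
+ var input = tf.constant(new[] { -10.0f, -3.5f, 0.0f, 5.0f });
+ var input_min = tf.constant(-10.0f);
+ var input_max = tf.constant(5.0f);
+ return quantize_and_dequantize_v2(input, input_min, input_max, signed_input: true, num_bits: 8, range_given: true);
+ }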
+ ///
+ /// Quantizes then dequantizes a tensor.
+ ///
+ ///
+ ///
+ /// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
+ /// tensor, so its value can change during training.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor quantize_and_dequantize_v3(Tensor input, Tensor input_min, Tensor input_max, Tensor num_bits, bool signed_input = true, bool range_given = true, bool narrow_range = false, int axis = -1, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV3", name) { args = new object[] { input, input_min, input_max, num_bits }, attrs = new Dictionary<string, object>() { ["signed_input"] = signed_input, ["range_given"] = range_given, ["narrow_range"] = narrow_range, ["axis"] = axis } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return quantize_and_dequantize_v3_eager_fallback(input, input_min, input_max, num_bits, signed_input: signed_input, range_given: range_given, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["input_min"] = input_min;
+ keywords["input_max"] = input_max;
+ keywords["num_bits"] = num_bits;
+ keywords["signed_input"] = signed_input;
+ keywords["range_given"] = range_given;
+ keywords["narrow_range"] = narrow_range;
+ keywords["axis"] = axis;
+ var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV3", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") };
+ _execute.record_gradient("QuantizeAndDequantizeV3", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor quantize_and_dequantize_v3_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, Tensor num_bits, bool signed_input, bool range_given, bool narrow_range, int axis, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max, num_bits };
+ object[] _attrs = new object[] { "signed_input", signed_input, "range_given", range_given, "T", input.dtype, "narrow_range", narrow_range, "axis", axis };
+ var _result = _execute.execute("QuantizeAndDequantizeV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("QuantizeAndDequantizeV3", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Quantizes then dequantizes a tensor.
+ ///
+ ///
+ ///
+ /// This is almost identical to QuantizeAndDequantizeV2, except that it returns a
+ /// gradient of 1 for inputs that are within the quantization range, or 0 otherwise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// Whether the quantization is signed or unsigned. (actually this parameter should
+ /// have been called `signed_output`)
+ ///
+ ///
+ ///
+ ///
+ /// The bitwidth of the quantization.
+ ///
+ ///
+ ///
+ ///
+ /// Whether the range is given or should be determined from the `input` tensor.
+ ///
+ ///
+ ///
+ ///
+ /// The 'round_mode' attribute controls which rounding tie-breaking algorithm is
+ /// used when rounding float values to their quantized equivalents. The following
+ /// rounding modes are currently supported:
+ ///
+ /// * HALF_TO_EVEN: this is the default round_mode.
+ /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
+ /// rounds up to -7.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If True, then the absolute value of the quantized minimum value is the same as
+ /// the quantized maximum value, instead of 1 greater.
+ /// i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
+ ///
+ ///
+ ///
+ ///
+ /// If specified, this axis is treated as a channel or slice axis, and a separate
+ /// quantization range is used for each channel or slice along this axis.
+ ///
+ ///
+ ///
+ public static Tensor quantize_and_dequantize_v4(Tensor input, Tensor input_min, Tensor input_max, bool signed_input = true, int num_bits = 8, bool range_given = false, string round_mode = "HALF_TO_EVEN", bool narrow_range = false, int axis = -1, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV4", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary<string, object>() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return quantize_and_dequantize_v4_eager_fallback(input, input_min, input_max, signed_input: signed_input, num_bits: num_bits, range_given: range_given, round_mode: round_mode, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (round_mode is null)
+ {
+ round_mode = "HALF_TO_EVEN";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["input_min"] = input_min;
+ keywords["input_max"] = input_max;
+ keywords["signed_input"] = signed_input;
+ keywords["num_bits"] = num_bits;
+ keywords["range_given"] = range_given;
+ keywords["round_mode"] = round_mode;
+ keywords["narrow_range"] = narrow_range;
+ keywords["axis"] = axis;
+ var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV4", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") };
+ _execute.record_gradient("QuantizeAndDequantizeV4", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor quantize_and_dequantize_v4_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, bool signed_input, int num_bits, bool range_given, string round_mode, bool narrow_range, int axis, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max };
+ object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "T", input.dtype, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis };
+ var _result = _execute.execute("QuantizeAndDequantizeV4", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("QuantizeAndDequantizeV4", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
+ ///
+ ///
+ ///
+ /// [min_range, max_range] are scalar floats that specify the range for
+ /// the 'input' data. The 'mode' attribute controls exactly which calculations are
+ /// used to convert the float values to their quantized equivalents. The
+ /// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
+ /// when rounding float values to their quantized equivalents.
+ ///
+ /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+ ///
+ /// ```
+ /// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
+ /// if T == qint8: out[i] -= (range(T) + 1) / 2.0
+ /// ```
+ ///
+ /// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
+ ///
+ /// *MIN_COMBINED Mode Example*
+ ///
+ /// Assume the input is type float and has a possible range of [0.0, 6.0] and the
+ /// output type is quint8 ([0, 255]). The min_range and max_range values should be
+ /// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
+ /// value of the input by 255/6 and cast to quint8.
+ ///
+ /// If the output type was qint8 ([-128, 127]), the operation will additionally
+ /// subtract each value by 128 prior to casting, so that the range of values aligns
+ /// with the range of qint8.
+ ///
+ /// If the mode is 'MIN_FIRST', then this approach is used:
+ ///
+ /// ```
+ /// num_discrete_values = 1 << (# of bits in T)
+ /// range_adjust = num_discrete_values / (num_discrete_values - 1)
+ /// range = (range_max - range_min) * range_adjust
+ /// range_scale = num_discrete_values / range
+ /// quantized = round(input * range_scale) - round(range_min * range_scale) +
+ /// numeric_limits<T>::min()
+ /// quantized = max(quantized, numeric_limits<T>::min())
+ /// quantized = min(quantized, numeric_limits<T>::max())
+ /// ```
+ ///
+ /// The biggest difference between this and MIN_COMBINED is that the minimum range
+ /// is rounded first, before it's subtracted from the rounded value. With
+ /// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
+ /// and dequantizing will introduce a larger and larger error.
+ ///
+ /// *SCALED mode Example*
+ ///
+ /// `SCALED` mode matches the quantization approach used in
+ /// `QuantizeAndDequantize{V2|V3}`.
+ ///
+ /// If the mode is `SCALED`, the quantization is performed by multiplying each
+ /// input value by a scaling_factor.
+ /// The scaling_factor is determined from `min_range` and `max_range` to be as large
+ /// as possible such that the range from `min_range` to `max_range` is representable
+ /// within values of type T.
+ ///
+ /// ```c++
+ ///
+ /// const int min_T = std::numeric_limits<T>::min();
+ /// const int max_T = std::numeric_limits<T>::max();
+ /// const float max_float = std::numeric_limits<float>::max();
+ ///
+ /// const float scale_factor_from_min_side =
+ /// (min_T * min_range > 0) ? min_T / min_range : max_float;
+ /// const float scale_factor_from_max_side =
+ /// (max_T * max_range > 0) ? max_T / max_range : max_float;
+ ///
+ /// const float scale_factor = std::min(scale_factor_from_min_side,
+ /// scale_factor_from_max_side);
+ /// ```
+ ///
+ /// We next use the scale_factor to adjust min_range and max_range as follows:
+ ///
+ /// ```c++
+ /// min_range = min_T / scale_factor;
+ /// max_range = max_T / scale_factor;
+ /// ```
+ ///
+ ///
+ /// e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would
+ /// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8.
+ /// In this case, min_range would remain -10, but max_range would be adjusted to
+ /// 127 / 12.8 = 9.921875.
+ ///
+ /// So we will quantize input values in the range (-10, 9.921875) to (-128, 127).
+ ///
+ /// The input tensor can now be quantized by clipping values to the range
+ /// `min_range` to `max_range`, then multiplying by scale_factor as follows:
+ ///
+ /// ```c++
+ /// result = round(min(max_range, max(min_range, input)) * scale_factor)
+ /// ```
+ ///
+ /// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of
+ /// this operation. These outputs should be used as the range for any further
+ /// calculations.
+ ///
+ ///
+ /// *narrow_range (bool) attribute*
+ ///
+ /// If true, we do not use the minimum quantized value.
+ /// i.e. for an int8 quantized output, it would be restricted to the range
+ /// -127..127 instead of the full -128..127 range.
+ /// This is provided for compatibility with certain inference backends.
+ /// (Only applies to SCALED mode)
+ ///
+ ///
+ /// *axis (int) attribute*
+ ///
+ /// An optional `axis` attribute can specify a dimension index of the input tensor,
+ /// such that quantization ranges will be calculated and applied separately for each
+ /// slice of the tensor along that dimension. This is useful for per-channel
+ /// quantization.
+ ///
+ /// If axis is specified, min_range and max_range
+ ///
+ /// if `axis`=None, per-tensor quantization is performed as normal.
+ ///
+ ///
+ /// *ensure_minimum_range (float) attribute*
+ ///
+ /// Ensures the minimum quantization range is at least this value.
+ /// The legacy default value for this is 0.01, but it is strongly suggested to
+ /// set it to 0 for new uses.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor[] quantize_v2(Tensor input, Tensor min_range, Tensor max_range, TF_DataType T, string mode = "MIN_COMBINED", string round_mode = "HALF_AWAY_FROM_ZERO", bool narrow_range = false, int axis = -1, float ensure_minimum_range = 0.01f, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeV2", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary<string, object>() { ["T"] = T, ["mode"] = mode, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["ensure_minimum_range"] = ensure_minimum_range } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return quantize_v2_eager_fallback(input, min_range, max_range, T: T, mode: mode, round_mode: round_mode, narrow_range: narrow_range, axis: axis, ensure_minimum_range: ensure_minimum_range, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ if (mode is null)
+ {
+ mode = "MIN_COMBINED";
+ }
+ if (round_mode is null)
+ {
+ round_mode = "HALF_AWAY_FROM_ZERO";
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ keywords["min_range"] = min_range;
+ keywords["max_range"] = max_range;
+ keywords["T"] = T;
+ keywords["mode"] = mode;
+ keywords["round_mode"] = round_mode;
+ keywords["narrow_range"] = narrow_range;
+ keywords["axis"] = axis;
+ keywords["ensure_minimum_range"] = ensure_minimum_range;
+ var _op = tf.OpDefLib._apply_op_helper("QuantizeV2", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis"), "ensure_minimum_range", _op.get_attr("ensure_minimum_range") };
+ _execute.record_gradient("QuantizeV2", _op.inputs, _attrs, _result);
+ }
+ return _result;
+ }
+
+ public static Tensor[] quantize_v2_eager_fallback(Tensor input, Tensor min_range, Tensor max_range, TF_DataType T, string mode, string round_mode, bool narrow_range, int axis, float ensure_minimum_range, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input, min_range, max_range };
+ object[] _attrs = new object[] { "T", T, "mode", mode, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis, "ensure_minimum_range", ensure_minimum_range };
+ var _result = _execute.execute("QuantizeV2", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("QuantizeV2", _inputs_flat, _attrs, _result);
+ }
+ return _result;
+ }
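+
+ // Editor's usage sketch (not generated code): quantizes a float tensor with a known [0, 6] range
+ // to quint8 in MIN_COMBINED mode, as in the remarks above. The op returns three tensors:
+ // { output, output_min, output_max }. The literal values are illustrative assumptions.
+ internal static Tensor[] quantize_v2_usage_sketch()
+ {
+ var input = tf.constant(new[] { 0.0f, 1.5f, 3.0f, 6.0f });
+ var min_range = tf.constant(0.0f);
+ var max_range = tf.constant(6.0f);
+ return quantize_v2(input, min_range, max_range, TF_DataType.TF_QUINT8, mode: "MIN_COMBINED");
+ }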
+ ///
+ /// Concatenates quantized tensors along one dimension.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor[] quantized_concat(Tensor concat_dim, Tensors values, Tensors input_mins, Tensors input_maxes, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConcat", name) { args = new object[] { concat_dim, values, input_mins, input_maxes }, attrs = new Dictionary() { } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return quantized_concat_eager_fallback(concat_dim, values, input_mins, input_maxes, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["concat_dim"] = concat_dim;
+ keywords["values"] = values;
+ keywords["input_mins"] = input_mins;
+ keywords["input_maxes"] = input_maxes;
+ var _op = tf.OpDefLib._apply_op_helper("QuantizedConcat", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T") };
+ _execute.record_gradient("QuantizedConcat", _op.inputs, _attrs, _result);
+ }
+ return _result;
+ }
+
+ public static Tensor[] quantized_concat_eager_fallback(Tensor concat_dim, Tensors values, Tensors input_mins, Tensors input_maxes, string name, Context ctx)
+ {
+ List<Tensor> _inputs_flat_list = new();
+ _inputs_flat_list.Add(concat_dim);
+ _inputs_flat_list.AddRange(values);
+ _inputs_flat_list.AddRange(input_mins);
+ _inputs_flat_list.AddRange(input_maxes);
+ var _inputs_flat = _inputs_flat_list.ToArray();
+ object[] _attrs = new object[] { "N", values.Length, "T", values.dtype };
+ var _result = _execute.execute("QuantizedConcat", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("QuantizedConcat", _inputs_flat, _attrs, _result);
+ }
+ return _result;
+ }
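+
+ // Editor's illustrative sketch (not generated; hypothetical helper name): concatenating two
+ // quantized tensors along axis 0. Each input carries its own min/max range; the op returns the
+ // concatenated tensor plus the combined output range. The inputs are produced with quantize_v2
+ // so they carry the expected quantized dtype.
+ internal static void _example_quantized_concat()
+ {
+ var qa = quantize_v2(tf.constant(new float[] { -1f, 0f, 1f }), tf.constant(-1f), tf.constant(1f), TF_DataType.TF_QUINT8);
+ var qb = quantize_v2(tf.constant(new float[] { 0f, 2f }), tf.constant(0f), tf.constant(2f), TF_DataType.TF_QUINT8);
+ var outputs = quantized_concat(
+ concat_dim: tf.constant(0),
+ values: new Tensors(qa[0], qb[0]),
+ input_mins: new Tensors(qa[1], qb[1]),
+ input_maxes: new Tensors(qa[2], qb[2]));
+ }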
+ ///
+ /// Quantized Instance normalization.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If True, `given_y_min` and `given_y_max` are used as the output range.
+ /// Otherwise, the implementation computes the output range.
+ ///
+ ///
+ ///
+ ///
+ /// Output in `y_min` if `output_range_given` is True.
+ ///
+ ///
+ ///
+ ///
+ /// Output in `y_max` if `output_range_given` is True.
+ ///
+ ///
+ ///
+ ///
+ /// A small float number to avoid dividing by 0.
+ ///
+ ///
+ ///
+ ///
+ /// Minimum value of `y_max - y_min`
+ ///
+ ///
+ ///
+ public static Tensor[] quantized_instance_norm(Tensor x, Tensor x_min, Tensor x_max, bool output_range_given = false, float given_y_min = 0f, float given_y_max = 0f, float variance_epsilon = 1E-05f, float min_separation = 0.001f, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedInstanceNorm", name) { args = new object[] { x, x_min, x_max }, attrs = new Dictionary() { ["output_range_given"] = output_range_given, ["given_y_min"] = given_y_min, ["given_y_max"] = given_y_max, ["variance_epsilon"] = variance_epsilon, ["min_separation"] = min_separation } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return quantized_instance_norm_eager_fallback(x, x_min, x_max, output_range_given: output_range_given, given_y_min: given_y_min, given_y_max: given_y_max, variance_epsilon: variance_epsilon, min_separation: min_separation, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["x"] = x;
+ keywords["x_min"] = x_min;
+ keywords["x_max"] = x_max;
+ keywords["output_range_given"] = output_range_given;
+ keywords["given_y_min"] = given_y_min;
+ keywords["given_y_max"] = given_y_max;
+ keywords["variance_epsilon"] = variance_epsilon;
+ keywords["min_separation"] = min_separation;
+ var _op = tf.OpDefLib._apply_op_helper("QuantizedInstanceNorm", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "output_range_given", _op._get_attr_bool("output_range_given"), "given_y_min", _op.get_attr("given_y_min"), "given_y_max", _op.get_attr("given_y_max"), "variance_epsilon", _op.get_attr("variance_epsilon"), "min_separation", _op.get_attr("min_separation") };
+ _execute.record_gradient("QuantizedInstanceNorm", _op.inputs, _attrs, _result);
+ }
+ return _result;
+ }
+
+ public static Tensor[] quantized_instance_norm_eager_fallback(Tensor x, Tensor x_min, Tensor x_max, bool output_range_given, float given_y_min, float given_y_max, float variance_epsilon, float min_separation, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x, x_min, x_max };
+ object[] _attrs = new object[] { "T", x.dtype, "output_range_given", output_range_given, "given_y_min", given_y_min, "given_y_max", given_y_max, "variance_epsilon", variance_epsilon, "min_separation", min_separation };
+ var _result = _execute.execute("QuantizedInstanceNorm", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("QuantizedInstanceNorm", _inputs_flat, _attrs, _result);
+ }
+ return _result;
+ }
+ ///
+ /// Reshapes a quantized tensor as per the Reshape op.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor[] quantized_reshape(Tensor tensor, Tensor shape, Tensor input_min, Tensor input_max, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReshape", name) { args = new object[] { tensor, shape, input_min, input_max }, attrs = new Dictionary() { } });
+ return _fast_path_result;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return quantized_reshape_eager_fallback(tensor, shape, input_min, input_max, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["tensor"] = tensor;
+ keywords["shape"] = shape;
+ keywords["input_min"] = input_min;
+ keywords["input_max"] = input_max;
+ var _op = tf.OpDefLib._apply_op_helper("QuantizedReshape", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tshape", _op._get_attr_type("Tshape") };
+ _execute.record_gradient("QuantizedReshape", _op.inputs, _attrs, _result);
+ }
+ return _result;
+ }
+
+ public static Tensor[] quantized_reshape_eager_fallback(Tensor tensor, Tensor shape, Tensor input_min, Tensor input_max, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { tensor, shape, input_min, input_max };
+ object[] _attrs = new object[] { "T", tensor.dtype, "Tshape", shape.dtype };
+ var _result = _execute.execute("QuantizedReshape", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("QuantizedReshape", _inputs_flat, _attrs, _result);
+ }
+ return _result;
+ }
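+
+ // Editor's illustrative sketch (not generated; hypothetical helper name): QuantizedReshape
+ // behaves like Reshape but passes the input range tensors through to the outputs.
+ internal static void _example_quantized_reshape()
+ {
+ var q = quantize_v2(tf.constant(new float[] { 1f, 2f, 3f, 4f, 5f, 6f }),
+ tf.constant(0f), tf.constant(6f), TF_DataType.TF_QUINT8);
+ // outputs[0] is the reshaped quantized tensor; outputs[1] and outputs[2] equal q[1] and q[2].
+ var outputs = quantized_reshape(q[0], tf.constant(new int[] { 2, 3 }), q[1], q[2]);
+ }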
+ ///
+ /// Returns the rank of a tensor.
+ ///
+ ///
+ ///
+ /// This operation returns an integer representing the rank of `input`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+ /// # shape of tensor 't' is [2, 2, 3]
+ /// rank(t) ==> 3
+ /// ```
+ ///
+ /// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
+ /// of a tensor is the number of indices required to uniquely select each element
+ /// of the tensor. Rank is also known as "order", "degree", or "ndims."
+ ///
+ ///
+ ///
+ ///
+ public static Tensor rank(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rank", name) { args = new object[] { input }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return rank_eager_fallback(input, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("Rank", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("Rank", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor rank_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { input };
+ object[] _attrs = new object[] { "T", input.dtype };
+ var _result = _execute.execute("Rank", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Rank", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
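+
+ // Editor's illustrative sketch (not generated; hypothetical helper name): `rank` returns a
+ // scalar int32 tensor holding the number of dimensions, independent of the dimension sizes.
+ internal static void _example_rank()
+ {
+ var t = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });
+ var r = rank(t); // shape of `t` is [2, 3], so `r` evaluates to 2
+ }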
+ ///
+ /// Return the same ref tensor as the input ref tensor.
+ ///
+ ///
+ ///
+ public static Tensor ref_identity(Tensor input, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ throw new RuntimeError("ref_identity op does not support eager execution. Arg input is a ref.");
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["input"] = input;
+ var _op = tf.OpDefLib._apply_op_helper("RefIdentity", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("RefIdentity", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor ref_identity_eager_fallback(Tensor input, string name, Context ctx)
+ {
+ throw new RuntimeError($"ref_identity op does not support eager execution. Arg 'input' is a ref.");
+ }
+ ///
+ /// Reshapes a tensor.
+ ///
+ ///
+ ///
+ /// Given `tensor`, this operation returns a tensor that has the same values
+ /// as `tensor` with shape `shape`.
+ ///
+ /// If one component of 1-D tensor `shape` is the special value -1, the size of that
+ /// dimension is computed so that the total size remains constant. In particular, a
+ /// `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be
+ /// unknown.
+ ///
+ /// The `shape` must be 1-D and the operation returns a tensor with shape
+ /// `shape` filled with the values of `tensor`. In this case, the number of elements
+ /// implied by `shape` must be the same as the number of elements in `tensor`.
+ ///
+ /// It is an error if `shape` is not 1-D.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ /// # tensor 't' has shape [9]
+ /// reshape(t, [3, 3]) ==> [[1, 2, 3],
+ /// [4, 5, 6],
+ /// [7, 8, 9]]
+ ///
+ /// # tensor 't' is [[[1, 1], [2, 2]],
+ /// # [[3, 3], [4, 4]]]
+ /// # tensor 't' has shape [2, 2, 2]
+ /// reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
+ /// [3, 3, 4, 4]]
+ ///
+ /// # tensor 't' is [[[1, 1, 1],
+ /// # [2, 2, 2]],
+ /// # [[3, 3, 3],
+ /// # [4, 4, 4]],
+ /// # [[5, 5, 5],
+ /// # [6, 6, 6]]]
+ /// # tensor 't' has shape [3, 2, 3]
+ /// # pass '[-1]' to flatten 't'
+ /// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
+ ///
+ /// # -1 can also be used to infer the shape
+ ///
+ /// # -1 is inferred to be 9:
+ /// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
+ /// [4, 4, 4, 5, 5, 5, 6, 6, 6]]
+ /// # -1 is inferred to be 2:
+ /// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
+ /// [4, 4, 4, 5, 5, 5, 6, 6, 6]]
+ /// # -1 is inferred to be 3:
+ /// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
+ /// [2, 2, 2],
+ /// [3, 3, 3]],
+ /// [[4, 4, 4],
+ /// [5, 5, 5],
+ /// [6, 6, 6]]]
+ ///
+ /// # tensor 't' is [7]
+ /// # shape `[]` reshapes to a scalar
+ /// reshape(t, []) ==> 7
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor reshape(Tensor tensor, Tensor shape, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reshape", name) { args = new object[] { tensor, shape }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return reshape_eager_fallback(tensor, shape, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["tensor"] = tensor;
+ keywords["shape"] = shape;
+ var _op = tf.OpDefLib._apply_op_helper("Reshape", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tshape", _op._get_attr_type("Tshape") };
+ _execute.record_gradient("Reshape", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor reshape_eager_fallback(Tensor tensor, Tensor shape, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { tensor, shape };
+ object[] _attrs = new object[] { "T", tensor.dtype, "Tshape", shape.dtype };
+ var _result = _execute.execute("Reshape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Reshape", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
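+
+ // Editor's illustrative sketch (not generated; hypothetical helper name): a -1 entry in `shape`
+ // is inferred from the total element count, mirroring the examples in the summary above. Assumes
+ // `tf` is in scope via `using static Tensorflow.Binding;` as in the rest of this file.
+ internal static void _example_reshape()
+ {
+ var t = tf.constant(new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+ var m = reshape(t, tf.constant(new int[] { 3, 3 })); // [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+ var flat = reshape(m, tf.constant(new int[] { -1 })); // back to [1..9]; -1 is inferred as 9
+ }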
+ ///
+ /// Assign `value` to the sliced l-value reference of `ref`.
+ ///
+ ///
+ ///
+ /// The values of `value` are assigned to the positions in the variable
+ /// `ref` that are selected by the slice parameters. The slice parameters
+ /// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
+ ///
+ /// NOTE this op currently does not support broadcasting and so `value`'s
+ /// shape must be exactly the shape produced by the slice of `ref`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Operation resource_strided_slice_assign(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceStridedSliceAssign", name) { args = new object[] { ref_, begin, end, strides, value }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } });
+ return null;
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return resource_strided_slice_assign_eager_fallback(ref_, begin, end, strides, value, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary<string, object> keywords = new();
+ keywords["ref"] = ref_;
+ keywords["begin"] = begin;
+ keywords["end"] = end;
+ keywords["strides"] = strides;
+ keywords["value"] = value;
+ keywords["begin_mask"] = begin_mask;
+ keywords["end_mask"] = end_mask;
+ keywords["ellipsis_mask"] = ellipsis_mask;
+ keywords["new_axis_mask"] = new_axis_mask;
+ keywords["shrink_axis_mask"] = shrink_axis_mask;
+ var _op = tf.OpDefLib._apply_op_helper("ResourceStridedSliceAssign", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") };
+ _execute.record_gradient("ResourceStridedSliceAssign", _op.inputs, _attrs, _result);
+ }
+ return _op;
+ }
+
+ public static Operation resource_strided_slice_assign_eager_fallback(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { ref_, begin, end, strides, value };
+ object[] _attrs = new object[] { "T", value.dtype, "Index", begin.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask };
+ var _result = _execute.execute("ResourceStridedSliceAssign", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("ResourceStridedSliceAssign", _inputs_flat, _attrs, _result);
+ }
+ return null;
+ }
+ ///