@@ -83,11 +83,24 @@ namespace Tensorflow.Gradients
             var ind_shape = array_ops.shape(op.outputs[1]);
             // int32 is not supported on GPU hence up-casting
-            var ind_lastdim = array_ops.gather(math_ops.cast(
-                ind_shape, TF_DataType.TF_INT64), array_ops.size(ind_shape) - 1);
+            var cast = math_ops.cast(ind_shape, TF_DataType.TF_INT64);
+            var size = array_ops.size(ind_shape) - 1;
+            var ind_lastdim = array_ops.gather(cast, size);
             // Flatten indices to 2D.
-            var ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack(new object[] { -1, ind_lastdim }));
+            var stack = array_ops.stack(new object[] { -1L, ind_lastdim });
+            var ind_2d = array_ops.reshape(op.outputs[1], stack);
+            var in_lastdim = array_ops.gather(math_ops.cast(in_shape, TF_DataType.TF_INT64),
+                array_ops.size(in_shape) - 1);
+            var outerdim = array_ops.shape(ind_2d);
+            // Compute linear indices(flattened to 1D).
+            var cast1 = math_ops.cast(outerdim, TF_DataType.TF_INT64);
+            var range2 = math_ops.range(0L, cast1 * in_lastdim, in_lastdim);
+            var dim2 = array_ops.expand_dims(range2, -1);
+            var cast2 = math_ops.cast(dim2, TF_DataType.TF_INT32);
+            var ind = array_ops.reshape(ind_2d + cast2, new int[] { -1 });
             throw new NotImplementedException("nn_grad._TopKGrad");
         }
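For orientation, the lines added above mirror the index arithmetic of the upstream Python _TopKGrad: ind_2d holds the per-row top-k positions, and adding row * in_lastdim turns them into indices into the flattened input, which is what ind ends up holding. Below is a minimal, self-contained C# sketch of that arithmetic only, with no TensorFlow.NET calls; the sizes and values are made up for illustration.

    // Plain C# illustration of the offset computation the new lines build as graph ops:
    // per-row top-k indices are shifted by row * inLastDim so that they address the
    // flattened input. All sizes/values below are invented for the example.
    using System;

    class TopKIndexSketch
    {
        static void Main()
        {
            long inLastDim = 5;                        // last-dimension size of the input
            long[,] ind2d = { { 1, 4 }, { 0, 3 } };    // per-row top-k (k = 2) positions
            long rows = ind2d.GetLength(0);
            long k = ind2d.GetLength(1);

            var ind = new long[rows * k];              // corresponds to the flattened `ind`
            for (long r = 0; r < rows; r++)
                for (long c = 0; c < k; c++)
                    ind[r * k + c] = ind2d[r, c] + r * inLastDim;   // same role as ind_2d + cast2

            Console.WriteLine(string.Join(", ", ind)); // 1, 4, 5, 8
        }
    }

The part still behind the NotImplementedException is scattering the incoming gradient values back to those flat positions and reshaping the result to in_shape, which the reference Python implementation does with a sparse-to-dense scatter.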
@@ -46,6 +46,35 @@ namespace Tensorflow
             }
         }
 
+        public static Tensor _autopacking_conversion_function(object[] v, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool as_ref = false)
+        {
+            var inferred_dtype = _get_dtype_from_nested_lists(v);
+            if (dtype == TF_DataType.DtInvalid)
+                dtype = inferred_dtype;
+
+            return _autopacking_helper(v, dtype, name == null ? "packed" : name);
+        }
+
+        private static TF_DataType _get_dtype_from_nested_lists(object[] list_or_tuple)
+        {
+            TF_DataType dtype = TF_DataType.DtInvalid;
+
+            foreach (var obj in list_or_tuple)
+            {
+                switch (obj)
+                {
+                    case Tensor t:
+                        dtype = t.dtype.as_base_dtype();
+                        break;
+                }
+
+                if (dtype != TF_DataType.DtInvalid)
+                    break;
+            }
+
+            return dtype;
+        }
+
         public static Tensor _autopacking_helper(object[] list_or_tuple, TF_DataType dtype, string name)
         {
             var must_pack = false;
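The two helpers added in this hunk implement a simple rule: _autopacking_conversion_function only infers a dtype when the caller passed DtInvalid, and _get_dtype_from_nested_lists takes the base dtype of the first Tensor it encounters, ignoring everything else in the list. A standalone sketch of that first-match rule, using stand-in types rather than TensorFlow.NET's Tensor and TF_DataType:

    // Stand-in types only: DType and FakeTensor are not TensorFlow.NET types.
    using System;

    enum DType { Invalid, Int32, Float }

    class FakeTensor
    {
        public DType dtype;
    }

    static class AutopackSketch
    {
        // Mirrors _get_dtype_from_nested_lists: the first tensor found decides the dtype.
        static DType InferDType(object[] items)
        {
            foreach (var obj in items)
                if (obj is FakeTensor t)
                    return t.dtype;
            return DType.Invalid;       // caller then keeps whatever dtype it was given
        }

        static void Main()
        {
            var mixed = new object[] { -1, new FakeTensor { dtype = DType.Int32 }, 7 };
            Console.WriteLine(InferDType(mixed));   // Int32
        }
    }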
@@ -242,32 +271,21 @@ namespace Tensorflow
         private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32)
         {
-            return with(ops.name_scope(name, "Size", new Tensor[] { input }), scope =>
+            return with(ops.name_scope(name, "Size", new { input }), scope =>
             {
                 name = scope;
-                if (!tf.context.executing_eagerly())
+                var input_tensor = ops.convert_to_tensor(input);
+                var input_shape = tensor_util.to_shape(input_tensor.shape);
+                if (optimize)
                 {
-                    var input_tensor = ops.convert_to_tensor(input);
-                    var input_shape = tensor_util.to_shape(input_tensor.shape);
-                    if (optimize)
+                    if (input_shape.is_fully_defined())
                     {
-                        if (input_shape.is_fully_defined())
-                        {
-                            var nd = np.array(input_tensor.shape, out_type.as_numpy_datatype());
-                            return constant_op.constant(nd, name: name);
-                        }
+                        return constant_op.constant(input_shape.Size, dtype: out_type, name: name);
                     }
-                    return gen_array_ops.size(input, name: name, out_type: out_type);
-                }
-                else
-                {
-                    // result = gen_array_ops.shape();
-                    throw new NotImplementedException("array_ops.size_internal");
                 }
-                return null;
+                return gen_array_ops.size(input, name: name, out_type: out_type);
             });
         }
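The rewrite above drops the executing_eagerly branch and keeps only the graph path: when the static shape is fully defined, Size is folded into a constant (input_shape.Size, the element count) instead of emitting a runtime Size op; otherwise it falls through to gen_array_ops.size. A minimal sketch of what that constant-folding fast path computes, with a made-up shape:

    using System;
    using System.Linq;

    class SizeFastPathSketch
    {
        static void Main()
        {
            int[] dims = { 2, 3, 4 };                          // a fully defined static shape
            long size = dims.Aggregate(1L, (acc, d) => acc * d);
            Console.WriteLine(size);                           // 24 -> emitted as a constant, no Size op
        }
    }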
@@ -18,6 +18,7 @@ namespace Tensorflow
             return with(ops.name_scope(name, "Cast", new { x }), scope =>
             {
+                name = scope;
                 x = ops.convert_to_tensor(x, name: "x");
                 if (x.dtype.as_base_dtype() != base_type)
                     x = gen_math_ops.cast(x, base_type, name: name);
@@ -263,7 +264,7 @@ namespace Tensorflow | |||||
if (delta == null) | if (delta == null) | ||||
delta = 1; | delta = 1; | ||||
return with(ops.name_scope(name, "Range", new object[] { start, limit, delta }), scope => | |||||
return with(ops.name_scope(name, "Range", new { start, limit, delta }), scope => | |||||
{ | { | ||||
name = scope; | name = scope; | ||||
var start1 = ops.convert_to_tensor(start, name: "start"); | var start1 = ops.convert_to_tensor(start, name: "start"); | ||||
@@ -34,6 +34,9 @@ namespace Tensorflow
                 case "Int32":
                     dtype = TF_DataType.TF_INT32;
                     break;
+                case "Int64":
+                    dtype = TF_DataType.TF_INT64;
+                    break;
                 case "Single":
                     dtype = TF_DataType.TF_FLOAT;
                     break;
@@ -47,7 +50,7 @@ namespace Tensorflow
                     dtype = TF_DataType.TF_STRING;
                     break;
                 default:
-                    throw new Exception("Not Implemented");
+                    throw new Exception($"{type.Name} Not Implemented in as_dtype");
             }
             return dtype;
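as_dtype dispatches on System.Type.Name, which is why the case labels above are the CLR type names rather than C# keywords. The snippet below simply prints the names the switch actually sees, including the newly handled long -> "Int64":

    using System;

    class TypeNameSketch
    {
        static void Main()
        {
            Console.WriteLine(typeof(int).Name);     // Int32
            Console.WriteLine(typeof(long).Name);    // Int64
            Console.WriteLine(typeof(float).Name);   // Single
            Console.WriteLine(typeof(string).Name);  // String
        }
    }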
@@ -111,6 +111,9 @@ namespace Tensorflow
                 case int intVal:
                     nparray = intVal;
                     break;
+                case long intVal:
+                    nparray = intVal;
+                    break;
                 case int[] intVals:
                     nparray = np.array(intVals);
                     break;
@@ -231,6 +234,9 @@ namespace Tensorflow
                 case "Int32":
                     tensor_proto.IntVal.AddRange(proto_values.Data<int>());
                     break;
+                case "Int64":
+                    tensor_proto.Int64Val.AddRange(proto_values.Data<long>());
+                    break;
                 case "Single":
                     tensor_proto.FloatVal.AddRange(proto_values.Data<float>());
                     break;
@@ -410,26 +410,12 @@ namespace Tensorflow
                     return tensor;
                 case Tensor[] tensors:
                     return array_ops._autopacking_helper(tensors, dtype, name);
-                case string str:
-                    return constant_op.constant(str, dtype: dtype, name: name);
-                case string[] strArray:
-                    return constant_op.constant(strArray, dtype: dtype, name: name);
-                case int intVal:
-                    return constant_op.constant(intVal, dtype: dtype, name: name);
-                case int[] intArray:
-                    return constant_op.constant(intArray, dtype: dtype, name: name);
-                case float floatVal:
-                    return constant_op.constant(floatVal, dtype: dtype, name: name);
-                case float[] floatArray:
-                    return constant_op.constant(floatArray, dtype: dtype, name: name);
-                case double doubleVal:
-                    return constant_op.constant(doubleVal, dtype: dtype, name: name);
                 case RefVariable varVal:
                     return varVal._TensorConversionFunction(as_ref: as_ref);
                 case object[] objects:
-                    return array_ops._autopacking_helper(objects, dtype: dtype, name: name);
+                    return array_ops._autopacking_conversion_function(objects, dtype: dtype, name: name);
                 default:
-                    throw new NotImplementedException($"internal_convert_to_tensor: Can't convert {value.GetType().Name} to Tensor");
+                    return constant_op.constant(value, dtype: dtype, name: name);
             }
         }