@@ -29,7 +29,7 @@ namespace Tensorflow
    public Action<int, int> Constant2x3
        => (epoch, iterate) =>
        {
            var nd = np.arange(1000).reshape(10, 100);
            var nd = np.arange(1000).reshape((10, 100));
            var tensor = tf.constant(nd);
            var data = tensor.numpy();
        };
@@ -51,14 +51,14 @@ namespace Tensorflow
    public Action<int, int> Variable
        => (epoch, iterate) =>
        {
            var nd = np.arange(1 * 256 * 256 * 3).reshape(1, 256, 256, 3);
            var nd = np.arange(1 * 256 * 256 * 3).reshape((1, 256, 256, 3));
            ResourceVariable variable = tf.Variable(nd);
        };
    public Action<int, int> VariableRead
        => (epoch, iterate) =>
        {
            var nd = np.zeros(1 * 256 * 256 * 3).astype(np.float32).reshape(1, 256, 256, 3);
            var nd = np.zeros(1 * 256 * 256 * 3).astype(np.float32).reshape((1, 256, 256, 3));
            ResourceVariable variable = tf.Variable(nd);
            for (int i = 0; i< 10; i++)
@@ -17,7 +17,7 @@ namespace Tensorflow
    NumOfExamples = (int)images.dims[0];
    images = images.reshape(images.dims[0], images.dims[1] * images.dims[2]);
    images = images.reshape((images.dims[0], images.dims[1] * images.dims[2]));
    images = images.astype(dataType);
    // for debug np.multiply performance
    var sw = new Stopwatch();
@@ -124,7 +124,7 @@ namespace Tensorflow
    bytestream.Read(buf, 0, buf.Length);
    var data = np.frombuffer(buf, np.@byte);
    data = data.reshape(num_images, rows, cols, 1);
    data = data.reshape((num_images, rows, cols, 1));
    return data;
}
@@ -360,7 +360,7 @@ namespace Tensorflow.Eager
        c_api.TFE_OpSetAttrFloat(op, key, Convert.ToSingle(value));
        break;
    case TF_AttrType.TF_ATTR_SHAPE:
        var dims = (value as int[]).Select(x => (long)x).ToArray();
        var dims = (value as long[]).ToArray();
        c_api.TFE_OpSetAttrShape(op, key, dims, dims.Length, status.Handle);
        status.Check(true);
        break;
@@ -24,6 +24,9 @@ namespace Tensorflow.Eager
        NewEagerTensorHandle(_handle);
    }
    internal unsafe EagerTensor(string value) : base(value)
        => NewEagerTensorHandle(_handle);
    internal unsafe EagerTensor(Array array, Shape shape) : base(array, shape)
        => NewEagerTensorHandle(_handle);
@@ -135,8 +135,9 @@ namespace Tensorflow
    output_node.Attr["dtype"] = dtype;
    output_node.Attr["value"] = new AttrValue()
    {
        Tensor = tensor_util.make_tensor_proto(
            data, dtype: dtype.Type.as_tf_dtype(), shape: data_shape)
        Tensor = tensor_util.make_tensor_proto(data,
            dtype: dtype.Type.as_tf_dtype(),
            shape: data_shape)
    };
    return output_node;
@@ -1,5 +1,6 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using static Tensorflow.Binding;
@@ -15,8 +16,17 @@ namespace Tensorflow.NumPy
            long val => GetAtIndex<long>(0) == val,
            float val => GetAtIndex<float>(0) == val,
            double val => GetAtIndex<double>(0) == val,
            NDArray val => Equals(this, val),
            _ => base.Equals(obj)
        };
    }
    bool Equals(NDArray x, NDArray y)
    {
        if (x.ndim != y.ndim)
            return false;
        return Enumerable.SequenceEqual(x.ToByteArray(), y.ToByteArray());
    }
}
}
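For reference, a minimal usage sketch (not part of the diff) of what the new NDArray-vs-NDArray branch implies, using the np.arange/reshape helpers that appear elsewhere in this changeset; equality now requires the same number of dimensions and byte-for-byte identical buffers, so arrays of different dtypes compare unequal even when their values match numerically.

    var a = np.arange(6).reshape((2, 3));
    var b = np.arange(6).reshape((2, 3));
    var c = np.arange(6);                 // same data, different ndim

    // same ndim and identical underlying bytes => true
    bool sameShape = a.Equals(b);
    // ndim differs, so the helper returns false before comparing buffers
    bool flatVsMatrix = a.Equals(c);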
@@ -18,6 +18,9 @@ namespace Tensorflow.NumPy
    public static implicit operator int(NDArray nd)
        => nd._tensor.ToArray<int>()[0];
    public static implicit operator float(NDArray nd)
        => nd._tensor.ToArray<float>()[0];
    public static implicit operator double(NDArray nd)
        => nd._tensor.ToArray<double>()[0];
@@ -0,0 +1,63 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using static Tensorflow.Binding;
namespace Tensorflow.NumPy
{
    public partial class NDArray
    {
        public NDArray this[int index]
        {
            get
            {
                return _tensor[index];
            }
            set
            {
            }
        }
        public NDArray this[params int[] index]
        {
            get
            {
                return _tensor[index.Select(x => new Slice(x, x + 1)).ToArray()];
            }
            set
            {
            }
        }
        public NDArray this[params Slice[] slices]
        {
            get
            {
                return _tensor[slices];
            }
            set
            {
            }
        }
        public NDArray this[NDArray mask]
        {
            get
            {
                throw new NotImplementedException("");
            }
            set
            {
            }
        }
    }
}
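A short usage sketch for the new indexers added above (illustrative only, not part of the diff): single-int and Slice indexing delegate to the underlying tensor, a multi-int index is translated into unit-length slices, and the boolean-mask overload still throws NotImplementedException.

    var nd = np.arange(12).reshape((3, 4));

    var row = nd[1];                                   // second row, via _tensor[index]
    var cell = nd[1, 2];                               // becomes slices (1..2, 2..3)
    var block = nd[new Slice(0, 2), new Slice(1, 3)];  // 2x2 sub-array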
@@ -2,6 +2,7 @@
using System.Collections.Generic;
using System.Linq;
using System.Text;
using static Tensorflow.Binding;
namespace Tensorflow.NumPy
{
@@ -55,48 +56,12 @@ namespace Tensorflow.NumPy
        Initialize(shape, dtype: dtype);
    }
    public NDArray(Tensor value)
    public NDArray(Tensor value, Shape? shape = null)
    {
        _tensor = value;
    }
    public NDArray this[params int[] index]
    {
        get
        {
            throw new NotImplementedException("");
        }
        set
        {
        }
    }
    public NDArray this[params Slice[] slices]
    {
        get
        {
            throw new NotImplementedException("");
        }
        set
        {
        }
    }
    public NDArray this[NDArray mask]
    {
        get
        {
            throw new NotImplementedException("");
        }
        set
        {
        }
        if (shape is not null)
            _tensor = tf.reshape(value, shape);
        else
            _tensor = value;
    }
    public static NDArray Scalar<T>(T value) where T : unmanaged
@@ -129,15 +94,14 @@ namespace Tensorflow.NumPy
    public bool HasNext() => throw new NotImplementedException("");
    public T MoveNext<T>() => throw new NotImplementedException("");
    public NDArray reshape(params int[] shape) => throw new NotImplementedException("");
    public NDArray reshape(params long[] shape) => throw new NotImplementedException("");
    public NDArray reshape(Shape newshape) => new NDArray(_tensor, newshape);
    public NDArray astype(Type type) => throw new NotImplementedException("");
    public NDArray astype(NumpyDType type) => throw new NotImplementedException("");
    public bool array_equal(NDArray rhs) => throw new NotImplementedException("");
    public NDArray ravel() => throw new NotImplementedException("");
    public void shuffle(NDArray nd) => throw new NotImplementedException("");
    public Array ToMuliDimArray<T>() => throw new NotImplementedException("");
    public byte[] ToByteArray() => _tensor.ToArray<byte>();
    public byte[] ToByteArray() => _tensor.BufferToArray();
    public static string[] AsStringArray(NDArray arr) => throw new NotImplementedException("");
    public T[] Data<T>() where T : unmanaged
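For context, a sketch (not part of the diff) of how call sites change now that the params int[]/long[] reshape overloads are gone and reshape takes a Shape; the double parentheses seen throughout this changeset rely on implicit tuple-to-Shape conversions, one of which (the four-element overload) appears in the Shape hunk below.

    var nd = np.arange(1000);

    // old (removed overload): nd.reshape(10, 100);
    // new: the (10, 100) tuple converts implicitly to Shape
    var reshaped = nd.reshape((10, 100));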
@@ -48,6 +48,8 @@ namespace Tensorflow
    public static implicit operator Shape((long, long, long, long) dims)
        => new Shape(dims.Item1, dims.Item2, dims.Item3, dims.Item4);
    public bool IsEmpty => size == 0;
    public bool IsScalar => ndim == 0;
    public static Shape Scalar
@@ -73,8 +75,6 @@ namespace Tensorflow
        }
    }
    public bool IsEmpty => throw new NotImplementedException("");
    public override bool Equals(object obj)
    {
        if(obj is Shape shape)
@@ -1,4 +1,4 @@
using Tensorflow.NumPy;
using System;
namespace Tensorflow
{
@@ -12,6 +12,9 @@ namespace Tensorflow
    public ResourceVariable assign(Tensor tensor)
    {
        if (tensor.dtype != dtype)
            throw new ArrayTypeMismatchException("");
        if (OriginalVar != null)
        {
            OriginalVar.StridedSliceAssign(tensor, OriginalVarSlice);
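A sketch of what the new dtype guard in assign means for callers, reusing the shapes of the TypeMismatchedSliceAssign test further down (illustrative only, names hypothetical): assigning a tensor whose dtype differs from the variable's now fails fast with ArrayTypeMismatchException instead of silently proceeding, which is why that test can simply expect the exception.

    NDArray doubleNd = new double[] { -3.0, 4.0, -5.0 };
    NDArray intNd = new int[] { -3, 4, -5 };

    var x = tf.Variable(doubleNd);
    // dtype double vs int32: the new guard throws ArrayTypeMismatchException
    x[":"].assign(intNd);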
@@ -465,7 +465,7 @@ namespace Tensorflow
    /// <summary>
    /// Create a string Tensor from the given string
    /// </summary>
    public unsafe Tensor(string str)
    public Tensor(string str)
    {
        _handle = StringTensor(new string[] { str }, TensorShape.Scalar);
#if TRACK_TENSOR_LIFE
@@ -473,7 +473,7 @@ namespace Tensorflow
#endif
    }
    public unsafe Tensor(string[] strings)
    public Tensor(string[] strings)
    {
        _handle = StringTensor(strings, new TensorShape(strings.Length));
#if TRACK_TENSOR_LIFE
@@ -7,109 +7,34 @@ namespace Tensorflow
{
    public partial class Tensor
    {
        [Obsolete("Please use ToArray<T>() instead.", false)]
        public T[] Data<T>() where T : unmanaged
        {
            return ToArray<T>();
        }
        /// <summary>
        ///
        /// </summary>
        /// <typeparam name="T"></typeparam>
        /// <returns></returns>
        public T[] ToArray<T>() where T : unmanaged
        public unsafe T[] ToArray<T>() where T : unmanaged
        {
            //Are the types matching?
            if (typeof(T).as_tf_dtype() == dtype)
            {
                if (NDims == 0 && size == 1) //is it a scalar?
                {
                    unsafe
                    {
                        return new T[] { *(T*)buffer };
                    }
                }
            if (typeof(T).as_tf_dtype() != dtype)
                throw new ArrayTypeMismatchException($"dtype {dtype} mismatch.");
                //types match, no need to perform cast
                var ret = new T[size];
            if (NDims == 0 && size == 1) //is it a scalar?
            {
                unsafe
                {
                    var len = (long)size;
                    fixed (T* dst = ret)
                    {
                        //T can only be unmanaged, I believe it is safe to say that MemoryCopy is valid for all cases this method can be called.
                        var src = (T*)buffer;
                        len *= (long)itemsize;
                        System.Buffer.MemoryCopy(src, dst, len, len);
                    }
                    return new T[] { *(T*)buffer };
                }
                return ret;
            }
            else
            {
                //types do not match, need to perform cast
                if (NDims == 0 && size == 1) //is it a scalar?
                {
                    unsafe
                    {
                        throw new NotImplementedException("");
                        #region Compute
                        /*switch (dtype.as_numpy_dtype().GetTypeCode())
                        {
                            case NPTypeCode.Boolean: return new T[] { Converts.ChangeType<T>(*(bool*)buffer) };
                            case NPTypeCode.Byte: return new T[] { Converts.ChangeType<T>(*(byte*)buffer) };
                            case NPTypeCode.Int16: return new T[] { Converts.ChangeType<T>(*(short*)buffer) };
                            case NPTypeCode.UInt16: return new T[] { Converts.ChangeType<T>(*(ushort*)buffer) };
                            case NPTypeCode.Int32: return new T[] { Converts.ChangeType<T>(*(int*)buffer) };
                            case NPTypeCode.UInt32: return new T[] { Converts.ChangeType<T>(*(uint*)buffer) };
                            case NPTypeCode.Int64: return new T[] { Converts.ChangeType<T>(*(long*)buffer) };
                            case NPTypeCode.UInt64: return new T[] { Converts.ChangeType<T>(*(ulong*)buffer) };
                            case NPTypeCode.Char: return new T[] { Converts.ChangeType<T>(*(char*)buffer) };
                            case NPTypeCode.Double: return new T[] { Converts.ChangeType<T>(*(double*)buffer) };
                            case NPTypeCode.Single: return new T[] { Converts.ChangeType<T>(*(float*)buffer) };
                            case NPTypeCode.String: return new T[] { Converts.ChangeType<T>((string)this) };
                            default:
                                throw new NotSupportedException();
                        }*/
                        #endregion
                    }
                }
            //types match, no need to perform cast
            var ret = new T[size];
            var len = (long)(size * itemsize);
            var src = (T*)buffer;
                var ret = new T[size];
                unsafe
                {
                    var len = (long)size;
                    fixed (T* dstRet = ret)
                    {
                        T* dst = dstRet; //local stack copy
                        throw new NotImplementedException("");
                        #region Compute
                        /*switch (dtype.as_numpy_dtype().GetTypeCode())
                        {
                            case NPTypeCode.Boolean: new UnmanagedMemoryBlock<bool>((bool*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.Byte: new UnmanagedMemoryBlock<byte>((byte*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.Int16: new UnmanagedMemoryBlock<short>((short*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.UInt16: new UnmanagedMemoryBlock<ushort>((ushort*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.Int32: new UnmanagedMemoryBlock<int>((int*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.UInt32: new UnmanagedMemoryBlock<uint>((uint*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.Int64: new UnmanagedMemoryBlock<long>((long*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.UInt64: new UnmanagedMemoryBlock<ulong>((ulong*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.Char: new UnmanagedMemoryBlock<char>((char*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.Double: new UnmanagedMemoryBlock<double>((double*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.Single: new UnmanagedMemoryBlock<float>((float*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
                            case NPTypeCode.String: throw new NotSupportedException("Unable to convert from string to other dtypes"); //TODO! this should call Converts.To<T>
                            default:
                                throw new NotSupportedException();
                        }*/
                        #endregion
                    }
                }
            fixed (T* dst = ret)
                System.Buffer.MemoryCopy(src, dst, len, len);
            return ret;
            }
            return ret;
        }
        /// <summary>
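A sketch of the simplified ToArray<T> contract after this rewrite (illustrative, not part of the diff): the requested element type must now match the tensor dtype exactly, scalars are read directly from the buffer, and everything else is copied out with a single MemoryCopy; the old silent-casting path is gone.

    var t = tf.constant(new[] { 1f, 2f, 3f, 4f });

    float[] ok = t.ToArray<float>();       // straight copy of the native buffer

    // requesting a different element type no longer casts;
    // it throws ArrayTypeMismatchException instead
    // double[] bad = t.ToArray<double>();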
@@ -160,7 +160,7 @@ namespace Tensorflow
    {
        if (dims != null && shape2.dims != null)
        {
            if (dims.Length != shape2.dims.Length)
            if (shape.size != (ulong)shape2.size)
                return false;
        }
@@ -165,7 +165,7 @@ namespace Tensorflow
        case TensorShape val:
            return new EagerTensor(val.dims, ctx.DeviceName);
        case string val:
            return new EagerTensor(val, ctx.DeviceName);
            return new EagerTensor(val);
        case string[] val:
            return new EagerTensor(val, ctx.DeviceName);
        case bool val:
@@ -100,7 +100,7 @@ namespace Tensorflow
    };
    /// <summary>
    /// Create a TensorProto.
    /// Create a TensorProto, invoked in graph mode
    /// </summary>
    /// <param name="values"></param>
    /// <param name="dtype"></param>
@@ -108,89 +108,22 @@ namespace Tensorflow
    /// <param name="verify_shape"></param>
    /// <param name="allow_broadcast"></param>
    /// <returns></returns>
    public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false)
    public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[]? shape = null, bool verify_shape = false, bool allow_broadcast = false)
    {
        if (allow_broadcast && verify_shape)
            throw new ValueError("allow_broadcast and verify_shape are not both allowed.");
        if (values is TensorProto tp)
            return tp;
        dtype = values.GetType().as_tf_dtype();
        // We first convert value to a numpy array or scalar.
        NDArray nparray = null;
        var np_dt = dtype.as_system_dtype();
        if (values is NDArray nd)
        {
            nparray = nd;
        }
        else if(values is string str)
        {
            // scalar string
            nparray = convert_to_numpy_ndarray(values);
            shape = new int[0];
        }
        else if(values is string[] strings)
        {
            nparray = convert_to_numpy_ndarray(values);
            shape = new[] { strings.Length };
        }
        else
        {
            if (values == null)
                throw new ValueError("None values not supported.");
            nparray = convert_to_numpy_ndarray(values);
            if (np_dt != null && np_dt != typeof(string))
                nparray = nparray.astype(np_dt);
        }
        var numpy_dtype = nparray.dtype.as_tf_dtype();
        if (numpy_dtype == TF_DataType.DtInvalid)
            throw new TypeError($"Unrecognized data type: {nparray.dtype}");
        // If dtype was specified and is a quantized type, we convert
        // numpy_dtype back into the quantized version.
        if (quantized_types.Contains(dtype))
            numpy_dtype = dtype;
        bool is_same_size = false;
        ulong shape_size = 0;
        // If shape is not given, get the shape from the numpy array.
        if (shape == null)
        {
            if (numpy_dtype == TF_DataType.TF_STRING)
            {
                if (nparray.ndim == 0)
                {
                    // scalar string
                    shape = new int[0];
                    shape_size = 0;
                }
                else
                    throw new NotImplementedException($"Not implemented for {nparray.ndim} dims string array.");
            }
            else
            {
                shape = nparray.dims.Select(x => Convert.ToInt32(x)).ToArray();
                is_same_size = true;
                shape_size = nparray.size;
            }
        }
        else
        {
            shape_size = (ulong)new Shape(shape.Select(x => Convert.ToInt64(x)).ToArray()).size;
            is_same_size = shape_size == nparray.size;
        }
        var tensor_proto = new TensorProto
        {
            Dtype = numpy_dtype.as_datatype_enum(),
            TensorShape = tensor_util.as_shape(shape)
            Dtype = dtype.as_datatype_enum(),
            // TensorShape = tensor_util.as_shape(shape.dims)
        };
        if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1)
        /*if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1)
        {
            byte[] bytes = nparray.ToByteArray();
            tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes.ToArray());
@@ -212,11 +145,15 @@ namespace Tensorflow
            return tensor_proto;
        }
        var proto_values = nparray.ravel();
        switch (nparray.dtype.ToString())
        var proto_values = nparray.ravel();*/
        switch (values)
        {
            case "Bool":
            case float val:
                tensor_proto.TensorShape = tensor_util.as_shape(new int[0]);
                tensor_proto.FloatVal.AddRange(new[] { val });
                break;
            /*case "Bool":
            case "Boolean":
                tensor_proto.BoolVal.AddRange(proto_values.Data<bool>());
                break;
@@ -226,13 +163,10 @@ namespace Tensorflow
            case "Int64":
                tensor_proto.Int64Val.AddRange(proto_values.Data<long>());
                break;
            case "Single":
                tensor_proto.FloatVal.AddRange(proto_values.Data<float>());
                break;
            case "Double":
                tensor_proto.DoubleVal.AddRange(proto_values.Data<double>());
                break;
            /*case "String":
            case "String":
                tensor_proto.StringVal.AddRange(proto_values.Data<string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x.ToString())));
                break;*/
            default:
@@ -435,76 +369,6 @@ would not be rank 1.", tensor.op.get_attr("axis")));
        return ret;
    }
    public static NDArray convert_to_numpy_ndarray(object values)
    {
        NDArray nd;
        switch (values)
        {
            case NDArray val:
                nd = val;
                break;
            case TensorShape val:
                nd = val.dims;
                break;
            case bool boolVal:
                nd = boolVal;
                break;
            case int intVal:
                nd = intVal;
                break;
            case int[] intVals:
                nd = np.array(intVals);
                break;
            case int[,] intVals:
                nd = np.array(intVals);
                break;
            case long intVal:
                nd = intVal;
                break;
            case long[] intVals:
                nd = np.array(intVals);
                break;
            case long[,] intVals:
                nd = np.array(intVals);
                break;
            case float floatVal:
                nd = floatVal;
                break;
            case float[] floatVals:
                nd = floatVals;
                break;
            case float[,] floatVals:
                nd = np.array(floatVals);
                break;
            case double doubleVal:
                nd = doubleVal;
                break;
            case double[] doubleVals:
                nd = np.array(doubleVals);
                break;
            case double[,] doubleVals:
                nd = np.array(doubleVals);
                break;
            case string strVal:
                nd = new NDArray(Encoding.ASCII.GetBytes(strVal));
                break;
            case string[] strVals:
                nd = np.array(strVals);
                break;
            case byte[] byteValues:
                nd = byteValues;
                break;
            case byte[,] byteValues:
                nd = np.array(byteValues);
                break;
            default:
                throw new NotImplementedException($"convert_to_numpy_ndarray: Support for type {values.GetType()} Not Implemented");
        }
        return nd;
    }
    public static TensorShapeProto as_shape<T>(T[] dims)
    {
        TensorShapeProto shape = new TensorShapeProto();
@@ -118,7 +118,7 @@ namespace Tensorflow.Keras.Datasets
    var value = new Span<byte>(pickle, start_pos, value_length).ToArray();
    start_pos += value_length;
    return (key, np.array(value).reshape(10000, 3, 32, 32));
    return (key, np.array(value).reshape((10000, 3, 32, 32)));
}
string Download()
@@ -12,7 +12,7 @@ namespace Tensorflow.Benchmark.Crash
[Benchmark]
public void Run()
{
    var data = tf.convert_to_tensor(np.arange(0, 50000 * 10).astype(np.float32).reshape(50000, 10));
    var data = tf.convert_to_tensor(np.arange(0, 50000 * 10).astype(np.float32).reshape((50000, 10)));
    var dataset = keras.preprocessing.timeseries_dataset_from_array(data,
        sequence_length: 10,
@@ -24,7 +24,7 @@ namespace Tensorflow.Benchmark.Leak
    var bytes = new byte[num * width * height * 3];
    var inputImages = np.array(bytes) / 255.0f;
    inputImages = inputImages.reshape(num, height, width, 3);
    inputImages = inputImages.reshape((num, height, width, 3));
    bytes = new byte[num];
    var outLables = np.array(bytes);
@@ -16,7 +16,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var conv = keras.layers.Conv1D(filters, activation: "linear");
    var x = np.arange(256.0f).reshape(8, 8, 4);
    var x = np.arange(256.0f).reshape((8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(3, y.shape.ndim);
@@ -32,7 +32,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var conv = keras.layers.Conv1D(filters, kernel_size: 3, activation: "linear");
    var x = np.arange(256.0f).reshape(8, 8, 4);
    var x = np.arange(256.0f).reshape((8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(3, y.shape.ndim);
@@ -48,7 +48,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var conv = keras.layers.Conv1D(filters, kernel_size: 3, padding: "same", activation: "linear");
    var x = np.arange(256.0f).reshape(8, 8, 4);
    var x = np.arange(256.0f).reshape((8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(3, y.shape.ndim);
@@ -63,7 +63,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv1D(filters, kernel_size: 3, strides: 2, activation: "linear");
    var x = np.arange(256.0f).reshape(8, 8, 4);
    var x = np.arange(256.0f).reshape((8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(3, y.shape.ndim);
@@ -78,7 +78,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv1D(filters, kernel_size: 3, dilation_rate: 2, activation: "linear");
    var x = np.arange(256.0f).reshape(8, 8, 4);
    var x = np.arange(256.0f).reshape((8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(3, y.shape.ndim);
@@ -93,7 +93,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv1D(filters, kernel_size: 3, dilation_rate: 2, padding: "same", activation: "linear");
    var x = np.arange(256.0f).reshape(8, 8, 4);
    var x = np.arange(256.0f).reshape((8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(3, y.shape.ndim);
@@ -108,7 +108,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv2D(filters, activation: "linear");
    var x = np.arange(256.0f).reshape(1,8,8,4);
    var x = np.arange(256.0f).reshape((1, 8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(4, y.shape.ndim);
@@ -124,7 +124,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv2D(filters, kernel_size: 3, activation: "linear");
    var x = np.arange(256.0f).reshape(1, 8, 8, 4);
    var x = np.arange(256.0f).reshape((1, 8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(4, y.shape.ndim);
@@ -140,7 +140,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv2D(filters, kernel_size: 3, padding: "same", activation: "linear");
    var x = np.arange(256.0f).reshape(1, 8, 8, 4);
    var x = np.arange(256.0f).reshape((1, 8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(4, y.shape.ndim);
@@ -156,7 +156,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv2D(filters, kernel_size: 3, strides: 2, activation: "linear");
    var x = np.arange(256.0f).reshape(1, 8, 8, 4);
    var x = np.arange(256.0f).reshape((1, 8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(4, y.shape.ndim);
@@ -172,7 +172,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv2D(filters, kernel_size: 3, dilation_rate: 2, activation: "linear");
    var x = np.arange(256.0f).reshape(1, 8, 8, 4);
    var x = np.arange(256.0f).reshape((1, 8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(4, y.shape.ndim);
@@ -188,7 +188,7 @@ namespace TensorFlowNET.Keras.UnitTest
    var filters = 8;
    var conv = keras.layers.Conv2D(filters, kernel_size: 3, dilation_rate: 2, padding: "same", activation: "linear");
    var x = np.arange(256.0f).reshape(1, 8, 8, 4);
    var x = np.arange(256.0f).reshape((1, 8, 8, 4));
    var y = conv.Apply(x);
    Assert.AreEqual(4, y.shape.ndim);
@@ -11,8 +11,8 @@ namespace TensorFlowNET.Keras.UnitTest
[TestMethod]
public void Concatenate()
{
    var x = np.arange(20).reshape(2, 2, 5);
    var y = np.arange(20, 30).reshape(2, 1, 5);
    var x = np.arange(20).reshape((2, 2, 5));
    var y = np.arange(20, 30).reshape((2, 1, 5));
    var z = keras.layers.Concatenate(axis: 1).Apply(new Tensors(x, y));
    Assert.AreEqual((2, 3, 5), z.shape);
}
@@ -84,7 +84,7 @@ namespace TensorFlowNET.Keras.UnitTest
public void Embedding_Simple()
{
    var emb = keras.layers.Embedding(256, 12, input_length: 4);
    var input_array = np.arange(12).reshape(3, 4).astype(np.float32);
    var input_array = np.arange(12).reshape((3, 4)).astype(np.float32);
    var output = emb.Apply(input_array);
    Assert.AreEqual(new TensorShape(3, 4, 12), output.shape);
}
@@ -226,7 +226,7 @@ namespace Tensorflow.Native.UnitTest
    //long[] dims = { 2, 2 };
    //Tensor t = c_api.TF_AllocateTensor(TF_FLOAT, dims, 2, sizeof(float) * 4);
    //Marshal.Copy(values, 0, t, 4);
    Tensor t = new Tensor(new NDArray(values).reshape(2, 2));
    Tensor t = new Tensor(new NDArray(values).reshape((2, 2)));
    return t;
}
@@ -94,7 +94,7 @@ namespace Tensorflow.Native.UnitTest.Tensors
[TestMethod]
public void Tensor()
{
    var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape(2, 3);
    var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape((2, 3));
    var tensor = new Tensor(nd);
    var array = tensor.ToArray<float>();
@@ -17,7 +17,7 @@ namespace TensorFlowNET.UnitTest.Basics
[TestMethod]
public void TFRandomSeedTest()
{
    var initValue = np.arange(6).reshape(3, 2);
    var initValue = np.arange(6).reshape((3, 2));
    tf.set_random_seed(1234);
    var a1 = tf.random_uniform(1);
    var b1 = tf.random_shuffle(tf.constant(initValue));
@@ -40,7 +40,7 @@ namespace TensorFlowNET.UnitTest.Basics
[TestMethod, Ignore]
public void TFRandomSeedTest2()
{
    var initValue = np.arange(6).reshape(3, 2);
    var initValue = np.arange(6).reshape((3, 2));
    tf.set_random_seed(1234);
    var a1 = tf.random_uniform(1, seed:1234);
    var b1 = tf.random_shuffle(tf.constant(initValue), seed: 1234);
@@ -18,8 +18,8 @@ namespace TensorFlowNET.UnitTest
{
    lock (this)
    {
        var a = constant_op.constant(np.array(3.0).reshape(1, 1));
        var b = constant_op.constant(np.array(2.0).reshape(1, 1));
        var a = constant_op.constant(np.array(3.0).reshape((1, 1)));
        var b = constant_op.constant(np.array(2.0).reshape((1, 1)));
        var c = math_ops.matmul(a, b, name: "matmul");
        using (var sess = tf.Session())
        {
@@ -54,7 +54,7 @@ namespace TensorFlowNET.UnitTest
[TestMethod]
public void batch_to_space_nd()
{
    var inputs = np.arange(24).reshape(4, 2, 3);
    var inputs = np.arange(24).reshape((4, 2, 3));
    var block_shape = new[] { 2, 2 };
    int[,] crops = { { 0, 0 }, { 0, 0 } };
    var tensor = tf.batch_to_space_nd(inputs, block_shape, crops);
@@ -2,6 +2,7 @@
using Tensorflow.NumPy;
using System.Linq;
using static Tensorflow.Binding;
using System;
namespace TensorFlowNET.UnitTest.Basics
{
@@ -13,7 +14,7 @@ namespace TensorFlowNET.UnitTest.Basics
    {
        var x = tf.Variable(10, name: "x");
        Assert.AreEqual(0, x.shape.ndim);
        Assert.AreEqual(10, (int)x.numpy());
        Assert.AreEqual(x.numpy(), 10);
    }
    [TestMethod]
@@ -28,7 +29,7 @@ namespace TensorFlowNET.UnitTest.Basics
    {
        var x = tf.constant(3, name: "x");
        var y = tf.Variable(x + 1, name: "y");
        Assert.AreEqual(4, (int)y.numpy());
        Assert.AreEqual(y.numpy(), 4);
    }
    [TestMethod]
@@ -36,7 +37,7 @@ namespace TensorFlowNET.UnitTest.Basics
    {
        var variable = tf.Variable(31, name: "tree");
        var unread = variable.assign(12);
        Assert.AreEqual(12, (int)unread.numpy());
        Assert.AreEqual(unread.numpy(), 12);
    }
    [TestMethod]
@@ -45,7 +46,7 @@ namespace TensorFlowNET.UnitTest.Basics
        var v1 = tf.Variable(10.0f, name: "v1");
        var v2 = v1.assign(v1 + 1.0f);
        Assert.AreEqual(v1.numpy(), v2.numpy());
        Assert.AreEqual(11f, (float)v1.numpy());
        Assert.AreEqual(v1.numpy(), 11f);
    }
    [TestMethod]
@@ -71,6 +72,7 @@ namespace TensorFlowNET.UnitTest.Basics
            { 4, 5, 6 },
            { 7, 8, 9 }
        };
        var x = tf.Variable(nd);
        // get slice form variable
@@ -93,7 +95,8 @@ namespace TensorFlowNET.UnitTest.Basics
        Assert.AreEqual(nd[2], x[2].numpy());
    }
    [TestMethod, Ignore]
    [TestMethod]
    [ExpectedException(typeof(ArrayTypeMismatchException))]
    public void TypeMismatchedSliceAssign()
    {
        NDArray intNd = new int[]
@@ -105,12 +108,7 @@ namespace TensorFlowNET.UnitTest.Basics
            -5, 6, -7
        };
        var x = tf.Variable(doubleNd);
        var slice = x[":"];
        Assert.ThrowsException<System.Exception>(
            // this statement exit without throwing any exception but the "test execution summary" seems not able to detect that.
            () => slice.assign(intNd)
        );
        x[":"].assign(intNd);
    }
    [TestMethod]
@@ -120,7 +118,7 @@ namespace TensorFlowNET.UnitTest.Basics
        for (int i = 0; i < 5; i++)
            x.assign(x + 1);
        Assert.AreEqual(15, (int)x.numpy());
        Assert.AreEqual(x.numpy(), 15);
    }
    [TestMethod]
@@ -138,8 +136,8 @@ namespace TensorFlowNET.UnitTest.Basics
        var a = tf.Variable(5);
        var a_identity = tf.identity(a);
        a.assign_add(1);
        Assert.AreEqual(5, (int)a_identity.numpy());
        Assert.AreEqual(6, (int)a.numpy());
        Assert.AreEqual(a_identity.numpy(), 5);
        Assert.AreEqual(a.numpy(), 6);
    }
}
}
@@ -15,11 +15,11 @@ namespace TensorFlowNET.UnitTest.ManagedAPI
public void Slice()
{
    // Tests based on example code in TF documentation
    var input_array = tf.constant(np.array(new int[] { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }).reshape(3,2,3));
    var input_array = tf.constant(np.array(new int[] { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }).reshape((3,2,3)));
    var indices = tf.constant(np.array(new int[] { 0, 2 }));
    var r1 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 1, 1, 3 });
    Assert.AreEqual(new TensorShape(1,1,3), r1.shape);
    Assert.AreEqual(new Shape(1,1,3), r1.shape);
    var r1np = r1.numpy();
    Assert.AreEqual(r1np[0, 0, 0], 3);
    Assert.AreEqual(r1np[0, 0, 1], 3);
@@ -27,7 +27,7 @@ namespace TensorFlowNET.UnitTest.ManagedAPI
    var r2 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 1, 2, 3 });
    Assert.AreEqual(new TensorShape(1, 2, 3), r2.shape);
    Assert.AreEqual(new Shape(1, 2, 3), r2.shape);
    var r2np = r2.numpy();
    Assert.AreEqual(r2np[0, 0, 0], 3);
    Assert.AreEqual(r2np[0, 0, 1], 3);
@@ -37,7 +37,7 @@ namespace TensorFlowNET.UnitTest.ManagedAPI
    Assert.AreEqual(r2np[0, 1, 2], 4);
    var r3 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 2, 1, 3 });
    Assert.AreEqual(new TensorShape(2, 1, 3), r3.shape);
    Assert.AreEqual(new Shape(2, 1, 3), r3.shape);
    var r3np = r3.numpy();
    Assert.AreEqual(r3np[0, 0, 0], 3);
    Assert.AreEqual(r3np[0, 0, 1], 3);
@@ -53,29 +53,29 @@ namespace TensorFlowNET.UnitTest.ManagedAPI
[TestMethod]
public void Gather()
{
    var input_array = tf.constant(np.arange(12).reshape(3, 4).astype(np.float32));
    var input_array = tf.constant(np.arange(12).reshape((3, 4)).astype(np.float32));
    var indices = tf.constant(np.array(new int[] { 0, 2 }));
    var result = array_ops.gather(input_array, indices);
    Assert.AreEqual(new TensorShape(2, 4), result.shape);
    Assert.AreEqual(new Shape(2, 4), result.shape);
    Assert.AreEqual(result.numpy()[0, 0], 0.0f);
    Assert.AreEqual(result.numpy()[0, 1], 1.0f);
    Assert.AreEqual(result.numpy()[1, 3], 11.0f);
    // Tests based on example code in Python doc string for tf.gather()
    var p1 = tf.random.normal(new TensorShape(5, 6, 7, 8));
    var i1 = tf.random_uniform(new TensorShape(10, 11), maxval: 7, dtype: tf.int32);
    var p1 = tf.random.normal(new Shape(5, 6, 7, 8));
    var i1 = tf.random_uniform(new Shape(10, 11), maxval: 7, dtype: tf.int32);
    var r1 = tf.gather(p1, i1, axis:2);
    Assert.AreEqual(new TensorShape(5, 6, 10, 11, 8), r1.shape);
    Assert.AreEqual(new Shape(5, 6, 10, 11, 8), r1.shape);
    var p2 = tf.random.normal(new TensorShape(4,3));
    var p2 = tf.random.normal(new Shape(4,3));
    var i2 = tf.constant(new int[,] { { 0, 2} });
    var r2 = tf.gather(p2, i2, axis: 0);
    Assert.AreEqual(new TensorShape(1, 2, 3), r2.shape);
    Assert.AreEqual(new Shape(1, 2, 3), r2.shape);
    var r3 = tf.gather(p2, i2, axis: 1);
    Assert.AreEqual(new TensorShape(4,1,2), r3.shape);
    Assert.AreEqual(new Shape(4,1,2), r3.shape);
}
}
}
@@ -136,10 +136,10 @@ namespace TensorFlowNET.UnitTest.Basics
[TestMethod]
public void NDimConst()
{
    var nd = np.array(new int[][]
    var nd = np.array(new int[,]
    {
        new int[]{ 3, 1, 1 },
        new int[]{ 2, 1, 3 }
        { 3, 1, 1 },
        { 2, 1, 3 }
    });
    var tensor = tf.constant(nd);