Built-in numpy API.

tags/v0.60-tf.numpy
Oceania2018, 4 years ago
commit f3eb31fb87

100 changed files with 1792 additions and 923 deletions
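As context for the file list and per-file diffs below, here is a minimal usage sketch of the new Tensorflow.Numpy surface. This is illustrative only and not code from the commit; it mirrors the MemoryMonitor.WarmUp change further down, and several NDArray members are still NotImplementedException stubs at this stage.

    using Tensorflow.Numpy;
    using static Tensorflow.Binding;

    namespace NumpySketch
    {
        class Program
        {
            static void Main()
            {
                // Array creation now goes through the built-in np class instead of NumSharp.
                var ones = np.ones((128, 128));                        // Shape built from a tuple, as in MemoryMonitor.WarmUp
                var zeros = np.zeros(new Shape(10, 10), NumpyDType.Float);

                // NDArray is a thin wrapper over a Tensor, so it feeds the tf.* API directly.
                var a = tf.constant(np.ones((10, 10)));
                var b = tf.Variable(a);
            }
        }
    }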
  1. +1 -1    src/TensorFlowNET.Console/MemoryBasicTest.cs
  2. +1 -1    src/TensorFlowNET.Console/MemoryFuncGraphTest.cs
  3. +1 -1    src/TensorFlowNET.Console/MemoryKerasTest.cs
  4. +7 -2    src/TensorFlowNET.Console/MemoryMonitor.cs
  5. +1 -1    src/TensorFlowNET.Console/Tensorflow.Console.csproj
  6. +1 -1    src/TensorFlowNET.Core/APIs/tf.array.cs
  7. +11 -10  src/TensorFlowNET.Core/Binding.Util.cs
  8. +12 -10  src/TensorFlowNET.Core/Buffers/Buffer.cs
  9. +1 -1    src/TensorFlowNET.Core/Data/DataSetBase.cs
  10. +1 -1   src/TensorFlowNET.Core/Data/DatasetManager.cs
  11. +2 -2   src/TensorFlowNET.Core/Data/Datasets.cs
  12. +1 -1   src/TensorFlowNET.Core/Data/IDataSet.cs
  13. +3 -3   src/TensorFlowNET.Core/Data/MnistDataSet.cs
  14. +6 -6   src/TensorFlowNET.Core/Data/MnistModelLoader.cs
  15. +1 -1   src/TensorFlowNET.Core/Data/TensorDataset.cs
  16. +1 -1   src/TensorFlowNET.Core/Data/TensorSliceDataset.cs
  17. +6 -13  src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
  18. +2 -2   src/TensorFlowNET.Core/Framework/graph_util_impl.cs
  19. +1 -2   src/TensorFlowNET.Core/Framework/op_def_registry.py.cs
  20. +7 -7   src/TensorFlowNET.Core/Framework/tensor_shape.cs
  21. +2 -1   src/TensorFlowNET.Core/Gradients/array_grad.cs
  22. +1 -1   src/TensorFlowNET.Core/Gradients/math_grad.cs
  23. +1 -1   src/TensorFlowNET.Core/Graphs/Graph.Export.cs
  24. +1 -1   src/TensorFlowNET.Core/Interfaces/ITensorOrOperation.cs
  25. +1 -1   src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/ZeroPadding2DArgs.cs
  26. +1 -1   src/TensorFlowNET.Core/Keras/ArgsDefinition/TensorFlowOpLayerArgs.cs
  27. +37 -0  src/TensorFlowNET.Core/Numpy/IMemoryBlock.cs
  28. +67 -0  src/TensorFlowNET.Core/Numpy/InfoOf.cs
  29. +14 -0  src/TensorFlowNET.Core/Numpy/IteratorType.cs
  30. +15 -0  src/TensorFlowNET.Core/Numpy/NDArray.Creation.cs
  31. +176 -0 src/TensorFlowNET.Core/Numpy/NDArray.cs
  32. +47 -0  src/TensorFlowNET.Core/Numpy/NDIterator.Generic.cs
  33. +24 -0  src/TensorFlowNET.Core/Numpy/NDIterator.cs
  34. +207 -0 src/TensorFlowNET.Core/Numpy/NpzDictionary.cs
  35. +48 -0  src/TensorFlowNET.Core/Numpy/Numpy.Creation.cs
  36. +140 -0 src/TensorFlowNET.Core/Numpy/Numpy.cs
  37. +90 -0  src/TensorFlowNET.Core/Numpy/NumpyDType.cs
  38. +63 -0  src/TensorFlowNET.Core/Numpy/Shape.cs
  39. +294 -0 src/TensorFlowNET.Core/Numpy/Slice.cs
  40. +1 -1   src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
  41. +1 -1   src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
  42. +3 -3   src/TensorFlowNET.Core/Operations/NnOps/rnn.cs
  43. +4 -4   src/TensorFlowNET.Core/Operations/Operation.cs
  44. +3 -3   src/TensorFlowNET.Core/Operations/array_ops.cs
  45. +21 -21 src/TensorFlowNET.Core/Operations/image_ops_impl.cs
  46. +3 -3   src/TensorFlowNET.Core/Operations/linalg_ops.cs
  47. +15 -4  src/TensorFlowNET.Core/Operations/math_ops.cs
  48. +2 -1   src/TensorFlowNET.Core/Operations/nn_impl.py.cs
  49. +1 -1   src/TensorFlowNET.Core/Operations/nn_ops.cs
  50. +1 -1   src/TensorFlowNET.Core/Operations/string_ops.cs
  51. +3 -13  src/TensorFlowNET.Core/Sessions/BaseSession.cs
  52. +1 -1   src/TensorFlowNET.Core/Sessions/_ElementFetchMapper.cs
  53. +18 -18 src/TensorFlowNET.Core/Sessions/_FetchHandler.cs
  54. +1 -1   src/TensorFlowNET.Core/Sessions/_FetchMapper.cs
  55. +8 -8   src/TensorFlowNET.Core/Tensorflow.Binding.csproj
  56. +5 -5   src/TensorFlowNET.Core/Tensors/Dimension.cs
  57. +0 -81  src/TensorFlowNET.Core/Tensors/EagerTensorV2.cs
  58. +1 -1   src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs
  59. +1 -1   src/TensorFlowNET.Core/Tensors/Tensor.Assign.cs
  60. +29 -266 src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs
  61. +57 -56 src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
  62. +1 -1   src/TensorFlowNET.Core/Tensors/Tensor.Implicit.cs
  63. +1 -1   src/TensorFlowNET.Core/Tensors/Tensor.Index.cs
  64. +1 -1   src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs
  65. +1 -1   src/TensorFlowNET.Core/Tensors/Tensor.String.cs
  66. +11 -67 src/TensorFlowNET.Core/Tensors/Tensor.Value.cs
  67. +5 -4   src/TensorFlowNET.Core/Tensors/Tensor.cs
  68. +35 -96 src/TensorFlowNET.Core/Tensors/TensorConverter.cs
  69. +23 -20 src/TensorFlowNET.Core/Tensors/TensorShape.Convert.cs
  70. +1 -1   src/TensorFlowNET.Core/Tensors/TensorShape.Equals.cs
  71. +44 -33 src/TensorFlowNET.Core/Tensors/TensorShape.cs
  72. +1 -1   src/TensorFlowNET.Core/Tensors/Tensors.cs
  73. +10 -0  src/TensorFlowNET.Core/Tensors/c_api.tensor.cs
  74. +13 -11 src/TensorFlowNET.Core/Tensors/constant_op.cs
  75. +41 -17 src/TensorFlowNET.Core/Tensors/dtypes.cs
  76. +51 -21 src/TensorFlowNET.Core/Tensors/tensor_util.cs
  77. +1 -1   src/TensorFlowNET.Core/Training/Saving/Saver.cs
  78. +22 -0  src/TensorFlowNET.Core/Util/Arrays.cs
  79. +11 -0  src/TensorFlowNET.Core/Util/Converts.cs
  80. +1 -38  src/TensorFlowNET.Core/Util/UnmanagedExtensions.cs
  81. +1 -1   src/TensorFlowNET.Core/Util/nest.py.cs
  82. +1 -1   src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs
  83. +1 -1   src/TensorFlowNET.Core/Variables/IVariableV1.cs
  84. +1 -1   src/TensorFlowNET.Core/Variables/RefVariable.cs
  85. +1 -1   src/TensorFlowNET.Core/Variables/ResourceVariable.Index.cs
  86. +1 -1   src/TensorFlowNET.Core/Variables/ResourceVariable.Operators.cs
  87. +1 -1   src/TensorFlowNET.Core/ops.cs
  88. +1 -1   src/TensorFlowNET.Keras/BackendImpl.cs
  89. +1 -1   src/TensorFlowNET.Keras/Datasets/Cifar10.cs
  90. +1 -1   src/TensorFlowNET.Keras/Datasets/DatasetPass.cs
  91. +3 -3   src/TensorFlowNET.Keras/Datasets/Imdb.cs
  92. +1 -1   src/TensorFlowNET.Keras/Datasets/MNIST.cs
  93. +1 -1   src/TensorFlowNET.Keras/Engine/Layer.cs
  94. +1 -1   src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs
  95. +2 -2   src/TensorFlowNET.Keras/Engine/Model.Fit.cs
  96. +1 -1   src/TensorFlowNET.Keras/Engine/Model.Predict.cs
  97. +1 -1   src/TensorFlowNET.Keras/Engine/Model.Training.cs
  98. +15 -15 src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs
  99. +7 -6   src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
  100. +1 -1  src/TensorFlowNET.Keras/Layers/Core/Dense.cs

src/TensorFlowNET.Console/MemoryBasicTest.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Engine.DataAdapters;


src/TensorFlowNET.Console/MemoryFuncGraphTest.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Text;


src/TensorFlowNET.Console/MemoryKerasTest.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;


src/TensorFlowNET.Console/MemoryMonitor.cs (+7, -2)

@@ -2,7 +2,7 @@
using System.Diagnostics;
using System.Threading;
using System.Threading.Tasks;
using NumSharp;
using Tensorflow.Numpy;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;

@@ -12,12 +12,17 @@ namespace Tensorflow
{
public void WarmUp()
{
while (true)
{
var ones = np.ones((128, 128));
}

TensorShape shape = (1, 32, 32, 3);
np.arange(shape.size).astype(np.float32).reshape(shape.dims);

print($"tensorflow native version: v{tf.VERSION}");
tf.Context.ensure_initialized();
var a = tf.constant(np.ones(10, 10));
var a = tf.constant(np.ones((10, 10)));
var b = tf.Variable(a);
var c = tf.Variable(b);
var d = b * c;


src/TensorFlowNET.Console/Tensorflow.Console.csproj (+1, -1)

@@ -15,7 +15,7 @@
</PropertyGroup>

<ItemGroup>
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.5.0" />
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.6.0-rc0" />
</ItemGroup>

<ItemGroup>


src/TensorFlowNET.Core/APIs/tf.array.cs (+1, -1)

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;


src/TensorFlowNET.Core/Binding.Util.cs (+11, -10)

@@ -14,8 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using NumSharp.Utilities;
using Tensorflow.Numpy;
using System;
using System.Collections;
using System.Collections.Generic;
@@ -90,12 +89,12 @@ namespace Tensorflow
switch (obj)
{
case NDArray nd:
return nd.ToString(false);
case Array arr:
return nd.ToString();
/*case Array arr:
if (arr.Rank != 1 || arr.GetType().GetElementType()?.IsArray == true)
arr = Arrays.Flatten(arr);
var objs = toObjectArray(arr);
return $"[{string.Join(", ", objs.Select(_tostring))}]";
return $"[{string.Join(", ", objs.Select(_tostring))}]";*/
default:
return obj?.ToString() ?? "null";
}
@@ -166,7 +165,7 @@ namespace Tensorflow
case ICollection arr:
return arr.Count;
case NDArray ndArray:
return ndArray.ndim == 0 ? 1 : ndArray.shape[0];
return ndArray.ndim == 0 ? 1 : (int)ndArray.dims[0];
case IEnumerable enumerable:
return enumerable.OfType<object>().Count();
case TensorShape arr:
@@ -272,10 +271,11 @@ namespace Tensorflow
public static IEnumerable<(T, T)> zip<T>(NDArray t1, NDArray t2)
where T : unmanaged
{
var a = t1.AsIterator<T>();
/*var a = t1.AsIterator<T>();
var b = t2.AsIterator<T>();
while (a.HasNext() && b.HasNext())
yield return (a.MoveNext(), b.MoveNext());
yield return (a.MoveNext(), b.MoveNext());*/
throw new NotImplementedException("");
}

public static IEnumerable<(T1, T2)> zip<T1, T2>(IList<T1> t1, IList<T2> t2)
@@ -296,8 +296,9 @@ namespace Tensorflow
{
var a = t1.AsIterator<T1>();
var b = t2.AsIterator<T2>();
while (a.HasNext() && b.HasNext())
yield return (a.MoveNext(), b.MoveNext());
//while (a.HasNext() && b.HasNext())
//yield return (a.MoveNext(), b.MoveNext());
throw new NotImplementedException("");
}

public static IEnumerable<(T1, T2)> zip<T1, T2>(IEnumerable<T1> e1, IEnumerable<T2> e2)


src/TensorFlowNET.Core/Buffers/Buffer.cs (+12, -10)

@@ -14,8 +14,8 @@
limitations under the License.
******************************************************************************/

using NumSharp.Backends.Unmanaged;
using System;
using System.IO;
using System.Runtime.CompilerServices;
using Tensorflow.Util;
using static Tensorflow.c_api;
@@ -43,12 +43,12 @@ namespace Tensorflow
///
/// <inheritdoc cref="SafeHandleLease" path="/devdoc/usage"/>
/// </remarks>
public unsafe UnmanagedMemoryBlock<byte> DangerousMemoryBlock
public unsafe MemoryStream DangerousMemoryBlock
{
get
{
ref readonly TF_Buffer buffer = ref DangerousBuffer;
return new UnmanagedMemoryBlock<byte>((byte*)buffer.data.ToPointer(), (long)buffer.length);
return new MemoryStream(ToArray());
}
}

@@ -90,17 +90,19 @@ namespace Tensorflow
/// <summary>
/// Copies this buffer's contents onto a <see cref="byte"/> array.
/// </summary>
public byte[] ToArray()
public unsafe byte[] ToArray()
{
using (Handle.Lease())
{
var block = DangerousMemoryBlock;
var len = block.Count;
if (len == 0)
return Array.Empty<byte>();
ref readonly TF_Buffer buffer = ref DangerousBuffer;

if (buffer.length == 0)
return new byte[0];

var data = new byte[DangerousBuffer.length];
fixed (byte* dst = data)
System.Buffer.MemoryCopy(buffer.data.ToPointer(), dst, buffer.length, buffer.length);

var data = new byte[len];
block.CopyTo(data, 0);
return data;
}
}


src/TensorFlowNET.Core/Data/DataSetBase.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{


src/TensorFlowNET.Core/Data/DatasetManager.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System.Collections.Generic;
using Tensorflow.Data;



src/TensorFlowNET.Core/Data/Datasets.cs (+2, -2)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{
@@ -19,7 +19,7 @@ namespace Tensorflow

public (NDArray, NDArray) Randomize(NDArray x, NDArray y)
{
var perm = np.random.permutation(y.shape[0]);
var perm = np.random.permutation((int)y.dims[0]);
np.random.shuffle(perm);
return (x[perm], y[perm]);
}


src/TensorFlowNET.Core/Data/IDataSet.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{


src/TensorFlowNET.Core/Data/MnistDataSet.cs (+3, -3)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Diagnostics;

@@ -15,9 +15,9 @@ namespace Tensorflow
EpochsCompleted = 0;
IndexInEpoch = 0;

NumOfExamples = images.shape[0];
NumOfExamples = (int)images.dims[0];

images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]);
images = images.reshape(images.dims[0], images.dims[1] * images.dims[2]);
images = images.astype(dataType);
// for debug np.multiply performance
var sw = new Stopwatch();


src/TensorFlowNET.Core/Data/MnistModelLoader.cs (+6, -6)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.IO;
using System.Threading.Tasks;
@@ -82,15 +82,15 @@ namespace Tensorflow

var testLabels = ExtractLabels(Path.Combine(setting.TrainDir, Path.GetFileNameWithoutExtension(TEST_LABELS)), one_hot: setting.OneHot, limit: setting.TestSize);

var end = trainImages.shape[0];
var end = trainImages.dims[0];

var validationSize = setting.ValidationSize;

var validationImages = trainImages[np.arange(validationSize)];
var validationLabels = trainLabels[np.arange(validationSize)];

trainImages = trainImages[np.arange(validationSize, end)];
trainLabels = trainLabels[np.arange(validationSize, end)];
trainImages = trainImages[np.arange(validationSize, (int)end)];
trainLabels = trainLabels[np.arange(validationSize, (int)end)];

var dtype = setting.DataType;
var reshape = setting.ReShape;
@@ -159,9 +159,9 @@ namespace Tensorflow

private NDArray DenseToOneHot(NDArray labels_dense, int num_classes)
{
var num_labels = labels_dense.shape[0];
var num_labels = labels_dense.dims[0];
var index_offset = np.arange(num_labels) * num_classes;
var labels_one_hot = np.zeros(num_labels, num_classes);
var labels_one_hot = np.zeros((num_labels, num_classes));
var labels = labels_dense.Data<byte>();
for (int row = 0; row < num_labels; row++)
{


src/TensorFlowNET.Core/Data/TensorDataset.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System.Linq;
using static Tensorflow.Binding;



src/TensorFlowNET.Core/Data/TensorSliceDataset.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System.Linq;
using static Tensorflow.Binding;



src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs (+6, -13)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Linq;
using static Tensorflow.Binding;
@@ -14,25 +14,18 @@ namespace Tensorflow.Eager
Resolve();
}

public EagerTensor(string value, string device_name) : base(value)
public EagerTensor(object value,string device_name, TF_DataType dtype = TF_DataType.TF_UINT8) : base((float[])value)
{
NewEagerTensorHandle(_handle);
}

public EagerTensor(byte[] value, string device_name, TF_DataType dtype) : base(value, dType: dtype)
{
NewEagerTensorHandle(_handle);
throw new NotImplementedException("");
}

public EagerTensor(string[] value, string device_name) : base(value)
public EagerTensor(object value, Shape shape = null, string device_name = null, TF_DataType dtype = TF_DataType.TF_UINT8) : base((float[])value)
{
NewEagerTensorHandle(_handle);
}

public EagerTensor(NDArray value, string device_name) : base(value)
{
NewEagerTensorHandle(_handle);
}
internal unsafe EagerTensor(Array array, Shape shape) : base(array, shape)
=> NewEagerTensorHandle(_handle);

void NewEagerTensorHandle(IntPtr h)
{


src/TensorFlowNET.Core/Framework/graph_util_impl.cs (+2, -2)

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -97,7 +97,7 @@ namespace Tensorflow
{
var data = variables_data_map[input_node.Name];
output_node = create_const_op(input_node.Name, input_node.Attr["dtype"],
data, data.shape);
data, data.dims.Select(x => Convert.ToInt32(x)).ToArray());
how_many_converted += 1;
}
// else if (resource_identity_types.ContainsKey(input_node.Name))


src/TensorFlowNET.Core/Framework/op_def_registry.py.cs (+1, -2)

@@ -34,8 +34,7 @@ namespace Tensorflow
return _registered_ops;

using var buffer = new Buffer(c_api.TF_GetAllOpList());
using var stream = buffer.DangerousMemoryBlock.Stream();
var op_list = OpList.Parser.ParseFrom(stream);
var op_list = OpList.Parser.ParseFrom(buffer.ToArray());
foreach (var op_def in op_list.Op)
_registered_ops[op_def.Name] = op_def;
}


src/TensorFlowNET.Core/Framework/tensor_shape.cs (+7, -7)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Linq;
using System.Text;
@@ -29,13 +29,13 @@ namespace Tensorflow.Framework
bool _shape_is_compatible_0dim(Shape _this, Shape _other)
{
var __other = tensor_shape.as_shape(_other);
if (_this.Dimensions == null || __other.dims == null)
if (_this.dims == null || __other.dims == null)
return true;

if (_this.NDim != __other.ndim)
if (_this.ndim != __other.ndim)
return false;

foreach (var (x_dim, y_dim) in _this.Dimensions.Zip(__other.dims, (x_dim, y_dim) => (x_dim, y_dim)))
foreach (var (x_dim, y_dim) in _this.dims.Zip(__other.dims, (x_dim, y_dim) => (x_dim, y_dim)))
{
if (x_dim != y_dim)
return false;
@@ -62,14 +62,14 @@ namespace Tensorflow.Framework
}

public static int dimension_value(Dimension dimension)
=> dimension.value;
=> (int)dimension.value;

public static TensorShape as_shape(this Shape shape)
=> new TensorShape(shape.Dimensions);
=> new TensorShape(shape.dims);

public static TensorShape most_specific_compatible_shape(this TensorShape self, TensorShape other)
{
var dims = range(self.rank).Select(x => -1).ToArray();
var dims = range(self.rank).Select(x => -1L).ToArray();
foreach(var (i, (d1, d2)) in enumerate(zip(self.dims, other.dims)))
{
if (d1 == d2)


src/TensorFlowNET.Core/Gradients/array_grad.cs (+2, -1)

@@ -18,6 +18,7 @@ using System.Collections.Generic;
using System.Linq;
using Tensorflow.Eager;
using Tensorflow.Framework;
using Tensorflow.Numpy;
using static Tensorflow.Binding;

namespace Tensorflow.Gradients
@@ -201,7 +202,7 @@ namespace Tensorflow.Gradients
// For axis 0 gathers, build an appropriately shaped IndexedSlices.
if ((int)axis_static == 0)
{
var params_tail_shape = params_shape.slice(new NumSharp.Slice(start: 1));
var params_tail_shape = params_shape.slice(new Slice(start: 1));
var values_shape = array_ops.concat(new[] { indices_size, params_tail_shape }, 0);
var values = array_ops.reshape(grad, values_shape);
indices = array_ops.reshape(indices, indices_size);


src/TensorFlowNET.Core/Gradients/math_grad.cs (+1, -1)

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Linq;
using Tensorflow.Eager;


src/TensorFlowNET.Core/Graphs/Graph.Export.cs (+1, -1)

@@ -38,7 +38,7 @@ namespace Tensorflow
{
status.Check(true);
// limit size to 250M, recursion to max 100
var inputStream = CodedInputStream.CreateWithLimits(buffer.DangerousMemoryBlock.Stream(), 250 * 1024 * 1024, 100);
var inputStream = CodedInputStream.CreateWithLimits(buffer.DangerousMemoryBlock, 250 * 1024 * 1024, 100);
def = GraphDef.Parser.ParseFrom(inputStream);
}



src/TensorFlowNET.Core/Interfaces/ITensorOrOperation.cs (+1, -1)

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{


src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/ZeroPadding2DArgs.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow.Keras.ArgsDefinition
{


src/TensorFlowNET.Core/Keras/ArgsDefinition/TensorFlowOpLayerArgs.cs (+1, -1)

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System.Collections.Generic;

namespace Tensorflow.Keras.ArgsDefinition


src/TensorFlowNET.Core/Numpy/IMemoryBlock.cs (+37, -0)

@@ -0,0 +1,37 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Numpy
{
public interface IMemoryBlock
{
/// <summary>
/// The size of a single item stored in <see cref="Address"/>.
/// </summary>
/// <remarks>Equivalent to <see cref="NumpyDType.SizeOf"/> extension.</remarks>
int ItemLength { get; }

/// <summary>
/// The start address of this memory block.
/// </summary>
unsafe void* Address { get; }

/// <summary>
/// How many items are stored in <see cref="Address"/>.
/// </summary>
/// <remarks>Not to confuse with <see cref="BytesLength"/></remarks>
long Count { get; }

/// <summary>
/// How many bytes are stored in this memory block.
/// </summary>
/// <remarks>Calculated by <see cref="Count"/>*<see cref="ItemLength"/></remarks>
long BytesLength { get; }

/// <summary>
/// The <see cref="NumpyDType"/> of the type stored inside this memory block.
/// </summary>
NumpyDType TypeCode { get; }
}
}

src/TensorFlowNET.Core/Numpy/InfoOf.cs (+67, -0)

@@ -0,0 +1,67 @@
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Text;

namespace Tensorflow.Numpy
{
public class InfoOf<T>
{
public static readonly int Size;
public static readonly NumpyDType NPTypeCode;
public static readonly T Zero;
public static readonly T MaxValue;
public static readonly T MinValue;

static InfoOf()
{
NPTypeCode = typeof(T).GetTypeCode();

switch (NPTypeCode)
{
case NumpyDType.Boolean:
Size = 1;
break;
case NumpyDType.Char:
Size = 2;
break;
case NumpyDType.Byte:
Size = 1;
break;
case NumpyDType.Int16:
Size = 2;
break;
case NumpyDType.UInt16:
Size = 2;
break;
case NumpyDType.Int32:
Size = 4;
break;
case NumpyDType.UInt32:
Size = 4;
break;
case NumpyDType.Int64:
Size = 8;
break;
case NumpyDType.UInt64:
Size = 8;
break;
case NumpyDType.Single:
Size = 4;
break;
case NumpyDType.Double:
Size = 8;
break;
case NumpyDType.Decimal:
Size = 16;
break;
case NumpyDType.String:
break;
case NumpyDType.Complex:
default:
Size = Marshal.SizeOf<T>();
break;
}
}
}
}

src/TensorFlowNET.Core/Numpy/IteratorType.cs (+14, -0)

@@ -0,0 +1,14 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Numpy
{
public enum IteratorType
{
Scalar,
Vector,
Matrix,
Tensor
}
}

src/TensorFlowNET.Core/Numpy/NDArray.Creation.cs (+15, -0)

@@ -0,0 +1,15 @@
using System;
using System.Collections.Generic;
using System.Text;
using static Tensorflow.Binding;

namespace Tensorflow.Numpy
{
public partial class NDArray
{
void Initialize(Shape shape, NumpyDType dtype = NumpyDType.Float)
{
_tensor = tf.zeros(shape, dtype: dtype.as_tf_dtype());
}
}
}

src/TensorFlowNET.Core/Numpy/NDArray.cs (+176, -0)

@@ -0,0 +1,176 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace Tensorflow.Numpy
{
public partial class NDArray
{
Tensor _tensor;

public NumpyDType dtype => _tensor.dtype.as_numpy_typecode();
public ulong size => _tensor.size;
public ulong dtypesize => _tensor.itemsize;
public int ndim => _tensor.NDims;
public long[] dims => _tensor.dims.Select(x => Convert.ToInt64(x)).ToArray();
public Shape shape => _tensor.shape;

public NDArray(bool value)
{

}

public NDArray(float value)
{

}

public NDArray(double value)
{

}

public NDArray(Array value, Shape shape = null)
{

}

public NDArray(Type dtype, Shape shape)
{

}

public NDArray(Shape shape, NumpyDType dtype = NumpyDType.Float)
{
Initialize(shape, dtype: dtype);
}

public NDArray(Tensor value)
{
_tensor = value;
}

public NDArray this[params int[] index]
{
get
{
throw new NotImplementedException("");
}

set
{

}
}

public NDArray this[params Slice[] slices]
{
get
{
throw new NotImplementedException("");
}

set
{

}
}

public NDArray this[NDArray mask]
{
get
{
throw new NotImplementedException("");
}

set
{

}
}

public static NDArray Scalar<T>(T value) where T : unmanaged
{
return value switch
{
bool b => new NDArray(b),
_ => throw new NotImplementedException("")
};
}

public T GetValue<T>(int index) where T : unmanaged
=> _tensor.ToArray<T>()[index];
public T GetAtIndex<T>(int index) where T : unmanaged
=> _tensor.ToArray<T>()[index];
public T[] GetData<T>() where T : unmanaged
=> _tensor.ToArray<T>();

public NDArray[] GetNDArrays()
=> throw new NotImplementedException("");

public ValueType GetValue(params int[] indices)
=> throw new NotImplementedException("");

public void SetData(object value, params int[] indices)
=> throw new NotImplementedException("");

public NDIterator<T> AsIterator<T>(bool autoreset = false) where T : unmanaged
=> throw new NotImplementedException("");

public bool HasNext() => throw new NotImplementedException("");
public T MoveNext<T>() => throw new NotImplementedException("");
public NDArray reshape(params int[] shape) => throw new NotImplementedException("");
public NDArray reshape(params long[] shape) => throw new NotImplementedException("");
public NDArray astype(Type type) => throw new NotImplementedException("");
public NDArray astype(NumpyDType type) => throw new NotImplementedException("");
public bool array_equal(NDArray rhs) => throw new NotImplementedException("");
public NDArray ravel() => throw new NotImplementedException("");
public void shuffle(NDArray nd) => throw new NotImplementedException("");
public Array ToMuliDimArray<T>() => throw new NotImplementedException("");
public byte[] ToByteArray() => _tensor.ToArray<byte>();
public static string[] AsStringArray(NDArray arr) => throw new NotImplementedException("");

public T[] Data<T>() where T : unmanaged
=> _tensor.ToArray<T>();
public T[] ToArray<T>() where T : unmanaged
=> _tensor.ToArray<T>();

public static implicit operator NDArray(Array array)
=> new NDArray(array);

public static implicit operator bool(NDArray nd)
=> nd._tensor.ToArray<bool>()[0];

public static implicit operator int(NDArray nd)
=> nd._tensor.ToArray<int>()[0];

public static implicit operator NDArray(bool value)
=> new NDArray(value);

public static implicit operator NDArray(float value)
=> new NDArray(value);

public static implicit operator NDArray(double value)
=> new NDArray(value);

public static implicit operator NDArray(byte[] value)
=> new NDArray(value);

public static implicit operator byte[](NDArray nd)
=> nd.ToByteArray();

public static implicit operator NDArray(int[] value)
=> new NDArray(value, new Shape(value.Length));

public static implicit operator NDArray(float[] value)
=> new NDArray(value);

public static NDArray operator /(NDArray x, NDArray y) => throw new NotImplementedException("");

public override string ToString()
{
return tensor_util.to_numpy_string(_tensor);
}
}
}

src/TensorFlowNET.Core/Numpy/NDIterator.Generic.cs (+47, -0)

@@ -0,0 +1,47 @@
using System;
using System.Collections;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Numpy
{
public partial class NDIterator<TOut> : NDIterator, IEnumerable<TOut>, IDisposable where TOut : unmanaged
{
public IMemoryBlock Block => throw new NotImplementedException();

public IteratorType Type => throw new NotImplementedException();

public Shape Shape => throw new NotImplementedException();

public Shape BroadcastedShape => throw new NotImplementedException();

public bool AutoReset => throw new NotImplementedException();

public Func<bool> HasNext => throw new NotImplementedException();

public Action Reset => throw new NotImplementedException();

public void Dispose()
{
throw new NotImplementedException();
}

public IEnumerator GetEnumerator()
{
throw new NotImplementedException();
}

public Func<T> MoveNext<T>() where T : unmanaged
=> throw new NotImplementedException();

public MoveNextReferencedDelegate<T> MoveNextReference<T>() where T : unmanaged
{
throw new NotImplementedException();
}

IEnumerator<TOut> IEnumerable<TOut>.GetEnumerator()
{
throw new NotImplementedException();
}
}
}

src/TensorFlowNET.Core/Numpy/NDIterator.cs (+24, -0)

@@ -0,0 +1,24 @@
using System;
using System.Collections;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Numpy
{
public delegate ref T MoveNextReferencedDelegate<T>() where T : unmanaged;

public interface NDIterator : IEnumerable
{
IMemoryBlock Block { get; }
IteratorType Type { get; }
Shape Shape { get; } //TODO! is there a performance difference if this shape is readonly or not?
Shape? BroadcastedShape { get; }
bool AutoReset { get; }

Func<T> MoveNext<T>() where T : unmanaged;
MoveNextReferencedDelegate<T> MoveNextReference<T>() where T : unmanaged;

Func<bool> HasNext { get; }
Action Reset { get; }
}
}

src/TensorFlowNET.Core/Numpy/NpzDictionary.cs (+207, -0)

@@ -0,0 +1,207 @@
using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;

namespace Tensorflow.Numpy
{
public class NpzDictionary<T> : IDisposable, IReadOnlyDictionary<string, T>, ICollection<T>
where T : class,
IList, ICloneable, ICollection, IEnumerable, IStructuralComparable, IStructuralEquatable
{
Stream stream;
ZipArchive archive;

bool disposedValue = false;

Dictionary<string, ZipArchiveEntry> entries;
Dictionary<string, T> arrays;


public NpzDictionary(Stream stream)
{
this.stream = stream;
this.archive = new ZipArchive(stream, ZipArchiveMode.Read, leaveOpen: true);

this.entries = new Dictionary<string, ZipArchiveEntry>();
foreach (var entry in archive.Entries)
this.entries[entry.FullName] = entry;

this.arrays = new Dictionary<string, T>();
}


public IEnumerable<string> Keys
{
get { return entries.Keys; }
}


public IEnumerable<T> Values
{
get { return entries.Values.Select(OpenEntry); }
}

public int Count
{
get { return entries.Count; }
}


public object SyncRoot
{
get { return ((ICollection)entries).SyncRoot; }
}


public bool IsSynchronized
{
get { return ((ICollection)entries).IsSynchronized; }
}

public bool IsReadOnly
{
get { return true; }
}

public T this[string key]
{
get { return OpenEntry(entries[key]); }
}

private T OpenEntry(ZipArchiveEntry entry)
{
T array;
if (arrays.TryGetValue(entry.FullName, out array))
return array;

using (Stream s = entry.Open())
{
array = Load_Npz(s);
arrays[entry.FullName] = array;
return array;
}
}

protected virtual T Load_Npz(Stream s)
{
// return np.Load<T>(s);
throw new NotImplementedException("");
}

public bool ContainsKey(string key)
{
return entries.ContainsKey(key);
}

public bool TryGetValue(string key, out T value)
{
value = default(T);
ZipArchiveEntry entry;
if (!entries.TryGetValue(key, out entry))
return false;
value = OpenEntry(entry);
return true;
}

public IEnumerator<KeyValuePair<string, T>> GetEnumerator()
{
foreach (var entry in archive.Entries)
yield return new KeyValuePair<string, T>(entry.FullName, OpenEntry(entry));
}

IEnumerator IEnumerable.GetEnumerator()
{
foreach (var entry in archive.Entries)
yield return new KeyValuePair<string, T>(entry.FullName, OpenEntry(entry));
}

IEnumerator<T> IEnumerable<T>.GetEnumerator()
{
foreach (var entry in archive.Entries)
yield return OpenEntry(entry);
}

public void CopyTo(Array array, int arrayIndex)
{
foreach (var v in this)
array.SetValue(v, arrayIndex++);
}

public void CopyTo(T[] array, int arrayIndex)
{
foreach (var v in this)
array.SetValue(v, arrayIndex++);
}

public void Add(T item)
{
//throw new ReadOnlyException();
}

public void Clear()
{
//throw new ReadOnlyException();
}

public bool Contains(T item)
{
foreach (var v in this)
if (Object.Equals(v.Value, item))
return true;
return false;
}

public bool Remove(T item)
{
// throw new ReadOnlyException();
throw new NotImplementedException("");
}

protected virtual void Dispose(bool disposing)
{
if (!disposedValue)
{
if (disposing)
{
archive.Dispose();
stream.Dispose();
}

archive = null;
stream = null;
entries = null;
arrays = null;

disposedValue = true;
}
}

public void Dispose()
{
Dispose(true);
}
}

public class NpzDictionary : NpzDictionary<Array>
{
bool jagged;

public NpzDictionary(Stream stream, bool jagged)
: base(stream)
{
this.jagged = jagged;
}

protected override Array Load_Npz(Stream s)
{
throw new NotImplementedException("");
/*if (jagged)
return np.LoadJagged(s);
return np.LoadMatrix(s);*/
}
}
}

src/TensorFlowNET.Core/Numpy/Numpy.Creation.cs (+48, -0)

@@ -0,0 +1,48 @@
using System;
using System.Collections;
using System.Collections.Generic;
using System.Numerics;
using System.Text;
using static Tensorflow.Binding;

namespace Tensorflow.Numpy
{
public partial class np
{
public static NDArray array(object data)
=> throw new NotImplementedException("");

public static NDArray array(Array data)
=> new NDArray(tf.constant(data));

public static NDArray array<T>(params T[] data)
where T : unmanaged
=> new NDArray(tf.constant(data));

public static NDArray array(params float[] data)
=> throw new NotImplementedException("");

public static NDArray arange<T>(T end)
where T : unmanaged
=> new NDArray(tf.range(default(T), limit: end));

public static NDArray arange<T>(T start, T? end = null, T? step = null)
where T : unmanaged
=> new NDArray(tf.range(start, limit: end, delta: step));

public static NDArray empty(Shape shape, NumpyDType dtype = NumpyDType.Double)
=> new NDArray(tf.zeros(shape, dtype: dtype.as_tf_dtype()));

public static NDArray ones(Shape shape, NumpyDType dtype = NumpyDType.Double)
=> new NDArray(tf.ones(shape, dtype: dtype.as_tf_dtype()));

public static NDArray ones_like(NDArray a, Type dtype = null)
=> throw new NotImplementedException("");

public static NDArray zeros(Shape shape, NumpyDType dtype = NumpyDType.Double)
=> new NDArray(tf.zeros(shape, dtype: dtype.as_tf_dtype()));

public static NDArray full<T>(Shape shape, T fill_value)
=> new NDArray(tf.fill(tf.constant(shape), fill_value));
}
}

src/TensorFlowNET.Core/Numpy/Numpy.cs (+140, -0)

@@ -0,0 +1,140 @@
using System;
using System.Collections;
using System.Collections.Generic;
using System.Numerics;
using System.Text;

namespace Tensorflow.Numpy
{
public partial class np
{
/// <summary>
/// A convenient alias for None, useful for indexing arrays.
/// </summary>
/// <remarks>https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html<br></br><br></br>https://stackoverflow.com/questions/42190783/what-does-three-dots-in-python-mean-when-indexing-what-looks-like-a-number</remarks>
public static readonly Slice newaxis = new Slice(null, null, 1) { IsNewAxis = true };

// https://docs.scipy.org/doc/numpy-1.16.0/user/basics.types.html
public static readonly Type bool_ = typeof(bool);
public static readonly Type bool8 = bool_;
public static readonly Type @bool = bool_;

public static readonly Type @char = typeof(char);

public static readonly Type @byte = typeof(byte);
public static readonly Type uint8 = typeof(byte);
public static readonly Type ubyte = uint8;


public static readonly Type int16 = typeof(short);

public static readonly Type uint16 = typeof(ushort);

public static readonly Type int32 = typeof(int);

public static readonly Type uint32 = typeof(uint);

public static readonly Type int_ = typeof(long);
public static readonly Type int64 = int_;
public static readonly Type intp = int_; //TODO! IntPtr?
public static readonly Type int0 = int_;

public static readonly Type uint64 = typeof(ulong);
public static readonly Type uint0 = uint64;
public static readonly Type @uint = uint64;

public static readonly Type float32 = typeof(float);

public static readonly Type float_ = typeof(double);
public static readonly Type float64 = float_;
public static readonly Type @double = float_;

public static readonly Type complex_ = typeof(Complex);
public static readonly Type complex128 = complex_;
public static readonly Type complex64 = complex_;
public static readonly Type @decimal = typeof(decimal);

public static Type chars => throw new NotSupportedException("Please use char with extra dimension.");

public static double nan => double.NaN;
public static double NAN => double.NaN;
public static double NaN => double.NaN;
public static double pi => Math.PI;
public static double e => Math.E;
public static double euler_gamma => 0.57721566490153286060651209008240243d;
public static double inf => double.PositiveInfinity;
public static double infty => double.PositiveInfinity;
public static double Inf => double.PositiveInfinity;
public static double NINF => double.NegativeInfinity;
public static double PINF => double.PositiveInfinity;
public static double Infinity => double.PositiveInfinity;
public static double infinity => double.PositiveInfinity;


public static bool array_equal(NDArray a, NDArray b)
=> throw new NotImplementedException("");

public static NDArray concatenate(NDArray[] arrays, int axis = 0)
=> throw new NotImplementedException("");

public static NDArray frombuffer(byte[] bytes, Type dtype)
=> throw new NotImplementedException("");

public static NDArray frombuffer(byte[] bytes, string dtype)
=> throw new NotImplementedException("");


public static NDArray prod(in NDArray a, int? axis = null, Type dtype = null, bool keepdims = false)
=> throw new NotImplementedException("");

public static NDArray prod(params int[] array)
=> throw new NotImplementedException("");

public static NDArray multiply(in NDArray x1, in NDArray x2)
=> throw new NotImplementedException("");

public static NDArray sum(NDArray x1)
=> throw new NotImplementedException("");

public static NDArray squeeze(NDArray x1)
=> throw new NotImplementedException("");
public static NDArray log(NDArray x)
=> throw new NotImplementedException("");

public static bool allclose(NDArray a, NDArray b, double rtol = 1.0E-5, double atol = 1.0E-8,
bool equal_nan = false) => throw new NotImplementedException("");


public static class random
{
public static NDArray permutation(int x)
{
throw new NotImplementedException("");
}

public static void shuffle(NDArray nd)
{

}

public static NDArray rand(params int[] shape)
=> throw new NotImplementedException("");

public static NDArray randint(long x)
=> throw new NotImplementedException("");

public static NDArray RandomState(int x)
=> throw new NotImplementedException("");
}

public static NpzDictionary<T> Load_Npz<T>(byte[] bytes)
where T : class, IList, ICloneable, ICollection, IEnumerable, IStructuralComparable, IStructuralEquatable
{
throw new NotImplementedException("");
}
}
}

src/TensorFlowNET.Core/Numpy/NumpyDType.cs (+90, -0)

@@ -0,0 +1,90 @@
using System;
using System.Collections.Generic;
using System.Numerics;
using System.Text;

namespace Tensorflow.Numpy
{
/// <summary>
/// Represents all available types in numpy.
/// </summary>
/// <remarks>The int values of the enum are a copy of <see cref="TypeCode"/> excluding types not available in numpy.</remarks>
public enum NumpyDType
{
/// <summary>A null reference.</summary>
Empty = 0,

/// <summary>A simple type representing Boolean values of true or false.</summary>
Boolean = 3,

/// <summary>An integral type representing unsigned 16-bit integers with values between 0 and 65535. The set of possible values for the <see cref="F:System.TypeCode.Char"></see> type corresponds to the Unicode character set.</summary>
Char = 4,

/// <summary>An integral type representing unsigned 8-bit integers with values between 0 and 255.</summary>
Byte = 6,

/// <summary>An integral type representing signed 16-bit integers with values between -32768 and 32767.</summary>
Int16 = 7,

/// <summary>An integral type representing unsigned 16-bit integers with values between 0 and 65535.</summary>
UInt16 = 8,

/// <summary>An integral type representing signed 32-bit integers with values between -2147483648 and 2147483647.</summary>
Int32 = 9,

/// <summary>An integral type representing unsigned 32-bit integers with values between 0 and 4294967295.</summary>
UInt32 = 10, // 0x0000000A

/// <summary>An integral type representing signed 64-bit integers with values between -9223372036854775808 and 9223372036854775807.</summary>
Int64 = 11, // 0x0000000B

/// <summary>An integral type representing unsigned 64-bit integers with values between 0 and 18446744073709551615.</summary>
UInt64 = 12, // 0x0000000C

/// <summary>A floating point type representing values ranging from approximately 1.5 x 10 -45 to 3.4 x 10 38 with a precision of 7 digits.</summary>
Single = 13, // 0x0000000D
Float = 13, // 0x0000000D

/// <summary>A floating point type representing values ranging from approximately 5.0 x 10 -324 to 1.7 x 10 308 with a precision of 15-16 digits.</summary>
Double = 14, // 0x0000000E

/// <summary>A simple type representing values ranging from 1.0 x 10 -28 to approximately 7.9 x 10 28 with 28-29 significant digits.</summary>
Decimal = 15, // 0x0000000F

/// <summary>A sealed class type representing Unicode character strings.</summary>
String = 18, // 0x00000012

Complex = 128, //0x00000080
}

public static class NTTypeCodeExtension
{
public static NumpyDType GetTypeCode(this Type type)
{
// ReSharper disable once PossibleNullReferenceException
while (type.IsArray)
type = type.GetElementType();

var tc = Type.GetTypeCode(type);
if (tc == TypeCode.Object)
{
if (type == typeof(Complex))
{
return NumpyDType.Complex;
}

return NumpyDType.Empty;
}

try
{
return (NumpyDType)(int)tc;
}
catch (InvalidCastException)
{
return NumpyDType.Empty;
}
}
}

}
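A hypothetical standalone check of the Type-to-NumpyDType mapping above (not part of this commit): the enum values mirror System.TypeCode, GetTypeCode unwraps array types to their element type, and anything without a numpy counterpart falls back to Empty.

    using System;
    using Tensorflow.Numpy;

    class NumpyDTypeDemo
    {
        static void Main()
        {
            // float maps to Single, whose value (13) matches TypeCode.Single.
            Console.WriteLine(typeof(float).GetTypeCode() == NumpyDType.Single);     // True
            // Arrays are unwrapped to their element type before mapping.
            Console.WriteLine(typeof(double[]).GetTypeCode() == NumpyDType.Double);  // True
            // Types numpy has no counterpart for map to Empty.
            Console.WriteLine(typeof(object).GetTypeCode() == NumpyDType.Empty);     // True
        }
    }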

src/TensorFlowNET.Core/Numpy/Shape.cs (+63, -0)

@@ -0,0 +1,63 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace Tensorflow.Numpy
{
public class Shape
{
public int ndim => _dims.Length;
long[] _dims;
public long[] dims => _dims;

public Shape(params long[] dims)
=> _dims = dims;

public static implicit operator Shape(int dims)
=> new Shape(dims);

public static implicit operator Shape(long[] dims)
=> new Shape(dims);

public static implicit operator Shape(int[] dims)
=> new Shape(dims.Select(x => Convert.ToInt64(x)).ToArray());

public static implicit operator Shape((long, long) dims)
=> new Shape(dims.Item1, dims.Item2);

public bool IsSliced => throw new NotImplementedException("");
public bool IsScalar => throw new NotImplementedException("");
public bool IsBroadcasted => throw new NotImplementedException("");

public static Shape Scalar
=> new Shape(new long[0]);

/// <summary>
/// Returns the size this shape represents.
/// </summary>
public ulong size
{
get
{
var computed = 1L;
for (int i = 0; i < _dims.Length; i++)
{
var val = _dims[i];
if (val <= 0)
continue;
computed *= val;
}

return (ulong)computed;
}
}

public bool IsEmpty => throw new NotImplementedException("");

public override string ToString()
{
return "(" + string.Join(", ", _dims) + ")";
}
}
}

src/TensorFlowNET.Core/Numpy/Slice.cs (+294, -0)

@@ -0,0 +1,294 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;

namespace Tensorflow.Numpy
{
/// <summary> <br></br>
/// NDArray can be indexed using slicing <br></br>
/// A slice is constructed by start:stop:step notation <br></br>
/// <br></br>
/// Examples: <br></br>
/// <br></br>
/// a[start:stop] # items start through stop-1 <br></br>
/// a[start:] # items start through the rest of the array <br></br>
/// a[:stop] # items from the beginning through stop-1 <br></br>
/// <br></br>
/// The key point to remember is that the :stop value represents the first value that is not <br></br>
/// in the selected slice. So, the difference between stop and start is the number of elements <br></br>
/// selected (if step is 1, the default). <br></br>
/// <br></br>
/// There is also the step value, which can be used with any of the above: <br></br>
/// a[:] # a copy of the whole array <br></br>
/// a[start:stop:step] # start through not past stop, by step <br></br>
/// <br></br>
/// The other feature is that start or stop may be a negative number, which means it counts <br></br>
/// from the end of the array instead of the beginning. So: <br></br>
/// a[-1] # last item in the array <br></br>
/// a[-2:] # last two items in the array <br></br>
/// a[:-2] # everything except the last two items <br></br>
/// Similarly, step may be a negative number: <br></br>
/// <br></br>
/// a[::- 1] # all items in the array, reversed <br></br>
/// a[1::- 1] # the first two items, reversed <br></br>
/// a[:-3:-1] # the last two items, reversed <br></br>
/// a[-3::- 1] # everything except the last two items, reversed <br></br>
/// <br></br>
/// NumSharp is kind to the programmer if there are fewer items than <br></br>
/// you ask for. For example, if you ask for a[:-2] and a only contains one element, you get an <br></br>
/// empty list instead of an error.Sometimes you would prefer the error, so you have to be aware <br></br>
/// that this may happen. <br></br>
/// <br></br>
/// Adapted from Greg Hewgill's answer on Stackoverflow: https://stackoverflow.com/questions/509211/understanding-slice-notation <br></br>
/// <br></br>
/// Note: special IsIndex == true <br></br>
/// It will pick only a single value at Start in this dimension effectively reducing the Shape of the sliced matrix by 1 dimension. <br></br>
/// It can be used to reduce an N-dimensional array/matrix to a (N-1)-dimensional array/matrix <br></br>
/// <br></br>
/// Example: <br></br>
/// a=[[1, 2], [3, 4]] <br></br>
/// a[:, 1] returns the second column of that 2x2 matrix as a 1-D vector <br></br>
/// </summary>
public class Slice
{
/// <summary>
/// return : for this dimension
/// </summary>
public static readonly Slice All = new Slice(null, null);

/// <summary>
/// return 0:0 for this dimension
/// </summary>
public static readonly Slice None = new Slice(0, 0, 1);

/// <summary>
/// fill up the missing dimensions with : at this point, corresponds to ...
/// </summary>
public static readonly Slice Ellipsis = new Slice(0, 0, 1) { IsEllipsis = true };

/// <summary>
/// insert a new dimension at this point
/// </summary>
public static readonly Slice NewAxis = new Slice(0, 0, 1) { IsNewAxis = true };

/// <summary>
/// return exactly one element at this dimension and reduce the shape from n-dim to (n-1)-dim
/// </summary>
/// <param name="index"></param>
/// <returns></returns>
public static Slice Index(int index) => new Slice(index, index + 1) { IsIndex = true };

///// <summary>
///// return multiple elements for this dimension specified by the given index array (or boolean mask array)
///// </summary>
///// <param name="index_array_or_mask"></param>
///// <returns></returns>
//[MethodImpl(MethodImplOptions.AggressiveInlining)]
//public static Slice Select(NDArray index_array_or_mask) => new Slice(null, null) { Selection=index_array_or_mask };

public int? Start;
public int? Stop;
public int Step;
public bool IsIndex;
public bool IsEllipsis;
public bool IsNewAxis;

///// <summary>
///// Array of integer indices to select elements by index extraction or boolean values to select by masking the elements of the given dimension.
///// </summary>
//public NDArray Selection = null;

/// <summary>
/// Length of the slice.
/// <remarks>
/// The length is not guaranteed to be known for i.e. a slice like ":". Make sure to check Start and Stop
/// for null before using it</remarks>
/// </summary>
public int? Length => Stop - Start;

/// <summary>
/// ndarray can be indexed using slicing
/// slice is constructed by start:stop:step notation
/// </summary>
/// <param name="start">Start index of the slice, null means from the start of the array</param>
/// <param name="stop">Stop index (first index after end of slice), null means to the end of the array</param>
/// <param name="step">Optional step to select every n-th element, defaults to 1</param>
public Slice(int? start = null, int? stop = null, int step = 1)
{
Start = start;
Stop = stop;
Step = step;
}

public Slice(string slice_notation)
{
Parse(slice_notation);
}

/// <summary>
/// Parses Python array slice notation and returns an array of Slice objects
/// </summary>
public static Slice[] ParseSlices(string multi_slice_notation)
{
return Regex.Split(multi_slice_notation, @",\s*").Where(s => !string.IsNullOrWhiteSpace(s)).Select(token => new Slice(token)).ToArray();
}

/// <summary>
/// Creates Python array slice notation out of an array of Slice objects (mainly used for tests)
/// </summary>
public static string FormatSlices(params Slice[] slices)
{
return string.Join(",", slices.Select(s => s.ToString()));
}

private void Parse(string slice_notation)
{
if (string.IsNullOrEmpty(slice_notation))
throw new ArgumentException("Slice notation expected, got empty string or null");
var match = Regex.Match(slice_notation, @"^\s*((?'start'[+-]?\s*\d+)?\s*:\s*(?'stop'[+-]?\s*\d+)?\s*(:\s*(?'step'[+-]?\s*\d+)?)?|(?'index'[+-]?\s*\d+)|(?'ellipsis'\.\.\.)|(?'newaxis'(np\.)?newaxis))\s*$");
if (!match.Success)
throw new ArgumentException($"Invalid slice notation: '{slice_notation}'");
if (match.Groups["ellipsis"].Success)
{
Start = 0;
Stop = 0;
Step = 1;
IsEllipsis = true;
return;
}
if (match.Groups["newaxis"].Success)
{
Start = 0;
Stop = 0;
Step = 1;
IsNewAxis = true;
return;
}
if (match.Groups["index"].Success)
{
if (!int.TryParse(Regex.Replace(match.Groups["index"].Value ?? "", @"\s+", ""), out var start))
throw new ArgumentException($"Invalid value for index: '{match.Groups["index"].Value}'");
Start = start;
Stop = start + 1;
Step = 1; // special case for dimensionality reduction by picking a single element
IsIndex = true;
return;
}
var start_string = Regex.Replace(match.Groups["start"].Value ?? "", @"\s+", ""); // removing spaces from match to be able to parse what python allows, like: "+ 1" or "- 9";
var stop_string = Regex.Replace(match.Groups["stop"].Value ?? "", @"\s+", "");
var step_string = Regex.Replace(match.Groups["step"].Value ?? "", @"\s+", "");

if (string.IsNullOrWhiteSpace(start_string))
Start = null;
else
{
if (!int.TryParse(start_string, out var start))
throw new ArgumentException($"Invalid value for start: {start_string}");
Start = start;
}

if (string.IsNullOrWhiteSpace(stop_string))
Stop = null;
else
{
if (!int.TryParse(stop_string, out var stop))
throw new ArgumentException($"Invalid value for start: {stop_string}");
Stop = stop;
}

if (string.IsNullOrWhiteSpace(step_string))
Step = 1;
else
{
if (!int.TryParse(step_string, out var step))
throw new ArgumentException($"Invalid value for start: {step_string}");
Step = step;
}
}

#region Equality comparison

public static bool operator ==(Slice a, Slice b)
{
if (ReferenceEquals(a, b))
return true;

if (a is null || b is null)
return false;

return a.Start == b.Start && a.Stop == b.Stop && a.Step == b.Step;
}

public static bool operator !=(Slice a, Slice b)
{
return !(a == b);
}

public override bool Equals(object obj)
{
if (obj == null)
return false;

if (obj.GetType() != typeof(Slice))
return false;

var b = (Slice)obj;
return Start == b.Start && Stop == b.Stop && Step == b.Step;
}

public override int GetHashCode()
{
return ToString().GetHashCode();
}

#endregion

public override string ToString()
{
if (IsIndex)
return $"{Start ?? 0}";
else if (IsNewAxis)
return "np.newaxis";
else if (IsEllipsis)
return "...";
var optional_step = Step == 1 ? "" : $":{Step}";
return $"{(Start == 0 ? "" : Start.ToString())}:{(Stop == null ? "" : Stop.ToString())}{optional_step}";
}

// return the size of the slice, given the data dimension on this axis
// note: this works only with sanitized shapes!
public int GetSize()
{
var astep = Math.Abs(Step);
return (Math.Abs(Start.Value - Stop.Value) + (astep - 1)) / astep;
}

#region Operators

public static Slice operator ++(Slice a)
{
if (a.Start.HasValue)
a.Start++;
if (a.Stop.HasValue)
a.Stop++;
return a;
}

public static Slice operator --(Slice a)
{
if (a.Start.HasValue)
a.Start--;
if (a.Stop.HasValue)
a.Stop--;
return a;
}

public static implicit operator Slice(int index) => Slice.Index(index);
public static implicit operator Slice(string slice) => new Slice(slice);
//public static implicit operator Slice(NDArray selection) => Slice.Select(selection);

#endregion
}
}
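A short, hypothetical usage sketch of the slice notation handled by the parser above (not part of this commit), to make the start:stop:step rules concrete:

    using System;
    using Tensorflow.Numpy;

    class SliceDemo
    {
        static void Main()
        {
            var s = new Slice("1:10:2");               // Start=1, Stop=10, Step=2
            Console.WriteLine(s);                       // prints "1:10:2"

            var parsed = Slice.ParseSlices(":, 2, ...");
            Console.WriteLine(parsed.Length);           // 3: full slice, index 2, ellipsis
            Console.WriteLine(parsed[1].IsIndex);       // True (picks one element, drops that dimension)
            Console.WriteLine(parsed[2].IsEllipsis);    // True

            Console.WriteLine(Slice.Index(3));          // prints "3"
            Console.WriteLine(np.newaxis.IsNewAxis);    // True (inserts a new dimension)
        }
    }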

src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs (+1, -1)

@@ -51,7 +51,7 @@ namespace Tensorflow
var input_depth = input_shape.dims.Last();
var h_depth = _num_units;
_kernel = add_weight(_WEIGHTS_VARIABLE_NAME,
shape: new[] { input_depth + h_depth, 4 * _num_units });
shape: new int[] { (int)(input_depth + h_depth), 4 * _num_units });
_bias = add_weight(_BIAS_VARIABLE_NAME,
shape: new[] { 4 * _num_units },
initializer: tf.zeros_initializer);


src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs (+1, -1)

@@ -56,7 +56,7 @@ namespace Tensorflow

_kernel = add_weight(
_WEIGHTS_VARIABLE_NAME,
shape: new[] { input_depth + _num_units, _num_units });
shape: new int[] { (int)(input_depth + _num_units), _num_units });

_bias = add_weight(
_BIAS_VARIABLE_NAME,


src/TensorFlowNET.Core/Operations/NnOps/rnn.cs (+3, -3)

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -396,7 +396,7 @@ namespace Tensorflow.Operations
// Restore some shape information
foreach (var (output, output_size) in zip(final_outputs, flat_output_size))
{
var shape = rnn_cell_impl._concat(new[] { const_time_steps, const_batch_size }, output_size, @static: true);
var shape = rnn_cell_impl._concat(new int[] { (int)const_time_steps, (int)const_batch_size }, output_size, @static: true);
output.set_shape(shape);
}

@@ -436,7 +436,7 @@ namespace Tensorflow.Operations
};
var x_t = array_ops.transpose(x, array_ops.concat(con1, 0));

var dims = new int[] { x_static_shape.dims[1], x_static_shape.dims[0] }
var dims = new [] { x_static_shape.dims[1], x_static_shape.dims[0] }
.ToList();
dims.AddRange(x_static_shape.dims.Skip(2));
var shape = new TensorShape(dims.ToArray());


src/TensorFlowNET.Core/Operations/Operation.cs (+4, -4)

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -207,7 +207,7 @@ namespace Tensorflow
c_api.TF_OperationGetAttrValueProto(_handle, name, buf.Handle, tf.Status.Handle);
tf.Status.Check(true);

x = AttrValue.Parser.ParseFrom(buf.DangerousMemoryBlock.Stream());
x = AttrValue.Parser.ParseFrom(buf.ToArray());
}

string oneof_value = x.ValueCase.ToString();
@@ -235,7 +235,7 @@ namespace Tensorflow
c_api.TF_OperationGetAttrValueProto(_handle, name, buf.Handle, tf.Status.Handle);
tf.Status.Check(true);

x = AttrValue.Parser.ParseFrom(buf.DangerousMemoryBlock.Stream());
x = AttrValue.Parser.ParseFrom(buf.ToArray());
}

string oneof_value = x.ValueCase.ToString();
@@ -269,7 +269,7 @@ namespace Tensorflow
c_api.TF_OperationToNodeDef(_handle, buffer.Handle, s.Handle);
s.Check();

return NodeDef.Parser.ParseFrom(buffer.DangerousMemoryBlock.Stream());
return NodeDef.Parser.ParseFrom(buffer.ToArray());
}
}



src/TensorFlowNET.Core/Operations/array_ops.cs (+3, -3)

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -510,7 +510,7 @@ namespace Tensorflow
{
value = ops.convert_to_tensor(value);
var value_shape = value.TensorShape;
num = value_shape.dims[axis];
num = (int)value_shape.dims[axis];
}

return gen_array_ops.unpack(value, num: num.Value, axis: axis, name: name);
@@ -588,7 +588,7 @@ namespace Tensorflow
var input_shape = input.TensorShape;
if (optimize && input.NDims > -1 && input_shape.is_fully_defined())
{
var nd = np.array(input.shape).astype(out_type.as_numpy_dtype());
var nd = np.array(input.shape).astype(out_type.as_system_dtype());
return constant_op.constant(nd, name: name);
}
}


+ 21
- 21
src/TensorFlowNET.Core/Operations/image_ops_impl.cs View File

@@ -51,7 +51,7 @@ namespace Tensorflow
return false;
}

internal static int[] _ImageDimensions(Tensor image, int rank)
internal static long[] _ImageDimensions(Tensor image, int rank)
{
if (image.TensorShape.is_fully_defined())
return image.TensorShape.as_list();
@@ -60,13 +60,13 @@ namespace Tensorflow
var static_shape = image.TensorShape.with_rank(rank).as_list();
var dynamic_shape = array_ops.unstack(array_ops.shape(image), rank);

int[] ss_storage = null;
int[] ds_storage = null;
long[] ss_storage = null;
long[] ds_storage = null;
// var sd = static_shape.Zip(dynamic_shape, (first, second) => storage[storage.Length] = first;
var sd = static_shape.Zip(dynamic_shape, (ss, ds) =>
{
ss_storage[ss_storage.Length] = ss;
ds_storage[ds_storage.Length] = (int)ds;
ds_storage[ds_storage.Length] = (long)ds;
return true;
});

@@ -110,14 +110,14 @@ namespace Tensorflow
}
}

var image_shape_last_three_elements = new TensorShape(new int[3] {
var image_shape_last_three_elements = new TensorShape(new[] {
image_shape.dims[image_shape.dims.Length - 1],
image_shape.dims[image_shape.dims.Length - 2],
image_shape.dims[image_shape.dims.Length - 3]});
if (!image_shape_last_three_elements.is_fully_defined())
{
Tensor image_shape_ = array_ops.shape(image);
var image_shape_return = tf.constant(new int[3] {
var image_shape_return = tf.constant(new[] {
image_shape_.dims[image_shape.dims.Length - 1],
image_shape_.dims[image_shape.dims.Length - 2],
image_shape_.dims[image_shape.dims.Length - 3]});
@@ -146,7 +146,7 @@ namespace Tensorflow
if (image_shape == image_shape.unknown_shape())
{
// c# defaults null types to 0 anyhow, so this should be a pretty equivalent port
result.set_shape(new TensorShape(new int[] { 0, 0, 0 }));
result.set_shape(new TensorShape(new long[] { 0, 0, 0 }));
}
else
{
@@ -284,7 +284,7 @@ namespace Tensorflow
math_ops.equal(k, 3), _rot270()};

var result = control_flow_ops.case_v2(cases, callable_default: () => new Tensor[] { image }, exclusive: true, name: name_scope);
result.set_shape(new[] { -1, -1, image.TensorShape.dims[2] });
result.set_shape(new long[] { -1, -1, image.TensorShape.dims[2] });
return result;
}

@@ -466,7 +466,7 @@ or rank = 4. Had rank = {0}", rank));
var assert_ops = _CheckAtLeast3DImage(image, require_static: false);

// batch: [0], height: [1], width: [2], depth: [3]
int[] bhwd = _ImageDimensions(image, rank: 4);
var bhwd = _ImageDimensions(image, rank: 4);

var after_padding_width = target_width - offset_width - bhwd[2];

@@ -496,7 +496,7 @@ or rank = 4. Had rank = {0}", rank));

TensorShape padded_shape_result()
{
int[] i_remnants = { };
long[] i_remnants = { };
foreach (var i in new[] { bhwd[0], target_height, target_width, bhwd[3] })
if (_is_tensor(i))
return null;
@@ -534,7 +534,7 @@ or rank = 4. Had rank = {0}", rank));
{
is_batch = false;
image = array_ops.expand_dims(image, 0);
image.set_shape(new TensorShape(new int[] { 0, 0, 0, 0 }));
image.set_shape(new TensorShape(new long[] { 0, 0, 0, 0 }));
}
else if (image_shape.ndim != 4)
{
@@ -545,7 +545,7 @@ or rank = 4. Had rank = {0}", rank));
var assert_ops = _CheckAtLeast3DImage(image, require_static: false);

// batch: [0], height: [1], width: [2], depth: [3]
int[] bhwd = _ImageDimensions(image, rank: 4);
var bhwd = _ImageDimensions(image, rank: 4);

assert_ops[assert_ops.Length] = _assert(check_ops.assert_greater_equal(tf.constant(offset_height),
tf.constant(0)), typeof(ValueError),
@@ -575,7 +575,7 @@ or rank = 4. Had rank = {0}", rank));

TensorShape cropped_shape_result()
{
int[] i_remnants = { };
long[] i_remnants = { };
foreach (var i in new[] { bhwd[0], target_height, target_width, bhwd[3] })
if (_is_tensor(i))
return null;
@@ -668,12 +668,12 @@ or rank = 4. Had rank = {0}", rank));
return x == y;
}

int[] _hw_ = _ImageDimensions(image, rank: 4);
int width_diff = (int)target_width - _hw_[2];
var _hw_ = _ImageDimensions(image, rank: 4);
var width_diff = (long)target_width - _hw_[2];
int offset_crop_width = (int)max_(Math.Floor(Math.Abs((decimal)width_diff) / 2), 0);
int offset_pad_width = (int)max_(Math.Floor((decimal)width_diff / 2), 0);

int height_diff = (int)target_height - _hw_[1];
var height_diff = (long)target_height - _hw_[1];
int offset_crop_height = (int)max_(Math.Floor(Math.Abs((decimal)height_diff) / 2), 0);
int offset_pad_height = (int)max_(Math.Floor((decimal)height_diff / 2), 0);

@@ -687,7 +687,7 @@ or rank = 4. Had rank = {0}", rank));
if (resized.TensorShape.ndim == Unknown)
throw new ValueError("resized contains no shape.");

int[] _rhrw_ = _ImageDimensions(resized, rank: 4);
var _rhrw_ = _ImageDimensions(resized, rank: 4);

assert_ops = new Operation[2];
assert_ops[0] = _assert(
@@ -782,7 +782,7 @@ new_height, new_width");

images = resizer_fn(images, size);

images.set_shape(new TensorShape(new int[] { Unknown, new_height_const, new_width_const, Unknown }));
images.set_shape(new TensorShape(new long[] { Unknown, new_height_const, new_width_const, Unknown }));

if (!is_batch)
images = array_ops.squeeze(images, axis: new int[] { 0 });
@@ -1181,7 +1181,7 @@ new_height, new_width");
// it is passed as a tensor
image = gen_ops.encode_jpeg_variable_quality(image, quality: jpeg_quality);

image = gen_ops.decode_jpeg(image, channels: channels);
image = gen_ops.decode_jpeg(image, channels: (int)channels);
return convert_image_dtype(image, orig_dtype, saturate: true);
});
}
@@ -1369,7 +1369,7 @@ new_height, new_width");
TensorShape shape1 = img1.TensorShape.with_rank_at_least(3);
TensorShape shape2 = img2.TensorShape.with_rank_at_least(3);
shape1 = new TensorShape(shape1.dims.Skip(shape1.dims.Length - 3).Take(shape1.dims.Length - (shape1.dims.Length - 3)).ToArray());
tensor_shape.assert_is_compatible_with(self: new Tensor(shape1), other: new Tensor(shape2.dims.Skip(shape2.dims.Length - 3).Take(shape2.dims.Length - (shape2.dims.Length - 3)).ToArray()));
tensor_shape.assert_is_compatible_with(self: new Tensor(shape1.dims), other: new Tensor(shape2.dims.Skip(shape2.dims.Length - 3).Take(shape2.dims.Length - (shape2.dims.Length - 3)).ToArray()));

if (shape1.ndim != -1 && shape2.ndim != -1)
{
@@ -1377,7 +1377,7 @@ new_height, new_width");
var shape2_temp = shape2.dims.Skip(shape2.dims.Length - 3).Take(shape2.dims.Length - (shape1.dims.Length - 3)).ToArray();
Array.Reverse(shape1_temp);
Array.Reverse(shape2_temp);
foreach ((int dim1, int dim2) in shape1_temp.Zip(shape2_temp, Tuple.Create))
foreach (var (dim1, dim2) in shape1_temp.Zip(shape2_temp, Tuple.Create))
{
if (dim1 != 1 || dim2 != 1 /*|| !dim1.is_compatible_with(dim2)*/)
throw new ValueError(String.Format("Two images are not compatible: {0} and {1}", shape1, shape2));
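Elsewhere in this file the crop/pad math now runs on long image dimensions and only narrows to int for the final offsets. A standalone sketch of that arithmetic with illustrative sizes:

using System;

long target_width = 224, image_width = 248;               // illustrative values
long width_diff = target_width - image_width;             // negative => crop, positive => pad
int offset_crop_width = (int)Math.Max((long)Math.Floor(Math.Abs((decimal)width_diff) / 2), 0L);
int offset_pad_width = (int)Math.Max((long)Math.Floor((decimal)width_diff / 2), 0L);
// width_diff == -24 here, so offset_crop_width == 12 and offset_pad_width == 0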


+ 3
- 3
src/TensorFlowNET.Core/Operations/linalg_ops.cs View File

@@ -20,11 +20,11 @@ namespace Tensorflow
var diag_size = Math.Min(num_rows, num_columns);
if (batch_shape == null)
batch_shape = new TensorShape(new int[0]);
var diag_shape = batch_shape.dims.concat(new[] { diag_size });
var diag_shape = batch_shape.dims.concat(new long[] { diag_size });

int[] shape = null;
long[] shape = null;
if (!is_square)
shape = batch_shape.dims.concat(new[] { num_rows, num_columns });
shape = batch_shape.dims.concat(new long[] { num_rows, num_columns });

var diag_ones = array_ops.ones(diag_shape, dtype: dtype);
if (is_square)


+ 15
- 4
src/TensorFlowNET.Core/Operations/math_ops.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -726,7 +726,18 @@ namespace Tensorflow
}

if (delta == null)
delta = 1;
{
if (limit is int)
delta = 1;
else if (limit is long)
delta = 1L;
else if (limit is float)
delta = 1.0f;
else if (limit is double)
delta = 1.0d;
else
delta = 1;
}

return tf_with(ops.name_scope(name, "Range", new { start, limit, delta }), scope =>
{
@@ -883,14 +894,14 @@ namespace Tensorflow
// free_dims
int[] free_dims = { };
foreach (int i in free)
free_dims[free_dims.Length] = shape_a[i];
free_dims[free_dims.Length] = (int)shape_a[i];

int prod_free = (int)np.prod(free_dims);

// prod_axes
int[] prod_axes_pre = { };
foreach (int i in axes)
prod_axes_pre[prod_axes_pre.Length] = shape_a[i];
prod_axes_pre[prod_axes_pre.Length] = (int)shape_a[i];
int prod_axes = (int)np.prod(prod_axes_pre);

// perm
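range now infers delta's numeric type from limit so that start, limit, and delta stay in one dtype. A standalone sketch of the same inference over object-typed inputs, mirroring the if/else chain added above:

object InferDelta(object limit)
{
    if (limit is int) return 1;        // int limit  -> int step
    if (limit is long) return 1L;
    if (limit is float) return 1.0f;
    if (limit is double) return 1.0d;
    return 1;                          // fall back to int, as the code above does
}
// InferDelta(5.0f) returns a boxed float, so a float limit no longer gets paired with an int delta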


+ 2
- 1
src/TensorFlowNET.Core/Operations/nn_impl.py.cs View File

@@ -14,6 +14,7 @@
limitations under the License.
******************************************************************************/

using Tensorflow.Numpy;
using Tensorflow.Operations;
using static Tensorflow.Binding;

@@ -185,7 +186,7 @@ namespace Tensorflow
{
return tf_with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope =>
{
var zero = array_ops.zeros(new NumSharp.Shape(), dtype: input_tensor.dtype);
var zero = array_ops.zeros(new Shape(), dtype: input_tensor.dtype);
var nonzero_count = math_ops.reduce_sum(
math_ops.cast(gen_math_ops.not_equal(input_tensor, zero), dtype: dtype), name: "nonzero_count");
return nonzero_count;


+ 1
- 1
src/TensorFlowNET.Core/Operations/nn_ops.cs View File

@@ -285,7 +285,7 @@ namespace Tensorflow
var shape = logits.TensorShape;
if (shape != null && shape.ndim > 0)
{
var product = 1;
var product = 1L;
var product_valid = true;
foreach (var d in shape.dims.Take(shape.ndim - 1))
{


+ 1
- 1
src/TensorFlowNET.Core/Operations/string_ops.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using Tensorflow.Framework;
using static Tensorflow.Binding;



+ 3
- 13
src/TensorFlowNET.Core/Sessions/BaseSession.cs View File

@@ -15,7 +15,7 @@
******************************************************************************/

using Google.Protobuf;
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections;
using System.Collections.Generic;
@@ -179,17 +179,8 @@ namespace Tensorflow

feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), tensor);
break;
#if _REGEN
// @formatter:off — disable formatter after this line
%types = ["bool", "sbyte", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
%foreach types%
case #1 v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
case #1[] v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
%
// @formatter:on — enable formatter after this line
#else
// @formatter:off — disable formatter after this line
case bool v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
/*case bool v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
case bool[] v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
case sbyte v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
case sbyte[] v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
@@ -212,9 +203,8 @@ namespace Tensorflow
case double v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
case double[] v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
case Complex v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
case Complex[] v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;
case Complex[] v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype)); break;*/
// @formatter:on — enable formatter after this line
#endif

case string v:
feeds[i++] = new KeyValuePair<TF_Output, Tensor>(key._as_tf_output(), TensorConverter.ToTensor(v, key.dtype));


+ 1
- 1
src/TensorFlowNET.Core/Sessions/_ElementFetchMapper.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;



+ 18
- 18
src/TensorFlowNET.Core/Sessions/_FetchHandler.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;

@@ -71,28 +71,28 @@ namespace Tensorflow
{
if (tensor_values.Length > 0)
{
switch (tensor_values[0].typecode)
switch (tensor_values[0].dtype)
{
case NPTypeCode.Int32:
case NumpyDType.Int32:
full_values.Add(float.NaN);
break;
case NPTypeCode.Single:
case NumpyDType.Single:
full_values.Add(float.NaN);
break;
case NPTypeCode.Double:
case NumpyDType.Double:
full_values.Add(float.NaN);
break;
case NPTypeCode.String:
case NumpyDType.String:
full_values.Add(float.NaN);
break;
case NPTypeCode.Char:
case NumpyDType.Char:
full_values.Add(float.NaN);
break;
case NPTypeCode.Byte:
case NumpyDType.Byte:
full_values.Add(float.NaN);
break;
default:
throw new NotImplementedException($"build_results tensor_values[0] {tensor_values[0].dtype.Name}");
throw new NotImplementedException($"build_results tensor_values[0] {tensor_values[0].dtype}");
}
}
else
@@ -106,36 +106,36 @@ namespace Tensorflow
j += 1;
if (value.ndim == 0)
{
switch (value.typecode)
switch (value.dtype)
{
case NPTypeCode.Int16:
case NumpyDType.Int16:
full_values.Add(value.GetValue<short>(0));
break;
case NPTypeCode.Int32:
case NumpyDType.Int32:
full_values.Add(value.GetValue<int>(0));
break;
case NPTypeCode.Int64:
case NumpyDType.Int64:
full_values.Add(value.GetValue<long>(0));
break;
case NPTypeCode.Single:
case NumpyDType.Single:
full_values.Add(value.GetValue<float>(0));
break;
case NPTypeCode.Double:
case NumpyDType.Double:
full_values.Add(value.GetValue<double>(0));
break;
case NPTypeCode.Boolean:
case NumpyDType.Boolean:
full_values.Add(value.GetValue<bool>(0));
break;
/*case "String":
full_values.Add(value.Data<byte>()[0]);
break;*/
default:
throw new NotImplementedException($"build_results tensor_values[0] {tensor_values[0].dtype.Name}");
throw new NotImplementedException($"build_results tensor_values[0] {tensor_values[0].dtype}");
}
}
else
{
full_values.Add(value[np.arange(0, value.shape[0])]);
full_values.Add(value[np.arange(0, (int)value.dims[0])]);
}
}
i += 1;
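The fetch handler now branches on the new NumpyDType enum instead of NumSharp's NPTypeCode when unboxing scalar results. A condensed sketch of that branch, assuming the NDArray.dtype property and GetValue<T>(int) accessor used above:

using System;
using Tensorflow.Numpy;

object UnpackScalar(NDArray value) => value.dtype switch
{
    NumpyDType.Int32   => (object)value.GetValue<int>(0),   // cast keeps each arm's own boxed type
    NumpyDType.Int64   => value.GetValue<long>(0),
    NumpyDType.Single  => value.GetValue<float>(0),
    NumpyDType.Double  => value.GetValue<double>(0),
    NumpyDType.Boolean => value.GetValue<bool>(0),
    _ => throw new NotImplementedException($"unsupported dtype {value.dtype}")
};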


+ 1
- 1
src/TensorFlowNET.Core/Sessions/_FetchMapper.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System.Collections.Generic;

namespace Tensorflow


+ 8
- 8
src/TensorFlowNET.Core/Tensorflow.Binding.csproj View File

@@ -1,12 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<TargetFramework>netstandard2.0</TargetFramework>
<TargetFramework>netstandard2.1</TargetFramework>
<AssemblyName>TensorFlow.NET</AssemblyName>
<RootNamespace>Tensorflow</RootNamespace>
<TargetTensorFlow>2.2.0</TargetTensorFlow>
<Version>0.50.0</Version>
<LangVersion>8.0</LangVersion>
<Version>0.60.0</Version>
<LangVersion>9.0</LangVersion>
<Authors>Haiping Chen, Meinrad Recheis, Eli Belash</Authors>
<Company>SciSharp STACK</Company>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
@@ -19,7 +19,7 @@
<Description>Google's TensorFlow full binding in .NET Standard.
Building, training and inferring deep learning models.
Building, training and inferring deep learning models.
https://tensorflownet.readthedocs.io</Description>
<AssemblyVersion>0.50.0.0</AssemblyVersion>
<AssemblyVersion>0.60.0.0</AssemblyVersion>
<PackageReleaseNotes>tf.net 0.20.x and above are based on tensorflow native 2.x.

* Eager Mode is added finally.
@@ -32,8 +32,9 @@ TensorFlow .NET v0.3x is focused on making more Keras API works.
Keras API is a separate package released as TensorFlow.Keras.

tf.net 0.4x.x aligns with TensorFlow v2.4.1 native library.
tf.net 0.5x.x aligns with TensorFlow v2.5.x native library.</PackageReleaseNotes>
<FileVersion>0.50.0.0</FileVersion>
tf.net 0.5x.x aligns with TensorFlow v2.5.x native library.
tf.net 0.6x.x aligns with TensorFlow v2.6.x native library.</PackageReleaseNotes>
<FileVersion>0.60.0.0</FileVersion>
<PackageLicenseFile>LICENSE</PackageLicenseFile>
<PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance>
<SignAssembly>true</SignAssembly>
@@ -86,9 +87,8 @@ tf.net 0.5x.x aligns with TensorFlow v2.5.x native library.</PackageReleaseNotes
</ItemGroup>

<ItemGroup>
<PackageReference Include="MethodBoundaryAspect.Fody" Version="2.0.138" />
<PackageReference Include="MethodBoundaryAspect.Fody" Version="2.0.139" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="5.0.1" />
<PackageReference Include="NumSharp" Version="0.30.0" />
<PackageReference Include="Protobuf.Text" Version="0.5.0" />
<PackageReference Include="Serilog.Sinks.Console" Version="3.1.1" />
</ItemGroup>


+ 5
- 5
src/TensorFlowNET.Core/Tensors/Dimension.cs View File

@@ -2,10 +2,10 @@
{
public class Dimension
{
int _value;
public int value => _value;
long _value;
public long value => _value;

public Dimension(int value)
public Dimension(long value)
{
_value = value;
}
@@ -18,10 +18,10 @@
return new Dimension(_value);
}

public static implicit operator Dimension(int value)
public static implicit operator Dimension(long value)
=> new Dimension(value);

public static implicit operator int(Dimension dimension)
public static implicit operator long(Dimension dimension)
=> dimension.value;

public override string ToString() => $"Dimension({_value})";
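Dimension now stores a long, with implicit conversions in both directions. A short usage sketch:

using System;
using Tensorflow;

Dimension d = 1024L;            // implicit long -> Dimension
long n = d;                     // implicit Dimension -> long
Console.WriteLine(d);           // prints "Dimension(1024)"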


+ 0
- 81
src/TensorFlowNET.Core/Tensors/EagerTensorV2.cs View File

@@ -1,81 +0,0 @@
using NumSharp;
using System;
using System.Linq;
using Tensorflow.Eager;
using Tensorflow.Util;
using static Tensorflow.Binding;

namespace Tensorflow
{
public class EagerTensorV2 : DisposableObject
{
SafeTensorHandleHandle EagerTensorHandle;
public string Device
{
get
{
using var _ = EagerTensorHandle.Lease();
return c_api.StringPiece(c_api.TFE_TensorHandleDeviceName(EagerTensorHandle, tf.Status.Handle));
}
}

public EagerTensorV2(IntPtr handle)
{
EagerTensorHandle = c_api.TFE_EagerTensorHandle(handle);
_handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.Status.Handle);
}

public unsafe EagerTensorV2(NDArray nd, string device_name = "")
{
if (nd.typecode == NPTypeCode.String)
throw new NotImplementedException("Support for NDArray of type string not implemented yet");

var arraySlice = nd.Unsafe.Storage.Shape.IsContiguous ? nd.GetData() : nd.CloneData();

_handle = c_api.TF_NewTensor(nd.dtype.as_dtype(),
nd.shape.Select(i => (long)i).ToArray(),
nd.ndim,
new IntPtr(arraySlice.Address),
nd.size * nd.dtypesize,
deallocator: (IntPtr dataPtr, long len, IntPtr args) =>
{

}, IntPtr.Zero);

EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle);
}

/*public unsafe EagerTensorV2(float[,] value)
{
var dims = new long[] { value.Rank, value.Length / value.Rank };
fixed (float* pointer = &value[0, 0])
{
// The address stored in pointerToFirst
// is valid only inside this fixed statement block.
tensorHandle = c_api.TF_NewTensor(TF_DataType.TF_FLOAT,
dims,
value.Rank,
new IntPtr(pointer),
value.Length * sizeof(float),
deallocator: (IntPtr dataPtr, long len, IntPtr args) =>
{

}, IntPtr.Zero);


localTensorHandle = c_api.TFE_NewTensorHandle(tensorHandle, status);
_handle = c_api.TFE_EagerTensorFromHandle(tf.context, localTensorHandle);
}
}*/

protected override void DisposeManagedResources()
{
EagerTensorHandle.Dispose();
}

protected override void DisposeUnmanagedResources(IntPtr handle)
{
c_api.TF_DeleteTensor(_handle);
}
}
}

+ 1
- 1
src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs View File

@@ -20,7 +20,7 @@ using System.Text;
using System.Linq;
using Tensorflow.Framework;
using static Tensorflow.Binding;
using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{


+ 1
- 1
src/TensorFlowNET.Core/Tensors/Tensor.Assign.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{


+ 29
- 266
src/TensorFlowNET.Core/Tensors/Tensor.Conversions.cs View File

@@ -14,12 +14,9 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using NumSharp.Utilities;
using Tensorflow.Numpy;
using System;
using System.Diagnostics.CodeAnalysis;
using System.Globalization;
using System.Runtime.CompilerServices;
using System.Text;
using Tensorflow.Framework.Models;
using static Tensorflow.Binding;
@@ -31,310 +28,76 @@ namespace Tensorflow
{
public unsafe void CopyTo(NDArray nd)
{
if (!nd.Shape.IsContiguous)
throw new ArgumentException("NDArray has to be contiguous (ndarray.Shape.IsContiguous).");
//if (!nd.Shape.IsContiguous)
//throw new ArgumentException("NDArray has to be contiguous (ndarray.Shape.IsContiguous).");

#if _REGEN
#region Compute
switch (nd.typecode)
{
%foreach supported_dtypes,supported_dtypes_lowercase%
case NPTypeCode.#1:
{
CopyTo<#2>(new Span<#2>(nd.Unsafe.Address, nd.size*nd.dtypesize));
break;
}
%
default:
throw new NotSupportedException();
}
#endregion
#else
var length = (int)(nd.size * nd.dtypesize);

#region Compute

switch (nd.typecode)
switch (nd.dtype)
{
case NPTypeCode.Boolean:
/*case NumpyDType.Boolean:
{
CopyTo<bool>(new Span<bool>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<bool>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.Byte:
case NumpyDType.Byte:
{
CopyTo<byte>(new Span<byte>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<byte>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.Int16:
case NumpyDType.Int16:
{
CopyTo<short>(new Span<short>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<short>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.UInt16:
case NumpyDType.UInt16:
{
CopyTo<ushort>(new Span<ushort>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<ushort>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.Int32:
case NumpyDType.Int32:
{
CopyTo<int>(new Span<int>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<int>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.UInt32:
case NumpyDType.UInt32:
{
CopyTo<uint>(new Span<uint>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<uint>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.Int64:
case NumpyDType.Int64:
{
CopyTo<long>(new Span<long>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<long>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.UInt64:
case NumpyDType.UInt64:
{
CopyTo<ulong>(new Span<ulong>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<ulong>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.Char:
case NumpyDType.Char:
{
CopyTo<char>(new Span<char>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<char>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.Double:
case NumpyDType.Double:
{
CopyTo<double>(new Span<double>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<double>(nd.Address.ToPointer(), length));
break;
}
case NPTypeCode.Single:
case NumpyDType.Single:
{
CopyTo<float>(new Span<float>(nd.Unsafe.Address, nd.size * nd.dtypesize));
CopyTo(new Span<float>(nd.Address.ToPointer(), length));
break;
}
}*/
default:
throw new NotSupportedException();
}

#endregion
#endif
}

public void CopyTo<T>(Span<T> destination) where T : unmanaged
{
unsafe
{
var len = checked((int)this.size);
//perform regular CopyTo using Span.CopyTo.
if (typeof(T).as_dtype() == this.dtype && this.dtype != TF_DataType.TF_STRING) //T can't be a string but tensor can.
{
var src = (T*)this.buffer;
var srcSpan = new Span<T>(src, len);
srcSpan.CopyTo(destination);

return;
}

if (len > destination.Length)
throw new ArgumentException("Destinion was too short to perform CopyTo.");

//Perform cast to type <T>.
fixed (T* dst = destination)
{
switch (this.dtype)
{
#if _REGEN
%foreach supported_numericals_TF_DataType,supported_numericals,supported_numericals_lowercase%
case TF_DataType.#1:
{
var converter = Converts.FindConverter<#3, T>();
var src = (#3*) this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
%
#else
case TF_DataType.TF_BOOL:
{
var converter = Converts.FindConverter<bool, T>();
var src = (bool*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_UINT8:
{
var converter = Converts.FindConverter<byte, T>();
var src = (byte*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_INT16:
{
var converter = Converts.FindConverter<short, T>();
var src = (short*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_UINT16:
{
var converter = Converts.FindConverter<ushort, T>();
var src = (ushort*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_INT32:
{
var converter = Converts.FindConverter<int, T>();
var src = (int*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_UINT32:
{
var converter = Converts.FindConverter<uint, T>();
var src = (uint*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_INT64:
{
var converter = Converts.FindConverter<long, T>();
var src = (long*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_UINT64:
{
var converter = Converts.FindConverter<ulong, T>();
var src = (ulong*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_DOUBLE:
{
var converter = Converts.FindConverter<double, T>();
var src = (double*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
case TF_DataType.TF_FLOAT:
{
var converter = Converts.FindConverter<float, T>();
var src = (float*)this.buffer;
for (var i = 0; i < len; i++)
*(dst + i) = converter(unchecked(*(src + i)));
return;
}
#endif
case TF_DataType.TF_STRING:
{
var src = this.StringData();
var culture = CultureInfo.InvariantCulture;

//pin to prevent GC from moving the span around.
fixed (T* _ = destination)
switch (typeof(T).as_dtype())
{
#if _REGEN
%foreach supported_numericals_TF_DataType,supported_numericals,supported_numericals_lowercase%
case TF_DataType.#1: {
var sdst = (#3*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).To#2(culture);
return;
}
%
#else
case TF_DataType.TF_BOOL:
{
var sdst = (bool*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToBoolean(culture);
return;
}
case TF_DataType.TF_UINT8:
{
var sdst = (byte*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToByte(culture);
return;
}
case TF_DataType.TF_INT16:
{
var sdst = (short*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToInt16(culture);
return;
}
case TF_DataType.TF_UINT16:
{
var sdst = (ushort*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToUInt16(culture);
return;
}
case TF_DataType.TF_INT32:
{
var sdst = (int*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToInt32(culture);
return;
}
case TF_DataType.TF_UINT32:
{
var sdst = (uint*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToUInt32(culture);
return;
}
case TF_DataType.TF_INT64:
{
var sdst = (long*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToInt64(culture);
return;
}
case TF_DataType.TF_UINT64:
{
var sdst = (ulong*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToUInt64(culture);
return;
}
case TF_DataType.TF_DOUBLE:
{
var sdst = (double*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToDouble(culture);
return;
}
case TF_DataType.TF_FLOAT:
{
var sdst = (float*)Unsafe.AsPointer(ref destination.GetPinnableReference());
for (var i = 0; i < len; i++)
*(sdst + i) = ((IConvertible)src[i]).ToSingle(culture);
return;
}
#endif
default:
throw new NotSupportedException();
}
}
case TF_DataType.TF_COMPLEX64:
case TF_DataType.TF_COMPLEX128:
default:
throw new NotSupportedException();
}
}
}
throw new NotImplementedException("");
}

public TensorSpec ToTensorSpec()
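CopyTo<T> now copies straight from the tensor's unmanaged buffer into a caller-supplied Span<T> when the element types match. A hedged usage sketch; the tf.constant call and the float element type are assumptions chosen for illustration:

var t = tf.constant(new[] { 1f, 2f, 3f, 4f });      // float32 tensor
var dst = new float[4];
t.CopyTo<float>(dst);                               // float[] converts implicitly to Span<float>
// dst now holds { 1, 2, 3, 4 }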


+ 57
- 56
src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs View File

@@ -14,8 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using NumSharp.Backends.Unmanaged;
using Tensorflow.Numpy;
using System;
using System.Diagnostics.CodeAnalysis;
using System.Linq;
@@ -66,6 +65,45 @@ namespace Tensorflow
#endif
}

internal Tensor(Array array, Shape shape)
=> InitTensor(array, shape);

unsafe void InitTensor(Array array, Shape shape)
{
var dtype = array.GetType().GetElementType().as_dtype();
var length = (ulong)(array.Length * dtype.get_datatype_size());

switch (array)
{
case int[] val:
fixed (void* addr = &val[0])
_handle = TF_NewTensor(shape, dtype, addr, length);
break;
case int[,] val:
fixed (void* addr = &val[0, 0])
_handle = TF_NewTensor(shape, dtype, addr, length);
break;
case long[] val:
fixed (void* addr = &val[0])
_handle = TF_NewTensor(shape, dtype, addr, length);
break;
case float[] val:
fixed (void* addr = &val[0])
_handle = TF_NewTensor(shape, dtype, addr, length);
break;
case float[,] val:
fixed (void* addr = &val[0, 0])
_handle = TF_NewTensor(shape, dtype, addr, length);
break;
case double[] val:
fixed (void* addr = &val[0])
_handle = TF_NewTensor(shape, dtype, addr, length);
break;
default:
throw new NotImplementedException("");
}
}

public Tensor(int value)
{
unsafe
@@ -107,38 +145,6 @@ namespace Tensorflow
AllocationType = TF_TensorData(_handle).ToPointer() == data_ptr ? AllocationType.FromPointer : AllocationType.Tensorflow;
}

#if _REGEN
%types = ["sbyte", "bool", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
%foreach types%
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(#1[] data, TF_DataType? dType = null)
{
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(#1)), new long[] {data.Length}, data, #(#1=="Complex"|"Marshal.SizeOf<Complex>()"|"sizeof(#(str(#1)))"));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(#1[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(#1)), shape, data, #(#1=="Complex"|"Marshal.SizeOf<Complex>()"|"sizeof(#(str(#1)))"));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(#1 value, TF_DataType? dType = null)
{
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(#1)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(#1));
*(#1*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
%
#else

/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
@@ -181,6 +187,11 @@ namespace Tensorflow
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(bool)), shape, data, sizeof(bool));
}

internal Tensor(float[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(bool)), shape, data, sizeof(bool));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// </summary>
@@ -381,14 +392,6 @@ namespace Tensorflow
_handle = CreateTensorFromArray(TF_DataType.TF_FLOAT, new long[] { data.Length }, data, sizeof(float));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(float[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(float)), shape, data, sizeof(float));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// </summary>
@@ -450,7 +453,6 @@ namespace Tensorflow
*(Complex*)TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
#endif

/// <summary>
/// Create a string Tensor from the given string
@@ -474,7 +476,7 @@ namespace Tensorflow
public unsafe Tensor(NDArray nd, TF_DataType? tensorDType = null)
{
if (tensorDType == null)
tensorDType = nd.dtype.as_dtype();
tensorDType = nd.dtype.as_tf_dtype();

// todo: handle nd of type "String" here too
/*if (tensorDType == TF_DataType.TF_STRING && nd.typecode == NPTypeCode.Byte)
@@ -521,26 +523,25 @@ namespace Tensorflow

private unsafe void CreateTensorFromNDArray(NDArray nd, TF_DataType? given_dtype)
{
if (nd.typecode == NPTypeCode.String)
if (nd.dtype == NumpyDType.String)
throw new NotImplementedException("Support for NDArray of type string not implemented yet");

var arraySlice = nd.Unsafe.Storage.Shape.IsContiguous ? nd.GetData() : nd.CloneData();

_handle = TF_NewTensor(
given_dtype ?? nd.dtype.as_dtype(),
dims: nd.shape.Select(i => (long)i).ToArray(),
throw new NotImplementedException("");
/*_handle = TF_NewTensor(
given_dtype ?? nd.dtype.as_tf_dtype(),
dims: nd.dims.Select(i => (long)i).ToArray(),
num_dims: nd.ndim,
data: arraySlice.Address,
len: (ulong)nd.size * (ulong)nd.dtypesize);
data: nd.Address,
len: nd.size * nd.dtypesize);

// if TF decided not to perform copy, hold reference for given NDArray.
if (TensorDataPointer.ToPointer() == arraySlice.Address)
if (TensorDataPointer == nd.Address)
{
AllocationType = AllocationType.FromPointer;
AllocationHandle = arraySlice;
AllocationHandle = nd;
}
else
AllocationType = AllocationType.Tensorflow;
AllocationType = AllocationType.Tensorflow;*/
}

public Tensor(Operation op, int value_index, TF_DataType dtype)
@@ -591,7 +592,7 @@ namespace Tensorflow
protected IntPtr CreateTensorFromArray(TF_DataType dt, long[] shape, Array data, int start, int count, int element_size)
{
if (start < 0 || start > data.Length - count)
throw new ArgumentException($"Array length {data.Length} does not match the given shape {new Shape(shape.Cast<int>().ToArray())}");
throw new ArgumentException($"Array length {data.Length} does not match the given shape {new Shape(shape.ToArray())}");

// get a handle to the pinned array which we will pass on to the tensor computation engine to use
var gcHandle = GCHandle.Alloc(data, GCHandleType.Pinned);


+ 1
- 1
src/TensorFlowNET.Core/Tensors/Tensor.Implicit.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using static Tensorflow.Binding;



+ 1
- 1
src/TensorFlowNET.Core/Tensors/Tensor.Index.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;


+ 1
- 1
src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Numerics;


+ 1
- 1
src/TensorFlowNET.Core/Tensors/Tensor.String.cs View File

@@ -62,7 +62,7 @@ namespace Tensorflow
// TF_STRING tensors are encoded with a table of 8-byte offsets followed by TF_StringEncode-encoded bytes.
// [offset1, offset2,...,offsetn, s1size, s1bytes, s2size, s2bytes,...,snsize,snbytes]
//
int size = 1;
long size = 1;
foreach (var s in TensorShape.dims)
size *= s;



+ 11
- 67
src/TensorFlowNET.Core/Tensors/Tensor.Value.cs View File

@@ -1,9 +1,5 @@
using NumSharp;
using NumSharp.Backends;
using NumSharp.Backends.Unmanaged;
using NumSharp.Utilities;
using Tensorflow.Numpy;
using System;
using System.Runtime.InteropServices;
using System.Text;
using static Tensorflow.Binding;

@@ -59,21 +55,9 @@ namespace Tensorflow
{
unsafe
{
#if _REGEN
throw new NotImplementedException("");
#region Compute
switch (dtype.as_numpy_dtype().GetTypeCode())
{
%foreach supported_dtypes,supported_dtypes_lowercase%
case NPTypeCode.#1: return new T[] {Converts.ChangeType<T>(*(#2*) buffer)};
%
case NPTypeCode.String: return new T[] {Converts.ChangeType<T>((string)this)};
default:
throw new NotSupportedException();
}
#endregion
#else
#region Compute
switch (dtype.as_numpy_dtype().GetTypeCode())
/*switch (dtype.as_numpy_dtype().GetTypeCode())
{
case NPTypeCode.Boolean: return new T[] { Converts.ChangeType<T>(*(bool*)buffer) };
case NPTypeCode.Byte: return new T[] { Converts.ChangeType<T>(*(byte*)buffer) };
@@ -89,9 +73,8 @@ namespace Tensorflow
case NPTypeCode.String: return new T[] { Converts.ChangeType<T>((string)this) };
default:
throw new NotSupportedException();
}
}*/
#endregion
#endif
}
}

@@ -102,21 +85,9 @@ namespace Tensorflow
fixed (T* dstRet = ret)
{
T* dst = dstRet; //local stack copy

#if _REGEN
#region Compute
switch (dtype.as_numpy_dtype().GetTypeCode())
{
%foreach supported_dtypes,supported_dtypes_lowercase%
case NPTypeCode.#1: new UnmanagedMemoryBlock<#2>((#2*) buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
%
default:
throw new NotSupportedException();
}
#endregion
#else
throw new NotImplementedException("");
#region Compute
switch (dtype.as_numpy_dtype().GetTypeCode())
/*switch (dtype.as_numpy_dtype().GetTypeCode())
{
case NPTypeCode.Boolean: new UnmanagedMemoryBlock<bool>((bool*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
case NPTypeCode.Byte: new UnmanagedMemoryBlock<byte>((byte*)buffer, len).CastTo(new UnmanagedMemoryBlock<T>(dst, len), null, null); break;
@@ -132,10 +103,8 @@ namespace Tensorflow
case NPTypeCode.String: throw new NotSupportedException("Unable to convert from string to other dtypes"); //TODO! this should call Converts.To<T>
default:
throw new NotSupportedException();
}
}*/
#endregion
#endif

}
}

@@ -153,36 +122,11 @@ namespace Tensorflow
public NDArray numpy()
=> GetNDArray(dtype);

protected unsafe NDArray GetNDArray(TF_DataType dtype)
protected NDArray GetNDArray(TF_DataType dtype)
{
if (dtype == TF_DataType.TF_STRING)
return np.array(StringData());

var count = Convert.ToInt64(size);
IUnmanagedMemoryBlock mem;
switch (dtype)
{
case TF_DataType.TF_BOOL:
mem = new UnmanagedMemoryBlock<bool>((bool*)buffer, count);
break;
case TF_DataType.TF_INT32:
mem = new UnmanagedMemoryBlock<int>((int*)buffer, count);
break;
case TF_DataType.TF_INT64:
mem = new UnmanagedMemoryBlock<long>((long*)buffer, count);
break;
case TF_DataType.TF_FLOAT:
mem = new UnmanagedMemoryBlock<float>((float*)buffer, count);
break;
case TF_DataType.TF_DOUBLE:
mem = new UnmanagedMemoryBlock<double>((double*)buffer, count);
break;
default:
mem = new UnmanagedMemoryBlock<byte>((byte*)buffer, count);
break;
}

return new NDArray(ArraySlice.FromMemoryBlock(mem, copy: true), new Shape(shape));
/*if (dtype == TF_DataType.TF_STRING)
return np.array(StringData());*/
return new NDArray(this);
}

/// <summary>


+ 5
- 4
src/TensorFlowNET.Core/Tensors/Tensor.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Diagnostics.CodeAnalysis;
using System.Globalization;
@@ -149,7 +149,8 @@ namespace Tensorflow
/// </summary>
public virtual void set_shape(TensorShape shape)
{
this.shape = shape.rank >= 0 ? shape.dims : null;
// this.shape = shape.rank >= 0 ? shape.dims : null;
throw new NotImplementedException("");
}

/// <summary>
@@ -286,12 +287,12 @@ namespace Tensorflow

if (dtype == TF_DataType.TF_STRING)
{
int size = 1;
long size = 1;
foreach (var s in TensorShape.dims)
size *= s;
var tstr = TensorDataPointer;
#if TRACK_TENSOR_LIFE
print($"Delete TString 0x{handle.ToString("x16")} {AllocationType} Data: 0x{tstrings.ToString("x16")}");
print($"Delete TString 0x{handle.ToString("x16")} {AllocationType} Data: 0x{tstr.ToString("x16")}");
#endif
for (int i = 0; i < size; i++)
{


+ 35
- 96
src/TensorFlowNET.Core/Tensors/TensorConverter.cs View File

@@ -1,7 +1,7 @@
using NumSharp;
using NumSharp.Utilities;
using Tensorflow.Numpy;
using System;
using System.Threading.Tasks;
using Tensorflow.Util;

namespace Tensorflow
{
@@ -18,7 +18,8 @@ namespace Tensorflow
/// <exception cref="NotSupportedException"></exception>
public static Tensor ToTensor(NDArray nd, TF_DataType? astype = null)
{
return new Tensor(astype == null ? nd : nd.astype(astype.Value.as_numpy_typecode(), false));
// return new Tensor(astype == null ? nd : nd.astype(astype.Value.as_numpy_typecode(), false));
throw new NotImplementedException("");
}

/// <summary>
@@ -27,9 +28,10 @@ namespace Tensorflow
/// <param name="nd">The ndarray to convert.</param>
/// <param name="astype">Convert <see cref="Array"/> to given <paramref name="astype"/> before inserting it into a <see cref="Tensor"/>.</param>
/// <exception cref="NotSupportedException"></exception>
public static Tensor ToTensor(NDArray nd, NPTypeCode? astype = null)
public static Tensor ToTensor(NDArray nd, NumpyDType? astype = null)
{
return new Tensor(astype == null ? nd : nd.astype(astype.Value, false));
// return new Tensor(astype == null ? nd : nd.astype(astype.Value, false));
throw new NotImplementedException("");
}

/// <summary>
@@ -43,7 +45,7 @@ namespace Tensorflow
if (array == null) throw new ArgumentNullException(nameof(array));
var arrtype = array.ResolveElementType();

var astype_type = astype?.as_numpy_dtype() ?? arrtype;
var astype_type = astype?.as_system_dtype() ?? arrtype;
if (astype_type == arrtype)
{
//no conversion required
@@ -54,8 +56,8 @@ namespace Tensorflow

if (astype == TF_DataType.TF_INT8)
{
if (array.Rank != 1 || array.GetType().GetElementType()?.IsArray == true) //is multidim or jagged
array = Arrays.Flatten(array);
// if (array.Rank != 1 || array.GetType().GetElementType()?.IsArray == true) //is multidim or jagged
// array = Arrays.Flatten(array);

return new Tensor((sbyte[])array);
}
@@ -64,41 +66,22 @@ namespace Tensorflow
if (array.Rank != 1 || array.GetType().GetElementType().IsArray)
return new Tensor(new NDArray(array));

#if _REGEN
#region Compute
switch (arrtype)
{
%foreach supported_dtypes,supported_dtypes_lowercase%
case NPTypeCode.#1: return new Tensor((#2[])arr);
%
default:
throw new NotSupportedException();
}
#endregion
#else

#region Compute

switch (arrtype.GetTypeCode())
{
case NPTypeCode.Boolean: return new Tensor((bool[])array);
case NPTypeCode.Byte: return new Tensor((byte[])array);
case NPTypeCode.Int16: return new Tensor((short[])array);
case NPTypeCode.UInt16: return new Tensor((ushort[])array);
case NPTypeCode.Int32: return new Tensor((int[])array);
case NPTypeCode.UInt32: return new Tensor((uint[])array);
case NPTypeCode.Int64: return new Tensor((long[])array);
case NPTypeCode.UInt64: return new Tensor((ulong[])array);
case NPTypeCode.Char: return new Tensor((char[])array);
case NPTypeCode.Double: return new Tensor((double[])array);
case NPTypeCode.Single: return new Tensor((float[])array);
case NumpyDType.Boolean: return new Tensor((bool[])array);
case NumpyDType.Byte: return new Tensor((byte[])array);
case NumpyDType.Int16: return new Tensor((short[])array);
case NumpyDType.UInt16: return new Tensor((ushort[])array);
case NumpyDType.Int32: return new Tensor((int[])array);
case NumpyDType.UInt32: return new Tensor((uint[])array);
case NumpyDType.Int64: return new Tensor((long[])array);
case NumpyDType.UInt64: return new Tensor((ulong[])array);
// case NPTypeCode.Char: return new Tensor((char[])array);
case NumpyDType.Double: return new Tensor((double[])array);
case NumpyDType.Single: return new Tensor((float[])array);
default:
throw new NotSupportedException();
}

#endregion

#endif
}
else
{
@@ -106,7 +89,7 @@ namespace Tensorflow
//by this point astype is not null.

//flatten if required
if (array.Rank != 1 || array.GetType().GetElementType()?.IsArray == true) //is multidim or jagged
/*if (array.Rank != 1 || array.GetType().GetElementType()?.IsArray == true) //is multidim or jagged
array = Arrays.Flatten(array);

try
@@ -122,7 +105,8 @@ namespace Tensorflow
var ret = Array.CreateInstance(astype_type, array.LongLength);
Parallel.For(0, ret.LongLength, i => ret.SetValue(Convert.ChangeType(array.GetValue(i), astype_type), i));
return ToTensor(ret, null);
}
}*/
throw new NotImplementedException("");
}
}

@@ -145,22 +129,7 @@ namespace Tensorflow
if (constantType == TF_DataType.TF_STRING)
return new Tensor((string)(object)constant);

#if _REGEN
#region Compute
switch (InfoOf<T>.NPTypeCode)
{
%foreach supported_dtypes,supported_dtypes_lowercase%
case NPTypeCode.#1: return new Tensor((#2)(object)constant);
%
default:
throw new NotSupportedException();
}
#endregion
#else

#region Compute

switch (InfoOf<T>.NPTypeCode)
/*switch (InfoOf<T>.NPTypeCode)
{
case NPTypeCode.Boolean: return new Tensor((bool)(object)constant);
case NPTypeCode.Byte: return new Tensor((byte)(object)constant);
@@ -170,20 +139,18 @@ namespace Tensorflow
case NPTypeCode.UInt32: return new Tensor((uint)(object)constant);
case NPTypeCode.Int64: return new Tensor((long)(object)constant);
case NPTypeCode.UInt64: return new Tensor((ulong)(object)constant);
case NPTypeCode.Char: return new Tensor(Converts.ToByte(constant));
// case NPTypeCode.Char: return new Tensor(Converts.ToByte(constant));
case NPTypeCode.Double: return new Tensor((double)(object)constant);
case NPTypeCode.Single: return new Tensor((float)(object)constant);
default:
throw new NotSupportedException();
}

#endregion
#endif
}*/
throw new NotImplementedException("");
}

//conversion required

if (astype == TF_DataType.TF_INT8)
/*if (astype == TF_DataType.TF_INT8)
return new Tensor(Converts.ToSByte(constant));

if (astype == TF_DataType.TF_STRING)
@@ -191,20 +158,7 @@ namespace Tensorflow

var astype_np = astype?.as_numpy_typecode();

#if _REGEN
#region Compute
switch (astype_np)
{
%foreach supported_dtypes,supported_dtypes_lowercase%
case NPTypeCode.#1: return new Tensor(Converts.To#1(constant));
%
default:
throw new NotSupportedException();
}
#endregion
#else

#region Compute
switch (astype_np)
{
case NPTypeCode.Boolean: return new Tensor(Converts.ToBoolean(constant));
@@ -220,9 +174,9 @@ namespace Tensorflow
case NPTypeCode.Single: return new Tensor(Converts.ToSingle(constant));
default:
throw new NotSupportedException();
}
#endregion
#endif
}*/
throw new NotImplementedException("");
}

/// <summary>
@@ -233,7 +187,7 @@ namespace Tensorflow
/// <exception cref="NotSupportedException"></exception>
public static Tensor ToTensor(string constant, TF_DataType? astype = null)
{
switch (astype)
/*switch (astype)
{
//was conversion requested?
case null:
@@ -246,20 +200,6 @@ namespace Tensorflow
{
var astype_np = astype?.as_numpy_typecode();

#if _REGEN
#region Compute
switch (astype_np)
{
%foreach supported_dtypes,supported_dtypes_lowercase%
case NPTypeCode.#1: return new Tensor(Converts.To#1(constant));
%
default:
throw new NotSupportedException();
}
#endregion
#else

#region Compute
switch (astype_np)
{
case NPTypeCode.Boolean: return new Tensor(Converts.ToBoolean(constant));
@@ -276,10 +216,9 @@ namespace Tensorflow
default:
throw new NotSupportedException();
}
#endregion
#endif
}
}
}*/
throw new NotImplementedException("");
}

}

+ 23
- 20
src/TensorFlowNET.Core/Tensors/TensorShape.Convert.cs View File

@@ -1,43 +1,46 @@
using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{
public partial class TensorShape
{
public void Deconstruct(out int h, out int w)
public void Deconstruct(out long h, out long w)
{
h = dims[0];
w = dims[1];
}

public static implicit operator TensorShape(Shape shape) => new TensorShape((int[])shape.Dimensions.Clone());
public static implicit operator Shape(TensorShape shape) => new Shape((int[])shape.dims.Clone());
public static implicit operator TensorShape(Shape shape) => new TensorShape((long[])shape.dims.Clone());
public static implicit operator Shape(TensorShape shape) => new Shape((long[])shape.dims.Clone());

public static implicit operator int[](TensorShape shape) => shape == null ? null : (int[])shape.dims.Clone(); //we clone to avoid any changes
public static implicit operator TensorShape(int[] dims) => dims == null ? null : new TensorShape(dims);

public static explicit operator int(TensorShape shape) => shape.size;
public static implicit operator TensorShape(int dim) => new TensorShape(dim);
public static implicit operator long[](TensorShape shape) => shape == null ? null : (long[])shape.dims.Clone(); //we clone to avoid any changes
public static implicit operator TensorShape(long[] dims) => dims == null ? null : new TensorShape(dims);

public static explicit operator (int, int)(TensorShape shape) => shape.dims.Length == 2 ? (shape.dims[0], shape.dims[1]) : (0, 0);
public static implicit operator TensorShape((int, int) dims) => new TensorShape(dims.Item1, dims.Item2);
public static explicit operator long(TensorShape shape) => shape.size;
public static implicit operator TensorShape(long dim) => new TensorShape(dim);

public static explicit operator (int, int, int)(TensorShape shape) => shape.dims.Length == 3 ? (shape.dims[0], shape.dims[1], shape.dims[2]) : (0, 0, 0);
public static implicit operator TensorShape((int, int, int) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3);
public static explicit operator (long, long)(TensorShape shape) => shape.dims.Length == 2 ? (shape.dims[0], shape.dims[1]) : (0, 0);
public static implicit operator TensorShape((long, long) dims) => new TensorShape(dims.Item1, dims.Item2);

public static explicit operator (int, int, int, int)(TensorShape shape) => shape.dims.Length == 4 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3]) : (0, 0, 0, 0);
public static implicit operator TensorShape((int, int, int, int) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4);
public static explicit operator (long, long, long)(TensorShape shape) => shape.dims.Length == 3 ? (shape.dims[0], shape.dims[1], shape.dims[2]) : (0, 0, 0);
public static implicit operator TensorShape((long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3);

public static explicit operator (int, int, int, int, int)(TensorShape shape) => shape.dims.Length == 5 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4]) : (0, 0, 0, 0, 0);
public static implicit operator TensorShape((int, int, int, int, int) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5);
public static explicit operator (long, long, long, long)(TensorShape shape) => shape.dims.Length == 4 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3]) : (0, 0, 0, 0);
public static implicit operator TensorShape((long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4);

public static explicit operator (int, int, int, int, int, int)(TensorShape shape) => shape.dims.Length == 6 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5]) : (0, 0, 0, 0, 0, 0);
public static implicit operator TensorShape((int, int, int, int, int, int) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6);
public static explicit operator (long, long, long, long, long)(TensorShape shape) => shape.dims.Length == 5 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4]) : (0, 0, 0, 0, 0);
public static implicit operator TensorShape((long, long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5);

public static explicit operator (int, int, int, int, int, int, int)(TensorShape shape) => shape.dims.Length == 7 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5], shape.dims[6]) : (0, 0, 0, 0, 0, 0, 0);
public static implicit operator TensorShape((int, int, int, int, int, int, int) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6, dims.Item7);
public static explicit operator (long, long, long, long, long, long)(TensorShape shape) => shape.dims.Length == 6 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5]) : (0, 0, 0, 0, 0, 0);
public static implicit operator TensorShape((long, long, long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6);

public static explicit operator (int, int, int, int, int, int, int, int)(TensorShape shape) => shape.dims.Length == 8 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5], shape.dims[6], shape.dims[7]) : (0, 0, 0, 0, 0, 0, 0, 0);
public static implicit operator TensorShape((int, int, int, int, int, int, int, int) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6, dims.Item7, dims.Item8);
public static explicit operator (long, long, long, long, long, long, long)(TensorShape shape) => shape.dims.Length == 7 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5], shape.dims[6]) : (0, 0, 0, 0, 0, 0, 0);
public static implicit operator TensorShape((long, long, long, long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6, dims.Item7);

public static explicit operator (long, long, long, long, long, long, long, long)(TensorShape shape) => shape.dims.Length == 8 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5], shape.dims[6], shape.dims[7]) : (0, 0, 0, 0, 0, 0, 0, 0);
public static implicit operator TensorShape((long, long, long, long, long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6, dims.Item7, dims.Item8);
}
}
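The tuple and array conversions above now work in terms of long. A short usage sketch of the implicit operators and the Deconstruct overload declared in this file:

using Tensorflow;

TensorShape s = (2L, 3L, 4L);               // implicit (long, long, long) -> TensorShape
long[] dims = s;                            // implicit TensorShape -> long[] (cloned copy)
var (h, w) = new TensorShape(28L, 28L);     // Deconstruct(out long h, out long w)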

+ 1
- 1
src/TensorFlowNET.Core/Tensors/TensorShape.Equals.cs View File

@@ -15,7 +15,7 @@ namespace Tensorflow
else if (rank != shape1.rank)
return false;
return Enumerable.SequenceEqual(shape1.dims, dims);
case int[] shape2:
case long[] shape2:
if (rank != shape2.Length)
return false;
return Enumerable.SequenceEqual(dims, shape2);


+ 44
- 33
src/TensorFlowNET.Core/Tensors/TensorShape.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Diagnostics.CodeAnalysis;
@@ -19,7 +19,7 @@ namespace Tensorflow
/// <summary>
/// Returns a list of Dimensions, or None if the shape is unspecified.
/// </summary>
public int[] dims => shape.Dimensions;
public long[] dims => shape.dims;

/// <summary>
/// Returns the rank of this shape.
@@ -30,17 +30,17 @@ namespace Tensorflow
/// <summary>
/// Returns the rank of this shape.
/// </summary>
public int rank => _rank > -1 ? shape.NDim : -1;
public int rank => _rank > -1 ? shape.ndim : -1;

/// <summary>
/// Returns the size this shape represents.
/// </summary>
public int size
public long size
{
get
{
var dims = shape.Dimensions;
var computed = 1;
var dims = shape.dims;
var computed = 1L;
for (int i = 0; i < dims.Length; i++)
{
var val = dims[i];
@@ -60,25 +60,23 @@ namespace Tensorflow
}

public static TensorShape Scalar
=> new TensorShape(new int[0]);
=> new TensorShape(new long[0]);

public TensorShape(TensorShapeProto proto)
{
if (proto.UnknownRank) return;
switch (proto.Dim.Count)
{
case 0: shape = new Shape(new int[0]); break;
case 1: shape = Shape.Vector((int)proto.Dim[0].Size); break;
case 2: shape = Shape.Matrix((int)proto.Dim[0].Size, (int)proto.Dim[1].Size); break;
case 0: shape = new Shape(new long[0]);
break;
default:
var protodims = proto.Dim;
var len = protodims.Count;
var dims = new int[len];
var dims = new long[len];
for (int i = 0; i < len; i++)
dims[i] = (int)protodims[i].Size;


shape = new Shape(dims); break;
dims[i] = protodims[i].Size;
shape = new Shape(dims);
break;
}
}

@@ -86,23 +84,36 @@ namespace Tensorflow
{
switch (dims.Length)
{
case 0: shape = new Shape(new int[0]); break;
case 1: shape = Shape.Vector(dims[0]); break;
case 2: shape = Shape.Matrix(dims[0], dims[1]); break;
default: shape = new Shape(dims); break;
case 0:
shape = new Shape(new long[0]);
break;
default:
shape = new Shape(dims.Select(x => Convert.ToInt64(x)).ToArray());
break;
}
}

public TensorShape(params long[] dims)
{
switch (dims.Length)
{
case 0: shape = new Shape(new long[0]);
break;
default: shape = new Shape(dims);
break;
}
}

public TensorShape(int[][] dims)
public TensorShape(long[][] dims)
{
if (dims.Length == 1)
{
switch (dims[0].Length)
{
case 0: shape = new Shape(new int[0]); break;
case 1: shape = Shape.Vector((int)dims[0][0]); break;
case 2: shape = Shape.Matrix(dims[0][0], dims[1][2]); break;
default: shape = new Shape(dims[0]); break;
case 0: shape = new Shape(new long[0]);
break;
default: shape = new Shape(dims[0]);
break;
}
}
else
@@ -134,7 +145,7 @@ namespace Tensorflow
}
}

public int this[int index] => index < 0 ? dims[ndim + index] : dims[index];
public long this[int index] => index < 0 ? dims[ndim + index] : dims[index];

/// <summary>
/// Returns True iff `self` is fully defined in every dimension.
@@ -186,7 +197,7 @@ namespace Tensorflow
if (rank == -1)
return new TensorShape(-1);
else
return new TensorShape(Enumerable.Repeat(-1, rank).ToArray());
return new TensorShape(Enumerable.Repeat(-1L, rank).ToArray());
}

/// <summary>
@@ -195,7 +206,7 @@ namespace Tensorflow
/// <param name="other"></param>
/// <returns></returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public TensorShape concatenate(int[] other)
public TensorShape concatenate(long[] other)
{
return concatenate(new TensorShape(other));
}
@@ -213,7 +224,7 @@ namespace Tensorflow
return new TensorShape();
else
{
var concatenate_dims = new int[ndim + otherShape.ndim];
var concatenate_dims = new long[ndim + otherShape.ndim];
for (int i = 0; i < ndim; i++)
concatenate_dims[i] = dims[i];

@@ -234,7 +245,7 @@ namespace Tensorflow
if (dims == null)
return other;

var new_dims = new List<int>();
var new_dims = new List<long>();

foreach (var i in range(ndim))
{
@@ -249,11 +260,11 @@ namespace Tensorflow
/// <summary>
/// Returns a cloned array from <see cref="dims"/>.
/// </summary>
public int[] as_list()
public long[] as_list()
{
if (shape.IsEmpty)
throw new ValueError("as_list() is not defined on an unknown TensorShape.");
return (int[])dims.Clone();
return (long[])dims.Clone();
}

public long[] as_list_long()
@@ -263,11 +274,11 @@ namespace Tensorflow
return dims.Select(x => Convert.ToInt64(x)).ToArray();
}

public int num_elements()
public long num_elements()
{
if (is_fully_defined())
{
var size = 1;
var size = 1L;
foreach (var dim in dims)
size *= dim;
return size;
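
A short sketch of how the widened API reads after this change (dims, size, the indexer and as_list are now 64-bit, per the hunks above):

    var shape = new TensorShape(2, 3, 4);
    long[] dims = shape.dims;        // was int[]
    long size = shape.size;          // 24, accumulated in a long
    long last = shape[-1];           // 4, negative indexing still supported
    long[] copy = shape.as_list();   // cloned long[] copy of dims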


+ 1
- 1
src/TensorFlowNET.Core/Tensors/Tensors.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections;
using System.Collections.Generic;


+ 10
- 0
src/TensorFlowNET.Core/Tensors/c_api.tensor.cs View File

@@ -17,6 +17,7 @@
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using Tensorflow.Numpy;

namespace Tensorflow
{
@@ -102,6 +103,15 @@ namespace Tensorflow
{
return c_api.TF_NewTensor(dataType, dims, num_dims, data, len, EmptyDeallocator, DeallocatorArgs.Empty);
}

public static unsafe IntPtr TF_NewTensor(Shape shape, TF_DataType dtype, void* data, ulong length)
{
var handle = TF_AllocateTensor(dtype, shape.dims, shape.ndim, length);
var tensor = TF_TensorData(handle);
System.Buffer.MemoryCopy(data, tensor.ToPointer(), length, length);
return handle;
}

/// <summary>
/// Return a new tensor that holds the bytes data[0,len-1]
/// </summary>
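
A hedged usage sketch of the new overload (the pinning pattern and the TF_DeleteTensor call are assumptions for illustration, not part of this diff):

    var data = new float[] { 1f, 2f, 3f, 4f };
    unsafe
    {
        fixed (float* p = data)
        {
            // allocate a 2x2 float tensor and copy the managed buffer into it
            var handle = c_api.TF_NewTensor(new Shape(2, 2), TF_DataType.TF_FLOAT, p,
                                            (ulong)(data.Length * sizeof(float)));
            // ... hand the tensor to the C API, then release it
            c_api.TF_DeleteTensor(handle);
        }
    }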


+ 13
- 11
src/TensorFlowNET.Core/Tensors/constant_op.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -56,7 +56,7 @@ namespace Tensorflow
if (shape == null)
return t;

if (t.shape.SequenceEqual(shape.dims))
if (t.shape.Select(x => Convert.ToInt64(x)).SequenceEqual(shape.dims))
return t;

if (verify_shape)
@@ -146,9 +146,9 @@ namespace Tensorflow
}
else if (dtype != TF_DataType.DtInvalid &&
value is NDArray nd &&
dtypes.as_dtype(nd.dtype) != dtype)
nd.dtype.as_tf_dtype() != dtype)
{
value = nd.astype(dtype.as_numpy_dtype());
value = nd.astype(dtype.as_system_dtype());
}

if (dtype == TF_DataType.TF_STRING && value is byte[] bytes)
@@ -160,6 +160,8 @@ namespace Tensorflow
return val;
case NDArray val:
return new EagerTensor(val, ctx.DeviceName);
case Shape val:
return new EagerTensor(val.dims, new Shape(val.ndim));
case TensorShape val:
return new EagerTensor(val.dims, ctx.DeviceName);
case string val:
@@ -177,23 +179,23 @@ namespace Tensorflow
case byte[,,] val:
return new EagerTensor(val, ctx.DeviceName);
case int val:
return new EagerTensor(val, ctx.DeviceName);
return new EagerTensor(new[] { val }, Shape.Scalar);
case int[] val:
return new EagerTensor(val, ctx.DeviceName);
return new EagerTensor(val, new Shape(val.Length));
case int[,] val:
return new EagerTensor(val, ctx.DeviceName);
return new EagerTensor(val, new Shape(val.GetLength(0), val.GetLength(1)));
case int[,,] val:
return new EagerTensor(val, ctx.DeviceName);
case long val:
return new EagerTensor(val, ctx.DeviceName);
case long[] val:
return new EagerTensor(val, ctx.DeviceName);
return new EagerTensor(val, new Shape(val.Length));
case long[,] val:
return new EagerTensor(val, ctx.DeviceName);
case long[,,] val:
return new EagerTensor(val, ctx.DeviceName);
case float val:
return new EagerTensor(val, ctx.DeviceName);
return new EagerTensor(new[] { val }, Shape.Scalar);
case float[] val:
return new EagerTensor(val, ctx.DeviceName);
case float[,] val:
@@ -201,7 +203,7 @@ namespace Tensorflow
case float[,,] val:
return new EagerTensor(val, ctx.DeviceName);
case double val:
return new EagerTensor(val, ctx.DeviceName);
return new EagerTensor(new[] { val }, Shape.Scalar);
case double[] val:
return new EagerTensor(val, ctx.DeviceName);
case double[,] val:
@@ -227,7 +229,7 @@ namespace Tensorflow
bool as_ref = false)
{
var s_list = s.dims;
var int64_value = 0;
var int64_value = 0L;
foreach (var dim in s_list)
{
if (dim > Math.Pow(2, 31))
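
Roughly how the dispatch above plays out from user code (assuming tf.constant routes through convert_to_eager_tensor; the comments mirror the cases in this hunk):

    var a = tf.constant(3);                             // int    -> EagerTensor(new[] { 3 }, Shape.Scalar)
    var v = tf.constant(new[] { 1, 2, 3 });             // int[]  -> EagerTensor(val, new Shape(3))
    var m = tf.constant(new[,] { { 1, 2 }, { 3, 4 } }); // int[,] -> EagerTensor(val, new Shape(2, 2))
    var d = tf.constant(3.14);                          // double -> EagerTensor(new[] { 3.14 }, Shape.Scalar)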


+ 41
- 17
src/TensorFlowNET.Core/Tensors/dtypes.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Numerics;

@@ -43,7 +43,7 @@ namespace Tensorflow
/// </summary>
/// <param name="type"></param>
/// <returns><see cref="System.Type"/> equivalent to <paramref name="type"/>, if none exists, returns null.</returns>
public static Type as_numpy_dtype(this TF_DataType type)
public static Type as_system_dtype(this TF_DataType type)
{
switch (type.as_base_dtype())
{
@@ -84,36 +84,36 @@ namespace Tensorflow
/// </summary>
/// <param name="type"></param>
/// <returns></returns>
/// <exception cref="ArgumentException">When <paramref name="type"/> has no equivalent <see cref="NPTypeCode"/></exception>
public static NPTypeCode as_numpy_typecode(this TF_DataType type)
/// <exception cref="ArgumentException">When <paramref name="type"/> has no equivalent <see cref="NumpyDType"/></exception>
public static NumpyDType as_numpy_typecode(this TF_DataType type)
{
switch (type)
{
case TF_DataType.TF_BOOL:
return NPTypeCode.Boolean;
return NumpyDType.Boolean;
case TF_DataType.TF_UINT8:
return NPTypeCode.Byte;
return NumpyDType.Byte;
case TF_DataType.TF_INT64:
return NPTypeCode.Int64;
return NumpyDType.Int64;
case TF_DataType.TF_INT32:
return NPTypeCode.Int32;
return NumpyDType.Int32;
case TF_DataType.TF_INT16:
return NPTypeCode.Int16;
return NumpyDType.Int16;
case TF_DataType.TF_UINT64:
return NPTypeCode.UInt64;
return NumpyDType.UInt64;
case TF_DataType.TF_UINT32:
return NPTypeCode.UInt32;
return NumpyDType.UInt32;
case TF_DataType.TF_UINT16:
return NPTypeCode.UInt16;
return NumpyDType.UInt16;
case TF_DataType.TF_FLOAT:
return NPTypeCode.Single;
return NumpyDType.Single;
case TF_DataType.TF_DOUBLE:
return NPTypeCode.Double;
return NumpyDType.Double;
case TF_DataType.TF_STRING:
return NPTypeCode.String;
return NumpyDType.String;
case TF_DataType.TF_COMPLEX128:
case TF_DataType.TF_COMPLEX64: //64 is also TF_COMPLEX
return NPTypeCode.Complex;
return NumpyDType.Complex;
default:
throw new NotSupportedException($"Unable to convert {type} to a NumSharp typecode.");
}
@@ -202,15 +202,29 @@ namespace Tensorflow
TF_DataType.TF_INT32 => "int32",
TF_DataType.TF_INT64 => "int64",
TF_DataType.TF_FLOAT => "float32",
TF_DataType.TF_DOUBLE => "float64",
TF_DataType.TF_BOOL => "bool",
TF_DataType.TF_RESOURCE => "resource",
TF_DataType.TF_VARIANT => "variant",
_ => type.ToString()
};

public static int get_datatype_size(this TF_DataType type)
=> type.as_base_dtype() switch
{
TF_DataType.TF_BOOL => sizeof(bool),
TF_DataType.TF_UINT8 => sizeof(byte),
TF_DataType.TF_INT16 => sizeof(short),
TF_DataType.TF_INT32 => sizeof(int),
TF_DataType.TF_INT64 => sizeof(long),
TF_DataType.TF_FLOAT => sizeof(float),
TF_DataType.TF_DOUBLE => sizeof(double),
_ => -1
};

public static Type as_numpy_dtype(this DataType type)
{
return type.as_tf_dtype().as_numpy_dtype();
return type.as_tf_dtype().as_system_dtype();
}

public static DataType as_base_dtype(this DataType type)
@@ -300,5 +314,15 @@ namespace Tensorflow
|| type == TF_DataType.TF_UINT32
|| type == TF_DataType.TF_UINT64;
}

public static TF_DataType as_tf_dtype(this NumpyDType type)
=> type switch
{
NumpyDType.Int32 => TF_DataType.TF_INT32,
NumpyDType.Int64 => TF_DataType.TF_INT64,
NumpyDType.Float => TF_DataType.TF_FLOAT,
NumpyDType.Double => TF_DataType.TF_DOUBLE,
_ => TF_DataType.TF_UINT8
};
}
}
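
A few illustrative calls against the renamed and new helpers (return values follow the switch arms above; TF_INT64 -> typeof(long) is assumed from the unchanged body of as_system_dtype):

    Type sys = TF_DataType.TF_INT64.as_system_dtype();           // typeof(long); previously as_numpy_dtype
    NumpyDType code = TF_DataType.TF_FLOAT.as_numpy_typecode();  // NumpyDType.Single
    int bytes = TF_DataType.TF_DOUBLE.get_datatype_size();       // 8; unsupported types return -1
    TF_DataType back = NumpyDType.Int32.as_tf_dtype();           // TF_DataType.TF_INT32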

+ 51
- 21
src/TensorFlowNET.Core/Tensors/tensor_util.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -117,7 +117,7 @@ namespace Tensorflow

// We first convert value to a numpy array or scalar.
NDArray nparray = null;
var np_dt = dtype.as_numpy_dtype();
var np_dt = dtype.as_system_dtype();

if (values is NDArray nd)
{
@@ -145,7 +145,7 @@ namespace Tensorflow
nparray = nparray.astype(np_dt);
}

var numpy_dtype = nparray.dtype.as_dtype(dtype: dtype);
var numpy_dtype = nparray.dtype.as_tf_dtype();
if (numpy_dtype == TF_DataType.DtInvalid)
throw new TypeError($"Unrecognized data type: {nparray.dtype}");

@@ -155,7 +155,7 @@ namespace Tensorflow
numpy_dtype = dtype;

bool is_same_size = false;
int shape_size = 0;
ulong shape_size = 0;

// If shape is not given, get the shape from the numpy array.
if (shape == null)
@@ -173,14 +173,14 @@ namespace Tensorflow
}
else
{
shape = nparray.shape;
shape = nparray.dims.Select(x => Convert.ToInt32(x)).ToArray();
is_same_size = true;
shape_size = nparray.size;
}
}
else
{
shape_size = new TensorShape(shape).size;
shape_size = (ulong)new Shape(shape.Select(x => Convert.ToInt64(x)).ToArray()).size;
is_same_size = shape_size == nparray.size;
}

@@ -214,7 +214,7 @@ namespace Tensorflow

var proto_values = nparray.ravel();

switch (nparray.dtype.Name)
switch (nparray.dtype.ToString())
{
case "Bool":
case "Boolean":
@@ -286,18 +286,18 @@ scalar with value '-1' to describe an unknown shape.", value_));
return pre_cast;
var cast_dtype = dtypes.as_dtype((Type)tensor.op.get_attr("DstT"));
if (!Array.Exists(new[] { dtypes.int32, dtypes.int64 }, cast_dtype_ => cast_dtype_ == cast_dtype))
return tensor.TensorShape.unknown_shape(shape.dims[0]);
return tensor.TensorShape.unknown_shape((int)shape.dims[0]);

int[] x_ = { };
long[] x_ = { };
foreach (var x in pre_cast.as_list())
if (x != -1)
x_[x_.Length] = x;
else
x_[x_.Length] = -1;
var dest_dtype_shape_array = np.array(x_).astype(cast_dtype.as_numpy_dtype());
var dest_dtype_shape_array = np.array(x_).astype(cast_dtype.as_system_dtype());

int[] y_ = { };
foreach (int y in dest_dtype_shape_array)
long[] y_ = { };
foreach (int y in dest_dtype_shape_array.Data<int>())
if (y >= 0)
y_[y_.Length] = y;
else
@@ -331,7 +331,7 @@ would not be rank 1.", tensor.op.get_attr("axis")));
{
new_dim = new Dimension(pack_input_val);
}
ret_ = ret_.concatenate(new int[] { new_dim });
ret_ = ret_.concatenate(new long[] { new_dim });
}
return ret_;
}
@@ -422,7 +422,7 @@ would not be rank 1.", tensor.op.get_attr("axis")));
}
}

var ret = tensor.TensorShape.unknown_shape(shape.dims[0]);
var ret = tensor.TensorShape.unknown_shape((int)shape.dims[0]);
var value = constant_value(tensor);
if (!(value is null))
{
@@ -543,7 +543,7 @@ would not be rank 1.", tensor.op.get_attr("axis")));

public static TensorShape as_shape(this Shape shape)
{
return new TensorShape(shape.Dimensions);
return new TensorShape(shape.dims);
}

public static TensorShape reshape(this Shape shape, int[] dims)
@@ -575,6 +575,7 @@ would not be rank 1.", tensor.op.get_attr("axis")));
public static string to_numpy_string(Tensor tensor)
{
var dtype = tensor.dtype;
var shape = tensor.shape;

if (dtype == TF_DataType.TF_STRING)
{
@@ -593,14 +594,43 @@ would not be rank 1.", tensor.op.get_attr("axis")));
{
return "<unprintable>";
}
else if (dtype == TF_DataType.TF_INT32)
{
var array = tensor.ToArray<int>();
return DisplayArrayAsString(array);
}
else if (dtype == TF_DataType.TF_INT64)
{
var array = tensor.ToArray<long>();
return DisplayArrayAsString(array);
}
else if (dtype == TF_DataType.TF_FLOAT)
{
var array = tensor.ToArray<float>();
return DisplayArrayAsString(array);
}
else if(dtype == TF_DataType.TF_DOUBLE)
{
var array = tensor.ToArray<double>();
return DisplayArrayAsString(array);
}
else
{
var array = tensor.ToArray<byte>();
return DisplayArrayAsString(array);
}
}

var nd = tensor.numpy();

if (nd.size == 0)
return "[]";

return nd.ToString();
static string DisplayArrayAsString<T>(T[] array)
{
var display = "[";
if (array.Length < 10)
display += string.Join(", ", array);
else
display += string.Join(", ", array.Take(3)) + " ... " + string.Join(", ", array.Skip(array.Length - 3));
return display + "]";
}

public static ParsedSliceArgs ParseSlices(Slice[] slices)
{
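
For example, with the truncating formatter above, a 20-element int32 tensor renders as follows (spacing follows the string.Join calls):

    // tensor.ToArray<int>() == { 0, 1, 2, ..., 19 }
    // DisplayArrayAsString(array) -> "[0, 1, 2 ... 17, 18, 19]"
    // arrays shorter than 10 elements are printed in full, e.g. "[1, 2, 3]"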


+ 1
- 1
src/TensorFlowNET.Core/Training/Saving/Saver.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.IO;


+ 22
- 0
src/TensorFlowNET.Core/Util/Arrays.cs View File

@@ -0,0 +1,22 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Util
{
public static class Arrays
{
public static Type ResolveElementType(this Array arr)
{
if (arr == null)
throw new ArgumentNullException(nameof(arr));

var t = arr.GetType().GetElementType();
// ReSharper disable once PossibleNullReferenceException
while (t.IsArray)
t = t.GetElementType();

return t;
}
}
}
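
A quick sketch of the new extension in use (the arrays here are just illustrations):

    var jagged = new int[][] { new[] { 1, 2 }, new[] { 3 } };
    Type elem = jagged.ResolveElementType();          // typeof(int): nested array types are unwrapped
    Type flat = new double[5].ResolveElementType();   // typeof(double)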

+ 11
- 0
src/TensorFlowNET.Core/Util/Converts.cs View File

@@ -0,0 +1,11 @@
using System;
using System.Collections.Generic;
using System.Text;

namespace Tensorflow.Util
{
public class Converts
{

}
}

+ 1
- 38
src/TensorFlowNET.Core/Util/UnmanagedExtensions.cs View File

@@ -1,5 +1,4 @@
using NumSharp.Backends.Unmanaged;
using System;
using System;
using System.IO;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
@@ -11,42 +10,6 @@ namespace Tensorflow.Util
//internally UnmanagedMemoryStream can't construct with null address.
private static readonly unsafe byte* _empty = (byte*)Marshal.AllocHGlobal(1);

/// <summary>
/// Creates a memory stream based on given <paramref name="block"/>.
/// </summary>
/// <param name="block">The block to stream. Can be default/null.</param>
/// <remarks>There is no need to dispose the returned <see cref="UnmanagedMemoryStream"/></remarks>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static UnmanagedMemoryStream Stream(this UnmanagedMemoryBlock<byte> block)
{
unsafe
{
if (block.Address == null)
return new UnmanagedMemoryStream(_empty, 0);
return new UnmanagedMemoryStream(block.Address, block.BytesCount);
}
}

/// <summary>
/// Creates a memory stream based on given <paramref name="block"/>.
/// </summary>
/// <param name="block">The block to stream. Can be default/null.</param>
/// <param name="offset">Offset from the start of the block.</param>
/// <remarks>There is no need to dispose the returned <see cref="UnmanagedMemoryStream"/></remarks>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static UnmanagedMemoryStream Stream(this UnmanagedMemoryBlock<byte> block, long offset)
{
if (block.BytesCount - offset <= 0)
throw new ArgumentOutOfRangeException(nameof(offset));

unsafe
{
if (block.Address == null)
return new UnmanagedMemoryStream(_empty, 0);
return new UnmanagedMemoryStream(block.Address + offset, block.BytesCount - offset);
}
}

/// <summary>
/// Creates a memory stream based on given <paramref name="address"/>.
/// </summary>


+ 1
- 1
src/TensorFlowNET.Core/Util/nest.py.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections;
using System.Collections.Generic;


+ 1
- 1
src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using Tensorflow.Eager;
using Tensorflow.Variables;


+ 1
- 1
src/TensorFlowNET.Core/Variables/IVariableV1.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{


+ 1
- 1
src/TensorFlowNET.Core/Variables/RefVariable.cs View File

@@ -15,7 +15,7 @@
******************************************************************************/

using Google.Protobuf;
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;


+ 1
- 1
src/TensorFlowNET.Core/Variables/ResourceVariable.Index.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Text;


+ 1
- 1
src/TensorFlowNET.Core/Variables/ResourceVariable.Operators.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow
{


+ 1
- 1
src/TensorFlowNET.Core/ops.cs View File

@@ -16,7 +16,7 @@

using Google.Protobuf;
using Google.Protobuf.Collections;
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;


+ 1
- 1
src/TensorFlowNET.Keras/BackendImpl.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Linq;
using System.Collections.Generic;


+ 1
- 1
src/TensorFlowNET.Keras/Datasets/Cifar10.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.IO;


+ 1
- 1
src/TensorFlowNET.Keras/Datasets/DatasetPass.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;

namespace Tensorflow.Keras.Datasets
{


+ 3
- 3
src/TensorFlowNET.Keras/Datasets/Imdb.cs View File

@@ -3,7 +3,7 @@ using System.Collections.Generic;
using System.IO;
using System.Text;
using Tensorflow.Keras.Utils;
using NumSharp;
using Tensorflow.Numpy;
using System.Linq;

namespace Tensorflow.Keras.Datasets
@@ -44,7 +44,7 @@ namespace Tensorflow.Keras.Datasets

var lines = File.ReadAllLines(Path.Combine(dst, "imdb_train.txt"));
var x_train_string = new string[lines.Length];
var y_train = np.zeros(new int[] { lines.Length }, NPTypeCode.Int64);
var y_train = np.zeros(new int[] { lines.Length }, NumpyDType.Int64);
for (int i = 0; i < lines.Length; i++)
{
y_train[i] = long.Parse(lines[i].Substring(0, 1));
@@ -55,7 +55,7 @@ namespace Tensorflow.Keras.Datasets

File.ReadAllLines(Path.Combine(dst, "imdb_test.txt"));
var x_test_string = new string[lines.Length];
var y_test = np.zeros(new int[] { lines.Length }, NPTypeCode.Int64);
var y_test = np.zeros(new int[] { lines.Length }, NumpyDType.Int64);
for (int i = 0; i < lines.Length; i++)
{
y_test[i] = long.Parse(lines[i].Substring(0, 1));
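
The updated call pattern in isolation (a sketch assuming np.zeros keeps the (int[] shape, NumpyDType) signature used above):

    var labels = np.zeros(new int[] { 100 }, NumpyDType.Int64);
    labels[0] = 1L;   // element assignment via the NDArray indexer, as in the loops above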


+ 1
- 1
src/TensorFlowNET.Keras/Datasets/MNIST.cs View File

@@ -14,7 +14,7 @@
limitations under the License.
******************************************************************************/

using NumSharp;
using Tensorflow.Numpy;
using System;
using System.IO;
using Tensorflow.Keras.Utils;


+ 1
- 1
src/TensorFlowNET.Keras/Engine/Layer.cs View File

@@ -108,7 +108,7 @@ namespace Tensorflow.Keras.Engine
// Manage input shape information if passed.
if (args.BatchInputShape == null && args.InputShape != null)
{
args.BatchInputShape = new int[] { args.BatchSize }.Concat(args.InputShape.dims).ToArray();
args.BatchInputShape = new long[] { args.BatchSize }.Concat(args.InputShape.dims).ToArray();
}
}



+ 1
- 1
src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;


+ 2
- 2
src/TensorFlowNET.Keras/Engine/Model.Fit.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -30,7 +30,7 @@ namespace Tensorflow.Keras.Engine
int workers = 1,
bool use_multiprocessing = false)
{
int train_count = Convert.ToInt32(x.shape[0] * (1 - validation_split));
int train_count = Convert.ToInt32(x.dims[0] * (1 - validation_split));
var train_x = x[new Slice(0, train_count)];
var train_y = y[new Slice(0, train_count)];
var val_x = x[new Slice(train_count)];
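
A worked example of the split above, assuming x.dims[0] == 1000 samples and validation_split == 0.2:

    // train_count = Convert.ToInt32(1000 * (1 - 0.2)) = 800
    // train_x = x[new Slice(0, 800)]   -> samples 0..799
    // val_x   = x[new Slice(800)]      -> samples 800..999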


+ 1
- 1
src/TensorFlowNET.Keras/Engine/Model.Predict.cs View File

@@ -1,4 +1,4 @@
using NumSharp;
using Tensorflow.Numpy;
using System;
using System.Collections.Generic;
using System.Linq;


+ 1
- 1
src/TensorFlowNET.Keras/Engine/Model.Training.cs View File

@@ -3,7 +3,7 @@ using System.Collections.Generic;
using System.Text;
using HDF.PInvoke;
using HDF5CSharp;
using NumSharp;
using Tensorflow.Numpy;
using Tensorflow.Keras.Saving;

namespace Tensorflow.Keras.Engine


+ 15
- 15
src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs View File

@@ -65,7 +65,7 @@ namespace Tensorflow.Keras.Layers
if(inputs.shape.rank > -1)
{
var dims = inputs.shape.dims;
(height, width) = (dims[h_axis], dims[w_axis]);
(height, width) = ((int)dims[h_axis], (int)dims[w_axis]);
}
var (kernel_h, kernel_w) = kernel_size;
var (stride_h, stride_w) = strides;
@@ -74,18 +74,18 @@ namespace Tensorflow.Keras.Layers

// Infer the dynamic output shape:
var out_height = conv_utils.deconv_output_length(height,
kernel_h,
(int)kernel_h,
padding: padding,
output_padding: out_pad_h,
stride: stride_h,
dilation: dilation_rate[0]);
stride: (int)stride_h,
dilation: (int)dilation_rate[0]);

var out_width = conv_utils.deconv_output_length(width,
kernel_w,
(int)kernel_w,
padding: padding,
output_padding: out_pad_w,
stride: stride_w,
dilation: dilation_rate[1]);
stride: (int)stride_w,
dilation: (int)dilation_rate[1]);

Tensor output_shape_tensor;
if (data_format == "channels_first")
@@ -130,19 +130,19 @@ namespace Tensorflow.Keras.Layers
var (out_pad_h, out_pad_w) = (-1, -1);
output_shape[c_axis] = filters;
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
(int)output_shape[h_axis],
(int)kernel_h,
padding: padding,
output_padding: out_pad_h,
stride: stride_h,
dilation: dilation_rate[0]);
stride: (int)stride_h,
dilation: (int)dilation_rate[0]);
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
(int)output_shape[w_axis],
(int)kernel_w,
padding: padding,
output_padding: out_pad_w,
stride: stride_w,
dilation: dilation_rate[1]);
stride: (int)stride_w,
dilation: (int)dilation_rate[1]);

return new TensorShape(output_shape);
}
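
For orientation, assuming conv_utils.deconv_output_length follows the standard Keras formula with no explicit output padding ("same": length * stride; "valid": length * stride + max(kernel - stride, 0)), a hypothetical case:

    // height = 7, kernel_h = 3, stride_h = 2
    // padding "same"  -> out_height = 7 * 2 = 14
    // padding "valid" -> out_height = 7 * 2 + max(3 - 2, 0) = 15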


+ 7
- 6
src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs View File

@@ -16,6 +16,7 @@

using System;
using System.Collections.Generic;
using System.Linq;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Utils;
@@ -47,11 +48,11 @@ namespace Tensorflow.Keras.Layers
public Convolutional(ConvolutionalArgs args) : base(args)
{
this.args = args;
args.KernelSize = conv_utils.normalize_tuple(args.KernelSize.dims, args.Rank, "kernel_size");
args.Strides = conv_utils.normalize_tuple(args.Strides.dims, args.Rank, "strides");
args.KernelSize = conv_utils.normalize_tuple(args.KernelSize.dims.Select(x => (int)x).ToArray(), args.Rank, "kernel_size");
args.Strides = conv_utils.normalize_tuple(args.Strides.dims.Select(x => (int)x).ToArray(), args.Rank, "strides");
args.Padding = conv_utils.normalize_padding(args.Padding);
args.DataFormat = conv_utils.normalize_data_format(args.DataFormat);
args.DilationRate = conv_utils.normalize_tuple(args.DilationRate.dims, args.Rank, "dilation_rate");
args.DilationRate = conv_utils.normalize_tuple(args.DilationRate.dims.Select(x => (int)x).ToArray(), args.Rank, "dilation_rate");
inputSpec = new InputSpec(ndim: rank + 2);
_tf_data_format = conv_utils.convert_data_format(data_format, rank + 2);
}
@@ -60,10 +61,10 @@ namespace Tensorflow.Keras.Layers
{
TensorShape input_shape = inputs.shape;
int channel_axis = data_format == "channels_first" ? 1 : -1;
int input_channel = channel_axis < 0 ?
var input_channel = channel_axis < 0 ?
input_shape.dims[input_shape.ndim + channel_axis] :
input_shape.dims[channel_axis];
TensorShape kernel_shape = kernel_size.dims.concat(new int[] { input_channel / args.Groups, filters });
TensorShape kernel_shape = kernel_size.dims.concat(new long[] { input_channel / args.Groups, filters });
kernel = add_weight(name: "kernel",
shape: kernel_shape,
initializer: kernel_initializer,
@@ -78,7 +79,7 @@ namespace Tensorflow.Keras.Layers
dtype: DType);

var axes = new Dictionary<int, int>();
axes.Add(-1, input_channel);
axes.Add(-1, (int)input_channel);
inputSpec = new InputSpec(min_ndim: rank + 2, axes: axes);

string tf_padding;
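
As a concrete illustration of the widened kernel_shape: for a 2-D convolution with kernel_size (3, 3), 64 input channels, Groups == 1 and filters == 128, the concat above yields

    // kernel_shape = [3, 3, 64 / 1, 128] = [3, 3, 64, 128]   (now long[] rather than int[])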


+ 1
- 1
src/TensorFlowNET.Keras/Layers/Core/Dense.cs View File

@@ -46,7 +46,7 @@ namespace Tensorflow.Keras.Layers
TensorShape input_shape = inputs.shape;
var last_dim = input_shape.dims.Last();
var axes = new Dictionary<int, int>();
axes[-1] = last_dim;
axes[-1] = (int)last_dim;
inputSpec = new InputSpec(min_ndim: 2, axes: axes);
kernel = add_weight(
"kernel",


Some files were not shown because too many files changed in this diff
