
Tensor: Rewrite Creation to fix heap corruption.

TensorTest: Added unit tests for more coverage; reformatted the file.
c_api.tensor: Added overloads of TF_NewTensor that do not take deallocator parameters.
BaseSession: Removed the immediate disposal of feed tensors after the TF_SessionRun call.
c_api.DeallocatorArgs: Added DeallocatorArgs.Empty
tags/v0.12
Eli Belash, 6 years ago
Commit e4e62dd988
8 changed files with 365 additions and 329 deletions
  1. TensorFlow.NET.sln.DotSettings (+2, -0)
  2. src/TensorFlowNET.Core/APIs/c_api.cs (+9, -0)
  3. src/TensorFlowNET.Core/Sessions/BaseSession.cs (+3, -15)
  4. src/TensorFlowNET.Core/Tensors/AllocationType.cs (+27, -0)
  5. src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs (+182, -258)
  6. src/TensorFlowNET.Core/Tensors/Tensor.cs (+26, -0)
  7. src/TensorFlowNET.Core/Tensors/c_api.tensor.cs (+73, -0)
  8. test/TensorFlowNET.UnitTest/TensorTest.cs (+43, -56)

TensorFlow.NET.sln.DotSettings (+2, -0)

@@ -0,0 +1,2 @@
<wpf:ResourceDictionary xml:space="preserve" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:ss="urn:shemas-jetbrains-com:settings-storage-xaml" xmlns:wpf="http://schemas.microsoft.com/winfx/2006/xaml/presentation">
<s:Boolean x:Key="/Default/UserDictionary/Words/=Tensorflow/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary>

src/TensorFlowNET.Core/APIs/c_api.cs (+9, -0)

@@ -54,6 +54,15 @@ namespace Tensorflow

public struct DeallocatorArgs
{
internal static unsafe c_api.DeallocatorArgs* EmptyPtr;
internal static unsafe IntPtr Empty;

static unsafe DeallocatorArgs()
{
Empty = new IntPtr(EmptyPtr = (DeallocatorArgs*) Marshal.AllocHGlobal(Marshal.SizeOf<DeallocatorArgs>()));
*EmptyPtr = new DeallocatorArgs() {gc_handle = IntPtr.Zero, deallocator_called = false};
}

public bool deallocator_called;
public IntPtr gc_handle;
}
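
For illustration, a minimal sketch (not part of this diff) of the calling pattern these fields enable, assuming `buffer` is a caller-owned unmanaged or pinned IntPtr that outlives the tensor, and that the call is made from library code since Empty is internal. The convenience overloads added in c_api.tensor.cs below wrap exactly this pairing of a no-op deallocator with the shared args pointer:

// Sketch only: tell TF_NewTensor not to take ownership of the buffer by passing
// a deallocator that does nothing together with the shared, never-freed args.
IntPtr handle = c_api.TF_NewTensor(
    TF_DataType.TF_FLOAT,
    dims: new long[] {2, 3},
    num_dims: 2,
    data: buffer,                           // assumed caller-owned IntPtr
    len: (UIntPtr) (6 * sizeof(float)),
    deallocator: c_api.EmptyDeallocator,
    deallocator_arg: c_api.DeallocatorArgs.Empty);

Because the args pointer is a single unmanaged allocation made once per process and deliberately never freed, it is always safe to hand to native TensorFlow, no matter how long the native side keeps it.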


src/TensorFlowNET.Core/Sessions/BaseSession.cs (+3, -15)

@@ -152,7 +152,6 @@ namespace Tensorflow
{

var feeds = new KeyValuePair<TF_Output, Tensor>[feed_dict.Count];
var ignoreDispose = new bool[feed_dict.Count];
int i = 0;
foreach (var x in feed_dict)
{
@@ -160,8 +159,9 @@ namespace Tensorflow
{
switch (x.Value)
{
case Tensor v: ignoreDispose[i] = true; feeds[i++] = new KeyValuePair<TF_Output, Tensor>(tensor._as_tf_output(), v); break;
case Tensor v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(tensor._as_tf_output(), v); break;
case NDArray v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(tensor._as_tf_output(), new Tensor(v, tensor.dtype)); break;
case IntPtr v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(tensor._as_tf_output(), new Tensor(v)); break;
#if _REGEN
%types = ["sbyte", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
%foreach types%
@@ -194,7 +194,6 @@ namespace Tensorflow
#endif
case bool v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(tensor._as_tf_output(), new Tensor((byte) (v ? 1 : 0), TF_DataType.TF_BOOL)); break;
case string v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(tensor._as_tf_output(), new Tensor(v)); break;
case IntPtr v: feeds[i++] = new KeyValuePair<TF_Output, Tensor>(tensor._as_tf_output(), new Tensor(v)); break;
default:
throw new NotImplementedException($"feed_dict data type {x.Value?.GetType().Name ?? "<null>"}");
}
@@ -203,18 +202,7 @@ namespace Tensorflow

var fetches = fetch_list.Select(x => x._as_tf_output()).ToArray();
//var targets = target_list;
try
{
return _call_tf_sessionrun(feeds, fetches, target_list);
} finally
{
for (var idx = 0; idx < feeds.Length; idx++)
{
if (ignoreDispose[idx])
continue;
feeds[idx].Value.Dispose();
}
}
return _call_tf_sessionrun(feeds, fetches, target_list);
}

private unsafe NDArray[] _call_tf_sessionrun(KeyValuePair<TF_Output, Tensor>[] feed_dict, TF_Output[] fetch_list, List<Operation> target_list)
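
With the try/finally gone, feed tensors are no longer disposed immediately after the run, which avoids freeing buffers the native session may still reference, and a Tensor the caller supplies stays valid for reuse. An illustrative sketch (not from this diff), assuming the usual sess.run/FeedItem overloads of this library and hypothetical graph tensors x and y:

// Sketch: the same caller-owned feed Tensor can be reused across runs, because
// the session no longer disposes it right after TF_SessionRun.
using (var feed = new Tensor(new float[] {1f, 2f, 3f}, new long[] {3}))
{
    var first  = sess.run(y, new FeedItem(x, feed));
    var second = sess.run(y, new FeedItem(x, feed));  // feed is still valid here
}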


src/TensorFlowNET.Core/Tensors/AllocationType.cs (+27, -0)

@@ -0,0 +1,27 @@
namespace Tensorflow
{
/// <summary>
/// Used internally to track how the memory backing a Tensor was allocated.
/// </summary>
public enum AllocationType
{
None = 0,
/// <summary>
/// Allocation was done by passing in a pointer; the Tensor might also hold a reference to a C# object.
/// </summary>
FromPointer = 1,
/// <summary>
/// Allocation was done by calling c_api.TF_AllocateTensor or TF decided it has to copy data during c_api.TF_NewTensor. <br></br>
/// Deallocation is handled solely by Tensorflow.
/// </summary>
Tensorflow = 2,
/// <summary>
/// Allocation was done by Marshal.AllocHGlobal
/// </summary>
Marshal = 3,
/// <summary>
/// Allocation was done by GCHandle.Alloc
/// </summary>
GCHandle = 4,
}
}
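
The numeric ordering is deliberate: Tensor.IsMemoryOwner (below) is defined as AllocationType >= AllocationType.Marshal, so only the last two values mark memory that C# must free. A hypothetical helper (not part of this commit) spelling out the cleanup each value implies, assuming System.Runtime.InteropServices is imported:

// Hypothetical sketch of the cleanup contract encoded by the enum values.
static void ReleaseBacking(AllocationType type, object handle)
{
    switch (type)
    {
        case AllocationType.GCHandle:     // pinned managed array: unpin it
            ((GCHandle) handle).Free();
            break;
        case AllocationType.Marshal:      // unmanaged block allocated by C#
            Marshal.FreeHGlobal((IntPtr) handle);
            break;
        default:                          // None, FromPointer, Tensorflow:
            break;                        // nothing for C# to free
    }
}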

src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs (+182, -258)

@@ -28,42 +28,37 @@ using static Tensorflow.c_api;

namespace Tensorflow
{
[SuppressMessage("ReSharper", "InvokeAsExtensionMethod")]
public partial class Tensor
{
/// <summary>
/// true if unmanaged buffer has been freed.
/// When the Tensor was created from an object managed by C#'s GC, this holds a reference to prevent it from being collected.
/// </summary>
private bool _deallocator_called => _deallocatorArgs.deallocator_called;
protected object AllocationReferenceHolder;

/// <summary>
/// true if the Tensor was created from a managed array
/// The handle that was used to allocate this tensor, dependent on <see cref="AllocationType"/>.
/// </summary>
private bool _isPinnedArray => _deallocatorArgs.gc_handle != IntPtr.Zero;
protected object AllocationHandle;

/// <summary>
/// True only if the Tensor object was created in a way that the Tensor object itself allocated memory or pinned a managed object.
/// False if the Tensor was created from a pointer
/// True if this Tensor holds data allocated by C#.
/// </summary>
public bool IsMemoryOwner { get; private set; }
public bool IsMemoryOwner => AllocationType >= AllocationType.Marshal;

/// <summary>
/// This holds values that are used by the unmanaged deallocator callback
/// The allocation method used to create this Tensor.
/// </summary>
private DeallocatorArgs _deallocatorArgs = new DeallocatorArgs() { gc_handle = IntPtr.Zero };

// note: they must be assigned to a static variable in order to work as unmanaged callbacks
private static readonly Deallocator _hGlobalDeallocator = FreeHGlobalMemory;
private static readonly Deallocator _gcHandleDeallocator = FreeGCHandle;
private static readonly Deallocator _nothingDeallocator = FreeNothing;
public AllocationType AllocationType { get; protected set; }

/// <summary>
/// Create a Tensor object from an existing TF handle
/// Create a Tensor object from an existing TF handle
/// </summary>
/// <param name="handle"></param>
/// <param name="handle">Handle to a <see cref="Tensor"/> object.</param>
public Tensor(IntPtr handle)
{
_handle = handle;
IsMemoryOwner = false;
//no need to set AllocationType = AllocationType.None;
}

/// <summary>
@@ -71,399 +66,376 @@ namespace Tensorflow
/// Note: the caller is responsible for freeing the memory. Calling Dispose on this object will dispose the TensorFlow tensor
/// but not the memory itself!
/// </summary>
/// <param name="ptr">Pointer to unmanaged, fixed or pinned memory which the caller owns</param>
/// <param name="data_ptr">Pointer to unmanaged, fixed or pinned memory which the caller owns</param>
/// <param name="shape">Tensor shape</param>
/// <param name="dType">TF data type</param>
/// <param name="num_bytes">Size of the tensor in memory</param>
public Tensor(IntPtr ptr, long[] shape, TF_DataType dType, int num_bytes)
public Tensor(IntPtr data_ptr, long[] shape, TF_DataType dType, int num_bytes)
{
_handle = TF_NewTensor(dType, dims: shape, num_dims: shape.Length, data: ptr, len: (UIntPtr)num_bytes, deallocator: _nothingDeallocator, ref _deallocatorArgs);
IsMemoryOwner = false;
unsafe
{
_handle = TF_NewTensor(dType, dims: shape, num_dims: shape.Length, data: data_ptr, len: (UIntPtr) num_bytes);
AllocationType = TF_TensorData(_handle) == data_ptr ? AllocationType.FromPointer : AllocationType.Tensorflow;
}
}

/// <summary>
/// Create a new Tensor from the given unmanaged memory pointer (which must be allocated, fixed or pinned by the caller)
/// Note: the caller is responsible for freeing the memory. Calling Dispose on this object will dispose the TensorFlow tensor
/// but not the memory itself!
/// </summary>
/// <param name="data_ptr">Pointer to unmanaged, fixed or pinned memory which the caller owns</param>
/// <param name="shape">Tensor shape</param>
/// <param name="dType">TF data type</param>
/// <param name="num_bytes">Size of the tensor in memory</param>
public unsafe Tensor(void* data_ptr, long[] shape, TF_DataType dType, int num_bytes)
{
_handle = TF_NewTensor(dType, dims: shape, num_dims: shape.Length, data: data_ptr, len: (UIntPtr) num_bytes);
AllocationType = TF_TensorData(_handle).ToPointer() == data_ptr ? AllocationType.FromPointer : AllocationType.Tensorflow;
}

#if _REGEN
%types=["sbyte", "bool", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
%types = ["sbyte", "bool", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
%foreach types%
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(#1[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(#1)), new long[]{data.Length}, data, Marshal.SizeOf<#1>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(#1)), new long[] {data.Length}, data, #(#1=="Complex"|"Marshal.SizeOf<Complex>()"|"sizeof(#(str(#1)))"));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(#1[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(#1)), shape, data, Marshal.SizeOf<#1>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(#1)), shape, data, #(#1=="Complex"|"Marshal.SizeOf<Complex>()"|"sizeof(#(str(#1)))"));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(#1 value, TF_DataType? dType = null)
{
var v = (#1*)Marshal.AllocHGlobal(sizeof(#1));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(#1)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(#1), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(#1)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(#1));
*(#1*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
%
#else

/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(sbyte[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), new long[]{data.Length}, data, Marshal.SizeOf<sbyte>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(sbyte)), new long[] {data.Length}, data, sizeof(sbyte));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(sbyte[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(sbyte)), shape, data, Marshal.SizeOf<sbyte>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(sbyte)), shape, data, sizeof(sbyte));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(sbyte value, TF_DataType? dType = null)
{
var v = (sbyte*)Marshal.AllocHGlobal(sizeof(sbyte));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(sbyte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(sbyte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(sbyte)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(sbyte));
*(sbyte*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(bool[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(bool)), new long[]{data.Length}, data, Marshal.SizeOf<bool>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(bool)), new long[] {data.Length}, data, sizeof(bool));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(bool[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(bool)), shape, data, Marshal.SizeOf<bool>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(bool)), shape, data, sizeof(bool));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(bool value, TF_DataType? dType = null)
{
var v = (bool*)Marshal.AllocHGlobal(sizeof(bool));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(bool)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(bool), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(bool)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(bool));
*(bool*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(byte[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), new long[]{data.Length}, data, Marshal.SizeOf<byte>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(byte)), new long[] {data.Length}, data, sizeof(byte));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(byte[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(byte)), shape, data, Marshal.SizeOf<byte>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(byte)), shape, data, sizeof(byte));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(byte value, TF_DataType? dType = null)
{
var v = (byte*)Marshal.AllocHGlobal(sizeof(byte));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(byte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(byte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(byte)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(byte));
*(byte*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(short[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), new long[]{data.Length}, data, Marshal.SizeOf<short>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(short)), new long[] {data.Length}, data, sizeof(short));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(short[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(short)), shape, data, Marshal.SizeOf<short>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(short)), shape, data, sizeof(short));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(short value, TF_DataType? dType = null)
{
var v = (short*)Marshal.AllocHGlobal(sizeof(short));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(short)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(short), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(short)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(short));
*(short*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(ushort[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), new long[]{data.Length}, data, Marshal.SizeOf<ushort>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(ushort)), new long[] {data.Length}, data, sizeof(ushort));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(ushort[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ushort)), shape, data, Marshal.SizeOf<ushort>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(ushort)), shape, data, sizeof(ushort));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(ushort value, TF_DataType? dType = null)
{
var v = (ushort*)Marshal.AllocHGlobal(sizeof(ushort));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ushort)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ushort), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(ushort)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(ushort));
*(ushort*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(int[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), new long[]{data.Length}, data, Marshal.SizeOf<int>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(int)), new long[] {data.Length}, data, sizeof(int));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(int[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(int)), shape, data, Marshal.SizeOf<int>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(int)), shape, data, sizeof(int));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(int value, TF_DataType? dType = null)
{
var v = (int*)Marshal.AllocHGlobal(sizeof(int));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(int)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(int), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(int)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(int));
*(int*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(uint[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), new long[]{data.Length}, data, Marshal.SizeOf<uint>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(uint)), new long[] {data.Length}, data, sizeof(uint));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(uint[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(uint)), shape, data, Marshal.SizeOf<uint>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(uint)), shape, data, sizeof(uint));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(uint value, TF_DataType? dType = null)
{
var v = (uint*)Marshal.AllocHGlobal(sizeof(uint));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(uint)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(uint), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(uint)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(uint));
*(uint*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(long[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), new long[]{data.Length}, data, Marshal.SizeOf<long>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(long)), new long[] {data.Length}, data, sizeof(long));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(long[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(long)), shape, data, Marshal.SizeOf<long>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(long)), shape, data, sizeof(long));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(long value, TF_DataType? dType = null)
{
var v = (long*)Marshal.AllocHGlobal(sizeof(long));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(long)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(long), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(long)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(long));
*(long*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(ulong[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), new long[]{data.Length}, data, Marshal.SizeOf<ulong>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(ulong)), new long[] {data.Length}, data, sizeof(ulong));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(ulong[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(ulong)), shape, data, Marshal.SizeOf<ulong>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(ulong)), shape, data, sizeof(ulong));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(ulong value, TF_DataType? dType = null)
{
var v = (ulong*)Marshal.AllocHGlobal(sizeof(ulong));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ulong)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ulong), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(ulong)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(ulong));
*(ulong*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(float[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), new long[]{data.Length}, data, Marshal.SizeOf<float>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(float)), new long[] {data.Length}, data, sizeof(float));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(float[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(float)), shape, data, Marshal.SizeOf<float>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(float)), shape, data, sizeof(float));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(float value, TF_DataType? dType = null)
{
var v = (float*)Marshal.AllocHGlobal(sizeof(float));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(float)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(float), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(float)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(float));
*(float*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(double[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), new long[]{data.Length}, data, Marshal.SizeOf<double>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(double)), new long[] {data.Length}, data, sizeof(double));
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(double[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(double)), shape, data, Marshal.SizeOf<double>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(double)), shape, data, sizeof(double));
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(double value, TF_DataType? dType = null)
{
var v = (double*)Marshal.AllocHGlobal(sizeof(double));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(double)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(double), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(double)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(double));
*(double*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
/// <summary>
/// Create a 1d Tensor from the given linear array and shape
/// Create a 1d Tensor from the given linear array and shape
/// </summary>
public Tensor(Complex[] data, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), new long[]{data.Length}, data, Marshal.SizeOf<Complex>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(Complex)), new long[] {data.Length}, data, Marshal.SizeOf<Complex>());
}

/// <summary>
/// Create a N-dimensional Tensor from the given array
/// Create a N-dimensional Tensor from the given array
/// </summary>
public Tensor(Complex[] data, long[] shape, TF_DataType? dType = null)
{
_handle = CreateTensorWithoutCopying(dType ?? dtypes.as_dtype(typeof(Complex)), shape, data, Marshal.SizeOf<Complex>());
IsMemoryOwner=true;
_handle = CreateTensorFromArray(dType ?? dtypes.as_dtype(typeof(Complex)), shape, data, Marshal.SizeOf<Complex>());
}

/// <summary>
/// Create a scalar Tensor from the given value
/// Create a scalar Tensor from the given value
/// </summary>
public unsafe Tensor(Complex value, TF_DataType? dType = null)
{
var v = (Complex*)Marshal.AllocHGlobal(sizeof(Complex));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(Complex)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(Complex), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
IsMemoryOwner=true;
_handle = TF_AllocateTensor(dType ?? dtypes.as_dtype(typeof(Complex)), dims: new long[0], num_dims: 0, len: (UIntPtr) sizeof(Complex));
*(Complex*) TF_TensorData(_handle) = value;
AllocationType = AllocationType.Tensorflow;
}
#endif

@@ -474,13 +446,13 @@ namespace Tensorflow
{
var status = new Status();
var buffer = Encoding.UTF8.GetBytes(str);
var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
var size = c_api.TF_StringEncodedSize((UIntPtr) buffer.Length);
var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr) ((ulong) size + 8));

IntPtr tensor = c_api.TF_TensorData(handle);
Marshal.WriteInt64(tensor, 0);
fixed (byte* src = buffer)
c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
c_api.TF_StringEncode(src, (UIntPtr) buffer.Length, (sbyte*) (tensor + sizeof(Int64)), size, status);
_handle = handle;
status.Check(true);
}
@@ -492,7 +464,7 @@ namespace Tensorflow
{
if (nd.Unsafe.Storage.Shape.IsContiguous)
{
var bytesLength = (UIntPtr)nd.size;
var bytesLength = (UIntPtr) nd.size;
var size = c_api.TF_StringEncodedSize(bytesLength);
var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr) ((ulong) size + 8));

@@ -504,9 +476,7 @@ namespace Tensorflow

status.Check(true);
_handle = handle;
IsMemoryOwner = false;
}
else
} else
{
var buffer = nd.ToArray<byte>();
var size = c_api.TF_StringEncodedSize((UIntPtr) buffer.Length);
@@ -521,7 +491,6 @@ namespace Tensorflow

status.Check(true);
_handle = handle;
IsMemoryOwner = false;
}

return;
@@ -532,27 +501,27 @@ namespace Tensorflow

private unsafe IntPtr CreateTensorFromNDArray(NDArray nd, TF_DataType? given_dtype)
{
if (nd.dtype.Name == "String")
if (nd.typecode == NPTypeCode.String)
throw new NotImplementedException("Support for NDArray of type string not implemented yet");
IArraySlice arraySlice;
if (nd.Unsafe.Storage.Shape.IsContiguous == false)
{
// the memory is NOT contiguous, so we have to copy the view into a contiguous memory block.
arraySlice = nd.CloneData();
}
else

var arraySlice = nd.Unsafe.Storage.Shape.IsContiguous ? nd.GetData() : nd.CloneData();

var handle = TF_NewTensor(
given_dtype ?? nd.dtype.as_dtype(),
dims: nd.shape.Select(i => (long) i).ToArray(),
num_dims: nd.ndim,
data: arraySlice.Address,
len: (UIntPtr) (nd.size * nd.dtypesize));

//if TF decided not to perform copy, hold reference for given NDArray.
if (TF_TensorData(handle).ToPointer() == nd.Unsafe.Address)
{
// the memory is contiguous
arraySlice = nd.GetData();
}
this.Tag = arraySlice; // keep a reference to the memory block to make sure it is not disposed while TF is using it
var ptr = new IntPtr(arraySlice.Address);
int num_bytes = (nd.size * nd.dtypesize);
var dtype = given_dtype ?? nd.dtype.as_dtype();
var handle = TF_NewTensor(dtype, dims: nd.shape.Select(i=>(long)i).ToArray(), num_dims: nd.ndim, data: ptr, len: (UIntPtr)num_bytes, deallocator: _nothingDeallocator, ref _deallocatorArgs);
IsMemoryOwner = false;
return handle;
AllocationType = AllocationType.FromPointer;
AllocationReferenceHolder = nd.Unsafe.Storage;
} else
AllocationType = AllocationType.Tensorflow;

return handle;
}

public unsafe Tensor(byte[][] buffer, long[] shape)
@@ -560,11 +529,12 @@ namespace Tensorflow
int size = 0;
foreach (var b in buffer)
{
size += (int)TF_StringEncodedSize((UIntPtr)b.Length);
size += (int) TF_StringEncodedSize((UIntPtr) b.Length);
}

int totalSize = size + buffer.Length * 8;
ulong offset = 0;
IntPtr handle = TF_AllocateTensor(TF_DataType.TF_STRING, shape, shape.Length, (UIntPtr)totalSize);
IntPtr handle = TF_AllocateTensor(TF_DataType.TF_STRING, shape, shape.Length, (UIntPtr) totalSize);

// Clear offset table
IntPtr pOffset = TF_TensorData(handle);
@@ -572,15 +542,15 @@ namespace Tensorflow
IntPtr dstLimit = pOffset + totalSize;
for (int i = 0; i < buffer.Length; i++)
{
Marshal.WriteInt64(pOffset, (long)offset);
Marshal.WriteInt64(pOffset, (long) offset);
using (var status = new Status())
{
fixed (byte* src = &buffer[i][0])
{
var written = TF_StringEncode(src, (UIntPtr)buffer[i].Length, (sbyte*)dst, (UIntPtr)(dstLimit.ToInt64() - dst.ToInt64()), status);
var written = TF_StringEncode(src, (UIntPtr) buffer[i].Length, (sbyte*) dst, (UIntPtr) (dstLimit.ToInt64() - dst.ToInt64()), status);
status.Check(true);
pOffset += 8;
dst += (int)written;
dst += (int) written;
offset += written;
}
}
@@ -612,24 +582,26 @@ namespace Tensorflow
/// </remarks>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
[SuppressMessage("ReSharper", "LocalVariableHidesMember")]
protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int element_size)
protected unsafe IntPtr CreateTensorFromArray(TF_DataType dt, long[] shape, Array data, int element_size)
{
if (dt == TF_DataType.TF_STRING && data is byte[] buffer)
{
var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
var size = c_api.TF_StringEncodedSize((UIntPtr) buffer.Length);
var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr) ((ulong) size + 8));
AllocationType = AllocationType.Tensorflow;

IntPtr tensor = c_api.TF_TensorData(handle);
Marshal.WriteInt64(tensor, 0);

var status = new Status();
fixed (byte* src = buffer)
c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
c_api.TF_StringEncode(src, (UIntPtr) buffer.Length, (sbyte*) (tensor + sizeof(Int64)), size, status);

status.Check(true);
return handle;
}
return CreateTensorWithoutCopying(dt, shape, data, 0, data.Length, element_size);

return CreateTensorFromArray(dt, shape, data, 0, data.Length, element_size);
}

/// <summary>
@@ -647,67 +619,19 @@ namespace Tensorflow
/// specified dimensions.
/// </remarks>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
protected unsafe IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int start, int count, int element_size)
protected IntPtr CreateTensorFromArray(TF_DataType dt, long[] shape, Array data, int start, int count, int element_size)
{
if (start < 0 || start > data.Length - count)
throw new ArgumentException($"Array length {data.Length} does not match the given shape {new Shape(shape.Cast<int>().ToArray())}");

// get a handle to the pinned array which we will pass on to the tensor computation engine to use
var gcHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
_deallocatorArgs = new DeallocatorArgs() { gc_handle = GCHandle.ToIntPtr(gcHandle) };
if (shape == null || shape.Length == 0)
return TF_NewTensor(dt, new long[0], 0, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs);
else
return TF_NewTensor(dt, shape, shape.Length, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs);
}

[MonoPInvokeCallback(typeof(Deallocator))]
internal static void FreeHGlobalMemory(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
{
if (args.deallocator_called || dataPtr == IntPtr.Zero)
return;

// NumSharp will dispose
Marshal.FreeHGlobal(dataPtr);
args.deallocator_called = true;
}
AllocationType = AllocationType.GCHandle;
AllocationHandle = gcHandle;

[MonoPInvokeCallback(typeof(Deallocator))]
internal static void FreeGCHandle(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
{
if (args.deallocator_called || args.gc_handle == IntPtr.Zero)
return;
// note: since the ptr given to tensorflow is just the addr of the pinned object we can not directly free it! we need to free the gcHandle instead
GCHandle.FromIntPtr(args.gc_handle).Free();
args.deallocator_called = true;
}

[MonoPInvokeCallback(typeof(Deallocator))]
internal static void FreeNothing(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
{
args.deallocator_called = true;
if (shape == null || shape.Length == 0)
return TF_NewTensor(dt, new long[0], 0, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr) (count * element_size));
return TF_NewTensor(dt, shape, shape.Length, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr) (count * element_size));
}
}

/// <summary>
/// This attribute can be applied to callback functions that will be invoked
/// from unmanaged code to managed code.
/// </summary>
/// <remarks>
/// <code>
/// [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))]
/// internal static void MyFreeFunc (IntPtr data, IntPtr length){..}
/// </code>
/// </remarks>
public sealed class MonoPInvokeCallbackAttribute : Attribute
{
/// <summary>
/// Use this constructor to annotate the type of the callback function that
/// will be invoked from unmanaged code.
/// </summary>
/// <param name="t">T.</param>
public MonoPInvokeCallbackAttribute(Type t) { }
}

}
}
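
To summarize the rewritten creation paths, a hedged usage sketch (not from this diff, and assuming the usual `using static Tensorflow.Binding` for tf.float32): scalars are written straight into a TF-allocated buffer, arrays are pinned and handed to the deallocator-less TF_NewTensor overload, and raw pointers remain caller-owned unless TensorFlow decides to copy.

// Sketch of the three paths and the AllocationType each constructor assigns.
using (var scalar = new Tensor(3.14f))               // TF_AllocateTensor -> AllocationType.Tensorflow
using (var vector = new Tensor(new[] {1, 2, 3}))     // GCHandle-pinned   -> AllocationType.GCHandle
{
    // both buffers are released correctly when the usings end
}

IntPtr buf = Marshal.AllocHGlobal(4 * sizeof(float)); // caller-owned unmanaged memory
using (var view = new Tensor(buf, new long[] {4}, tf.float32, 4 * sizeof(float)))
{
    // AllocationType.FromPointer (or Tensorflow if TF copied); Dispose does not free buf
}
Marshal.FreeHGlobal(buf);                             // still the caller's responsibility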

src/TensorFlowNET.Core/Tensors/Tensor.cs (+26, -0)

@@ -555,9 +555,35 @@ namespace Tensorflow
return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}";
}

/// <summary>
/// Dispose any managed resources.
/// </summary>
/// <remarks>Equivalent to what you would perform inside <see cref="DisposableObject.Dispose"/></remarks>
protected override void DisposeManagedResources()
{
AllocationReferenceHolder = null;
}

[SuppressMessage("ReSharper", "ConvertIfStatementToSwitchStatement")]
protected override void DisposeUnmanagedResources(IntPtr handle)
{
c_api.TF_DeleteTensor(handle);

if (AllocationHandle == null)
return;

if (AllocationType == AllocationType.GCHandle)
{
((GCHandle) AllocationHandle).Free();
AllocationHandle = null;
AllocationType = AllocationType.None;
} else if (AllocationType == AllocationType.Marshal)
{
Marshal.FreeHGlobal((IntPtr) AllocationHandle);
AllocationHandle = null;
AllocationType = AllocationType.None;
} else
throw new InvalidOperationException($"Tensor.AllocationHandle is not null ({AllocationHandle}) but AllocationType is not matched to a C# allocation type ({AllocationType}).");
}

public bool IsDisposed => _disposed;
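
The order matters: the native tensor is deleted before the backing C# allocation is released, so TensorFlow never reads a freed buffer, and the final else branch turns any future AllocationType that sets AllocationHandle without a matching free path into a loud failure. A small illustrative sketch (not from this diff):

// Sketch: GCHandle-backed lifecycle end to end.
var t = new Tensor(new double[] {1, 2, 3});   // pins the array -> AllocationType.GCHandle
// ... hand t to native TensorFlow, run, read results ...
t.Dispose();                                  // 1) TF_DeleteTensor  2) GCHandle.Free, exactly once
// t.IsDisposed is now true and the array is unpinned.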


src/TensorFlowNET.Core/Tensors/c_api.tensor.cs (+73, -0)

@@ -15,6 +15,7 @@
******************************************************************************/

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

namespace Tensorflow
@@ -77,6 +78,51 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)]
public static extern IntPtr TF_NewTensor(TF_DataType dataType, long[] dims, int num_dims, IntPtr data, UIntPtr len, Deallocator deallocator, ref DeallocatorArgs deallocator_arg);

/// <summary>
/// Return a new tensor that holds the bytes data[0,len-1]
/// </summary>
/// <param name="dataType"></param>
/// <param name="dims"></param>
/// <param name="num_dims"></param>
/// <param name="data"></param>
/// <param name="len">num_bytes, ex: 6 * sizeof(float)</param>
/// <param name="deallocator"></param>
/// <param name="deallocator_arg"></param>
/// <returns></returns>
[DllImport(TensorFlowLibName)]
public static extern IntPtr TF_NewTensor(TF_DataType dataType, long[] dims, int num_dims, IntPtr data, UIntPtr len, Deallocator deallocator, IntPtr deallocator_arg);

/// <summary>
/// Return a new tensor that holds the bytes data[0,len-1]
/// </summary>
/// <param name="dataType"></param>
/// <param name="dims"></param>
/// <param name="num_dims"></param>
/// <param name="data"></param>
/// <param name="len">num_bytes, ex: 6 * sizeof(float)</param>
/// <param name="deallocator"></param>
/// <param name="deallocator_arg"></param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static unsafe IntPtr TF_NewTensor(TF_DataType dataType, long[] dims, int num_dims, IntPtr data, UIntPtr len)
{
return TF_NewTensor(dataType, dims, num_dims, data, len, EmptyDeallocator, DeallocatorArgs.Empty);
}
/// <summary>
/// Return a new tensor that holds the bytes data[0,len-1]
/// </summary>
/// <param name="dataType"></param>
/// <param name="dims"></param>
/// <param name="num_dims"></param>
/// <param name="data"></param>
/// <param name="len">num_bytes, ex: 6 * sizeof(float)</param>
/// <param name="deallocator"></param>
/// <param name="deallocator_arg"></param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static unsafe IntPtr TF_NewTensor(TF_DataType dataType, long[] dims, int num_dims, void* data, UIntPtr len)
{
return TF_NewTensor(dataType, dims, num_dims, new IntPtr(data), len);
}

/// <summary>
/// Return the number of dimensions that the tensor has.
/// </summary>
@@ -159,5 +205,32 @@ namespace Tensorflow

[DllImport(TensorFlowLibName)]
public static extern unsafe UIntPtr TF_StringDecode(byte* src, UIntPtr src_len, byte** dst, UIntPtr* dst_len, IntPtr status);


public static c_api.Deallocator EmptyDeallocator = FreeNothingDeallocator;

[MonoPInvokeCallback(typeof(c_api.Deallocator))]
private static void FreeNothingDeallocator(IntPtr dataPtr, IntPtr len, ref c_api.DeallocatorArgs args)
{ }

/// <summary>
/// This attribute can be applied to callback functions that will be invoked
/// from unmanaged code to managed code.
/// </summary>
/// <remarks>
/// <code>
/// [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))]
/// internal static void MyFreeFunc (IntPtr data, IntPtr length){..}
/// </code>
/// </remarks>
public sealed class MonoPInvokeCallbackAttribute : Attribute
{
/// <summary>
/// Use this constructor to annotate the type of the callback function that
/// will be invoked from unmanaged code.
/// </summary>
/// <param name="t">T.</param>
public MonoPInvokeCallbackAttribute(Type t) { }
}
}
}
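
A hedged usage sketch (not part of this diff) of the new deallocator-less overloads, here with a fixed array; the pointer must stay pinned for the tensor's lifetime, which is why the Tensor constructors above prefer a long-lived GCHandle over a fixed block:

// Sketch: hand TF a pinned buffer without registering any deallocator.
var data = new float[] {1, 2, 3, 4, 5, 6};
unsafe
{
    fixed (float* p = data)
    {
        IntPtr handle = c_api.TF_NewTensor(
            TF_DataType.TF_FLOAT,
            dims: new long[] {2, 3},
            num_dims: 2,
            data: p,                                   // void* overload
            len: (UIntPtr) (data.Length * sizeof(float)));
        // ... use the tensor strictly inside the fixed scope ...
        c_api.TF_DeleteTensor(handle);
    }
}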

test/TensorFlowNET.UnitTest/TensorTest.cs (+43, -56)

@@ -4,6 +4,7 @@ using System;
using System.Linq;
using System.Runtime.InteropServices;
using System.Threading;
using FluentAssertions;
using Tensorflow;
using static Tensorflow.Binding;

@@ -12,77 +13,63 @@ namespace TensorFlowNET.UnitTest
[TestClass]
public class TensorTest : CApiTest
{
[Ignore("Not for mult-thread")]
public void TensorDeallocationThreadSafety()
{
var tensors = new Tensor[1000];
foreach (var i in range(1000))
{
tensors[i] = new Tensor(new int[1000]);
}
SemaphoreSlim s = new SemaphoreSlim(0, 2);
SemaphoreSlim s_done = new SemaphoreSlim(0, 2);

var t1 = new Thread(() =>
{
s.Wait();
foreach (var t in tensors)
t.Dispose();
s_done.Release();
});

var t2 = new Thread(() =>
{
s.Wait();
foreach (var t in tensors)
t.Dispose();
s_done.Release();
});

t1.Start();
t2.Start();
s.Release(2);
s_done.Wait();
s_done.Wait();

foreach (var t in tensors)
Assert.IsTrue(t.IsDisposed);
}

[TestMethod]
public unsafe void TensorFromFixed()
{
var array = new float[1000];
var span = new Span<float>(array, 100, 500);
fixed (float* ptr=&MemoryMarshal.GetReference(span))
fixed (float* ptr = &MemoryMarshal.GetReference(span))
{
using (var t = new Tensor((IntPtr)ptr, new long[] {span.Length}, tf.float32, 4*span.Length))
using (var t = new Tensor((IntPtr) ptr, new long[] {span.Length}, tf.float32, 4 * span.Length))
{
Assert.IsFalse(t.IsDisposed);
Assert.IsFalse(t.IsMemoryOwner);
Assert.AreEqual(2000, (int) t.bytesize);
}
}

fixed (float* ptr = &array[0])
{
using (var t = new Tensor((IntPtr)ptr, new long[] { array.Length }, tf.float32, 4 * array.Length))
using (var t = new Tensor((IntPtr) ptr, new long[] {array.Length}, tf.float32, 4 * array.Length))
{
Assert.IsFalse(t.IsDisposed);
Assert.IsFalse(t.IsMemoryOwner);
Assert.AreEqual(4000, (int)t.bytesize);
Assert.AreEqual(4000, (int) t.bytesize);
}
}
}

[TestMethod]
public unsafe void TensorFromArray()
{
var array = new float[1000];
using (var t = new Tensor(array, new long[] {array.Length}, tf.float32))
{
Assert.IsFalse(t.IsDisposed);
Assert.AreEqual(1000 * sizeof(float), (int) t.bytesize);
}

using (var t = new Tensor(new float[] {1}, new long[] {1}, tf.float32))
{
Assert.IsFalse(t.IsDisposed);
Assert.AreEqual(1 * sizeof(float), (int) t.bytesize);
}

using (var t = new Tensor(new float[] {1}, null, tf.float32))
{
Assert.IsFalse(t.IsDisposed);
Assert.AreEqual(1 * sizeof(float), (int) t.bytesize);
t.shape.Should().BeEmpty();
}
}

[TestMethod]
public void AllocateTensor()
{
ulong num_bytes = 6 * sizeof(float);
long[] dims = { 2, 3 };
long[] dims = {2, 3};
Tensor t = c_api.TF_AllocateTensor(TF_DataType.TF_FLOAT, dims, 2, num_bytes);
EXPECT_EQ(TF_DataType.TF_FLOAT, t.dtype);
EXPECT_EQ(2, t.NDims);
EXPECT_EQ((int)dims[0], t.shape[0]);
EXPECT_EQ((int) dims[0], t.shape[0]);
EXPECT_EQ(num_bytes, t.bytesize);
t.Dispose();
}
@@ -98,7 +85,7 @@ namespace TensorFlowNET.UnitTest
NDArray nd = np.array(2, 3);
Tensor t = new Tensor(nd);
Tensor o = t.MaybeMove();
ASSERT_TRUE(o == IntPtr.Zero); // It is unsafe to move memory TF might not own.
ASSERT_TRUE(o == IntPtr.Zero); // It is unsafe to move memory TF might not own.
t.Dispose();
}

@@ -116,10 +103,10 @@ namespace TensorFlowNET.UnitTest

EXPECT_EQ(tensor.dtype, TF_DataType.TF_FLOAT);
EXPECT_EQ(tensor.rank, nd.ndim);
EXPECT_EQ((int)tensor.shape[0], nd.shape[0]);
EXPECT_EQ((int)tensor.shape[1], nd.shape[1]);
EXPECT_EQ(tensor.bytesize, (ulong)nd.size * sizeof(float));
Assert.IsTrue(Enumerable.SequenceEqual(nd.Data<float>(), new float[] { 1, 2, 3, 4, 5, 6 }));
EXPECT_EQ((int) tensor.shape[0], nd.shape[0]);
EXPECT_EQ((int) tensor.shape[1], nd.shape[1]);
EXPECT_EQ(tensor.bytesize, (ulong) nd.size * sizeof(float));
Assert.IsTrue(Enumerable.SequenceEqual(nd.Data<float>(), new float[] {1, 2, 3, 4, 5, 6}));
}

/// <summary>
@@ -148,7 +135,7 @@ namespace TensorFlowNET.UnitTest
EXPECT_EQ(-1, num_dims);

// Set the shape to be 2 x Unknown
long[] dims = { 2, -1 };
long[] dims = {2, -1};
c_api.TF_GraphSetTensorShape(graph, feed_out_0, dims, dims.Length, s);
Assert.IsTrue(s.Code == TF_Code.TF_OK);
num_dims = c_api.TF_GraphGetTensorNumDims(graph, feed_out_0, s);
@@ -177,8 +164,8 @@ namespace TensorFlowNET.UnitTest
c_api.TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s);
Assert.IsTrue(s.Code == TF_Code.TF_OK);
EXPECT_EQ(2, num_dims);
EXPECT_EQ(2, (int)returned_dims[0]);
EXPECT_EQ(3, (int)returned_dims[1]);
EXPECT_EQ(2, (int) returned_dims[0]);
EXPECT_EQ(3, (int) returned_dims[1]);

// Try to set 'unknown' with same rank on the shape and see that
// it doesn't change.
@@ -189,8 +176,8 @@ namespace TensorFlowNET.UnitTest
c_api.TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s);
Assert.IsTrue(s.Code == TF_Code.TF_OK);
EXPECT_EQ(2, num_dims);
EXPECT_EQ(2, (int)returned_dims[0]);
EXPECT_EQ(3, (int)returned_dims[1]);
EXPECT_EQ(2, (int) returned_dims[0]);
EXPECT_EQ(3, (int) returned_dims[1]);

// Try to fetch a shape with the wrong num_dims
c_api.TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, 5, s);
@@ -216,4 +203,4 @@ namespace TensorFlowNET.UnitTest
s.Dispose();
}
}
}
}
