Browse Source

Tensor: dispose Tensors that have been garbage collected (thread safe) + test

tags/v0.10
Meinrad Recheis 6 years ago
parent
commit
b2a9a989af
6 changed files with 125 additions and 36 deletions
  1. +15
    -2
      src/TensorFlowNET.Core/Status/Status.cs
  2. +2
    -0
      src/TensorFlowNET.Core/TensorFlowNET.Core.csproj.DotSettings
  3. +30
    -23
      src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
  4. +26
    -3
      src/TensorFlowNET.Core/Tensors/Tensor.cs
  5. +10
    -4
      src/TensorFlowNet.Benchmarks/TensorCreation.cs
  6. +42
    -4
      test/TensorFlowNET.UnitTest/TensorTest.cs

+ 15
- 2
src/TensorFlowNET.Core/Status/Status.cs View File

@@ -26,7 +26,7 @@ namespace Tensorflow
/// </summary>
public class Status : IDisposable
{
protected readonly IntPtr _handle;
protected IntPtr _handle;

/// <summary>
/// Error message
@@ -71,7 +71,20 @@ namespace Tensorflow

public void Dispose()
{
    // Detach the native handle under the lock so that concurrent or repeated
    // Dispose calls (including the one from the finalizer) free the
    // TF_Status at most once.
    // NOTE(review): lock(this) is discouraged (callers could lock the same
    // object); consider a private readonly gate object — confirm no external
    // code synchronizes on Status instances before changing.
    IntPtr h = IntPtr.Zero;
    lock (this)
    {
        h = _handle;
        _handle = IntPtr.Zero;
    }
    if (h != IntPtr.Zero)
        c_api.TF_DeleteStatus(h);
    // Already cleaned up deterministically; no need to run the finalizer.
    GC.SuppressFinalize(this);
}

/// <summary>
/// Frees the native status if the instance was never explicitly disposed.
/// </summary>
~Status()
{
    Dispose();
}
}
}

+ 2
- 0
src/TensorFlowNET.Core/TensorFlowNET.Core.csproj.DotSettings View File

@@ -0,0 +1,2 @@
<wpf:ResourceDictionary xml:space="preserve" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:ss="urn:shemas-jetbrains-com:settings-storage-xaml" xmlns:wpf="http://schemas.microsoft.com/winfx/2006/xaml/presentation">
<s:Boolean x:Key="/Default/CodeInspection/NamespaceProvider/NamespaceFoldersToSkip/=tensors/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary>

+ 30
- 23
src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs View File

@@ -31,7 +31,21 @@ namespace Tensorflow
/// <summary>
/// true if the unmanaged buffer has been freed by the deallocator callback.
/// </summary>
private bool _deallocator_called => _deallocatorArgs.deallocator_called;

/// <summary>
/// true if the Tensor was created from a managed array (a GCHandle pins it).
/// </summary>
private bool _isPinnedArray => _deallocatorArgs.gc_handle != IntPtr.Zero;

/// <summary>
/// Holds the values consumed by the unmanaged deallocator callback.
/// </summary>
private DeallocatorArgs _deallocatorArgs = new DeallocatorArgs() { gc_handle = IntPtr.Zero };

// note: the delegates must be kept alive in static fields so the unmanaged
// callback cannot be garbage collected while native code may still invoke it
static Deallocator _hGlobalDeallocator = FreeHGlobalMemory;
static Deallocator _gcHandleDeallocator = FreeGCHandle;

public Tensor(IntPtr handle)
{
@@ -94,7 +108,7 @@ namespace Tensorflow
{
var v = (sbyte*)Marshal.AllocHGlobal(sizeof(sbyte));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(sbyte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(sbyte), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(sbyte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(sbyte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -120,7 +134,7 @@ namespace Tensorflow
{
var v = (byte*)Marshal.AllocHGlobal(sizeof(byte));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(byte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(byte), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(byte)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(byte), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -146,7 +160,7 @@ namespace Tensorflow
{
var v = (short*)Marshal.AllocHGlobal(sizeof(short));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(short)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(short), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(short)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(short), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -172,7 +186,7 @@ namespace Tensorflow
{
var v = (ushort*)Marshal.AllocHGlobal(sizeof(ushort));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ushort)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ushort), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ushort)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ushort), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -198,7 +212,7 @@ namespace Tensorflow
{
var v = (int*)Marshal.AllocHGlobal(sizeof(int));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(int)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(int), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(int)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(int), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -224,7 +238,7 @@ namespace Tensorflow
{
var v = (uint*)Marshal.AllocHGlobal(sizeof(uint));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(uint)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(uint), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(uint)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(uint), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -250,7 +264,7 @@ namespace Tensorflow
{
var v = (long*)Marshal.AllocHGlobal(sizeof(long));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(long)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(long), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(long)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(long), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -276,7 +290,7 @@ namespace Tensorflow
{
var v = (ulong*)Marshal.AllocHGlobal(sizeof(ulong));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ulong)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ulong), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(ulong)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(ulong), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -302,7 +316,7 @@ namespace Tensorflow
{
var v = (float*)Marshal.AllocHGlobal(sizeof(float));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(float)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(float), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(float)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(float), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -328,7 +342,7 @@ namespace Tensorflow
{
var v = (double*)Marshal.AllocHGlobal(sizeof(double));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(double)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(double), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(double)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(double), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
/// <summary>
@@ -354,7 +368,7 @@ namespace Tensorflow
{
var v = (Complex*)Marshal.AllocHGlobal(sizeof(Complex));
*v = value;
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(Complex)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(Complex), deallocator: hGlobalDeallocator, ref _deallocatorArgs);
_handle = TF_NewTensor(dType ?? dtypes.as_dtype(typeof(Complex)), dims:new long[0], num_dims: 0, data: (IntPtr)v, len: (UIntPtr)sizeof(Complex), deallocator: _hGlobalDeallocator, ref _deallocatorArgs);
}
#endif

@@ -444,7 +458,7 @@ namespace Tensorflow
dims.Length,
dotHandle,
(UIntPtr)buffersize,
hGlobalDeallocator,
_hGlobalDeallocator,
ref _deallocatorArgs);

return tfHandle;
@@ -458,8 +472,7 @@ namespace Tensorflow
_id = ops.uid();
}

private bool _isPinnedArray => _deallocatorArgs.gc_handle != IntPtr.Zero;

/// <summary>
/// Creates a new tensor from the given array without copying memory. The array is pinned down and the pointer passed on.
/// </summary>
@@ -516,17 +529,11 @@ namespace Tensorflow
var gcHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
_deallocatorArgs = new DeallocatorArgs() { gc_handle = GCHandle.ToIntPtr(gcHandle) };
if (shape == null || shape.Length == 0)
return TF_NewTensor(dt, new long[0], 0, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), gcHandleDeallocator, ref _deallocatorArgs);
return TF_NewTensor(dt, new long[0], 0, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs);
else
return TF_NewTensor(dt, shape, shape.Length, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), gcHandleDeallocator, ref _deallocatorArgs);
return TF_NewTensor(dt, shape, shape.Length, gcHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), _gcHandleDeallocator, ref _deallocatorArgs);
}

private DeallocatorArgs _deallocatorArgs = new DeallocatorArgs() { gc_handle = IntPtr.Zero };

// note: they must be assigned to a static variable in order to work as unmanaged callbacks
static Deallocator hGlobalDeallocator = FreeHGlobalMemory;
static Deallocator gcHandleDeallocator = FreeGCHandle;

[MonoPInvokeCallback(typeof(Deallocator))]
internal static void FreeHGlobalMemory(IntPtr dataPtr, IntPtr len, ref DeallocatorArgs args)
{


+ 26
- 3
src/TensorFlowNET.Core/Tensors/Tensor.cs View File

@@ -350,12 +350,35 @@ namespace Tensorflow

public void Dispose()
{
    // Detach the native handle under the lock so two threads racing to
    // Dispose the same tensor free it exactly once (see
    // TensorDeallocationThreadSafety test).
    IntPtr h = IntPtr.Zero;
    lock (this)
    {
        h = _handle;
        _handle = IntPtr.Zero;
    }
    if (h != IntPtr.Zero)
        // BUG FIX: was TF_DeleteTensor(_handle) — _handle is always
        // IntPtr.Zero at this point, so the native tensor was never freed.
        c_api.TF_DeleteTensor(h);
    status.Dispose();
    GC.SuppressFinalize(this);
}

/// <summary>
/// Dispose the tensor when it gets garbage collected.
/// NOTE(review): this runs Dispose() on the finalizer thread, which also
/// calls status.Dispose() on a managed object — finalization order between
/// this tensor and its Status is not guaranteed; Status.Dispose guards its
/// own handle, but confirm this is intentional.
/// </summary>
~Tensor()
{
Dispose();
}

/// <summary>
/// True once Dispose has released the native handle. Reads the handle under
/// the same lock Dispose uses, so the answer is consistent with a concurrent
/// disposal.
/// </summary>
public bool IsDisposed
{
    get
    {
        lock (this)
            return _handle == IntPtr.Zero;
    }
}

}


+ 10
- 4
src/TensorFlowNet.Benchmarks/TensorCreation.cs View File

@@ -7,7 +7,7 @@ using Tensorflow;
namespace TensorFlowNet.Benchmark
{
[SimpleJob(launchCount: 1, warmupCount: 10, targetCount: 30)]
[SimpleJob(launchCount: 1, warmupCount: 2, targetCount: 10)]
[MinColumn, MaxColumn, MeanColumn, MedianColumn]
public class TensorCreation
{
@@ -16,16 +16,19 @@ namespace TensorFlowNet.Benchmark
[GlobalSetup]
public void Setup()
{
    // One small shared source array; each benchmark iteration builds its own
    // tensor from it. (Residual pre-change line `data = new double[1000];`
    // removed — it was immediately overwritten.)
    data = new double[10];
}
[Benchmark]
public void TensorFromArray()
{
    // Measures Tensor construction from a managed double[].
    // Residual old loop header (`i < 100`) removed — as scraped, the stale
    // line nested the loops, running 100x1000 iterations; stray empty brace
    // pair dropped.
    // NOTE(review): the commit's +/- counts suggest the original may have
    // wrapped the tensor in a using block — confirm against the repository.
    var g = new Graph();
    for (int i = 0; i < 1000; i++)
    {
        var tensor = new Tensor(data);
    }
}
@@ -34,9 +37,12 @@ namespace TensorFlowNet.Benchmark
[Benchmark]
public void TensorFromNDArray()
{
    // Measures Tensor construction via an intermediate NDArray.
    // Residual old loop header (`i < 100`) removed — as scraped, the stale
    // line nested the loops; stray empty brace pair dropped.
    var g = new Graph();
    for (int i = 0; i < 1000; i++)
    {
        var tensor = new Tensor(new NDArray(data));
    }
}


+ 42
- 4
test/TensorFlowNET.UnitTest/TensorTest.cs View File

@@ -5,17 +5,54 @@ using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Tensorflow;
using static Tensorflow.Python;

namespace TensorFlowNET.UnitTest
{
[TestClass]
public class TensorTest : CApiTest
{
/// <summary>
/// Disposing the same set of tensors concurrently from two threads must be
/// safe: every native handle is freed exactly once and all tensors report
/// IsDisposed afterwards. (The previous XML doc here — "Port from
/// c_api_test.cc TEST(CAPI, AllocateTensor)" — described the AllocateTensor
/// test below, not this one.)
/// </summary>
[TestMethod]
public void TensorDeallocationThreadSafety()
{
    var tensors = new Tensor[1000];
    foreach (var i in range(1000))
    {
        tensors[i] = new Tensor(new int[1000]);
    }

    // Both workers block on `s` so they start disposing at the same moment,
    // maximizing the chance of a racy double-free if Dispose is not safe.
    SemaphoreSlim s = new SemaphoreSlim(0, 2);
    SemaphoreSlim s_done = new SemaphoreSlim(0, 2);

    var t1 = new Thread(() =>
    {
        s.Wait();
        foreach (var t in tensors)
            t.Dispose();
        s_done.Release();
    });

    var t2 = new Thread(() =>
    {
        s.Wait();
        foreach (var t in tensors)
            t.Dispose();
        s_done.Release();
    });

    t1.Start();
    t2.Start();
    s.Release(2);
    s_done.Wait();
    s_done.Wait();

    foreach (var t in tensors)
        Assert.IsTrue(t.IsDisposed);
}


[TestMethod]
public void AllocateTensor()
{
@@ -29,6 +66,7 @@ namespace TensorFlowNET.UnitTest
t.Dispose();*/
}


/// <summary>
/// Port from c_api_test.cc
/// `TEST(CAPI, MaybeMove)`


Loading…
Cancel
Save