diff --git a/README.md b/README.md
index 421242cd..9ec3432a 100644
--- a/README.md
+++ b/README.md
@@ -149,6 +149,7 @@ Example runner will download all the required files like training data and model
* [Object Detection](test/TensorFlowNET.Examples/ImageProcess/ObjectDetection.cs)
* [Text Classification](test/TensorFlowNET.Examples/TextProcess/BinaryTextClassification.cs)
* [CNN Text Classification](test/TensorFlowNET.Examples/TextProcess/cnn_models/VdCnn.cs)
+* [MNIST CNN](test/TensorFlowNET.Examples/ImageProcess/DigitRecognitionCNN.cs)
* [Named Entity Recognition](test/TensorFlowNET.Examples/TextProcess/NER)
* [Transfer Learning for Image Classification in InceptionV3](test/TensorFlowNET.Examples/ImageProcess/RetrainImageClassifier.cs)
diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln
index 874f8082..19a66b80 100644
--- a/TensorFlow.NET.sln
+++ b/TensorFlow.NET.sln
@@ -1,7 +1,7 @@
Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio Version 16
-VisualStudioVersion = 16.0.29025.244
+# Visual Studio 15
+VisualStudioVersion = 15.0.28307.645
MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.UnitTest", "test\TensorFlowNET.UnitTest\TensorFlowNET.UnitTest.csproj", "{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}"
EndProject
@@ -17,6 +17,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Keras.UnitTest", "test\Kera
EndProject
Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorFlowNET.Examples.FSharp", "test\TensorFlowNET.Examples.FSharp\TensorFlowNET.Examples.FSharp.fsproj", "{62BC3801-F0D3-44A9-A0AC-712F40C8F961}"
EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowNet.Benchmark", "src\TensorFlowNet.Benchmarks\TensorFlowNet.Benchmark.csproj", "{68861442-971A-4196-876E-C9330F0B3C54}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -51,6 +53,10 @@ Global
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.Build.0 = Debug|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.ActiveCfg = Release|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.Build.0 = Release|Any CPU
+ {68861442-971A-4196-876E-C9330F0B3C54}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {68861442-971A-4196-876E-C9330F0B3C54}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {68861442-971A-4196-876E-C9330F0B3C54}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {68861442-971A-4196-876E-C9330F0B3C54}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
diff --git a/docs/assets/tf.net.architecture.svg b/docs/assets/tf.net.architecture.svg
new file mode 100644
index 00000000..933fd802
--- /dev/null
+++ b/docs/assets/tf.net.architecture.svg
@@ -0,0 +1,370 @@
+
+
+
+
diff --git a/src/SciSharp.TensorFlow.Redist/CommonPackage.props b/src/SciSharp.TensorFlow.Redist/CommonPackage.props
new file mode 100644
index 00000000..08fbb153
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/CommonPackage.props
@@ -0,0 +1,24 @@
+
+
+
+
+
+ PreserveNewest
+ false
+ %(Filename)%(Extension)
+
+
+ PreserveNewest
+ false
+ %(Filename)%(Extension)
+
+
+
+
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj
new file mode 100644
index 00000000..79eaa564
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj
@@ -0,0 +1,188 @@
+
+
+
+ $(MSBuildThisFileDirectory)
+ $(ProjDir)bin\
+ $(ProjDir)obj\
+
+ x64
+ netstandard2.0
+ 1.14.0
+ 1
+
+ $(BinDir)packages\
+ $(MSBuildProjectName)
+ $(TensorFlowVersion)
+
+ true
+ false
+
+ Redist.nuspec
+ packageId=$(PackageId);version=$(PackageVersion)
+ $(ProjDir)
+
+ CopyFilesFromArchive
+
+ win
+ linux
+ osx
+ $(PackageRid)-$(TargetArchitecture)
+
+
+
+
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ @(FilesWithHashes->'%(FileHash)')
+ $([System.IO.File]::ReadAllText('%(LocalShaFile)'))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" />
+ <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/>
+ <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" />
+
+
+ <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-darwin-x86_64-1.14.0.tar.gz.sha b/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-darwin-x86_64-1.14.0.tar.gz.sha
new file mode 100644
index 00000000..951c4556
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-darwin-x86_64-1.14.0.tar.gz.sha
@@ -0,0 +1 @@
+7002EF701BD23C5EF5FF94192E935F0DDF960A21BE2531CEE158586830C00E0BA889900F7F6E8AB568BEE0ACF1F5A6A246BB43D11C4109E9DC782B46377D8142
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-linux-x86_64-1.14.0.tar.gz.sha b/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-linux-x86_64-1.14.0.tar.gz.sha
new file mode 100644
index 00000000..784640a0
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-linux-x86_64-1.14.0.tar.gz.sha
@@ -0,0 +1 @@
+E3F6D0309117E9E45780ECF8BC4D0268B3FC9F12E3E38FFE58496789330A4ACD2DC8FF721F3B8900357F6155F8A54000E45B99495F823486B558E8B42532392D
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-windows-x86_64-1.14.0.zip.sha b/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-windows-x86_64-1.14.0.zip.sha
new file mode 100644
index 00000000..b7d6402c
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/libtensorflow-cpu-windows-x86_64-1.14.0.zip.sha
@@ -0,0 +1 @@
+59A2B80B441439B851202358CE4A65BA0DDDB319A8A29E87B135DCD9954BC5B0628F2C0C8E72D6942EA3CDCE172805C2BD5421815B3D0210B62BC0936DC59A08
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/redist.nuspec b/src/SciSharp.TensorFlow.Redist/redist.nuspec
new file mode 100644
index 00000000..d2527c8b
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/redist.nuspec
@@ -0,0 +1,26 @@
+
+
+
+ $packageId$
+ $version$
+ The TensorFlow Authors
+ The TensorFlow Authors
+ true
+ LICENSE.txt
+ https://aka.ms/deprecateLicenseUrl
+ https://www.tensorflow.org/
+ $packageId$ contains the TensorFlow C library version $version$ redistributed as a NuGet package.
+ https://github.com/tensorflow/tensorflow/releases/tag/v$version$
+ Copyright 2019 The TensorFlow Authors. All rights reserved.
+ TensorFlow
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
index 81c8006b..f6d49192 100644
--- a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
+++ b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
@@ -1,6 +1,6 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -18,6 +18,8 @@ using NumSharp;
using System;
using System.Collections.Generic;
using System.Linq;
+using System.Numerics;
+using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text;
using static Tensorflow.c_api;
@@ -36,39 +38,246 @@ namespace Tensorflow
_handle = handle;
}
- public Tensor(NDArray nd, TF_DataType? tensorDType = null)
+#if _REGEN
+ %types=["sbyte", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"]
+ %foreach types%
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(#1[] data)
{
- _handle = Allocate(nd, tensorDType: tensorDType);
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(#1)), new long[]{data.Length}, data, Marshal.SizeOf<#1>());
}
- public unsafe Tensor(byte[] buffer)
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(#1[] data, long[] shape)
{
- var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
- _handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(#1)), shape, data, Marshal.SizeOf<#1>());
+ }
- IntPtr tensor = c_api.TF_TensorData(_handle);
- Marshal.WriteInt64(tensor, 0);
- fixed (byte* src = &buffer[0])
- c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
+ %
+#else
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(sbyte[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(sbyte)), new long[]{data.Length}, data, Marshal.SizeOf<sbyte>());
+ }
- status.Check(true);
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(sbyte[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(sbyte)), shape, data, Marshal.SizeOf<sbyte>());
}
- private IntPtr Allocate(NDArray nd, TF_DataType? tensorDType = null)
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(byte[] data)
{
- if (tensorDType == TF_DataType.TF_STRING &&
- nd.dtype.Name == "Byte")
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(byte)), new long[]{data.Length}, data, Marshal.SizeOf<byte>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(byte[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(byte)), shape, data, Marshal.SizeOf<byte>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(short[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(short)), new long[]{data.Length}, data, Marshal.SizeOf<short>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(short[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(short)), shape, data, Marshal.SizeOf<short>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(ushort[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(ushort)), new long[]{data.Length}, data, Marshal.SizeOf<ushort>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(ushort[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(ushort)), shape, data, Marshal.SizeOf<ushort>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(int[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(int)), new long[]{data.Length}, data, Marshal.SizeOf<int>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(int[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(int)), shape, data, Marshal.SizeOf<int>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(uint[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(uint)), new long[]{data.Length}, data, Marshal.SizeOf<uint>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(uint[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(uint)), shape, data, Marshal.SizeOf<uint>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(long[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(long)), new long[]{data.Length}, data, Marshal.SizeOf<long>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(long[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(long)), shape, data, Marshal.SizeOf<long>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(ulong[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(ulong)), new long[]{data.Length}, data, Marshal.SizeOf<ulong>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(ulong[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(ulong)), shape, data, Marshal.SizeOf<ulong>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(float[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(float)), new long[]{data.Length}, data, Marshal.SizeOf<float>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(float[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(float)), shape, data, Marshal.SizeOf<float>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(double[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(double)), new long[]{data.Length}, data, Marshal.SizeOf<double>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(double[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(double)), shape, data, Marshal.SizeOf<double>());
+ }
+
+
+ ///
+ /// Create a 1d Tensor from the given linear array and shape
+ ///
+ public Tensor(Complex[] data)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(Complex)), new long[]{data.Length}, data, Marshal.SizeOf<Complex>());
+ }
+
+ ///
+ /// Create a N-dimensional Tensor from the given array
+ ///
+ public Tensor(Complex[] data, long[] shape)
+ {
+ _handle = CreateTensorWithoutCopying(dtypes.as_dtype(typeof(Complex)), shape, data, Marshal.SizeOf<Complex>());
+ }
+#endif
+
+ public Tensor(NDArray nd, TF_DataType? tensorDType = null)
+ {
+ _handle = Allocate(nd, tensorDType: tensorDType);
+ }
+
+ private unsafe IntPtr Allocate(NDArray nd, TF_DataType? tensorDType = null)
+ {
+ if (tensorDType == TF_DataType.TF_STRING && nd.dtype.Name == "Byte")
{
- return new Tensor(nd.Data<byte>());
+ var buffer = nd.Data<byte>();
+ var size = c_api.TF_StringEncodedSize((UIntPtr)buffer.Length);
+ var handle = TF_AllocateTensor(TF_DataType.TF_STRING, IntPtr.Zero, 0, (UIntPtr)((ulong)size + 8));
+
+ IntPtr tensor = c_api.TF_TensorData(handle);
+ Marshal.WriteInt64(tensor, 0);
+ fixed (byte* src = &buffer[0])
+ c_api.TF_StringEncode(src, (UIntPtr)buffer.Length, (sbyte*)(tensor + sizeof(Int64)), size, status);
+
+ status.Check(true);
+ return handle;
}
IntPtr dotHandle = IntPtr.Zero;
- ulong size = 0;
+ int buffersize = 0;
if (nd.dtype.Name != "String")
{
- dotHandle = Marshal.AllocHGlobal(nd.dtypesize * nd.size);
- size = (ulong)(nd.size * nd.dtypesize);
+ buffersize = (nd.size * nd.dtypesize);
+ dotHandle = Marshal.AllocHGlobal(buffersize);
}
var dataType = ToTFDataType(nd.dtype);
@@ -116,7 +325,7 @@ namespace Tensorflow
dims,
dims.Length,
dotHandle,
- (UIntPtr)size,
+ (UIntPtr)buffersize,
deallocator,
ref deallocator_called);
@@ -130,5 +339,59 @@ namespace Tensorflow
_dtype = dtype;
_id = ops.uid();
}
+
+ ///
+ /// Creates a new tensor from the given array without copying memory. The array is pinned down and the pointer passed on.
+ ///
+ /// Represents the tensor shape.
+ /// The linear array of data, the data must fit in the tensor with the specified dimensions.
+ /// The number of bytes in memory of a single array element
+ ///
+ /// Use the FromBuffer method to create a tensor that has the specified dimensions
+ /// and is initialized with data from the data array. The data is copied starting
+ /// at the start offset, for count bytes and is laid out into the tensor following the
+ /// specified dimensions.
+ ///
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ protected IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int element_size)
+ {
+ return CreateTensorWithoutCopying(dt, shape, data, 0, data.Length, element_size);
+ }
+
+ ///
+ /// Creates a new tensor from a subsection of the given array without copying memory. The array is pinned down and the pointer passed on.
+ ///
+ /// Represents the tensor shape.
+ /// The linear array of data, the data must fit in the tensor with the specified dimensions.
+ /// The offset into the provided data array where the data resides.
+ /// The number of elements to copy from data.
+ /// The number of bytes in memory of a single array element
+ ///
+ /// Use the FromBuffer method to create a tensor that has the specified dimensions
+ /// and is initialized with data from the data array. The data is copied starting
+ /// at the start offset, for count bytes and is laid out into the tensor following the
+ /// specified dimensions.
+ ///
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ protected IntPtr CreateTensorWithoutCopying(TF_DataType dt, long[] shape, Array data, int start, int count, int element_size)
+ {
+ if (start < 0 || start > data.Length - count)
+ throw new ArgumentException($"Array length {data.Length} does not match the given shape {new Shape(shape.Cast<int>().ToArray())}");
+
+ // get a handle to the pinned array which we will pass on to the tensor computation engine to use
+ var dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
+
+ // Free the original buffer and set flag
+ Deallocator deallocator = (IntPtr values, IntPtr len, ref bool closure) =>
+ {
+ dataHandle.Free();
+ closure = true;
+ };
+
+ if (shape == null)
+ return TF_NewTensor(dt, null, 0, dataHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), deallocator, ref deallocator_called);
+ else
+ return TF_NewTensor(dt, shape, shape.Length, dataHandle.AddrOfPinnedObject() + start * element_size, (UIntPtr)(count * element_size), deallocator, ref deallocator_called);
+ }
}
}
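
The constructors added above pin the caller's array with a GCHandle and hand the pointer straight to TF_NewTensor; the registered deallocator unpins it once TensorFlow releases the buffer. A minimal usage sketch (hypothetical harness class, assuming only the Tensorflow namespace from this repository):

// Minimal sketch exercising the new typed-array constructors; the harness class is illustrative.
using System;
using Tensorflow;

class TensorCtorSketch
{
    static void Main()
    {
        var data = new double[] { 1.0, 2.0, 3.0, 4.0 };

        // 1-D tensor: the array is pinned and passed to TF_NewTensor without copying.
        using (var flat = new Tensor(data))
        // N-D tensor: the same data interpreted as a 2x2 matrix via an explicit shape.
        using (var matrix = new Tensor(data, new long[] { 2, 2 }))
        {
            Console.WriteLine(flat.dtype);    // expected TF_DOUBLE
            Console.WriteLine(matrix.dtype);  // expected TF_DOUBLE
        }
        // Dispose() calls TF_DeleteTensor; the deallocator then frees the pinned GCHandle.
    }
}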
diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs
index 786daa5c..cc54c667 100644
--- a/src/TensorFlowNET.Core/Tensors/Tensor.cs
+++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs
@@ -31,7 +31,7 @@ namespace Tensorflow
///
public partial class Tensor : IDisposable, ITensorOrOperation
{
- private readonly IntPtr _handle;
+ private IntPtr _handle;
private int _id;
private Operation _op;
@@ -351,6 +351,7 @@ namespace Tensorflow
public void Dispose()
{
c_api.TF_DeleteTensor(_handle);
+ _handle = IntPtr.Zero;
status.Dispose();
}
diff --git a/src/TensorFlowNET.Core/Tensors/dtypes.cs b/src/TensorFlowNET.Core/Tensors/dtypes.cs
index ebe6e283..5000401a 100644
--- a/src/TensorFlowNET.Core/Tensors/dtypes.cs
+++ b/src/TensorFlowNET.Core/Tensors/dtypes.cs
@@ -52,32 +52,51 @@ namespace Tensorflow
}
}
+ // "sbyte", "byte", "short", "ushort", "int", "uint", "long", "ulong", "float", "double", "Complex"
public static TF_DataType as_dtype(Type type)
{
TF_DataType dtype = TF_DataType.DtInvalid;
-
+
switch (type.Name)
{
- case "Boolean":
- dtype = TF_DataType.TF_BOOL;
+ case "SByte":
+ dtype = TF_DataType.TF_INT8;
+ break;
+ case "Byte":
+ dtype = TF_DataType.TF_UINT8;
+ break;
+ case "Int16":
+ dtype = TF_DataType.TF_INT16;
+ break;
+ case "UInt16":
+ dtype = TF_DataType.TF_UINT16;
break;
case "Int32":
dtype = TF_DataType.TF_INT32;
break;
+ case "UInt32":
+ dtype = TF_DataType.TF_UINT32;
+ break;
case "Int64":
dtype = TF_DataType.TF_INT64;
break;
+ case "UInt64":
+ dtype = TF_DataType.TF_UINT64;
+ break;
case "Single":
dtype = TF_DataType.TF_FLOAT;
break;
case "Double":
dtype = TF_DataType.TF_DOUBLE;
break;
+ case "Complex":
+ dtype = TF_DataType.TF_COMPLEX128;
+ break;
case "String":
dtype = TF_DataType.TF_STRING;
break;
- case "Byte":
- dtype = TF_DataType.TF_STRING;
+ case "Boolean":
+ dtype = TF_DataType.TF_BOOL;
break;
default:
throw new Exception("as_dtype Not Implemented");
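
With the switch above, dtypes.as_dtype now covers the primitive types used by the new Tensor constructors, and Byte maps to TF_UINT8 instead of TF_STRING. A small sanity sketch (hypothetical console program, assuming the public dtypes.as_dtype shown in this hunk):

// Hypothetical check of the widened as_dtype mapping.
using System;
using System.Numerics;
using Tensorflow;

class DtypeMappingSketch
{
    static void Main()
    {
        Console.WriteLine(dtypes.as_dtype(typeof(sbyte)));    // TF_INT8
        Console.WriteLine(dtypes.as_dtype(typeof(byte)));     // TF_UINT8 (previously TF_STRING)
        Console.WriteLine(dtypes.as_dtype(typeof(uint)));     // TF_UINT32
        Console.WriteLine(dtypes.as_dtype(typeof(Complex)));  // TF_COMPLEX128
        Console.WriteLine(dtypes.as_dtype(typeof(bool)));     // TF_BOOL
    }
}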
diff --git a/src/TensorFlowNET.Core/Variables/_VariableScopeStore.cs b/src/TensorFlowNET.Core/Variables/_VariableScopeStore.cs
index 3f0411a0..0bea062a 100644
--- a/src/TensorFlowNET.Core/Variables/_VariableScopeStore.cs
+++ b/src/TensorFlowNET.Core/Variables/_VariableScopeStore.cs
@@ -41,7 +41,11 @@ namespace Tensorflow
public void close_variable_subscopes(string scope_name)
{
+ var variable_scopes_count_tmp = new Dictionary<string, int>();
foreach (var k in variable_scopes_count.Keys)
+ variable_scopes_count_tmp.Add(k, variable_scopes_count[k]);
+
+ foreach (var k in variable_scopes_count_tmp.Keys)
if (scope_name == null || k.StartsWith(scope_name + "/"))
variable_scopes_count[k] = 0;
}
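
The temporary dictionary above exists because writing variable_scopes_count[k] while enumerating variable_scopes_count.Keys invalidates the enumerator on .NET Framework and older .NET Core runtimes. A stand-alone sketch of the same snapshot-before-mutation pattern (hypothetical names and data):

// Illustration of resetting selected entries without mutating the collection being enumerated.
using System;
using System.Collections.Generic;
using System.Linq;

class ScopeResetSketch
{
    static void Main()
    {
        var counts = new Dictionary<string, int> { ["outer/a"] = 2, ["outer/b"] = 1, ["other"] = 3 };

        // Assigning counts[k] inside `foreach (var k in counts.Keys)` can throw
        // InvalidOperationException ("Collection was modified") on older runtimes,
        // so enumerate a snapshot of the keys instead.
        foreach (var k in counts.Keys.ToList())
            if (k.StartsWith("outer/"))
                counts[k] = 0;

        Console.WriteLine(string.Join(", ", counts.Select(kv => $"{kv.Key}={kv.Value}")));
        // outer/a=0, outer/b=0, other=3
    }
}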
diff --git a/src/TensorFlowNet.Benchmarks/Program.cs b/src/TensorFlowNet.Benchmarks/Program.cs
new file mode 100644
index 00000000..4ba08cf1
--- /dev/null
+++ b/src/TensorFlowNet.Benchmarks/Program.cs
@@ -0,0 +1,40 @@
+using System;
+using System.Reflection;
+using BenchmarkDotNet.Configs;
+using BenchmarkDotNet.Running;
+
+namespace TensorFlowNet.Benchmark
+{
+ class Program
+ {
+ ///
+ /// dotnet TensorFlowNet.Benchmark.dll (Benchmark Class Name)
+ /// dotnet TensorFlowNet.Benchmark.dll TensorCreation
+ ///
+ ///
+ static void Main(string[] args)
+ {
+#if DEBUG
+ IConfig config = new DebugInProcessConfig();
+#else
+ IConfig config = null;
+#endif
+
+ if (args?.Length > 0)
+ {
+ for (int i = 0; i < args.Length; i++)
+ {
+ string name = $"TensorFlowNet.Benchmark.{args[i]}";
+ var type = Type.GetType(name);
+ BenchmarkRunner.Run(type, config);
+ }
+ }
+ else
+ {
+ BenchmarkSwitcher.FromAssembly(Assembly.GetExecutingAssembly()).Run(args, config);
+ }
+
+ Console.ReadLine();
+ }
+ }
+}
diff --git a/src/TensorFlowNet.Benchmarks/TensorCreation.cs b/src/TensorFlowNet.Benchmarks/TensorCreation.cs
new file mode 100644
index 00000000..1ac849f3
--- /dev/null
+++ b/src/TensorFlowNet.Benchmarks/TensorCreation.cs
@@ -0,0 +1,55 @@
+using System;
+using System.Linq;
+using BenchmarkDotNet.Attributes;
+using BenchmarkDotNet.Engines;
+using NumSharp;
+using Tensorflow;
+
+namespace TensorFlowNet.Benchmark
+{
+ [SimpleJob(launchCount: 1, warmupCount: 10, targetCount: 30)]
+ [MinColumn, MaxColumn, MeanColumn, MedianColumn]
+ public class TensorCreation
+ {
+ private double[] data;
+
+ [GlobalSetup]
+ public void Setup()
+ {
+ data = new double[1000];
+ }
+
+ [Benchmark]
+ public void TensorFromArray()
+ {
+ var g = new Graph();
+ for (int i = 0; i < 100; i++)
+ {
+ var tensor = new Tensor(data);
+ }
+ }
+
+
+ [Benchmark]
+ public void TensorFromNDArray()
+ {
+ var g = new Graph();
+ for (int i = 0; i < 100; i++)
+ {
+ var tensor = new Tensor(new NDArray(data));
+ }
+ }
+
+ //[Benchmark]
+ //public void Constant()
+ //{
+ // for (int i = 0; i < 100; i++)
+ // {
+ // //var tensor = new Tensor(new NDArray(data));
+ // var c = tf.constant(42.0);
+ // }
+ //}
+
+ }
+}
+
diff --git a/src/TensorFlowNet.Benchmarks/TensorFlowNet.Benchmark.csproj b/src/TensorFlowNet.Benchmarks/TensorFlowNet.Benchmark.csproj
new file mode 100644
index 00000000..10f6e0a0
--- /dev/null
+++ b/src/TensorFlowNet.Benchmarks/TensorFlowNet.Benchmark.csproj
@@ -0,0 +1,22 @@
+
+
+
+ Exe
+ netcoreapp2.2
+
+
+
+
+
+
+
+
+
+
+
+
+ PreserveNewest
+
+
+
+
diff --git a/src/TensorFlowNet.Benchmarks/tensorflow.dll b/src/TensorFlowNet.Benchmarks/tensorflow.dll
new file mode 100644
index 00000000..f98eb380
Binary files /dev/null and b/src/TensorFlowNet.Benchmarks/tensorflow.dll differ
diff --git a/test/TensorFlowNET.Examples/AudioProcess/README.md b/test/TensorFlowNET.Examples/AudioProcessing/README.md
similarity index 100%
rename from test/TensorFlowNET.Examples/AudioProcess/README.md
rename to test/TensorFlowNET.Examples/AudioProcessing/README.md
diff --git a/test/TensorFlowNET.Examples/ImageProcess/DigitRecognitionCNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs
similarity index 87%
rename from test/TensorFlowNET.Examples/ImageProcess/DigitRecognitionCNN.cs
rename to test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs
index a5a70f92..c637e09f 100644
--- a/test/TensorFlowNET.Examples/ImageProcess/DigitRecognitionCNN.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs
@@ -73,6 +73,10 @@ namespace TensorFlowNET.Examples.ImageProcess
float accuracy_test = 0f;
float loss_test = 1f;
+ NDArray x_train, y_train;
+ NDArray x_valid, y_valid;
+ NDArray x_test, y_test;
+
public bool Run()
{
PrepareData();
@@ -133,6 +137,62 @@ namespace TensorFlowNET.Examples.ImageProcess
return graph;
}
+ public void Train(Session sess)
+ {
+ // Number of training iterations in each epoch
+ var num_tr_iter = y_train.len / batch_size;
+
+ var init = tf.global_variables_initializer();
+ sess.run(init);
+
+ float loss_val = 100.0f;
+ float accuracy_val = 0f;
+
+ foreach (var epoch in range(epochs))
+ {
+ print($"Training epoch: {epoch + 1}");
+ // Randomly shuffle the training data at the beginning of each epoch
+ (x_train, y_train) = mnist.Randomize(x_train, y_train);
+
+ foreach (var iteration in range(num_tr_iter))
+ {
+ var start = iteration * batch_size;
+ var end = (iteration + 1) * batch_size;
+ var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end);
+
+ // Run optimization op (backprop)
+ sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
+
+ if (iteration % display_freq == 0)
+ {
+ // Calculate and display the batch loss and accuracy
+ var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
+ loss_val = result[0];
+ accuracy_val = result[1];
+ print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}");
+ }
+ }
+
+ // Run validation after every epoch
+ var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_valid), new FeedItem(y, y_valid));
+ loss_val = results1[0];
+ accuracy_val = results1[1];
+ print("---------------------------------------------------------");
+ print($"Epoch: {epoch + 1}, validation loss: {loss_val.ToString("0.0000")}, validation accuracy: {accuracy_val.ToString("P")}");
+ print("---------------------------------------------------------");
+ }
+ }
+
+ public void Test(Session sess)
+ {
+ var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_test), new FeedItem(y, y_test));
+ loss_test = result[0];
+ accuracy_test = result[1];
+ print("---------------------------------------------------------");
+ print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}");
+ print("---------------------------------------------------------");
+ }
+
///
/// Create a 2D convolution layer
///
@@ -217,6 +277,14 @@ namespace TensorFlowNET.Examples.ImageProcess
initializer: initial);
}
+ ///
+ /// Create a fully-connected layer
+ ///
+ /// input from previous layer
+ /// number of hidden units in the fully-connected layer
+ /// layer name
+ /// boolean to add ReLU non-linearity (or not)
+ /// The output array
private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true)
{
return with(tf.variable_scope(name), delegate
@@ -233,73 +301,36 @@ namespace TensorFlowNET.Examples.ImageProcess
return layer;
});
}
-
- public Graph ImportGraph() => throw new NotImplementedException();
-
- public void Predict(Session sess) => throw new NotImplementedException();
public void PrepareData()
{
mnist = MNIST.read_data_sets("mnist", one_hot: true);
+ (x_train, y_train) = Reformat(mnist.train.data, mnist.train.labels);
+ (x_valid, y_valid) = Reformat(mnist.validation.data, mnist.validation.labels);
+ (x_test, y_test) = Reformat(mnist.test.data, mnist.test.labels);
+
print("Size of:");
print($"- Training-set:\t\t{len(mnist.train.data)}");
print($"- Validation-set:\t{len(mnist.validation.data)}");
}
- public void Train(Session sess)
+ ///
+ /// Reformats the data to the format acceptable for convolutional layers
+ ///
+ ///
+ ///
+ ///
+ private (NDArray, NDArray) Reformat(NDArray x, NDArray y)
{
- // Number of training iterations in each epoch
- var num_tr_iter = mnist.train.labels.len / batch_size;
-
- var init = tf.global_variables_initializer();
- sess.run(init);
-
- float loss_val = 100.0f;
- float accuracy_val = 0f;
-
- foreach (var epoch in range(epochs))
- {
- print($"Training epoch: {epoch + 1}");
- // Randomly shuffle the training data at the beginning of each epoch
- var (x_train, y_train) = mnist.Randomize(mnist.train.data, mnist.train.labels);
-
- foreach (var iteration in range(num_tr_iter))
- {
- var start = iteration * batch_size;
- var end = (iteration + 1) * batch_size;
- var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end);
-
- // Run optimization op (backprop)
- sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
-
- if (iteration % display_freq == 0)
- {
- // Calculate and display the batch loss and accuracy
- var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
- loss_val = result[0];
- accuracy_val = result[1];
- print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}");
- }
- }
-
- // Run validation after every epoch
- var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.validation.data), new FeedItem(y, mnist.validation.labels));
- loss_val = results1[0];
- accuracy_val = results1[1];
- print("---------------------------------------------------------");
- print($"Epoch: {epoch + 1}, validation loss: {loss_val.ToString("0.0000")}, validation accuracy: {accuracy_val.ToString("P")}");
- print("---------------------------------------------------------");
- }
+ var (img_size, num_ch, num_class) = (np.sqrt(x.shape[1]), 1, len(np.unique(np.argmax(y, 1))));
+ var dataset = x.reshape(x.shape[0], img_size, img_size, num_ch).astype(np.float32);
+ //y[0] = np.arange(num_class) == y[0];
+ //var labels = (np.arange(num_class) == y.reshape(y.shape[0], 1, y.shape[1])).astype(np.float32);
+ return (dataset, y);
}
- public void Test(Session sess)
- {
- var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels));
- loss_test = result[0];
- accuracy_test = result[1];
- print("---------------------------------------------------------");
- print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}");
- print("---------------------------------------------------------");
- }
+ public Graph ImportGraph() => throw new NotImplementedException();
+
+ public void Predict(Session sess) => throw new NotImplementedException();
}
}
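
For context, the new Reformat helper reshapes flat MNIST rows into NHWC images so they fit the convolutional layers built in BuildGraph. A stand-alone sketch of the same reshape with NumSharp (the batch size and class name are illustrative):

// Hypothetical illustration of the reshape performed by Reformat above.
using System;
using NumSharp;

class ReformatSketch
{
    static void Main()
    {
        var x = np.zeros(64, 784);                        // batch of flattened 28x28 images
        int imgSize = (int)Math.Sqrt(x.shape[1]);         // 28
        var nhwc = x.reshape(x.shape[0], imgSize, imgSize, 1).astype(np.float32);
        Console.WriteLine(string.Join("x", nhwc.shape));  // 64x28x28x1
    }
}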
diff --git a/test/TensorFlowNET.Examples/ImageProcess/DigitRecognitionNN.cs b/test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/ImageProcess/DigitRecognitionNN.cs
rename to test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs
diff --git a/test/TensorFlowNET.Examples/ImageProcess/ImageBackgroundRemoval.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/ImageProcess/ImageBackgroundRemoval.cs
rename to test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs
diff --git a/test/TensorFlowNET.Examples/ImageProcess/ImageRecognitionInception.cs b/test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/ImageProcess/ImageRecognitionInception.cs
rename to test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs
diff --git a/test/TensorFlowNET.Examples/ImageProcess/InceptionArchGoogLeNet.cs b/test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/ImageProcess/InceptionArchGoogLeNet.cs
rename to test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs
diff --git a/test/TensorFlowNET.Examples/ImageProcess/ObjectDetection.cs b/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs
similarity index 86%
rename from test/TensorFlowNET.Examples/ImageProcess/ObjectDetection.cs
rename to test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs
index 32f7841b..9c4760d7 100644
--- a/test/TensorFlowNET.Examples/ImageProcess/ObjectDetection.cs
+++ b/test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs
@@ -32,7 +32,7 @@ namespace TensorFlowNET.Examples
{
public bool Enabled { get; set; } = true;
public string Name => "Object Detection";
- public bool IsImportingGraph { get; set; } = false;
+ public bool IsImportingGraph { get; set; } = true;
public float MIN_SCORE = 0.5f;
@@ -42,16 +42,34 @@ namespace TensorFlowNET.Examples
string labelFile = "mscoco_label_map.pbtxt";
string picFile = "input.jpg";
+ NDArray imgArr;
+
public bool Run()
{
PrepareData();
// read in the input image
- var imgArr = ReadTensorFromImageFile(Path.Join(imageDir, "input.jpg"));
+ imgArr = ReadTensorFromImageFile(Path.Join(imageDir, "input.jpg"));
+
+ var graph = IsImportingGraph ? ImportGraph() : BuildGraph();
+
+ with(tf.Session(graph), sess => Predict(sess));
- var graph = new Graph().as_default();
+ return true;
+ }
+
+ public Graph ImportGraph()
+ {
+ var graph = new Graph().as_default();
graph.Import(Path.Join(modelDir, pbFile));
+ return graph;
+ }
+
+ public void Predict(Session sess)
+ {
+ var graph = tf.get_default_graph();
+
Tensor tensorNum = graph.OperationByName("num_detections");
Tensor tensorBoxes = graph.OperationByName("detection_boxes");
Tensor tensorScores = graph.OperationByName("detection_scores");
@@ -59,16 +77,11 @@ namespace TensorFlowNET.Examples
Tensor imgTensor = graph.OperationByName("image_tensor");
Tensor[] outTensorArr = new Tensor[] { tensorNum, tensorBoxes, tensorScores, tensorClasses };
- with(tf.Session(graph), sess =>
- {
- var results = sess.run(outTensorArr, new FeedItem(imgTensor, imgArr));
-
- NDArray[] resultArr = results.Data<NDArray>();
-
- buildOutputImage(resultArr);
- });
+ var results = sess.run(outTensorArr, new FeedItem(imgTensor, imgArr));
- return true;
+ NDArray[] resultArr = results.Data<NDArray>();
+
+ buildOutputImage(resultArr);
}
public void PrepareData()
@@ -159,29 +172,8 @@ namespace TensorFlowNET.Examples
}
}
- public Graph ImportGraph()
- {
- throw new NotImplementedException();
- }
-
- public Graph BuildGraph()
- {
- throw new NotImplementedException();
- }
-
- public void Train(Session sess)
- {
- throw new NotImplementedException();
- }
-
- public void Predict(Session sess)
- {
- throw new NotImplementedException();
- }
-
- public void Test(Session sess)
- {
- throw new NotImplementedException();
- }
+ public Graph BuildGraph() => throw new NotImplementedException();
+ public void Train(Session sess) => throw new NotImplementedException();
+ public void Test(Session sess) => throw new NotImplementedException();
}
}
diff --git a/test/TensorFlowNET.Examples/ImageProcess/RetrainImageClassifier.cs b/test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/ImageProcess/RetrainImageClassifier.cs
rename to test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/BinaryTextClassification.cs b/test/TensorFlowNET.Examples/TextProcessing/BinaryTextClassification.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/BinaryTextClassification.cs
rename to test/TensorFlowNET.Examples/TextProcessing/BinaryTextClassification.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/CnnTextClassification.cs b/test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/CnnTextClassification.cs
rename to test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/DataHelpers.cs b/test/TensorFlowNET.Examples/TextProcessing/DataHelpers.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/DataHelpers.cs
rename to test/TensorFlowNET.Examples/TextProcessing/DataHelpers.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/NER/BiLstmCrfNer.cs b/test/TensorFlowNET.Examples/TextProcessing/NER/BiLstmCrfNer.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/NER/BiLstmCrfNer.cs
rename to test/TensorFlowNET.Examples/TextProcessing/NER/BiLstmCrfNer.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/NER/CRF.cs b/test/TensorFlowNET.Examples/TextProcessing/NER/CRF.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/NER/CRF.cs
rename to test/TensorFlowNET.Examples/TextProcessing/NER/CRF.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/NER/LstmCrfNer.cs b/test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/NER/LstmCrfNer.cs
rename to test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/NamedEntityRecognition.cs b/test/TensorFlowNET.Examples/TextProcessing/NamedEntityRecognition.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/NamedEntityRecognition.cs
rename to test/TensorFlowNET.Examples/TextProcessing/NamedEntityRecognition.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/Word2Vec.cs b/test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/Word2Vec.cs
rename to test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/cnn_models/CharCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/cnn_models/CharCnn.cs
rename to test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/cnn_models/ITextModel.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/ITextModel.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/cnn_models/ITextModel.cs
rename to test/TensorFlowNET.Examples/TextProcessing/cnn_models/ITextModel.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/cnn_models/VdCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/cnn_models/VdCnn.cs
rename to test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs
diff --git a/test/TensorFlowNET.Examples/TextProcess/cnn_models/WordCnn.cs b/test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs
similarity index 100%
rename from test/TensorFlowNET.Examples/TextProcess/cnn_models/WordCnn.cs
rename to test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs
diff --git a/test/TensorFlowNET.Examples/Utility/Datasets.cs b/test/TensorFlowNET.Examples/Utility/Datasets.cs
index 93ca1869..af57c7cf 100644
--- a/test/TensorFlowNET.Examples/Utility/Datasets.cs
+++ b/test/TensorFlowNET.Examples/Utility/Datasets.cs
@@ -28,7 +28,7 @@ namespace TensorFlowNET.Examples.Utility
var perm = np.random.permutation(y.shape[0]);
np.random.shuffle(perm);
- return (train.data[perm], train.labels[perm]);
+ return (x[perm], y[perm]);
}
///
diff --git a/test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs b/test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs
index 4c71d7e2..9b889359 100644
--- a/test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs
+++ b/test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs
@@ -39,6 +39,7 @@ namespace TensorFlowNET.ExamplesTests
new InceptionArchGoogLeNet() { Enabled = true }.Run();
}
+ [Ignore]
[TestMethod]
public void KMeansClustering()
{
@@ -83,10 +84,12 @@ namespace TensorFlowNET.ExamplesTests
new NearestNeighbor() { Enabled = true, TrainSize = 500, ValidationSize = 100, TestSize = 100 }.Run();
}
+ [Ignore]
[TestMethod]
public void WordCnnTextClassification()
=> new CnnTextClassification { Enabled = true, ModelName = "word_cnn", DataLimit =100 }.Run();
+ [Ignore]
[TestMethod]
public void CharCnnTextClassification()
=> new CnnTextClassification { Enabled = true, ModelName = "char_cnn", DataLimit = 100 }.Run();