@@ -28,8 +28,14 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr | |||||
Install TF.NET and TensorFlow binary through NuGet. | Install TF.NET and TensorFlow binary through NuGet. | ||||
```sh | ```sh | ||||
### install tensorflow C# binding | |||||
PM> Install-Package TensorFlow.NET | PM> Install-Package TensorFlow.NET | ||||
### Install tensorflow binary | |||||
### For CPU version | |||||
PM> Install-Package SciSharp.TensorFlow.Redist | PM> Install-Package SciSharp.TensorFlow.Redist | ||||
### For GPU version (CUDA and cuDNN are required) | |||||
PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU | |||||
``` | ``` | ||||
Import TF.NET. | Import TF.NET. | ||||
@@ -1,8 +1,14 @@ | |||||
## SciSharp.TensorFlow.Redist ## | ## SciSharp.TensorFlow.Redist ## | ||||
`SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). [ML.NET](https://github.com/dotnet/machinelearning) team will not maintain the package since [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.4.0 going forward. | |||||
`SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). [ML.NET](https://github.com/dotnet/machinelearning) team will not maintain the package since [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.3.0 going forward. | |||||
* CPU version for all platforms (Windows, Linux, OSX) | |||||
```powershell | |||||
PM> Install-Package SciSharp.TensorFlow.Redist | |||||
``` | |||||
* GPU version for Windows | |||||
```powershell | ```powershell | ||||
PM> Install-Package SciSharp.TensorFlow.Redist | PM> Install-Package SciSharp.TensorFlow.Redist | ||||
``` | ``` | ||||
@@ -16,7 +22,7 @@ Related merged [commits](https://github.com/SciSharp/TensorFlow.NET/commit/854a5 | |||||
On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries. | On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries. | ||||
1. Run `dotnet pack` under `src/SciSharp.TensorFlow.Redist` directory in Linux. | 1. Run `dotnet pack` under `src/SciSharp.TensorFlow.Redist` directory in Linux. | ||||
2. Run `nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json` | |||||
2. Run `dotnet nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json` | |||||
@@ -9,7 +9,7 @@ | |||||
<license type="file">LICENSE.txt</license> | <license type="file">LICENSE.txt</license> | ||||
<licenseUrl>https://aka.ms/deprecateLicenseUrl</licenseUrl> | <licenseUrl>https://aka.ms/deprecateLicenseUrl</licenseUrl> | ||||
<projectUrl>https://www.tensorflow.org/</projectUrl> | <projectUrl>https://www.tensorflow.org/</projectUrl> | ||||
<description>$packageId$ contains the TensorFlow C library version $version$ redistributed as a NuGet package.</description> | |||||
<description>$packageId$ contains the TensorFlow C library CPU version $version$ redistributed as a NuGet package.</description> | |||||
<releaseNotes>https://github.com/tensorflow/tensorflow/releases/tag/v$version$</releaseNotes> | <releaseNotes>https://github.com/tensorflow/tensorflow/releases/tag/v$version$</releaseNotes> | ||||
<copyright>Copyright 2019 The TensorFlow Authors. All rights reserved.</copyright> | <copyright>Copyright 2019 The TensorFlow Authors. All rights reserved.</copyright> | ||||
<tags>TensorFlow</tags> | <tags>TensorFlow</tags> |
@@ -0,0 +1,26 @@ | |||||
<?xml version="1.0" encoding="utf-8"?>
<!-- Nuspec manifest for the SciSharp.TensorFlow.Redist Windows GPU package.
     It repackages the TensorFlow C library GPU binaries as a NuGet package.
     $packageId$ and $version$ are substituted at pack time via the
     NuspecProperties of the companion .csproj (packageId=...;version=...). -->
<package xmlns="http://schemas.microsoft.com/packaging/2012/06/nuspec.xsd">
  <metadata>
    <id>$packageId$</id>
    <version>$version$</version>
    <authors>The TensorFlow Authors</authors>
    <owners>The TensorFlow Authors</owners>
    <requireLicenseAcceptance>true</requireLicenseAcceptance>
    <!-- License ships as a file inside the package; licenseUrl is the
         NuGet-mandated deprecation placeholder that must accompany it. -->
    <license type="file">LICENSE.txt</license>
    <licenseUrl>https://aka.ms/deprecateLicenseUrl</licenseUrl>
    <projectUrl>https://www.tensorflow.org/</projectUrl>
    <description>$packageId$ contains the TensorFlow C library GPU version $version$ redistributed as a NuGet package.</description>
    <releaseNotes>https://github.com/tensorflow/tensorflow/releases/tag/v$version$</releaseNotes>
    <copyright>Copyright 2019 The TensorFlow Authors. All rights reserved.</copyright>
    <tags>TensorFlow</tags>
    <dependencies>
      <!-- Empty group: declares .NET Standard 2.0 compatibility with no
           managed dependencies (package carries native binaries only). -->
      <group targetFramework=".NETStandard2.0" />
    </dependencies>
  </metadata>
  <files>
    <!-- Props file is renamed to match the package id so NuGet auto-imports
         it into consuming projects; native binaries go under runtimes\. -->
    <file src="CommonPackage.props" target="build\netstandard2.0\$packageId$.props" />
    <file src="bin\packages\$packageId$\LICENSE.txt" target="LICENSE.txt" />
    <file src="bin\packages\$packageId$\THIRD_PARTY_NOTICES.txt" target="THIRD_PARTY_NOTICES.txt" />
    <file src="bin\packages\$packageId$\runtimes\**\*" target="runtimes" />
  </files>
</package>
@@ -17,7 +17,7 @@ | |||||
<NoBuild>true</NoBuild> | <NoBuild>true</NoBuild> | ||||
<IncludeBuildOutput>false</IncludeBuildOutput> | <IncludeBuildOutput>false</IncludeBuildOutput> | ||||
<NuspecFile>Redist.nuspec</NuspecFile> | |||||
<NuspecFile>Redist-CPU.nuspec</NuspecFile> | |||||
<NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties> | <NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties> | ||||
<NuspecBasePath>$(ProjDir)</NuspecBasePath> | <NuspecBasePath>$(ProjDir)</NuspecBasePath> | ||||
@@ -0,0 +1,187 @@ | |||||
<Project Sdk="Microsoft.NET.Sdk">
  <!-- Packaging project for the TensorFlow GPU redistributable NuGet package.
       It downloads the official libtensorflow GPU archives, validates their
       SHA512 checksums against checked-in .sha files, extracts the native
       binaries and packs them via Redist-Windows-GPU.nuspec. Run `dotnet pack`
       to produce the package. -->
  <PropertyGroup>
    <ProjDir>$(MSBuildThisFileDirectory)</ProjDir>
    <BinDir>$(ProjDir)bin\</BinDir>
    <ObjDir>$(ProjDir)obj\</ObjDir>
    <TargetArchitecture Condition="'$(TargetArchitecture)' == ''">x64</TargetArchitecture>
    <TargetFramework>netstandard2.0</TargetFramework>
    <TensorFlowVersion>1.14.0</TensorFlowVersion>
    <TensorFlowMajorVersion>1</TensorFlowMajorVersion>
    <PackageAssetsPath>$(BinDir)packages\</PackageAssetsPath>
    <PackageId>$(MSBuildProjectName)</PackageId>
    <PackageVersion>$(TensorFlowVersion)</PackageVersion>
    <!-- Nothing to compile: this project only downloads and repacks binaries. -->
    <NoBuild>true</NoBuild>
    <IncludeBuildOutput>false</IncludeBuildOutput>
    <NuspecFile>Redist-Windows-GPU.nuspec</NuspecFile>
    <NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties>
    <NuspecBasePath>$(ProjDir)</NuspecBasePath>
    <GenerateNuspecDependsOn>CopyFilesFromArchive</GenerateNuspecDependsOn>
    <!-- Runtime identifier of the build machine (used when copying binaries
         for local test consumption; see commented-out item below). -->
    <PackageRid Condition="'$(OS)' == 'Windows_NT'">win</PackageRid>
    <PackageRid Condition="'$(OS)' != 'Windows_NT'">linux</PackageRid>
    <PackageRid Condition="$([MSBuild]::IsOSPlatform('osx'))">osx</PackageRid>
    <PackageRid>$(PackageRid)-$(TargetArchitecture)</PackageRid>
  </PropertyGroup>
  <PropertyGroup>
    <IncludeMLNetNotices>false</IncludeMLNetNotices>
  </PropertyGroup>
  <ItemGroup>
    <!-- One config per OS archive. On Windows, tar cannot extract archives
         with symlinks, so only the Windows config is active there; the Linux
         and macOS configs require a non-Windows host. -->
    <TensorFlowConfig Include="windows"
                      FileExtension=".zip"
                      FilesFromArchive="lib\tensorflow.dll;
                                        include\tensorflow\c\LICENSE"
                      Runtime="win-x64"/>
    <TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'"
                      Include="linux"
                      FileExtension=".tar.gz"
                      FilesFromArchive="lib\libtensorflow.so;
                                        lib\libtensorflow_framework.so.$(TensorFlowMajorVersion);
                                        include\tensorflow\c\LICENSE"
                      Runtime="linux-x64" />
    <TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'"
                      Include="darwin" FileExtension=".tar.gz"
                      FilesFromArchive="lib\libtensorflow.dylib;
                                        lib\libtensorflow_framework.$(TensorFlowMajorVersion).dylib;
                                        include\tensorflow\c\LICENSE"
                      Runtime="osx-x64" />
    <AdditionalDownloadFile Include="https://raw.githubusercontent.com/tensorflow/tensorflow/master/LICENSE"
                            DownloadFile="$(BinDir)LICENSE" />
  </ItemGroup>
  <Target Name="PrepareArchives">
    <ItemGroup>
      <!-- although we could extract all archives on all machines, mac requires a fixup which can only be run on mac
           so we split these per-rid and join during the official build packaging. -->
      <TensorFlowArchive
        Include="@(TensorFlowConfig->'https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-%(Identity)-x86_64-$(TensorFlowVersion)%(FileExtension)')" />
      <!-- set up metadata used by all targets -->
      <TensorFlowArchive DownloadFile="$(BinDir)%(FileName)%(Extension)"
                         DownloadShaFile="$(BinDir)%(FileName)%(Extension).sha"
                         ExtractDirectory="$(BinDir)%(FileName)"
                         ExtractSemaphore="$(BinDir)%(FileName)\.extracted"
                         LocalShaFile="$(MSBuildProjectDirectory)\%(FileName)%(Extension).sha"/>
    </ItemGroup>
    <Message Importance="High" Text="%(TensorFlowConfig.Runtime)"/>
  </Target>
  <Target Name="DownloadArchives"
          DependsOnTargets="PrepareArchives"
          Inputs="$(MSBuildProjectFile)"
          Outputs="@(TensorFlowArchive->'%(DownloadFile)');@(AdditionalDownloadFile->'%(DownloadFile)')">
    <MakeDir Directories="$(BinDir)" />
    <ItemGroup>
      <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" />
    </ItemGroup>
    <Message Importance="High" Text="Downloading '%(_downloadFiles.Identity)' to '$(BinDir)'." />
    <DownloadFile SourceUrl="%(_downloadFiles.Identity)" DestinationFolder="$(BinDir)">
      <Output TaskParameter="DownloadedFile" ItemName="Content" />
    </DownloadFile>
  </Target>
  <Target Name="ValidateAndExtractArchives"
          DependsOnTargets="DownloadArchives"
          Inputs="@(TensorFlowArchive->'%(DownloadFile)')"
          Outputs="@(TensorFlowArchive->'%(ExtractSemaphore)')">
    <GetFileHash Files="@(TensorFlowArchive->'%(DownloadFile)')" Algorithm="SHA512">
      <Output
          TaskParameter="Items"
          ItemName="FilesWithHashes" />
    </GetFileHash>
    <WriteLinesToFile File="%(FilesWithHashes.Identity).sha" Lines="%(FilesWithHashes.FileHash)" Overwrite="true"/>
    <!-- If specified we'll update the checked in SHAs with the downloaded ones. -->
    <Copy Condition="'$(UpdateSHA)' == 'true'"
          SourceFiles="@(TensorFlowArchive->'%(DownloadShaFile)')"
          DestinationFiles="@(TensorFlowArchive->'%(LocalShaFile)')" />
    <ItemGroup>
      <TensorFlowArchive>
        <DownloadSha>@(FilesWithHashes->'%(FileHash)')</DownloadSha>
        <!-- Strip CR/LF so whitespace differences never fail the comparison. -->
        <LocalSha>$([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", ""))</LocalSha>
      </TensorFlowArchive>
    </ItemGroup>
    <Error Condition="!Exists('%(TensorFlowArchive.LocalShaFile)')" Text="SHA file '%(TensorFlowArchive.LocalShaFile)' does not exist. Build with /p:UpdateSHA=true to save it." />
    <Message Importance="High" Text="@(TensorFlowArchive->'%(DownloadFile) - %(LocalSha) - %(DownloadSha)')"/>
    <!-- Validate that the downloaded SHAs match the expected checked in SHAs -->
    <Error Condition="'%(TensorFlowArchive.LocalSha)' != '%(TensorFlowArchive.DownloadSha)'" Text="Downloaded file '%(TensorFlowArchive.DownloadFile)' has unexpected SHA.%0A  expected: %(TensorFlowArchive.LocalSha)%0A  --actual: %(TensorFlowArchive.DownloadSha)%0ABuild with /p:UpdateSHA=true if you intentionally changed the URL and wish to update the SHAs, otherwise this could indicate an incomplete download or intercepted URL and should be examined." />
    <!-- The archives are valid, lets extract them, ensuring an empty directory -->
    <RemoveDir Directories="@(TensorFlowArchive->'%(ExtractDirectory)')" />
    <MakeDir Directories="@(TensorFlowArchive->'%(ExtractDirectory)')" />
    <Message Importance="High" Text="Decompressing '%(TensorFlowArchive.DownloadFile)' to '%(TensorFlowArchive.ExtractDirectory)'." />
    <Unzip Condition="'%(TensorFlowArchive.FileExtension)' == '.zip'"
           SourceFiles="%(TensorFlowArchive.DownloadFile)"
           DestinationFolder="%(TensorFlowArchive.ExtractDirectory)" />
    <Exec Condition="'$(OS)' != 'Windows_NT' AND '%(TensorFlowArchive.FileExtension)' == '.tar.gz'"
          WorkingDirectory="$(MSBuildThisFileDirectory)"
          Command="tar -xzm --hard-dereference -f %(TensorFlowArchive.DownloadFile) -C %(TensorFlowArchive.ExtractDirectory)" />
    <Exec Condition="'$(OS)' != 'Windows_NT'"
          Command="chmod -R +w %(TensorFlowArchive.ExtractDirectory)" />
    <Touch Files="@(TensorFlowArchive->'%(ExtractSemaphore)')" AlwaysCreate="true" />
  </Target>
  <!-- Select the files we want to copy out of each archive. -->
  <Target Name="GetFilesFromArchive"
          DependsOnTargets="ValidateAndExtractArchives" >
    <ItemGroup>
      <!-- batch rather than transform so that we can split FilesFromArchive metadata -->
      <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" />
      <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/>
      <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" />
      <!-- LICENSE from the package is actually THIRD_PARTY_NOTICES-->
      <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" />
      <!-- copy to packaging location -->
      <FilesFromArchive Include="@(_fileFromArchive->'%(ExtractDirectory)\%(Identity)')"
                        TargetPath="$(PackageAssetsPath)$(MSBuildProjectName)\%(PackagePath)" />
      <!-- include LICENSE that was downloaded from GitHub -->
      <FilesFromArchive Include="$(BinDir)\LICENSE"
                        TargetPath="$(PackageAssetsPath)$(MSBuildProjectName)\LICENSE.txt" />
      <!-- copy to NativeAssets location, only for current RID, so that they may be used by tests -->
      <!--<FilesFromArchive Condition="'$(PackageRID)' == '%(_fileFromArchive.Runtime)'"
                        Include="@(_fileFromArchive->'%(ExtractDirectory)\%(Identity)')"
                        TargetPath="$(NativeAssetsBuiltPath)\%(_fileFromArchive.DestinationFile)" />-->
    </ItemGroup>
  </Target>
  <Target Name="CopyFilesFromArchive"
          DependsOnTargets="GetFilesFromArchive">
    <Message Importance="High" Text="@(FilesFromArchive) -> %(FilesFromArchive.TargetPath)" />
    <Copy SourceFiles="@(FilesFromArchive)"
          DestinationFiles="@(FilesFromArchive->'%(TargetPath)')" />
  </Target>
  <!-- Manual cleanup of downloaded/extracted state (archives are cached in bin\). -->
  <Target Name="Clean">
    <Message Importance="High" Text="Deleting $(BinDir);$(ObjDir)" />
    <RemoveDir Directories="$(BinDir);$(ObjDir)" />
  </Target>
</Project>
@@ -0,0 +1 @@ | |||||
850A27858FA951DF77A78CD1BD78B54F6EE2532DD5A49F0579A7B02C795C62F0212F20177EAEA2BD77BD451A57FBBD1348362492F9E14BFE5CA5028C71711293 |
@@ -27,5 +27,54 @@ namespace Tensorflow.Hub | |||||
labels.astype(dataType); | labels.astype(dataType); | ||||
Labels = labels; | Labels = labels; | ||||
} | } | ||||
public (NDArray, NDArray) GetNextBatch(int batch_size, bool fake_data = false, bool shuffle = true) | |||||
{ | |||||
var start = IndexInEpoch; | |||||
// Shuffle for the first epoch | |||||
if(EpochsCompleted == 0 && start == 0 && shuffle) | |||||
{ | |||||
var perm0 = np.arange(NumOfExamples); | |||||
np.random.shuffle(perm0); | |||||
Data = Data[perm0]; | |||||
Labels = Labels[perm0]; | |||||
} | |||||
// Go to the next epoch | |||||
if (start + batch_size > NumOfExamples) | |||||
{ | |||||
// Finished epoch | |||||
EpochsCompleted += 1; | |||||
// Get the rest examples in this epoch | |||||
var rest_num_examples = NumOfExamples - start; | |||||
//var images_rest_part = _images[np.arange(start, _num_examples)]; | |||||
//var labels_rest_part = _labels[np.arange(start, _num_examples)]; | |||||
// Shuffle the data | |||||
if (shuffle) | |||||
{ | |||||
var perm = np.arange(NumOfExamples); | |||||
np.random.shuffle(perm); | |||||
Data = Data[perm]; | |||||
Labels = Labels[perm]; | |||||
} | |||||
start = 0; | |||||
IndexInEpoch = batch_size - rest_num_examples; | |||||
var end = IndexInEpoch; | |||||
var images_new_part = Data[np.arange(start, end)]; | |||||
var labels_new_part = Labels[np.arange(start, end)]; | |||||
/*return (np.concatenate(new float[][] { images_rest_part.Data<float>(), images_new_part.Data<float>() }, axis: 0), | |||||
np.concatenate(new float[][] { labels_rest_part.Data<float>(), labels_new_part.Data<float>() }, axis: 0));*/ | |||||
return (images_new_part, labels_new_part); | |||||
} | |||||
else | |||||
{ | |||||
IndexInEpoch += batch_size; | |||||
var end = IndexInEpoch; | |||||
return (Data[np.arange(start, end)], Labels[np.arange(start, end)]); | |||||
} | |||||
} | |||||
} | } | ||||
} | } |
@@ -15,14 +15,27 @@ namespace Tensorflow.Hub | |||||
private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz"; | private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz"; | ||||
private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz"; | private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz"; | ||||
public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false) | |||||
public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false, int? trainSize = null, int? validationSize = null, int? testSize = null) | |||||
{ | { | ||||
var loader = new MnistModelLoader(); | var loader = new MnistModelLoader(); | ||||
return await loader.LoadAsync(new ModelLoadSetting | |||||
var setting = new ModelLoadSetting | |||||
{ | { | ||||
TrainDir = trainDir, | TrainDir = trainDir, | ||||
OneHot = oneHot | |||||
}); | |||||
OneHot = oneHot, | |||||
TrainSize = trainSize | |||||
}; | |||||
if (trainSize.HasValue) | |||||
setting.TrainSize = trainSize.Value; | |||||
if (validationSize.HasValue) | |||||
setting.ValidationSize = validationSize.Value; | |||||
if (testSize.HasValue) | |||||
setting.TestSize = testSize.Value; | |||||
return await loader.LoadAsync(setting); | |||||
} | } | ||||
public async Task<Datasets<MnistDataSet>> LoadAsync(ModelLoadSetting setting) | public async Task<Datasets<MnistDataSet>> LoadAsync(ModelLoadSetting setting) | ||||
@@ -15,6 +15,7 @@ | |||||
******************************************************************************/ | ******************************************************************************/ | ||||
using System; | using System; | ||||
using System.Linq; | |||||
using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
namespace Tensorflow | namespace Tensorflow | ||||
@@ -63,17 +64,37 @@ namespace Tensorflow | |||||
public static Tensor operator *(long constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | public static Tensor operator *(long constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | ||||
public static Tensor operator *(ulong constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | public static Tensor operator *(ulong constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | ||||
public static Tensor operator /(Tensor x, Tensor y) => BinaryOpWrapper("truediv", x, y); | |||||
public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y); | |||||
private static readonly TF_DataType[] _intTfDataTypes = { | |||||
TF_DataType.TF_INT8, TF_DataType.TF_INT16, TF_DataType.TF_INT32, TF_DataType.TF_INT64, | |||||
TF_DataType.TF_QINT8, TF_DataType.TF_QINT16, TF_DataType.TF_QINT32, | |||||
TF_DataType.TF_UINT8, TF_DataType.TF_UINT16, TF_DataType.TF_UINT32, TF_DataType.TF_UINT64 | |||||
}; | |||||
public static Tensor operator /(double x, Tensor y) => BinaryOpWrapper("truediv", x, y); | |||||
public static Tensor operator /(float x, Tensor y) => BinaryOpWrapper("truediv", x, y); | public static Tensor operator /(float x, Tensor y) => BinaryOpWrapper("truediv", x, y); | ||||
public static Tensor operator /(int x, Tensor y) => BinaryOpWrapper("floordiv", x, y); | |||||
public static Tensor operator /(Tensor x, Tensor y) => | |||||
_intTfDataTypes.Contains(x._dtype) | |||||
? BinaryOpWrapper("floordiv", x, y) | |||||
: BinaryOpWrapper("truediv", x, y); | |||||
public static Tensor operator /(Tensor x, int y) => BinaryOpWrapper("floordiv", x, y); | |||||
public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y); | |||||
public static Tensor operator /(Tensor x, double y) => BinaryOpWrapper("truediv", x, y); | public static Tensor operator /(Tensor x, double y) => BinaryOpWrapper("truediv", x, y); | ||||
public static Tensor operator %(Tensor x, Tensor y) => BinaryOpWrapper("mod", x, y); | public static Tensor operator %(Tensor x, Tensor y) => BinaryOpWrapper("mod", x, y); | ||||
public static Tensor operator >(double x, Tensor y) => gen_math_ops.greater(x, y); | |||||
public static Tensor operator >(float x, Tensor y) => gen_math_ops.greater(x, y); | |||||
public static Tensor operator >(int x, Tensor y) => gen_math_ops.greater(x, y); | |||||
public static Tensor operator >(Tensor x, Tensor y) => gen_math_ops.greater(x, y); | |||||
public static Tensor operator >(Tensor x, int y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, int y) => gen_math_ops.greater(x, y); | ||||
public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y); | public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y); | ||||
public static Tensor operator >(Tensor x, float y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, float y) => gen_math_ops.greater(x, y); | ||||
public static Tensor operator >(Tensor x, double y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, double y) => gen_math_ops.greater(x, y); | ||||
public static Tensor operator <(double x, Tensor y) => gen_math_ops.less(x, y); | |||||
public static Tensor operator <(float x, Tensor y) => gen_math_ops.less(x, y); | |||||
public static Tensor operator <(int x, Tensor y) => gen_math_ops.less(x, y); | |||||
public static Tensor operator <(Tensor x, Tensor y) => gen_math_ops.less(x, y); | |||||
public static Tensor operator <(Tensor x, int y) => gen_math_ops.less(x, y); | public static Tensor operator <(Tensor x, int y) => gen_math_ops.less(x, y); | ||||
public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y); | public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y); | ||||
public static Tensor operator <(Tensor x, float y) => gen_math_ops.less(x, y); | public static Tensor operator <(Tensor x, float y) => gen_math_ops.less(x, y); | ||||
@@ -99,6 +120,9 @@ namespace Tensorflow | |||||
case "add": | case "add": | ||||
result = gen_math_ops.add(x1, y1, name: scope); | result = gen_math_ops.add(x1, y1, name: scope); | ||||
break; | break; | ||||
case "floordiv": | |||||
result = gen_math_ops.floor_div(x1, y1, name: scope); | |||||
break; | |||||
case "truediv": | case "truediv": | ||||
result = gen_math_ops.real_div(x1, y1, name: scope); | result = gen_math_ops.real_div(x1, y1, name: scope); | ||||
break; | break; | ||||
@@ -16,6 +16,8 @@ Here are some pre-built TensorFlow binaries you can use for each platform: | |||||
- CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip | - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip | ||||
- GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip | - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip | ||||
### Run in Linux | ### Run in Linux | ||||
`Install-Package TensorFlow.NET` | `Install-Package TensorFlow.NET` | ||||
@@ -31,10 +33,21 @@ sudo apt install libgdiplus | |||||
More information about [System.Drawing on Linux](<https://www.hanselman.com/blog/HowDoYouUseSystemDrawingInNETCore.aspx>). | More information about [System.Drawing on Linux](<https://www.hanselman.com/blog/HowDoYouUseSystemDrawingInNETCore.aspx>). | ||||
### Run in Mac OS | ### Run in Mac OS | ||||
### GPU Tensorflow for windows | |||||
Before running verify you installed CUDA and cuDNN | |||||
### Tensorflow GPU for Windows | |||||
Before running, verify that you have installed CUDA and cuDNN (TensorFlow v1.14 is compatible with CUDA v10.0 and cuDNN v7.4), and make sure the installed CUDA version matches the one required by your TensorFlow version.
```powershell | |||||
PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU | |||||
``` | |||||
### Build from source for Windows | ### Build from source for Windows | ||||
@@ -16,6 +16,7 @@ | |||||
using NumSharp; | using NumSharp; | ||||
using System; | using System; | ||||
using System.Diagnostics; | |||||
using Tensorflow; | using Tensorflow; | ||||
using TensorFlowNET.Examples.Utility; | using TensorFlowNET.Examples.Utility; | ||||
using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
@@ -144,6 +145,8 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
float loss_val = 100.0f; | float loss_val = 100.0f; | ||||
float accuracy_val = 0f; | float accuracy_val = 0f; | ||||
var sw = new Stopwatch(); | |||||
sw.Start(); | |||||
foreach (var epoch in range(epochs)) | foreach (var epoch in range(epochs)) | ||||
{ | { | ||||
print($"Training epoch: {epoch + 1}"); | print($"Training epoch: {epoch + 1}"); | ||||
@@ -165,7 +168,8 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | ||||
loss_val = result[0]; | loss_val = result[0]; | ||||
accuracy_val = result[1]; | accuracy_val = result[1]; | ||||
print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}"); | |||||
print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")} {sw.ElapsedMilliseconds}ms"); | |||||
sw.Restart(); | |||||
} | } | ||||
} | } | ||||
@@ -467,5 +467,679 @@ namespace TensorFlowNET.UnitTest | |||||
} | } | ||||
#endregion | #endregion | ||||
} | } | ||||
private IEnumerable<int> MultiplyArray(IReadOnlyCollection<int> first, IReadOnlyCollection<int> second) | |||||
{ | |||||
if(first.Count != second.Count) | |||||
throw new ArgumentException("Arrays should be of equal size!"); | |||||
var firstEnumerator = first.GetEnumerator(); | |||||
var secondEnumerator = second.GetEnumerator(); | |||||
var result = new List<int>(); | |||||
while (firstEnumerator.MoveNext()) | |||||
{ | |||||
secondEnumerator.MoveNext(); | |||||
result.Add(firstEnumerator.Current * secondEnumerator.Current); | |||||
} | |||||
firstEnumerator.Dispose(); | |||||
secondEnumerator.Dispose(); | |||||
return result; | |||||
} | |||||
private IEnumerable<float> MultiplyArray(IReadOnlyCollection<float> first, IReadOnlyCollection<float> second) | |||||
{ | |||||
if(first.Count != second.Count) | |||||
throw new ArgumentException("Arrays should be of equal size!"); | |||||
var firstEnumerator = first.GetEnumerator(); | |||||
var secondEnumerator = second.GetEnumerator(); | |||||
var result = new List<float>(); | |||||
while (firstEnumerator.MoveNext()) | |||||
{ | |||||
secondEnumerator.MoveNext(); | |||||
result.Add(firstEnumerator.Current * secondEnumerator.Current); | |||||
} | |||||
firstEnumerator.Dispose(); | |||||
secondEnumerator.Dispose(); | |||||
return result; | |||||
} | |||||
private IEnumerable<double> MultiplyArray(IReadOnlyCollection<double> first, IReadOnlyCollection<double> second) | |||||
{ | |||||
if(first.Count != second.Count) | |||||
throw new ArgumentException("Arrays should be of equal size!"); | |||||
var firstEnumerator = first.GetEnumerator(); | |||||
var secondEnumerator = second.GetEnumerator(); | |||||
var result = new List<double>(); | |||||
while (firstEnumerator.MoveNext()) | |||||
{ | |||||
secondEnumerator.MoveNext(); | |||||
result.Add(firstEnumerator.Current * secondEnumerator.Current); | |||||
} | |||||
firstEnumerator.Dispose(); | |||||
secondEnumerator.Dispose(); | |||||
return result; | |||||
} | |||||
[TestMethod] | |||||
public void mulOpTests() | |||||
{ | |||||
const int rows = 2; // to avoid broadcasting effect | |||||
const int cols = 10; | |||||
#region intTest | |||||
const int firstIntVal = 2; | |||||
const int secondIntVal = 3; | |||||
var firstIntFeed = Enumerable.Repeat(firstIntVal, rows * cols).ToArray(); | |||||
var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray(); | |||||
var intResult = MultiplyArray(firstIntFeed, secondIntFeed).Sum(); | |||||
var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); | |||||
var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); | |||||
var c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator *(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a * b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator *(Tensor x, int y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a * secondIntVal, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator *(int x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(firstIntVal * b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
#endregion | |||||
#region floatTest | |||||
const float firstFloatVal = 2.0f; | |||||
const float secondFloatVal = 3.0f; | |||||
var firstFloatFeed = Enumerable.Repeat(firstFloatVal, rows * cols).ToArray(); | |||||
var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray(); | |||||
var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed).Sum(); | |||||
a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||||
b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((float)o, floatResult); | |||||
} | |||||
// Testing `operator *(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a * b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((float)o, floatResult); | |||||
} | |||||
// Testing `operator *(Tensor x, float y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a * secondFloatVal, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((float)o, floatResult); | |||||
} | |||||
// Testing `operator *(float x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(firstFloatVal * b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((float)o, floatResult); | |||||
} | |||||
#endregion | |||||
#region doubleTest | |||||
const double firstDoubleVal = 2.0; | |||||
const double secondDoubleVal = 3.0; | |||||
var firstDoubleFeed = Enumerable.Repeat(firstDoubleVal, rows * cols).ToArray(); | |||||
var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray(); | |||||
var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed).Sum(); | |||||
a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||||
b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((double)o, doubleResult); | |||||
} | |||||
// Testing `operator *(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a * b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((double)o, doubleResult); | |||||
} | |||||
// Testing `operator *(Tensor x, double y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a * secondFloatVal, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((double)o, doubleResult); | |||||
} | |||||
// Testing `operator *(double x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(firstFloatVal * b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((double) o, doubleResult); | |||||
} | |||||
#endregion | |||||
} | |||||
[TestMethod] | |||||
public void divOpTests() | |||||
{ | |||||
const int rows = 2; // to avoid broadcasting effect | |||||
const int cols = 10; | |||||
#region intTest | |||||
const int firstIntVal = 6; | |||||
const int secondIntVal = 3; | |||||
var firstIntFeed = Enumerable.Repeat(firstIntVal, rows * cols).ToArray(); | |||||
var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray(); | |||||
var intResult = (int)(firstIntFeed.Sum() / (float)secondIntVal); | |||||
var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); | |||||
var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); | |||||
var c = tf.reduce_sum(tf.reduce_sum(gen_math_ops.floor_div(a, b), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator /(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a / b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator /(Tensor x, int y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a / secondIntVal, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator /(int x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(firstIntVal / b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
#endregion | |||||
#region floatTest | |||||
const float firstFloatVal = 6.0f; | |||||
const float secondFloatVal = 3.0f; | |||||
var firstFloatFeed = Enumerable.Repeat(firstFloatVal, rows * cols).ToArray(); | |||||
var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray(); | |||||
var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed.Select(x => 1/x).ToArray()).Sum(); | |||||
a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||||
b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.divide(a, b), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((float)o, floatResult); | |||||
} | |||||
// Testing `operator /(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a / b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((float)o, floatResult); | |||||
} | |||||
// Testing `operator /(Tensor x, float y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a / secondFloatVal, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((float)o, floatResult); | |||||
} | |||||
// Testing `operator /(float x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(firstFloatVal / b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((float)o, floatResult); | |||||
} | |||||
#endregion | |||||
#region doubleTest | |||||
const double firstDoubleVal = 6.0; | |||||
const double secondDoubleVal = 3.0; | |||||
var firstDoubleFeed = Enumerable.Repeat(firstDoubleVal, rows * cols).ToArray(); | |||||
var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray(); | |||||
var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed.Select(x => 1/x).ToArray()).Sum(); | |||||
a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||||
b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.divide(a, b), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((double)o, doubleResult); | |||||
} | |||||
// Testing `operator /(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a / b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((double)o, doubleResult); | |||||
} | |||||
// Testing `operator /(Tensor x, double y) | |||||
c = tf.reduce_sum(tf.reduce_sum(a / secondFloatVal, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((double)o, doubleResult); | |||||
} | |||||
// Testing `operator /(double x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(firstFloatVal / b, 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((double)o, doubleResult); | |||||
} | |||||
#endregion | |||||
} | |||||
[TestMethod] | |||||
public void greaterThanOpTests() | |||||
{ | |||||
const int rows = 2; // to avoid broadcasting effect | |||||
const int cols = 10; | |||||
#region intTest | |||||
const int intThreshold = 10; | |||||
var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray(); | |||||
var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray(); | |||||
var intResult = firstIntFeed.Count(elem => elem > intThreshold); | |||||
var intResultTwo = firstIntFeed.Count(elem => elem < intThreshold); | |||||
var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); | |||||
var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); | |||||
var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator >(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator >(Tensor x, int y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > intThreshold, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator >(int x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold > a, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResultTwo); | |||||
} | |||||
#endregion | |||||
#region floatTest | |||||
const float floatThreshold = 10.0f; | |||||
var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray(); | |||||
var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray(); | |||||
var floatResult = firstFloatFeed.Count(elem => elem > floatThreshold); | |||||
var floatResultTwo = firstFloatFeed.Count(elem => elem < floatThreshold); | |||||
a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||||
b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, floatResult); | |||||
} | |||||
// Testing `operator >(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, floatResult); | |||||
} | |||||
// Testing `operator >(Tensor x, float y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > floatThreshold, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, floatResult); | |||||
} | |||||
// Testing `operator >(float x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold > a, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, floatResultTwo); | |||||
} | |||||
#endregion | |||||
#region doubleTest | |||||
const double doubleThreshold = 10.0; | |||||
var firstDoubleFeed = Enumerable.Repeat(0, rows * cols).Select(elem => (double)elem).ToArray(); | |||||
var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray(); | |||||
var doubleResult = firstDoubleFeed.Count(elem => elem > doubleThreshold); | |||||
var doubleResultTwo = firstDoubleFeed.Count(elem => elem < doubleThreshold); | |||||
a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||||
b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, doubleResult); | |||||
} | |||||
// Testing `operator >(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, doubleResult); | |||||
} | |||||
// Testing `operator >(Tensor x, double y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > doubleThreshold, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, doubleResult); | |||||
} | |||||
// Testing `operator >(double x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold > a, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, doubleResultTwo); | |||||
} | |||||
#endregion | |||||
} | |||||
[TestMethod] | |||||
public void lessThanOpTests() | |||||
{ | |||||
const int rows = 2; // to avoid broadcasting effect | |||||
const int cols = 10; | |||||
#region intTest | |||||
const int intThreshold = 10; | |||||
var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray(); | |||||
var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray(); | |||||
var intResult = firstIntFeed.Count(elem => elem < intThreshold); | |||||
var intResultTwo = firstIntFeed.Count(elem => elem > intThreshold); | |||||
var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); | |||||
var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); | |||||
var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator <(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator <(Tensor x, int y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < intThreshold, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResult); | |||||
} | |||||
// Testing `operator <(int x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold < a, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, intResultTwo); | |||||
} | |||||
#endregion | |||||
#region floatTest | |||||
const float floatThreshold = 10.0f; | |||||
var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray(); | |||||
var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray(); | |||||
var floatResult = firstFloatFeed.Count(elem => elem < floatThreshold); | |||||
var floatResultTwo = firstFloatFeed.Count(elem => elem > floatThreshold); | |||||
a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||||
b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, floatResult); | |||||
} | |||||
// Testing `operator <(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, floatResult); | |||||
} | |||||
// Testing `operator <(Tensor x, float y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < floatThreshold, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, floatResult); | |||||
} | |||||
// Testing `operator <(float x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold < a, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, floatResultTwo); | |||||
} | |||||
#endregion | |||||
#region doubleTest | |||||
const double doubleThreshold = 10.0; | |||||
var firstDoubleFeed = Enumerable.Repeat(0, rows * cols).Select(elem => (double)elem).ToArray(); | |||||
var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray(); | |||||
var doubleResult = firstDoubleFeed.Count(elem => elem < doubleThreshold); | |||||
var doubleResultTwo = firstDoubleFeed.Count(elem => elem > doubleThreshold); | |||||
a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||||
b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, doubleResult); | |||||
} | |||||
// Testing `operator <(Tensor x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))), | |||||
new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, doubleResult); | |||||
} | |||||
// Testing `operator <(Tensor x, double y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < doubleThreshold, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, doubleResult); | |||||
} | |||||
// Testing `operator <(double x, Tensor y) | |||||
c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold < a, tf.int32), 1)); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(c, | |||||
new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols)))); | |||||
Assert.AreEqual((int)o, doubleResultTwo); | |||||
} | |||||
#endregion | |||||
} | |||||
} | } | ||||
} | } |