| Author | SHA1 | Message | Date |
|---|---|---|---|
| | 1ded2ed3cb | v0.10.2 (#318) | 6 years ago |

* fixed a bug about unzip
* added a test case for MnistModelLoader
* cleared warnings in the UnitTest project
* Upgrade NumSharp with signature.
* Testing gradients function: (#307)
  - Concat
  - LGamma
  - Tanh
  - Slice
* Adding new ctor for `Session` (#309). Meant to be a shortcut to directly set some basic configuration. E.g.

  ```
  var sessionOptions = new SessionOptions();
  sessionOptions.SetConfig(new ConfigProto
  {
      LogDevicePlacement = true,
      InterOpParallelismThreads = 4,
      GpuOptions = new GPUOptions
      {
          AllowGrowth = true,
          PerProcessGpuMemoryFraction = 0.12
      }
  });
  var session = tf.Session(sessionOptions);
  ```

  Adding `AbsGrad`. To be aligned with TensorFlow, introducing also the `Sign` operation and its gradient.
* fix strided_slice_grad type convention error. #307
* added a static load method for MnistModelLoader
* adjusted the assembly name for hub
* add Prediction function for Transfer Learning
* fixed the path combining issue
* Init TensorFlow.Text project. Put all UnitTest and Example into one project.
* Predict image category in bottleneck file.
* Adding `IsNan` operation (#315). Exposing also the `IsFinite` one. Unit testing both operations.
* change bottleneck file to raw image as input for transfer learning.
* remove unnecessary projects (#316)
* implementing some additional methods (#317)
* got rid of the reference to TF.NET
* updated project file for packaging
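One of the items above, the static load method for `MnistModelLoader`, appears in the Tensorflow.Hub hunks further down as `public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false)`. A minimal usage sketch (not part of the commit) follows; the `Train` member accessed at the end is a hypothetical name used only for illustration, since the diff shows the return type but not the members of `Datasets<MnistDataSet>`.

```csharp
using System;
using System.Threading.Tasks;
using Tensorflow.Hub;

class MnistLoadSketch
{
    static async Task Main()
    {
        // The new static overload builds a ModelLoadSetting { TrainDir, OneHot }
        // internally and forwards to the instance LoadAsync (see the diff below).
        var datasets = await MnistModelLoader.LoadAsync("mnist", oneHot: true);

        // "Train" is assumed here for illustration only; MnistDataSet exposes a
        // Data NDArray in the diff, but the Datasets<T> member names are not shown.
        Console.WriteLine($"Training images: {datasets.Train.Data.shape[0]}");
    }
}
```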
@@ -11,17 +11,13 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Core", "src\T
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Keras.Core", "src\KerasNET.Core\Keras.Core.csproj", "{902E188F-A953-43B4-9991-72BAB1697BC3}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Keras.Example", "test\KerasNET.Example\Keras.Example.csproj", "{17E1AC16-9E0E-4545-905A-E92C6300C7AF}"
-EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Keras.UnitTest", "test\KerasNET.Test\Keras.UnitTest.csproj", "{A5839A45-A117-4BEA-898B-DE1ED6E0D58F}"
-EndProject
Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorFlowNET.Examples.FSharp", "test\TensorFlowNET.Examples.FSharp\TensorFlowNET.Examples.FSharp.fsproj", "{62BC3801-F0D3-44A9-A0AC-712F40C8F961}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowBenchmark", "src\TensorFlowNet.Benchmarks\TensorFlowBenchmark.csproj", "{68861442-971A-4196-876E-C9330F0B3C54}"
EndProject
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}"
EndProject
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowHub.Examples", "test\TensorFlowHub.Examples\TensorFlowHub.Examples.csproj", "{5FF6CB67-DC99-4741-9545-E803DFCE5CDC}"
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -45,14 +41,6 @@ Global
{902E188F-A953-43B4-9991-72BAB1697BC3}.Debug|Any CPU.Build.0 = Debug|Any CPU
{902E188F-A953-43B4-9991-72BAB1697BC3}.Release|Any CPU.ActiveCfg = Release|Any CPU
{902E188F-A953-43B4-9991-72BAB1697BC3}.Release|Any CPU.Build.0 = Release|Any CPU
-{17E1AC16-9E0E-4545-905A-E92C6300C7AF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
-{17E1AC16-9E0E-4545-905A-E92C6300C7AF}.Debug|Any CPU.Build.0 = Debug|Any CPU
-{17E1AC16-9E0E-4545-905A-E92C6300C7AF}.Release|Any CPU.ActiveCfg = Release|Any CPU
-{17E1AC16-9E0E-4545-905A-E92C6300C7AF}.Release|Any CPU.Build.0 = Release|Any CPU
-{A5839A45-A117-4BEA-898B-DE1ED6E0D58F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
-{A5839A45-A117-4BEA-898B-DE1ED6E0D58F}.Debug|Any CPU.Build.0 = Debug|Any CPU
-{A5839A45-A117-4BEA-898B-DE1ED6E0D58F}.Release|Any CPU.ActiveCfg = Release|Any CPU
-{A5839A45-A117-4BEA-898B-DE1ED6E0D58F}.Release|Any CPU.Build.0 = Release|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.Build.0 = Debug|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.ActiveCfg = Release|Any CPU
@@ -65,10 +53,10 @@ Global
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Release|Any CPU.ActiveCfg = Release|Any CPU
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Release|Any CPU.Build.0 = Release|Any CPU
-{5FF6CB67-DC99-4741-9545-E803DFCE5CDC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
-{5FF6CB67-DC99-4741-9545-E803DFCE5CDC}.Debug|Any CPU.Build.0 = Debug|Any CPU
-{5FF6CB67-DC99-4741-9545-E803DFCE5CDC}.Release|Any CPU.ActiveCfg = Release|Any CPU
-{5FF6CB67-DC99-4741-9545-E803DFCE5CDC}.Release|Any CPU.Build.0 = Release|Any CPU
+{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.Build.0 = Debug|Any CPU
+{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.ActiveCfg = Release|Any CPU
+{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -12,7 +12,7 @@ namespace Tensorflow.Hub
public int EpochsCompleted { get; private set; }
public int IndexInEpoch { get; private set; }
-public MnistDataSet(NDArray images, NDArray labels, TF_DataType dtype, bool reshape)
+public MnistDataSet(NDArray images, NDArray labels, Type dataType, bool reshape)
{
EpochsCompleted = 0;
IndexInEpoch = 0;
@@ -20,11 +20,11 @@ namespace Tensorflow.Hub
NumOfExamples = images.shape[0];
images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]);
-images.astype(dtype.as_numpy_datatype());
+images.astype(dataType);
images = np.multiply(images, 1.0f / 255.0f);
Data = images;
-labels.astype(dtype.as_numpy_datatype());
+labels.astype(dataType);
Labels = labels;
}
}
@@ -15,6 +15,16 @@ namespace Tensorflow.Hub
private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz";
private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz";
+public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false)
+{
+var loader = new MnistModelLoader();
+return await loader.LoadAsync(new ModelLoadSetting
+{
+TrainDir = trainDir,
+OneHot = oneHot
+});
+}
public async Task<Datasets<MnistDataSet>> LoadAsync(ModelLoadSetting setting)
{
if (setting.TrainSize.HasValue && setting.ValidationSize >= setting.TrainSize.Value)
@@ -71,7 +81,7 @@ namespace Tensorflow.Hub
trainImages = trainImages[np.arange(validationSize, end)];
trainLabels = trainLabels[np.arange(validationSize, end)];
-var dtype = setting.DtType;
+var dtype = setting.DataType;
var reshape = setting.ReShape;
var train = new MnistDataSet(trainImages, trainLabels, dtype, reshape);
@@ -83,11 +93,14 @@ namespace Tensorflow.Hub
private NDArray ExtractImages(string file, int? limit = null)
{
+if (!Path.IsPathRooted(file))
+file = Path.Combine(AppContext.BaseDirectory, file);
using (var bytestream = new FileStream(file, FileMode.Open))
{
var magic = Read32(bytestream);
if (magic != 2051)
-throw new ValueError($"Invalid magic number {magic} in MNIST image file: {file}");
+throw new Exception($"Invalid magic number {magic} in MNIST image file: {file}");
var num_images = Read32(bytestream);
num_images = limit == null ? num_images : Math.Min(num_images, (uint)limit);
@@ -108,11 +121,14 @@ namespace Tensorflow.Hub
private NDArray ExtractLabels(string file, bool one_hot = false, int num_classes = 10, int? limit = null)
{
+if (!Path.IsPathRooted(file))
+file = Path.Combine(AppContext.BaseDirectory, file);
using (var bytestream = new FileStream(file, FileMode.Open))
{
var magic = Read32(bytestream);
if (magic != 2049)
-throw new ValueError($"Invalid magic number {magic} in MNIST label file: {file}");
+throw new Exception($"Invalid magic number {magic} in MNIST label file: {file}");
var num_items = Read32(bytestream);
num_items = limit == null ? num_items : Math.Min(num_items, (uint)limit);
@@ -9,7 +9,7 @@ namespace Tensorflow.Hub
{
public string TrainDir { get; set; }
public bool OneHot { get; set; }
-public TF_DataType DtType { get; set; } = TF_DataType.TF_FLOAT;
+public Type DataType { get; set; } = typeof(float);
public bool ReShape { get; set; }
public int ValidationSize { get; set; } = 5000;
public int? TrainSize { get; set; }
@@ -1,14 +1,18 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
+<AssemblyName>TensorFlow.Net.Hub</AssemblyName>
<RootNamespace>Tensorflow.Hub</RootNamespace>
<TargetFramework>netstandard2.0</TargetFramework>
+<Version>0.0.1</Version>
+<Authors>SciSharp</Authors>
+<Company>SciSharp STACK</Company>
+<Copyright>Apache 2.0</Copyright>
+<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl>
+<RepositoryType>git</RepositoryType>
+<PackageProjectUrl>http://scisharpstack.org</PackageProjectUrl>
+<PackageTags>TensorFlow, Training model</PackageTags>
+<Description>TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models.</Description>
</PropertyGroup>
<ItemGroup>
-<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
+<PackageReference Include="NumSharp" Version="0.10.6" />
</ItemGroup>
-<ItemGroup>
-<PackageReference Include="NumSharp" Version="0.10.5" />
-<PackageReference Include="sharpcompress" Version="0.23.0" />
-</ItemGroup>
</Project>
@@ -1,14 +1,11 @@
using System;
using System.IO;
+using System.IO.Compression;
using System.Collections.Generic;
using System.Net;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
-using NumSharp;
-using SharpCompress;
-using SharpCompress.Common;
-using SharpCompress.Readers;
namespace Tensorflow.Hub
{
@@ -37,7 +34,7 @@ namespace Tensorflow.Hub
}
}
-public static void Unzip<TDataSet>(this IModelLoader<TDataSet> modelLoader, string zipFile, string saveTo)
+public static async Task UnzipAsync<TDataSet>(this IModelLoader<TDataSet> modelLoader, string zipFile, string saveTo)
where TDataSet : IDataSet
{
if (!Path.IsPathRooted(saveTo))
@@ -45,28 +42,26 @@ namespace Tensorflow.Hub
if (!Directory.Exists(saveTo))
Directory.CreateDirectory(saveTo);
+if (!Path.IsPathRooted(zipFile))
+zipFile = Path.Combine(AppContext.BaseDirectory, zipFile);
+var destFilePath = Path.Combine(saveTo, Path.GetFileNameWithoutExtension(zipFile));
-using (var stream = File.OpenRead(zipFile))
-using (var reader = ReaderFactory.Open(stream))
+if (File.Exists(destFilePath))
+File.Delete(destFilePath);
+using (GZipStream unzipStream = new GZipStream(File.OpenRead(zipFile), CompressionMode.Decompress))
{
-while (reader.MoveToNextEntry())
+using (var destStream = File.Create(destFilePath))
{
-if (!reader.Entry.IsDirectory)
-{
-reader.WriteEntryToDirectory(saveTo, new ExtractionOptions()
-{
-ExtractFullPath = true,
-Overwrite = true
-});
-}
+await unzipStream.CopyToAsync(destStream);
+await destStream.FlushAsync();
+destStream.Close();
}
-}
-}
-public static async Task UnzipAsync<TDataSet>(this IModelLoader<TDataSet> modelLoader, string zipFile, string saveTo)
-where TDataSet : IDataSet
-{
-await Task.Run(() => modelLoader.Unzip(zipFile, saveTo));
+unzipStream.Close();
+}
}
public static async Task ShowProgressInConsole(this Task task)
@@ -24,7 +24,7 @@ namespace Tensorflow
return ops.get_default_graph();
}
-public static Graph Graph() => new Graph();
+public static Graph Graph()
+=> new Graph();
}
}
@@ -57,6 +57,12 @@ namespace Tensorflow
public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
=> gen_math_ops.arg_min(input, dimension, output_type: output_type, name: name);
+public static Tensor is_finite(Tensor input, string name = null)
+=> gen_math_ops.is_finite(input, name);
+public static Tensor is_nan(Tensor input, string name = null)
+=> gen_math_ops.is_nan(input, name);
/// <summary>
/// Returns element-wise smallest integer not less than x.
/// </summary>
@@ -203,6 +209,9 @@ namespace Tensorflow
public static Tensor sqrt(Tensor a, string name = null)
=> gen_math_ops.sqrt(a, name);
+public static Tensor sign(Tensor a, string name = null)
+=> gen_math_ops.sign(a, name);
public static Tensor subtract<T>(Tensor x, T[] y, string name = null) where T : struct
=> gen_math_ops.sub(x, ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y"), name);
@@ -18,8 +18,11 @@ namespace Tensorflow
{
public static partial class tf
{
-public static object get_collection(string key, string scope = "") => get_default_graph()
-.get_collection(key, scope: scope);
+public static Tensor assign(Tensor @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
+=> state_ops.assign(@ref, value, validate_shape, use_locking, name);
+public static object get_collection(string key, string scope = "")
+=> get_default_graph().get_collection(key, scope: scope);
/// <summary>
/// Returns a context manager that creates hierarchical names for operations.
@@ -28,8 +31,7 @@ namespace Tensorflow
/// <param name="default_name">The default name to use if the name argument is None.</param>
/// <param name="values">The list of Tensor arguments that are passed to the op function.</param>
/// <returns>The scope name.</returns>
-public static ops.NameScope name_scope(string name,
-string default_name = "",
-object values = null) => new ops.NameScope(name, default_name, values);
+public static ops.NameScope name_scope(string name, string default_name = "", object values = null)
+=> new ops.NameScope(name, default_name, values);
}
}
@@ -0,0 +1,39 @@
+using System.Linq;
+using NumSharp;
+using Tensorflow.Framework;
+namespace Tensorflow.Contrib.Learn.Estimators
+{
+public static class tensor_signature
+{
+public static bool is_compatible_with(this Tensor self, Tensor other)
+{
+bool _shape_is_compatible_0dim(Shape _this, Shape _other)
+{
+var __other = tensor_shape.as_shape(_other);
+if (_this.Dimensions == null || __other.Dimensions == null)
+return true;
+if (_this.NDim != __other.NDim)
+return false;
+foreach (var (x_dim, y_dim) in _this.Dimensions.Zip(__other.Dimensions, (x_dim, y_dim) => (x_dim, y_dim)))
+{
+if (x_dim != y_dim)
+return false;
+}
+return true;
+}
+if (other.is_sparse())
+{
+return self.dtype.is_compatible_with(other.dtype);
+}
+return self.dtype.is_compatible_with(other.dtype) &&
+_shape_is_compatible_0dim(self.shape, other.shape) &&
+!self.is_sparse();
+}
+}
+}
@@ -1,8 +1,19 @@
namespace Tensorflow.Framework
{
-public static class SparseTensor
+public interface _TensorLike
+{ }
+public class SparseTensor : CompositeTensor, _TensorLike
{
private static Tensor _dense_shape { get; set; }
}
+public static class sparse_tensor
+{
+public static bool is_sparse(this _TensorLike x)
+{
+return x is SparseTensor;
+}
+}
}
@@ -0,0 +1,34 @@
+using System;
+using System.Linq;
+using System.Text;
+using NumSharp;
+using Tensorflow.Contrib.Learn.Estimators;
+namespace Tensorflow.Framework
+{
+public static class tensor_shape
+{
+public static void assert_is_compatible_with(this Tensor self, Tensor other)
+{
+if (!self.is_compatible_with(other))
+{
+var selfDim = self.shape
+.Aggregate(new StringBuilder("{"), (sb, i) => sb.Append(i).Append(", "), sb => sb.ToString())
+.Replace(", }", "}");
+var otherDim = other.shape
+.Aggregate(new StringBuilder("{"), (sb, i) => sb.Append(i).Append(", "), sb => sb.ToString())
+.Replace(", }", "}");
+throw new ArgumentException($"Dimensions {selfDim} and {otherDim} are not compatible");
+}
+}
+public static TensorShape as_shape(this Shape shape)
+{
+if (shape is TensorShape tshape)
+return tshape;
+return new TensorShape(shape);
+}
+}
+}
@@ -220,11 +220,11 @@ namespace Tensorflow.Gradients
end,
strides,
grad,
-begin_mask: (int)op.get_attr("begin_mask"),
-end_mask: (int)op.get_attr("end_mask"),
-ellipsis_mask: (int)op.get_attr("ellipsis_mask"),
-new_axis_mask: (int)op.get_attr("new_axis_mask"),
-shrink_axis_mask: (int)op.get_attr("shrink_axis_mask")),
+begin_mask: int.Parse(op.get_attr("begin_mask").ToString()),
+end_mask: int.Parse(op.get_attr("end_mask").ToString()),
+ellipsis_mask: int.Parse(op.get_attr("ellipsis_mask").ToString()),
+new_axis_mask: int.Parse(op.get_attr("new_axis_mask").ToString()),
+shrink_axis_mask: int.Parse(op.get_attr("shrink_axis_mask").ToString())),
null,
null,
null
@@ -16,6 +16,7 @@
using System;
using System.Linq;
+using Tensorflow.Operations;
using static Tensorflow.Python;
namespace Tensorflow.Gradients
@@ -26,6 +27,15 @@ namespace Tensorflow.Gradients
[RegisterGradient("math_grad")]
public class math_grad
{
+[RegisterGradient("Abs")]
+public static Tensor[] _AbsGrad(Operation op, Tensor[] grads)
+{
+var x = op.inputs[0];
+var grad = grads[0];
+return new Tensor[] { gen_ops.mul(grad, gen_math_ops.sign(x)) };
+}
[RegisterGradient("Add")]
public static Tensor[] _AddGrad(Operation op, Tensor[] grads)
{
@@ -428,6 +438,15 @@ namespace Tensorflow.Gradients
});
}
+[RegisterGradient("Sign")]
+public static Tensor[] _SignGrad(Operation op, Tensor[] grads)
+{
+var x = op.inputs[0];
+var zero = constant_op.constant(0.0f, x.dtype, x.shape);
+return new Tensor[] {zero};
+}
[RegisterGradient("Square")]
public static Tensor[] _SquareGrad(Operation op, Tensor[] grads)
{
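The two gradient registrations added above follow the usual calculus identities, noted here for context (this note is not part of the commit): `_AbsGrad` scales the incoming gradient by the sign of the input, and `_SignGrad` returns a zero tensor because the sign function is constant away from the origin.

```latex
\frac{\partial}{\partial x}\lvert x \rvert = \operatorname{sign}(x)
\quad\Rightarrow\quad
\texttt{\_AbsGrad} = \text{grad}\cdot\operatorname{sign}(x),
\qquad
\frac{\partial}{\partial x}\operatorname{sign}(x) = 0 \ \text{(a.e.)}
\quad\Rightarrow\quad
\texttt{\_SignGrad} = \mathbf{0}
```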
@@ -55,11 +55,11 @@ namespace Tensorflow
return Status;
}
-public static Graph ImportFromPB(string file_path)
+public static Graph ImportFromPB(string file_path, string name = null)
{
var graph = tf.Graph().as_default();
var graph_def = GraphDef.Parser.ParseFrom(File.ReadAllBytes(file_path));
-importer.import_graph_def(graph_def);
+importer.import_graph_def(graph_def, name: name);
return graph;
}
}
@@ -426,7 +426,7 @@ namespace Tensorflow
/// <param name="name">A name for the operation (optional).</param>
/// <returns>A `Tensor`. Has the same type as `dy`.</returns>
public static Tensor strided_slice_grad(Tensor shape, Tensor begin, Tensor end, Tensor strides, Tensor dy,
int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0,
int shrink_axis_mask = 0, string name = null)
{
var op = _op_def_lib._apply_op_helper("StridedSliceGrad", name: name, args: new
@@ -210,6 +210,13 @@ namespace Tensorflow
return op.outputs[0];
}
+public static Tensor sign(Tensor x, string name = "Sign")
+{
+var op = _op_def_lib._apply_op_helper("Sign", name: name, args: new {x});
+return op.outputs[0];
+}
public static Tensor sinh(Tensor x, string name = null)
{
var _op = _op_def_lib._apply_op_helper("Sinh", name, args: new { x });
@@ -369,6 +376,13 @@ namespace Tensorflow
return _op.outputs[0];
}
+public static Tensor is_nan(Tensor x, string name = null)
+{
+var _op = _op_def_lib._apply_op_helper("IsNan", name: name, args: new { x });
+return _op.outputs[0];
+}
/// <summary>
/// Computes exponential of x element-wise. \\(y = e^x\\).
/// </summary>
@@ -216,6 +216,15 @@ namespace Tensorflow
return gen_math_ops.sigmoid(x_tensor, name: name);
}
+public static Tensor sign(Tensor x, string name = null)
+{
+return with(ops.name_scope(name, "Sign", new {x}), scope =>
+{
+x = ops.convert_to_tensor(x, name: "x");
+return gen_math_ops.sign(x);
+});
+}
/// <summary>
/// Returns (x - y)(x - y) element-wise.
/// </summary>
@@ -14,12 +14,15 @@
limitations under the License.
******************************************************************************/
+using System;
+using Tensorflow.Framework;
namespace Tensorflow
{
/// <summary>
/// tensorflow\python\ops\resource_variable_ops.py
/// </summary>
-public class resource_variable_ops
+public static class resource_variable_ops
{
public static ITensorOrOperation shape_safe_assign_variable_handle(Tensor handle, int[] shape, Tensor value, string name = null)
{
@@ -29,9 +32,61 @@ namespace Tensorflow
name: name);
}
+/// <summary>
+///
+/// </summary>
+/// <param name="self"></param>
+/// <param name="value"></param>
+/// <param name="use_locking"></param>
+/// <param name="read_value"></param>
+/// <returns>
+/// If `read_value` is `True`, this method will return the new value of the
+/// variable after the assignment has completed. Otherwise, when in graph mode
+/// it will return the `Operation` that does the assignment, and when in eager
+/// mode it will return `None`.
+/// </returns>
+public static Operation assign(this Tensor self, Tensor value, bool use_locking = false, string name = null, bool read_value = true)
+{
+var value_tensor = ops.convert_to_tensor(value, dtype: self.dtype);
+self.assert_is_compatible_with(value_tensor);
+var assign_op = gen_resource_variable_ops.assign_variable_op(self, value_tensor, name: name);
+if (read_value)
+{
+return self._lazy_read(assign_op);
+}
+return assign_op;
+}
+public static Operation _lazy_read(this Tensor self, Operation op)
+{
+variable_accessed(self);
+throw new NotImplementedException();
+}
+public static void variable_accessed(this Tensor variable)
+{
+throw new NotImplementedException();
+}
public static bool is_resource_variable(VariableV1 var)
{
return var is ResourceVariable;
}
+/// <summary>
+/// Represents a future for a read of a variable.
+/// Pretends to be the tensor if anyone looks.
+/// </summary>
+public class _UnreadVariable : BaseResourceVariable
+{
+}
+/// <summary>
+/// A python variable from an existing handle.
+/// </summary>
+public class BaseResourceVariable : VariableV1
+{
+}
}
}
@@ -5,7 +5,7 @@
<AssemblyName>TensorFlow.NET</AssemblyName>
<RootNamespace>Tensorflow</RootNamespace>
<TargetTensorFlow>1.14.0</TargetTensorFlow>
-<Version>0.10.0</Version>
+<Version>0.10.3</Version>
<Authors>Haiping Chen, Meinrad Recheis</Authors>
<Company>SciSharp STACK</Company>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
@@ -16,9 +16,8 @@
<PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&v=4</PackageIconUrl>
<PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#</PackageTags>
<Description>Google's TensorFlow full binding in .NET Standard.
-Docs: https://tensorflownet.readthedocs.io
-Medium: https://medium.com/scisharp</Description>
-<AssemblyVersion>0.10.0.0</AssemblyVersion>
+Docs: https://tensorflownet.readthedocs.io</Description>
+<AssemblyVersion>0.10.3.0</AssemblyVersion>
<PackageReleaseNotes>Changes since v0.9.0:
1. Added full connected Convolution Neural Network example.
@@ -27,11 +26,14 @@ Medium: https://medium.com/scisharp</Description>
4. Fix path issue of Transfer Learning example on Linux.
5. Add back gen_ops.cs.
6. Add StridedSliceGrad.
-7. Add BatchMatMulGrad.</PackageReleaseNotes>
+7. Add BatchMatMulGrad.
+8. Upgrade NumSharp.
+9. Fix strided_slice_grad type convention error.
+10. Add AbsGrad.</PackageReleaseNotes>
<LangVersion>7.2</LangVersion>
-<FileVersion>0.10.0.0</FileVersion>
+<FileVersion>0.10.3.0</FileVersion>
<PackageLicenseFile>LICENSE</PackageLicenseFile>
-<PackageRequireLicenseAcceptance>false</PackageRequireLicenseAcceptance>
+<PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance>
<SignAssembly>true</SignAssembly>
<AssemblyOriginatorKeyFile>Open.snk</AssemblyOriginatorKeyFile>
</PropertyGroup>
@@ -61,7 +63,7 @@ Medium: https://medium.com/scisharp</Description>
<ItemGroup>
<PackageReference Include="Google.Protobuf" Version="3.9.0" />
-<PackageReference Include="NumSharp" Version="0.10.5" />
+<PackageReference Include="NumSharp" Version="0.10.6" />
</ItemGroup>
<ItemGroup>
@@ -19,6 +19,7 @@ using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
+using Tensorflow.Framework;
using static Tensorflow.Python;
namespace Tensorflow
@@ -27,7 +28,7 @@ namespace Tensorflow
/// A tensor is a generalization of vectors and matrices to potentially higher dimensions.
/// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes.
/// </summary>
-public partial class Tensor : IDisposable, ITensorOrOperation
+public partial class Tensor : IDisposable, ITensorOrOperation, _TensorLike
{
private IntPtr _handle;
@@ -109,6 +110,8 @@ namespace Tensorflow
this.shape = shape.Dimensions;
}
+public int[] dims => shape;
/// <summary>
/// number of dimensions
/// 0 Scalar (magnitude only)
@@ -205,5 +205,10 @@ namespace Tensorflow
{
return (int)type > 100;
}
+public static bool is_compatible_with(this TF_DataType self, TF_DataType other)
+{
+return self.as_datatype_enum() == other.as_datatype_enum();
+}
}
}
@@ -214,6 +214,12 @@ namespace Tensorflow
else
nparray = Convert.ToString(values);
break;
+case "Boolean":
+if (values.GetType().IsArray)
+nparray = np.array((bool[])values, np_dt);
+else
+nparray = Convert.ToBoolean(values);
+break;
default:
throw new NotImplementedException($"make_tensor_proto: Support for type {np_dt.Name} Not Implemented");
}
@@ -40,6 +40,8 @@ namespace Tensorflow
public override string name => _variable.name;
+public Tensor eval() => _variable;
public RefVariable(object initial_value = null,
bool trainable = true,
List<string> collections = null,
@@ -12,102 +12,102 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
using System.Collections.Generic;
using Tensorflow.Eager;
namespace Tensorflow
{
public class gen_state_ops
{
public static OpDefLibrary _op_def_lib = new OpDefLibrary();
public static Execute _execute = new Execute();
/// <summary>
/// Holds state in the form of a tensor that persists across steps.
/// Outputs a ref to the tensor state so it may be read or modified.
/// </summary>
/// <param name="shape">The shape of the variable tensor.</param>
/// <param name="dtype">The type of elements in the variable tensor.</param>
/// <param name="name"></param>
/// <param name="container"></param>
/// <param name="shared_name"></param>
/// <returns></returns>
public static Tensor variable_v2(int[] shape, TF_DataType dtype, string name = null, string container = "", string shared_name = "")
{
var _op = _op_def_lib._apply_op_helper("VariableV2", name: name, args: new { dtype, shape, container, shared_name });
var _result = _op.outputs;
var _inputs_flat = _op.inputs;
var _attrs = new Dictionary<string, object>();
_attrs["dtype"] = _op.get_attr("dtype");
_attrs["shape"] = _op.get_attr("shape");
_attrs["container"] = _op.get_attr("container");
_attrs["shared_name"] = _op.get_attr("shared_name");
_execute.record_gradient("VariableV2", _inputs_flat, _attrs, _result, name);
return _result[0];
}
/// <summary>
/// Update 'ref' by assigning 'value' to it
/// </summary>
/// <param name="REF"></param>
/// <param name="value"></param>
/// <param name="validate_shape"></param>
/// <param name="use_locking"></param>
/// <param name="name"></param>
public static Tensor assign(Tensor @ref, object value,
bool validate_shape = true,
bool use_locking = true,
string name = null)
{
var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking });
var _result = _op.outputs;
var _inputs_flat = _op.inputs;
var _attrs = new Dictionary<string, object>();
_attrs["T"] = _op.get_attr("T");
_attrs["validate_shape"] = _op.get_attr("validate_shape");
_attrs["use_locking"] = _op.get_attr("use_locking");
_execute.record_gradient("Assign", _inputs_flat, _attrs, _result, name);
return _result[0];
}
public static Tensor assign(RefVariable @ref, object value,
bool validate_shape = true,
bool use_locking = true,
string name = null)
{
var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking });
var _result = _op.outputs;
var _inputs_flat = _op.inputs;
var _attrs = new Dictionary<string, object>();
_attrs["T"] = _op.get_attr("T");
_attrs["validate_shape"] = _op.get_attr("validate_shape");
_attrs["use_locking"] = _op.get_attr("use_locking");
_execute.record_gradient("Assign", _inputs_flat, _attrs, _result, name);
return _result[0];
}
public static Tensor assign_sub(RefVariable @ref,
Tensor value,
bool use_locking = false,
string name = null)
{
var _op = _op_def_lib._apply_op_helper("AssignSub", name: name, args: new { @ref, value, use_locking });
return _op.outputs[0];
}
@@ -125,10 +125,10 @@ namespace Tensorflow
// name: A name for the operation(optional).
// Returns:
// A mutable `Tensor`. Has the same type as `ref`.
public static Tensor assign_add(RefVariable @ref, Tensor value, bool use_locking = false, string name = null)
{
var _op = _op_def_lib._apply_op_helper("AssignAdd", name: name, args: new { @ref, value, use_locking });
return _op.outputs[0];
}
/// <summary>
@@ -142,8 +142,8 @@ namespace Tensorflow
/// <returns></returns>
public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null)
{
var _op = _op_def_lib._apply_op_helper("ScatterAdd", name: name, args: new { @ref, indices, updates, use_locking });
return _op.outputs[0];
}
}
}
@@ -12,99 +12,99 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
using System;
namespace Tensorflow
{
public class state_ops
{
/// <summary>
/// Create a variable Operation.
/// </summary>
/// <param name="shape"></param>
/// <param name="dtype"></param>
/// <param name="name"></param>
/// <param name="container"></param>
/// <param name="shared_name"></param>
/// <returns></returns>
public static Tensor variable_op_v2(int[] shape,
TF_DataType dtype,
string name = "Variable",
string container = "",
string shared_name = "") => gen_state_ops.variable_v2(shape,
dtype,
name: name,
container: container,
shared_name: shared_name);
public static Tensor assign(Tensor @ref, object value,
bool validate_shape = true,
bool use_locking = true,
string name = null)
{
if (@ref.dtype.is_ref_dtype())
return gen_state_ops.assign(@ref,
value,
validate_shape: validate_shape,
use_locking: use_locking,
name: name);
-throw new NotImplementedException("state_ops.assign");
-//return @ref.assign(value, name: name);
+return @ref.assign((Tensor)value, name: name);
}
public static Tensor assign(RefVariable @ref, object value,
bool validate_shape = true,
bool use_locking = true,
string name = null)
{
return gen_state_ops.assign(@ref,
value,
validate_shape: validate_shape,
use_locking: use_locking,
name: name);
}
public static Tensor assign_sub(RefVariable @ref,
Tensor value,
bool use_locking = false,
string name = null) => gen_state_ops.assign_sub(@ref,
value,
use_locking: use_locking,
name: name);
//"""Update 'ref' by adding 'value' to it.
//
// This operation outputs "ref" after the update is done.
// This makes it easier to chain operations that need to use the reset value.
//
// Args:
// ref: A mutable `Tensor`. Must be one of the following types:
// `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
// `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
// Should be from a `Variable` node.
// value: A `Tensor`. Must have the same type as `ref`.
// The value to be added to the variable.
// use_locking: An optional `bool`. Defaults to `False`.
// If True, the addition will be protected by a lock;
// otherwise the behavior is undefined, but may exhibit less contention.
// name: A name for the operation (optional).
//
// Returns:
// Same as "ref". Returned as a convenience for operations that want
// to use the new value after the variable has been updated.
public static Tensor assign_add(RefVariable @ref,
Tensor value,
bool use_locking = false,
string name = null) => gen_state_ops.assign_add(@ref, value, use_locking: use_locking, name: name);
public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null)
{
if (@ref.dtype.is_ref_dtype())
return gen_state_ops.scatter_add(@ref, indices, updates, use_locking: use_locking, name: name);
throw new NotImplementedException("scatter_add");
}
}
}
@@ -46,7 +46,7 @@ namespace Tensorflow
trainable: trainable,
validate_shape: validate_shape,
name: name,
-dtype: TF_DataType.DtInvalid);
+dtype: dtype);
}
public static unsafe Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null)
@@ -72,5 +72,10 @@ namespace Tensorflow
{
return new Session(graph);
}
+public static Session Session(SessionOptions opts)
+{
+return new Session(null, opts);
+}
}
}
@@ -0,0 +1,20 @@
+<Project Sdk="Microsoft.NET.Sdk">
+<PropertyGroup>
+<OutputType>Exe</OutputType>
+<TargetFramework>netcoreapp2.2</TargetFramework>
+</PropertyGroup>
+<ItemGroup>
+<None Remove="tensorflow.dll" />
+</ItemGroup>
+<ItemGroup>
+<PackageReference Include="BenchmarkDotNet" Version="0.11.5" />
+</ItemGroup>
+<ItemGroup>
+<ProjectReference Include="..\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
+</ItemGroup>
+</Project>
@@ -0,0 +1,9 @@ | |||||
# TensorFlow Text - Text processing in Tensorflow | |||||
TensorFlow Text provides a collection of text related classes and ops ready to use with TensorFlow 2.0. The library can perform the preprocessing regularly required by text-based models, and includes other features useful for sequence modeling not provided by core TensorFlow. | |||||
The benefit of using these ops in your text preprocessing is that they are done in the TensorFlow graph. You do not need to worry about tokenization in training being different than the tokenization at inference, or managing preprocessing scripts. | |||||
https://github.com/tensorflow/text |
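A rough sketch of where the C# port is heading. The `Tokenizer` class added in this commit is an empty stub, so the tokenization call below is shown commented out and its name is an assumption, not an existing API:

```csharp
using TensorFlowText;

class TextPreprocessingDemo
{
    static void Main()
    {
        // Compiles against the current stub, which exposes no ops yet.
        var tokenizer = new Tokenizer();

        // Hypothetical future usage: tokenization would run as a graph op, so the
        // exact same preprocessing executes at training time and at inference time.
        // var tokens = tokenizer.Tokenize(new[] { "TensorFlow Text in .NET" });
    }
}
```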
@@ -0,0 +1,7 @@ | |||||
<Project Sdk="Microsoft.NET.Sdk"> | |||||
<PropertyGroup> | |||||
<TargetFramework>netcoreapp2.2</TargetFramework> | |||||
</PropertyGroup> | |||||
</Project> |
@@ -0,0 +1,8 @@ | |||||
using System; | |||||
namespace TensorFlowText | |||||
{ | |||||
public class Tokenizer | |||||
{ | |||||
} | |||||
} |
@@ -1,23 +0,0 @@ | |||||
<Project Sdk="Microsoft.NET.Sdk"> | |||||
<PropertyGroup> | |||||
<OutputType>Exe</OutputType> | |||||
<TargetFramework>netcoreapp2.2</TargetFramework> | |||||
<GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||||
<StartupObject>Keras.Example.Program</StartupObject> | |||||
</PropertyGroup> | |||||
<ItemGroup> | |||||
<PackageReference Include="Colorful.Console" Version="1.2.9" /> | |||||
<PackageReference Include="Newtonsoft.Json" Version="12.0.2" /> | |||||
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.14.0" /> | |||||
<PackageReference Include="SharpZipLib" Version="1.1.0" /> | |||||
<PackageReference Include="System.Drawing.Common" Version="4.5.1" /> | |||||
</ItemGroup> | |||||
<ItemGroup> | |||||
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | |||||
<ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | |||||
</ItemGroup> | |||||
</Project> |
@@ -1,10 +0,0 @@ | |||||
<?xml version="1.0" encoding="utf-8"?> | |||||
<packages> | |||||
<package id="ArrayFire" version="0.0.2" targetFramework="net472" /> | |||||
<package id="Google.Protobuf" version="3.7.0" targetFramework="net472" /> | |||||
<package id="NumSharp" version="0.10.1" targetFramework="net472" /> | |||||
<package id="System.Buffers" version="4.4.0" targetFramework="net472" /> | |||||
<package id="System.Memory" version="4.5.2" targetFramework="net472" /> | |||||
<package id="System.Numerics.Vectors" version="4.5.0" targetFramework="net472" /> | |||||
<package id="System.Runtime.CompilerServices.Unsafe" version="4.5.2" targetFramework="net472" /> | |||||
</packages> |
@@ -1,41 +0,0 @@ | |||||
<Project Sdk="Microsoft.NET.Sdk"> | |||||
<PropertyGroup> | |||||
<TargetFramework>netcoreapp2.2</TargetFramework> | |||||
<IsPackable>false</IsPackable> | |||||
<AssemblyName>Keras.UnitTest</AssemblyName> | |||||
<RootNamespace>Keras.UnitTest</RootNamespace> | |||||
<ApplicationIcon /> | |||||
<OutputType>Exe</OutputType> | |||||
<StartupObject /> | |||||
</PropertyGroup> | |||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'"> | |||||
<DefineConstants>DEBUG;TRACE</DefineConstants> | |||||
<AllowUnsafeBlocks>true</AllowUnsafeBlocks> | |||||
</PropertyGroup> | |||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|AnyCPU'"> | |||||
<AllowUnsafeBlocks>true</AllowUnsafeBlocks> | |||||
</PropertyGroup> | |||||
<ItemGroup> | |||||
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.2.0" /> | |||||
<PackageReference Include="MSTest.TestAdapter" Version="1.4.0" /> | |||||
<PackageReference Include="MSTest.TestFramework" Version="1.4.0" /> | |||||
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.14.0" /> | |||||
</ItemGroup> | |||||
<ItemGroup> | |||||
<ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | |||||
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | |||||
<ProjectReference Include="..\KerasNET.Example\Keras.Example.csproj" /> | |||||
</ItemGroup> | |||||
</Project> |
@@ -1,11 +0,0 @@ | |||||
<?xml version="1.0" encoding="utf-8"?> | |||||
<packages> | |||||
<package id="ArrayFire" version="0.0.2" targetFramework="net472" /> | |||||
<package id="MSTest.TestAdapter" version="1.3.2" targetFramework="net472" /> | |||||
<package id="MSTest.TestFramework" version="1.3.2" targetFramework="net472" /> | |||||
<package id="NumSharp" version="0.10.1" targetFramework="net472" /> | |||||
<package id="System.Buffers" version="4.4.0" targetFramework="net472" /> | |||||
<package id="System.Memory" version="4.5.2" targetFramework="net472" /> | |||||
<package id="System.Numerics.Vectors" version="4.5.0" targetFramework="net472" /> | |||||
<package id="System.Runtime.CompilerServices.Unsafe" version="4.5.2" targetFramework="net472" /> | |||||
</packages> |
@@ -1,12 +0,0 @@ | |||||
using System; | |||||
namespace TensorFlowHub.Examples | |||||
{ | |||||
class Program | |||||
{ | |||||
static void Main(string[] args) | |||||
{ | |||||
Console.WriteLine("Hello World!"); | |||||
} | |||||
} | |||||
} |
@@ -1,16 +0,0 @@ | |||||
<Project Sdk="Microsoft.NET.Sdk"> | |||||
<PropertyGroup> | |||||
<OutputType>Exe</OutputType> | |||||
<TargetFramework>netcoreapp2.2</TargetFramework> | |||||
</PropertyGroup> | |||||
<ItemGroup> | |||||
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.14.0" /> | |||||
</ItemGroup> | |||||
<ItemGroup> | |||||
<ProjectReference Include="..\..\src\TensorFlowHub\TensorFlowHub.csproj" /> | |||||
</ItemGroup> | |||||
</Project> |
@@ -80,6 +80,16 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
{ | { | ||||
PrepareData(); | PrepareData(); | ||||
#region For debug purposes |
// predict images | |||||
Predict(null); | |||||
// load saved pb and test new images. | |||||
Test(null); | |||||
#endregion | |||||
var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | ||||
with(tf.Session(graph), sess => | with(tf.Session(graph), sess => | ||||
@@ -708,14 +718,77 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
File.WriteAllText(output_labels, string.Join("\n", image_lists.Keys)); | File.WriteAllText(output_labels, string.Join("\n", image_lists.Keys)); | ||||
} | } | ||||
public void Predict(Session sess) | |||||
/// <summary> | |||||
/// Prediction (single image) |
/// Label mapping, from output_labels.txt: |
/// 0 - daisy | |||||
/// 1 - dandelion | |||||
/// 2 - roses | |||||
/// 3 - sunflowers | |||||
/// 4 - tulips | |||||
/// </summary> | |||||
/// <param name="sess_"></param> | |||||
public void Predict(Session sess_) | |||||
{ | { | ||||
throw new NotImplementedException(); | |||||
if (!File.Exists(output_graph)) | |||||
return; | |||||
var labels = File.ReadAllLines(output_labels); | |||||
// predict image | |||||
var img_path = Path.Join(image_dir, "roses", "12240303_80d87f77a3_n.jpg"); | |||||
var fileBytes = ReadTensorFromImageFile(img_path); | |||||
// import graph and variables | |||||
var graph = Graph.ImportFromPB(output_graph, ""); | |||||
Tensor input = graph.OperationByName("Placeholder"); | |||||
Tensor output = graph.OperationByName("final_result"); | |||||
with(tf.Session(graph), sess => | |||||
{ | |||||
var result = sess.run(output, new FeedItem(input, fileBytes)); | |||||
var prob = np.squeeze(result); | |||||
var idx = np.argmax(prob); | |||||
print($"Prediction result: [{labels[idx]} {prob[idx][0]}] for {img_path}."); | |||||
}); | |||||
} | } | ||||
public void Test(Session sess) | |||||
private NDArray ReadTensorFromImageFile(string file_name, | |||||
int input_height = 299, | |||||
int input_width = 299, | |||||
int input_mean = 0, | |||||
int input_std = 255) | |||||
{ | { | ||||
throw new NotImplementedException(); | |||||
return with(tf.Graph().as_default(), graph => | |||||
{ | |||||
var file_reader = tf.read_file(file_name, "file_reader"); | |||||
var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||||
var caster = tf.cast(image_reader, tf.float32); | |||||
var dims_expander = tf.expand_dims(caster, 0); | |||||
var resize = tf.constant(new int[] { input_height, input_width }); | |||||
var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||||
var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||||
var normalized = tf.divide(sub, new float[] { input_std }); | |||||
return with(tf.Session(graph), sess => sess.run(normalized)); | |||||
}); | |||||
} | |||||
public void Test(Session sess_) | |||||
{ | |||||
if (!File.Exists(output_graph)) | |||||
return; | |||||
var graph = Graph.ImportFromPB(output_graph); | |||||
var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding(); | |||||
with(tf.Session(graph), sess => | |||||
{ | |||||
(test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists, | |||||
jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, | |||||
bottleneck_tensor); | |||||
}); | |||||
} | } | ||||
} | } | ||||
} | } |
@@ -3,12 +3,18 @@ using System.Collections.Generic; | |||||
using Tensorflow; | using Tensorflow; | ||||
using Keras.Layers; | using Keras.Layers; | ||||
using NumSharp; | using NumSharp; | ||||
using Keras; | |||||
namespace Keras.Example | |||||
namespace TensorFlowNET.Examples | |||||
{ | { | ||||
class Program | |||||
public class Keras : IExample | |||||
{ | { | ||||
static void Main(string[] args) | |||||
public bool Enabled { get; set; } = true; | |||||
public bool IsImportingGraph { get; set; } = false; | |||||
public string Name => "Keras"; | |||||
public bool Run() | |||||
{ | { | ||||
Console.WriteLine("================================== Keras =================================="); | Console.WriteLine("================================== Keras =================================="); | ||||
@@ -25,7 +31,7 @@ namespace Keras.Example | |||||
#region model | #region model | ||||
var m = new Model(); | var m = new Model(); | ||||
//m.Add(new Dense(8, name: "Hidden", activation: tf.nn.relu())).Add(new Dense(1, name:"Output")); | //m.Add(new Dense(8, name: "Hidden", activation: tf.nn.relu())).Add(new Dense(1, name:"Output")); | ||||
m.Add( | m.Add( | ||||
@@ -37,8 +43,9 @@ namespace Keras.Example | |||||
m.train(num_steps, (X, Y)); | m.train(num_steps, (X, Y)); | ||||
#endregion | #endregion | ||||
Console.ReadKey(); | |||||
return true; | |||||
} | } | ||||
static (NDArray, NDArray) XOR(int samples) | static (NDArray, NDArray) XOR(int samples) | ||||
{ | { | ||||
var X = new List<float[]>(); | var X = new List<float[]>(); | ||||
@@ -57,5 +64,35 @@ namespace Keras.Example | |||||
return (np.array(X.ToArray()), np.array(Y.ToArray())); | return (np.array(X.ToArray()), np.array(Y.ToArray())); | ||||
} | } | ||||
public Graph BuildGraph() | |||||
{ | |||||
throw new NotImplementedException(); | |||||
} | |||||
public Graph ImportGraph() | |||||
{ | |||||
throw new NotImplementedException(); | |||||
} | |||||
public void Predict(Session sess) | |||||
{ | |||||
throw new NotImplementedException(); | |||||
} | |||||
public void PrepareData() | |||||
{ | |||||
throw new NotImplementedException(); | |||||
} | |||||
public void Test(Session sess) | |||||
{ | |||||
throw new NotImplementedException(); | |||||
} | |||||
public void Train(Session sess) | |||||
{ | |||||
throw new NotImplementedException(); | |||||
} | |||||
} | } | ||||
} | } |
@@ -15,6 +15,8 @@ | |||||
</ItemGroup> | </ItemGroup> | ||||
<ItemGroup> | <ItemGroup> | ||||
<ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | |||||
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | ||||
<ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" /> | |||||
</ItemGroup> | </ItemGroup> | ||||
</Project> | </Project> |
@@ -0,0 +1,28 @@ | |||||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
using System.Threading.Tasks; | |||||
using Tensorflow; | |||||
using Tensorflow.Hub; | |||||
namespace TensorFlowNET.UnitTest.Hub | |||||
{ | |||||
[TestClass] | |||||
public class MnistModelLoaderTest | |||||
{ | |||||
[TestMethod] | |||||
public async Task TestLoad() | |||||
{ | |||||
var loader = new MnistModelLoader(); | |||||
var result = await loader.LoadAsync(new ModelLoadSetting | |||||
{ | |||||
TrainDir = "mnist", | |||||
OneHot = true, | |||||
ValidationSize = 5000, | |||||
}); | |||||
Assert.IsNotNull(result); | |||||
} | |||||
} | |||||
} |
@@ -3,7 +3,7 @@ using Keras.Layers; | |||||
using NumSharp; | using NumSharp; | ||||
using Microsoft.VisualStudio.TestTools.UnitTesting; | using Microsoft.VisualStudio.TestTools.UnitTesting; | ||||
namespace Keras.Test | |||||
namespace TensorFlowNET.UnitTest | |||||
{ | { | ||||
[TestClass] | [TestClass] | ||||
public class BaseTests | public class BaseTests |
@@ -61,6 +61,34 @@ namespace TensorFlowNET.UnitTest | |||||
} | } | ||||
} | } | ||||
[TestMethod] | |||||
public void isFinite() | |||||
{ | |||||
var a = tf.constant(new[] { 1, np.nan, 2, np.nan, 3, np.nan, 4, np.nan }); | |||||
var b = tf.cast(tf.is_finite(a), tf.float32); | |||||
var check = np.array(1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(b); | |||||
Assert.IsTrue(o.array_equal(check)); | |||||
} | |||||
} | |||||
[TestMethod] | |||||
public void isNan() | |||||
{ | |||||
var a = tf.constant(new[] { 1, np.nan, 2, np.nan, 3, np.nan, 4, np.nan }); | |||||
var b = tf.cast(tf.is_nan(a), tf.float32); | |||||
var check = np.array(0.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var o = sess.run(b); | |||||
Assert.IsTrue(o.array_equal(check)); | |||||
} | |||||
} | |||||
[TestMethod] | [TestMethod] | ||||
public void addOpTests() | public void addOpTests() | ||||
{ | { | ||||
@@ -126,7 +126,6 @@ namespace TensorFlowNET.UnitTest | |||||
if (tensors == null) | if (tensors == null) | ||||
return null; | return null; | ||||
return nest.map_structure(self._eval_tensor, tensors); | return nest.map_structure(self._eval_tensor, tensors); | ||||
return null; | |||||
} | } | ||||
protected object _eval_tensor(object tensor) | protected object _eval_tensor(object tensor) | ||||
@@ -145,7 +144,7 @@ namespace TensorFlowNET.UnitTest | |||||
// tensor.dense_shape) | // tensor.dense_shape) | ||||
//return (tensor as Tensor).numpy(); | //return (tensor as Tensor).numpy(); | ||||
} | } | ||||
catch (Exception e) | |||||
catch (Exception) | |||||
{ | { | ||||
throw new ValueError("Unsupported type: " + tensor.GetType()); | throw new ValueError("Unsupported type: " + tensor.GetType()); | ||||
} | } | ||||
@@ -23,7 +23,10 @@ | |||||
</ItemGroup> | </ItemGroup> | ||||
<ItemGroup> | <ItemGroup> | ||||
<ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | |||||
<ProjectReference Include="..\..\src\TensorFlowHub\TensorFlowHub.csproj" /> | |||||
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | ||||
<ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" /> | |||||
<ProjectReference Include="..\TensorFlowNET.Examples\TensorFlowNET.Examples.csproj" /> | <ProjectReference Include="..\TensorFlowNET.Examples\TensorFlowNET.Examples.csproj" /> | ||||
</ItemGroup> | </ItemGroup> | ||||
@@ -18,10 +18,12 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||||
//var embedding_matrix = variable_scope.get_variable( | //var embedding_matrix = variable_scope.get_variable( | ||||
//"embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true); | //"embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true); | ||||
/* | |||||
Tensor cond(Tensor it, Tensor _) | Tensor cond(Tensor it, Tensor _) | ||||
{ | { | ||||
return it < 5; | return it < 5; | ||||
} | } | ||||
*/ | |||||
// TODO: below code doesn't compile | // TODO: below code doesn't compile | ||||
//(Tensor, Tensor) body(Tensor it, Tensor cost) | //(Tensor, Tensor) body(Tensor it, Tensor cost) | ||||
@@ -1,7 +1,6 @@ | |||||
using System; | using System; | ||||
using System.Collections.Generic; | using System.Collections.Generic; | ||||
using System.Linq; | using System.Linq; | ||||
using System.Text; | |||||
using Microsoft.VisualStudio.TestTools.UnitTesting; | using Microsoft.VisualStudio.TestTools.UnitTesting; | ||||
using NumSharp; | using NumSharp; | ||||
using Tensorflow; | using Tensorflow; | ||||
@@ -35,10 +34,10 @@ namespace TensorFlowNET.UnitTest.gradients_test | |||||
[TestMethod] | [TestMethod] | ||||
public void testBatchMatMulGradient() | public void testBatchMatMulGradient() | ||||
{ | { | ||||
var a = tf.constant(np.array(Enumerable.Range(1, 18).Select(elem => (float)elem).ToArray()), shape:new []{2, 3, 3}); | |||||
var a = tf.constant(np.array(Enumerable.Range(1, 18).Select(elem => (float)elem).ToArray()), shape: new[] { 2, 3, 3 }); | |||||
var b = tf.divide(a, tf.constant(2.0f)); | var b = tf.divide(a, tf.constant(2.0f)); | ||||
var c = tf.batch_matmul(a, b); | var c = tf.batch_matmul(a, b); | ||||
var g = tf.gradients(c, new[] {a, b}, stop_gradients: new[] {a, b}); | |||||
var g = tf.gradients(c, new[] { a, b }, stop_gradients: new[] { a, b }); | |||||
var checkG = new[] | var checkG = new[] | ||||
{ | { | ||||
3.0f, 7.5f, 12.0f, | 3.0f, 7.5f, 12.0f, | ||||
@@ -64,20 +63,87 @@ namespace TensorFlowNET.UnitTest.gradients_test | |||||
} | } | ||||
} | } | ||||
[TestMethod] | |||||
public void testTanhGradient() | |||||
{ | |||||
var a = tf.constant(1f); | |||||
var b = tf.tanh(a); | |||||
var g = tf.gradients(b, a); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var result = sess.run(g); | |||||
var actual = result[0].GetData<float>()[0]; | |||||
self.assertEquals(0.41997434127f, actual); | |||||
} | |||||
} | |||||
[TestMethod] | |||||
public void testLgammaGrad() | |||||
{ | |||||
var a = tf.constant(5f); | |||||
var b = tf.lgamma(a); | |||||
var g = tf.gradients(b, a); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var result = sess.run(new object[] { g, b }); | |||||
var actualDeriv = result[0].GetData<float>()[0]; | |||||
var actual = result[1].GetData<float>()[0]; | |||||
self.assertEquals(1.5061177f, actualDeriv); | |||||
self.assertEquals(3.17805386f, actual); | |||||
} | |||||
} | |||||
[TestMethod] | |||||
public void testSliceGrad() | |||||
{ | |||||
var a = tf.tanh(tf.constant(new[] { 2f, 3f }, shape: new[] { 2, 1 })); | |||||
var b = tf.strided_slice(a, | |||||
tf.constant(new[] { 0 }, tf.int32, new[] { 1 }), | |||||
tf.constant(new[] { 1 }, tf.int32, new[] { 1 }), | |||||
tf.constant(new[] { 1 }, tf.int32, new[] { 1 }) | |||||
); | |||||
var g = tf.gradients(b, a); | |||||
with(tf.Session(), sess => | |||||
{ | |||||
var result = sess.run(new object[] { g, b }); | |||||
var actualDeriv = np.squeeze(result[0]); | |||||
var actual = np.squeeze(result[1]); | |||||
self.assertEquals(new float[] { 1, 0 }, new float[] { actualDeriv[0], actualDeriv[1] }); | |||||
self.assertEquals(0.9640276f, (float)actual); | |||||
}); | |||||
} | |||||
[TestMethod] | |||||
public void testConcatGrad() | |||||
{ | |||||
var a1 = tf.constant(new[] { 2f }, shape: new[] { 1 }); | |||||
var a2 = tf.constant(new[] { 3f }, shape: new[] { 1 }); | |||||
var a = tf.concat(new List<Tensor>(new[] { a1, a2 }), 0); | |||||
var g = tf.gradients(a, a1); | |||||
using (var sess = tf.Session()) | |||||
{ | |||||
var result = sess.run(new object[] { g, a }); | |||||
var actualDeriv = result[0].GetData<float>()[0]; | |||||
var actual = result[1].GetData<float>()[0]; | |||||
self.assertEquals(1f, actualDeriv); | |||||
self.assertEquals(2f, actual); | |||||
} | |||||
} | |||||
[Ignore("TODO")] | [Ignore("TODO")] | ||||
[TestMethod] | [TestMethod] | ||||
public void testUnusedOutput() | public void testUnusedOutput() | ||||
{ | { | ||||
//def testUnusedOutput(self): | |||||
// with ops.Graph().as_default(): | |||||
// w = constant(1.0, shape=[2, 2]) | |||||
// x = constant(1.0, shape=[2, 2]) | |||||
// wx = math_ops.matmul(w, x) | |||||
// split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0) | |||||
// c = math_ops.reduce_sum(split_wx[1]) | |||||
// gw = gradients.gradients(c, [w])[0] | |||||
// self.assertEquals("MatMul", gw.op.type) | |||||
} | } | ||||
[Ignore("TODO")] | [Ignore("TODO")] | ||||
@@ -39,7 +39,7 @@ namespace TensorFlowNET.UnitTest.ops_test | |||||
[TestMethod] | [TestMethod] | ||||
public void TestEager() | public void TestEager() | ||||
{ | { | ||||
Tensor a = null, c = null, d = null, e = null; | |||||
Tensor a = null, c = null; | |||||
object b = null; | object b = null; | ||||
var calls = 0; | var calls = 0; | ||||
Func<Tensor> future = () => | Func<Tensor> future = () => | ||||