
remove examples; moved to the examples repo.

tags/v0.12
Oceania2018 6 years ago
commit 6f55491508
74 changed files with 0 additions and 8423 deletions
  1. +0 -18   TensorFlow.NET.sln
  2. +0 -45   src/TensorFlowNET.Visualization/Controllers/ValuesController.cs
  3. +0 -24   src/TensorFlowNET.Visualization/Program.cs
  4. +0 -41   src/TensorFlowNET.Visualization/Startup.cs
  5. +0 -17   src/TensorFlowNET.Visualization/TensorFlowNET.Visualization.csproj
  6. +0 -9    src/TensorFlowNET.Visualization/appsettings.Development.json
  7. +0 -8    src/TensorFlowNET.Visualization/appsettings.json
  8. +0 -29   src/TensorFlowNet.Benchmarks/Program.cs
  9. +0 -88   src/TensorFlowNet.Benchmarks/TensorBenchmark.cs
  10. +0 -33  src/TensorFlowNet.Benchmarks/TensorFlowBenchmark.csproj
  11. +0 -20  src/TensorFlowNet.Benchmarks/TensorFlowNET.Benchmark.csproj
  12. +0 -76  src/TensorFlowNet.Benchmarks/Unmanaged/StructCastBenchmark.cs
  13. +0 -104 test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs
  14. +0 -8   test/TensorFlowNET.Examples.FSharp/Program.fs
  15. +0 -21  test/TensorFlowNET.Examples.FSharp/TensorFlowNET.Examples.FSharp.fsproj
  16. +0 -1   test/TensorFlowNET.Examples/AudioProcessing/README.md
  17. +0 -73  test/TensorFlowNET.Examples/BasicEagerApi.cs
  18. +0 -185 test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs
  19. +0 -145 test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs
  20. +0 -190 test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs
  21. +0 -221 test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs
  22. +0 -118 test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs
  23. +0 -187 test/TensorFlowNET.Examples/BasicOperations.cs
  24. +0 -66  test/TensorFlowNET.Examples/HelloWorld.cs
  25. +0 -59  test/TensorFlowNET.Examples/IExample.cs
  26. +0 -73  test/TensorFlowNET.Examples/ImageProcessing/CIFAR10-CNN.cs
  27. +0 -338 test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs
  28. +0 -182 test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs
  29. +0 -161 test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs
  30. +0 -84  test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs
  31. +0 -140 test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs
  32. +0 -133 test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs
  33. +0 -175 test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs
  34. +0 -88  test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection/Main.cs
  35. +0 -791 test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs
  36. +0 -54  test/TensorFlowNET.Examples/ImageProcessing/YOLO/Dataset.cs
  37. +0 -243 test/TensorFlowNET.Examples/ImageProcessing/YOLO/Main.cs
  38. +0 -27  test/TensorFlowNET.Examples/ImageProcessing/YOLO/Utils.cs
  39. +0 -291 test/TensorFlowNET.Examples/ImageProcessing/YOLO/YOLOv3.cs
  40. +0 -51  test/TensorFlowNET.Examples/ImageProcessing/YOLO/backbone.cs
  41. +0 -100 test/TensorFlowNET.Examples/ImageProcessing/YOLO/common.cs
  42. +0 -94  test/TensorFlowNET.Examples/ImageProcessing/YOLO/config.cs
  43. +0 -99  test/TensorFlowNET.Examples/Keras.cs
  44. +0 -153 test/TensorFlowNET.Examples/NeuralNetworks/FullyConnected.cs
  45. +0 -129 test/TensorFlowNET.Examples/NeuralNetworks/FullyConnectedInQueue.cs
  46. +0 -190 test/TensorFlowNET.Examples/NeuralNetworks/NeuralNetXor.cs
  47. +0 -120 test/TensorFlowNET.Examples/Program.cs
  48. +0 -32  test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj
  49. +0 -164 test/TensorFlowNET.Examples/TextProcessing/BinaryTextClassification.cs
  50. +0 -272 test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs
  51. +0 -218 test/TensorFlowNET.Examples/TextProcessing/DataHelpers.cs
  52. +0 -59  test/TensorFlowNET.Examples/TextProcessing/NER/BiLstmCrfNer.cs
  53. +0 -55  test/TensorFlowNET.Examples/TextProcessing/NER/CRF.cs
  54. +0 -233 test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs
  55. +0 -51  test/TensorFlowNET.Examples/TextProcessing/NamedEntityRecognition.cs
  56. +0 -243 test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs
  57. +0 -147 test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs
  58. +0 -6   test/TensorFlowNET.Examples/TextProcessing/cnn_models/ITextModel.cs
  59. +0 -171 test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs
  60. +0 -99  test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs
  61. +0 -20  test/TensorFlowNET.Examples/Utility/ArrayShuffling.cs
  62. +0 -105 test/TensorFlowNET.Examples/Utility/CoNLLDataset.cs
  63. +0 -102 test/TensorFlowNET.Examples/Utility/Compress.cs
  64. +0 -81  test/TensorFlowNET.Examples/Utility/PbtxtParser.cs
  65. +0 -57  test/TensorFlowNET.Examples/Utility/Web.cs
  66. +0 -115 test/TensorFlowNET.Examples/python/binary_text_classification.py
  67. +0 -107 test/TensorFlowNET.Examples/python/linear_regression.py
  68. +0 -100 test/TensorFlowNET.Examples/python/logistic_regression.py
  69. +0 -67  test/TensorFlowNET.Examples/python/meta_graph.py
  70. +0 -78  test/TensorFlowNET.Examples/python/minst_lstm.py
  71. +0 -48  test/TensorFlowNET.Examples/python/minst_rnn.py
  72. +0 -164 test/TensorFlowNET.Examples/python/neural_network.py
  73. +0 -126 test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs
  74. +0 -1   test/TensorFlowNET.UnitTest/TensorFlowNET.UnitTest.csproj

+0 -18  TensorFlow.NET.sln

@@ -5,14 +5,8 @@ VisualStudioVersion = 16.0.29102.190
MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.UnitTest", "test\TensorFlowNET.UnitTest\TensorFlowNET.UnitTest.csproj", "{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples", "test\TensorFlowNET.Examples\TensorFlowNET.Examples.csproj", "{1FE60088-157C-4140-91AB-E96B915E4BAE}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Core", "src\TensorFlowNET.Core\TensorFlowNET.Core.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}"
EndProject
Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorFlowNET.Examples.FSharp", "test\TensorFlowNET.Examples.FSharp\TensorFlowNET.Examples.FSharp.fsproj", "{62BC3801-F0D3-44A9-A0AC-712F40C8F961}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowBenchmark", "src\TensorFlowNet.Benchmarks\TensorFlowBenchmark.csproj", "{68861442-971A-4196-876E-C9330F0B3C54}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}"
@@ -31,22 +25,10 @@ Global
{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Release|Any CPU.ActiveCfg = Release|Any CPU
{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Release|Any CPU.Build.0 = Release|Any CPU
{1FE60088-157C-4140-91AB-E96B915E4BAE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{1FE60088-157C-4140-91AB-E96B915E4BAE}.Debug|Any CPU.Build.0 = Debug|Any CPU
{1FE60088-157C-4140-91AB-E96B915E4BAE}.Release|Any CPU.ActiveCfg = Release|Any CPU
{1FE60088-157C-4140-91AB-E96B915E4BAE}.Release|Any CPU.Build.0 = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.Build.0 = Debug|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.ActiveCfg = Release|Any CPU
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.Build.0 = Release|Any CPU
{68861442-971A-4196-876E-C9330F0B3C54}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{68861442-971A-4196-876E-C9330F0B3C54}.Debug|Any CPU.Build.0 = Debug|Any CPU
{68861442-971A-4196-876E-C9330F0B3C54}.Release|Any CPU.ActiveCfg = Release|Any CPU
{68861442-971A-4196-876E-C9330F0B3C54}.Release|Any CPU.Build.0 = Release|Any CPU
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Release|Any CPU.ActiveCfg = Release|Any CPU


+0 -45  src/TensorFlowNET.Visualization/Controllers/ValuesController.cs

@@ -1,45 +0,0 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Mvc;

namespace TensorFlowNET.Visualization.Controllers
{
[Route("api/[controller]")]
[ApiController]
public class ValuesController : ControllerBase
{
// GET api/values
[HttpGet]
public ActionResult<IEnumerable<string>> Get()
{
return new string[] { "value1", "value2" };
}

// GET api/values/5
[HttpGet("{id}")]
public ActionResult<string> Get(int id)
{
return "value";
}

// POST api/values
[HttpPost]
public void Post([FromBody] string value)
{
}

// PUT api/values/5
[HttpPut("{id}")]
public void Put(int id, [FromBody] string value)
{
}

// DELETE api/values/5
[HttpDelete("{id}")]
public void Delete(int id)
{
}
}
}

+0 -24  src/TensorFlowNET.Visualization/Program.cs

@@ -1,24 +0,0 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore;
using Microsoft.AspNetCore.Hosting;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;

namespace TensorFlowNET.Visualization
{
public class Program
{
public static void Main(string[] args)
{
CreateWebHostBuilder(args).Build().Run();
}

public static IWebHostBuilder CreateWebHostBuilder(string[] args) =>
WebHost.CreateDefaultBuilder(args)
.UseStartup<Startup>();
}
}

+0 -41  src/TensorFlowNET.Visualization/Startup.cs

@@ -1,41 +0,0 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;

namespace TensorFlowNET.Visualization
{
public class Startup
{
public Startup(IConfiguration configuration)
{
Configuration = configuration;
}

public IConfiguration Configuration { get; }

// This method gets called by the runtime. Use this method to add services to the container.
public void ConfigureServices(IServiceCollection services)
{
services.AddMvc().SetCompatibilityVersion(CompatibilityVersion.Version_2_2);
}

// This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
public void Configure(IApplicationBuilder app, IHostingEnvironment env)
{
if (env.IsDevelopment())
{
app.UseDeveloperExceptionPage();
}

app.UseMvc();
}
}
}

+0 -17  src/TensorFlowNET.Visualization/TensorFlowNET.Visualization.csproj

@@ -1,17 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk.Web">

<PropertyGroup>
<TargetFramework>netcoreapp2.1</TargetFramework>
<AspNetCoreHostingModel>InProcess</AspNetCoreHostingModel>
</PropertyGroup>

<ItemGroup>
<PackageReference Include="Microsoft.AspNetCore.App" />
<PackageReference Include="Microsoft.AspNetCore.Razor.Design" Version="2.2.0" PrivateAssets="All" />
</ItemGroup>

<ItemGroup>
<ProjectReference Include="..\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
</ItemGroup>

</Project>

+0 -9  src/TensorFlowNET.Visualization/appsettings.Development.json

@@ -1,9 +0,0 @@
{
"Logging": {
"LogLevel": {
"Default": "Debug",
"System": "Information",
"Microsoft": "Information"
}
}
}

+0 -8  src/TensorFlowNET.Visualization/appsettings.json

@@ -1,8 +0,0 @@
{
"Logging": {
"LogLevel": {
"Default": "Warning"
}
},
"AllowedHosts": "*"
}

+0 -29  src/TensorFlowNet.Benchmarks/Program.cs

@@ -1,29 +0,0 @@
using System;
using System.Reflection;
using BenchmarkDotNet.Configs;
using BenchmarkDotNet.Running;
namespace TensorFlowBenchmark
{
class Program
{
static void Main(string[] args)
{
if (args?.Length > 0)
{
for (int i = 0; i < args.Length; i++)
{
string name = $"TensorFlowBenchmark.{args[i]}";
var type = Type.GetType(name);
BenchmarkRunner.Run(type);
}
}
else
{
BenchmarkSwitcher.FromAssembly(Assembly.GetExecutingAssembly()).Run(args, ManualConfig.Create(DefaultConfig.Instance).With(ConfigOptions.DisableOptimizationsValidator));
}
Console.ReadLine();
}
}
}

+0 -88  src/TensorFlowNet.Benchmarks/TensorBenchmark.cs

@@ -1,88 +0,0 @@
using System;
using BenchmarkDotNet.Attributes;
using NumSharp;
using Tensorflow;
using static Tensorflow.Binding;
namespace TensorFlowBenchmark
{
[SimpleJob(launchCount: 1, warmupCount: 2, targetCount: 10)]
[MinColumn, MaxColumn, MeanColumn, MedianColumn]
public class TensorBenchmark
{
private double[] data;
[GlobalSetup]
public void Setup()
{
data = new double[100];
}
[Benchmark]
public void ScalarTensor()
{
var g = new Graph();
for (int i = 0; i < 100; i++)
{
using (var tensor = new Tensor(17.0))
{
}
}
}
[Benchmark]
public unsafe void TensorFromFixedPtr()
{
var g = new Graph();
for (int i = 0; i < 100; i++)
{
fixed (double* ptr = &data[0])
{
using (var t = new Tensor((IntPtr)ptr, new long[] { data.Length }, tf.float64, 8 * data.Length))
{
}
}
}
}
[Benchmark]
public void TensorFromArray()
{
var g=new Graph();
for (int i = 0; i < 100; i++)
{
using (var tensor = new Tensor(data))
{
}
}
}
[Benchmark]
public void TensorFromNDArray()
{
var g = new Graph();
for (int i = 0; i < 1000; i++)
{
using (var tensor = new Tensor(new NDArray(data)))
{
}
}
}
//[Benchmark]
//public void Constant()
//{
// for (int i = 0; i < 100; i++)
// {
// //var tensor = new Tensor(new NDArray(data));
// var c = tf.constant(42.0);
// }
//}
}
}
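For context, a minimal standalone sketch of the fixed-pointer pattern TensorFromFixedPtr relies on, stripped of the TensorFlow dependency (the 8 mirrors sizeof(double) in the byte-length argument above); compile with unsafe blocks enabled:

using System;

static class FixedPtrDemo
{
    static unsafe void Main()
    {
        var data = new double[100];
        // 'fixed' pins the array so the GC cannot move it while the raw pointer is live.
        fixed (double* ptr = &data[0])
        {
            var addr = (IntPtr)ptr;
            long byteLength = 8L * data.Length; // sizeof(double) * element count
            Console.WriteLine($"buffer at {addr} spans {byteLength} bytes");
        }
    }
}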

+0 -33  src/TensorFlowNet.Benchmarks/TensorFlowBenchmark.csproj

@@ -1,33 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp3.0</TargetFramework>
<NoWin32Manifest>true</NoWin32Manifest>
<AssemblyName>TensorFlowBenchmark</AssemblyName>
<RootNamespace>TensorFlowBenchmark</RootNamespace>
<LangVersion>7.3</LangVersion>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|AnyCPU'">
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'">
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup>
<ItemGroup>
<None Remove="tensorflow.dll" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="BenchmarkDotNet" Version="0.11.5" />
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.14.0" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
</ItemGroup>
</Project>

+0 -20  src/TensorFlowNet.Benchmarks/TensorFlowNET.Benchmark.csproj

@@ -1,20 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp2.2</TargetFramework>
</PropertyGroup>
<ItemGroup>
<None Remove="tensorflow.dll" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="BenchmarkDotNet" Version="0.11.5" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
</ItemGroup>
</Project>

+0 -76  src/TensorFlowNet.Benchmarks/Unmanaged/StructCastBenchmark.cs

@@ -1,76 +0,0 @@
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using BenchmarkDotNet.Attributes;
using Google.Protobuf.WellKnownTypes;
using NumSharp;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowBenchmark.Unmanaged
{
public struct UnmanagedStruct
{
public int a;
public long b;
public UnmanagedStruct(int _)
{
a = 2;
b = 3;
}
}

[SimpleJob(launchCount: 1, warmupCount: 2, targetCount: 10)]
[MinColumn, MaxColumn, MeanColumn, MedianColumn]
public unsafe class StructCastBenchmark
{
private static void EnsureIsUnmanaged<T>(T _) where T : unmanaged
{ }

static StructCastBenchmark() // if UnmanagedStruct is not an unmanaged struct, this will fail to compile
=> EnsureIsUnmanaged(new UnmanagedStruct());

private IntPtr data;
private void* dataptr;

[GlobalSetup]
public void Setup()
{
data = Marshal.AllocHGlobal(Marshal.SizeOf<UnmanagedStruct>());
dataptr = data.ToPointer();
}

[Benchmark, MethodImpl(MethodImplOptions.NoOptimization)]
public void Marshal_PtrToStructure()
{
UnmanagedStruct _;
for (int i = 0; i < 10000; i++)
{
_ = Marshal.PtrToStructure<UnmanagedStruct>(data);
}
}

[Benchmark, MethodImpl(MethodImplOptions.NoOptimization)]
public void PointerCast()
{
var dptr = dataptr;
UnmanagedStruct _;
for (int i = 0; i < 10000; i++)
{
_ = *(UnmanagedStruct*) dptr;
}
}

[Benchmark, MethodImpl(MethodImplOptions.NoOptimization)]
public void Unsafe_Read()
{
var dptr = dataptr;
UnmanagedStruct _;
for (int i = 0; i < 10000; i++)
{
_ = Unsafe.Read<UnmanagedStruct>(dptr);
}
}

}
}
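For reference, a standalone sketch of the reinterpret-cast pattern the benchmark above measures; Sample is a hypothetical stand-in for UnmanagedStruct, and the project needs AllowUnsafeBlocks:

using System;
using System.Runtime.InteropServices;

struct Sample { public int a; public long b; } // blittable fields only, hence unmanaged

static class PtrCastDemo
{
    static unsafe void Main()
    {
        // One Sample's worth of unmanaged memory.
        IntPtr mem = Marshal.AllocHGlobal(Marshal.SizeOf<Sample>());
        try
        {
            // Write through a raw pointer cast, as PointerCast does...
            *(Sample*)mem.ToPointer() = new Sample { a = 2, b = 3 };
            // ...and read it back via the marshaller, as Marshal_PtrToStructure does.
            Sample copy = Marshal.PtrToStructure<Sample>(mem);
            Console.WriteLine($"a={copy.a} b={copy.b}"); // a=2 b=3
        }
        finally
        {
            Marshal.FreeHGlobal(mem);
        }
    }
}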

+0 -104  test/TensorFlowNET.Examples.FSharp/FunctionApproximation.fs

@@ -1,104 +0,0 @@
module FunctionApproximation

//reduced example from https://github.com/tirthajyoti/Machine-Learning-with-Python/blob/master/Function%20Approximation%20by%20Neural%20Network/Function%20approximation%20by%20linear%20model%20and%20deep%20network.ipynb

open NumSharp
open Tensorflow
open System

let run()=
let N_points = 75 // Number of points for constructing function
let x_min = 1.0 // Min of the range of x (feature)
let x_max = 15.0 // Max of the range of x (feature)
let noise_mean = 0.0 // Mean of the Gaussian noise adder
let noise_sd = 10.0 // Std.Dev of the Gaussian noise adder

let linspace points = [| for i in 0 .. (points - 1) -> x_min + (x_max - x_min)/(float)points * (float)i |]

let func_trans(xAr:float []) =
xAr
|>Array.map (fun (x:float) -> (20.0 * x+3.0 * System.Math.Pow(x,2.0)+0.1 * System.Math.Pow(x,3.0))*sin(x)*exp(-0.1*x))
let X_raw = linspace N_points
let Y_raw = func_trans(X_raw)
let X_mtr = Array2D.init X_raw.Length 1 (fun i j -> X_raw.[i])
let X = np.array(X_mtr)

let noise_x = np.random.normal(noise_mean,noise_sd,N_points)
let y = np.array(Y_raw)+noise_x

let X_train = X
let y_train = y

let learning_rate = 0.00001
let training_epochs = 35000
let n_input = 1 // Number of features
let n_output = 1 // Regression output is a number only
let n_hidden_layer_1 = 25 // Hidden layer 1
let n_hidden_layer_2 = 25 // Hidden layer 2

let tf = Binding.New<tensorflow>()
let x = tf.placeholder(tf.float64, new TensorShape(N_points,n_input))
let y = tf.placeholder(tf.float64, new TensorShape(n_output))
let weights = dict[
"hidden_layer_1", tf.Variable(tf.random_normal([|n_input; n_hidden_layer_1|],dtype=tf.float64))
"hidden_layer_2", tf.Variable(tf.random_normal([|n_hidden_layer_1; n_hidden_layer_2|],dtype=tf.float64))
"out", tf.Variable(tf.random_normal([|n_hidden_layer_2; n_output|],dtype=tf.float64))
]
let biases = dict[
"hidden_layer_1", tf.Variable(tf.random_normal([|n_hidden_layer_1|],dtype=tf.float64))
"hidden_layer_2", tf.Variable(tf.random_normal([|n_hidden_layer_2|],dtype=tf.float64))
"out", tf.Variable(tf.random_normal([|n_output|],dtype=tf.float64))
]

// Hidden layer with RELU activation

let layer_1 = tf.add(tf.matmul(x, weights.["hidden_layer_1"]._AsTensor()),biases.["hidden_layer_1"])
let layer_1 = tf.nn.relu(layer_1)

let layer_2 = tf.add(tf.matmul(layer_1, weights.["hidden_layer_2"]._AsTensor()),biases.["hidden_layer_2"])
let layer_2 = tf.nn.relu(layer_2)

// Output layer with linear activation
let ops = tf.add(tf.matmul(layer_2, weights.["out"]._AsTensor()), biases.["out"])
// Define loss and optimizer
let cost = tf.reduce_mean(tf.square(tf.squeeze(ops)-y))

let gs = tf.Variable(1, trainable= false, name= "global_step")

let optimizer = tf.train.GradientDescentOptimizer(learning_rate=(float32)learning_rate).minimize(cost,global_step = gs)

let init = tf.global_variables_initializer()
Tensorflow.Binding.``tf_with``(tf.Session(), fun (sess:Session) ->
sess.run(init) |> ignore
// Loop over epochs
for epoch in [0..training_epochs] do
// Run optimization process (backprop) and cost function (to get loss value)

let result=sess.run([|optimizer:>ITensorOrOperation; gs._AsTensor():>ITensorOrOperation; cost:>ITensorOrOperation|], new FeedItem(x, X_train), new FeedItem(y, y_train))


let loss_value = (double) result.[2];

let step = (int) result.[1];
if epoch % 1000 = 0 then
sprintf "Step %d loss: %f" step loss_value |> Console.WriteLine
let w=sess.run(weights |> Array.ofSeq |> Array.map (fun pair -> pair.Value))
let b = sess.run(biases |> Array.ofSeq |> Array.map (fun pair -> pair.Value))
let yhat=sess.run([|ops:>ITensorOrOperation|],new FeedItem(x,X_train))
for i in [0..(N_points-1)] do
sprintf "pred %f real: %f" ((double)(yhat.[0].[i].[0])) ((double)Y_raw.[i]) |> Console.WriteLine
)
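Restated for reference, the forward pass the F# graph above builds: a two-hidden-layer ReLU MLP with a linear output. This plain C# sketch uses smaller layer sizes and random placeholder weights rather than trained ones:

using System;

static class MlpForwardDemo
{
    static double[] Dense(double[] x, double[,] w, double[] b, bool relu)
    {
        var y = new double[b.Length];
        for (int j = 0; j < b.Length; j++)
        {
            double s = b[j];
            for (int i = 0; i < x.Length; i++) s += x[i] * w[i, j];
            y[j] = relu && s < 0 ? 0 : s; // ReLU on hidden layers only
        }
        return y;
    }

    static void Main()
    {
        // 1 input -> 3 hidden -> 3 hidden -> 1 output, placeholder weights.
        var rnd = new Random(0);
        double[,] w1 = new double[1, 3], w2 = new double[3, 3], w3 = new double[3, 1];
        foreach (var w in new[] { w1, w2, w3 })
            for (int i = 0; i < w.GetLength(0); i++)
                for (int j = 0; j < w.GetLength(1); j++)
                    w[i, j] = rnd.NextDouble() - 0.5;
        double[] b1 = new double[3], b2 = new double[3], b3 = new double[1];

        var h1 = Dense(new[] { 5.0 }, w1, b1, relu: true);
        var h2 = Dense(h1, w2, b2, relu: true);
        var y = Dense(h2, w3, b3, relu: false); // linear output layer
        Console.WriteLine(y[0]);
    }
}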





+0 -8  test/TensorFlowNET.Examples.FSharp/Program.fs

@@ -1,8 +0,0 @@
// Learn more about F# at http://fsharp.org

open System

[<EntryPoint>]
let main argv =
FunctionApproximation.run()
0 // return an integer exit code

+0 -21  test/TensorFlowNET.Examples.FSharp/TensorFlowNET.Examples.FSharp.fsproj

@@ -1,21 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp2.2</TargetFramework>
</PropertyGroup>

<ItemGroup>
<Compile Include="FunctionApproximation.fs" />
<Compile Include="Program.fs" />
</ItemGroup>

<ItemGroup>
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.14.0" />
</ItemGroup>

<ItemGroup>
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
</ItemGroup>

</Project>

+0 -1  test/TensorFlowNET.Examples/AudioProcessing/README.md

@@ -1 +0,0 @@


+0 -73  test/TensorFlowNET.Examples/BasicEagerApi.cs

@@ -1,73 +0,0 @@
using System;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Basic introduction to TensorFlow's Eager API.
/// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/basic_eager_api.py
/// </summary>
public class BasicEagerApi : IExample
{
public bool Enabled { get; set; } = false;
public string Name => "Basic Eager";
public bool IsImportingGraph { get; set; } = false;

private Tensor a, b, c, d;

public bool Run()
{
// Set Eager API
Console.WriteLine("Setting Eager mode...");
tf.enable_eager_execution();

// Define constant tensors
Console.WriteLine("Define constant tensors");
a = tf.constant(2);
Console.WriteLine($"a = {a}");
b = tf.constant(3);
Console.WriteLine($"b = {b}");

// Run the operation without the need for tf.Session
Console.WriteLine("Running operations, without tf.Session");
c = a + b;
Console.WriteLine($"a + b = {c}");
d = a * b;
Console.WriteLine($"a * b = {d}");

// Full compatibility with Numpy

return true;
}

public void PrepareData()
{
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+0 -185  test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs

@@ -1,185 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Diagnostics;
using Tensorflow;
using Tensorflow.Hub;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Implement K-Means algorithm with TensorFlow.NET, and apply it to classify
/// handwritten digit images.
/// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/kmeans.py
/// </summary>
public class KMeansClustering : IExample
{
public bool Enabled { get; set; } = false;
public string Name => "K-means Clustering";
public bool IsImportingGraph { get; set; } = true;

public int? train_size = null;
public int validation_size = 5000;
public int? test_size = null;
public int batch_size = 1024; // The number of samples per batch

Datasets<MnistDataSet> mnist;
NDArray full_data_x;
int num_steps = 20; // Total steps to train
int k = 25; // The number of clusters
int num_classes = 10; // The 10 digits
int num_features = 784; // Each image is 28x28 pixels

float accuray_test = 0f;

public bool Run()
{
PrepareData();
var graph = ImportGraph();
using (var sess = tf.Session(graph))
{
Train(sess);
}

return accuray_test > 0.70;
}

public void PrepareData()
{
var loader = new MnistModelLoader();

var setting = new ModelLoadSetting
{
TrainDir = ".resources/mnist",
OneHot = true,
TrainSize = train_size,
ValidationSize = validation_size,
TestSize = test_size,
ShowProgressInConsole = true
};

mnist = loader.LoadAsync(setting).Result;

full_data_x = mnist.Train.Data;

// download graph meta data
string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/kmeans.meta";
loader.DownloadAsync(url, ".resources/graph", "kmeans.meta").Wait();
}

public Graph ImportGraph()
{
var graph = tf.Graph().as_default();

tf.train.import_meta_graph(".resources/graph/kmeans.meta");

return graph;
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
var graph = tf.Graph();

// Input images
Tensor X = graph.get_operation_by_name("Placeholder"); // tf.placeholder(tf.float32, shape: new TensorShape(-1, num_features));
// Labels (for assigning a label to a centroid and testing)
Tensor Y = graph.get_operation_by_name("Placeholder_1"); // tf.placeholder(tf.float32, shape: new TensorShape(-1, num_classes));

// K-Means Parameters
//var kmeans = new KMeans(X, k, distance_metric: KMeans.COSINE_DISTANCE, use_mini_batch: true);

// Build KMeans graph
//var training_graph = kmeans.training_graph();

var init_vars = tf.global_variables_initializer();
Tensor init_op = graph.get_operation_by_name("cond/Merge");
var train_op = graph.get_operation_by_name("group_deps");
Tensor avg_distance = graph.get_operation_by_name("Mean");
Tensor cluster_idx = graph.get_operation_by_name("Squeeze_1");
NDArray result = null;

sess.run(init_vars, new FeedItem(X, full_data_x));
sess.run(init_op, new FeedItem(X, full_data_x));

// Training
var sw = new Stopwatch();

foreach (var i in range(1, num_steps + 1))
{
sw.Restart();
result = sess.run(new ITensorOrOperation[] { train_op, avg_distance, cluster_idx }, new FeedItem(X, full_data_x));
sw.Stop();

if (i % 4 == 0 || i == 1)
print($"Step {i}, Avg Distance: {result[1]} Elapse: {sw.ElapsedMilliseconds}ms");
}

var idx = result[2].Data<int>();

// Assign a label to each centroid
// Count total number of labels per centroid, using the label of each training
// sample to their closest centroid (given by 'idx')
var counts = np.zeros((k, num_classes), np.float32);

sw.Start();
foreach (var i in range(idx.Count))
{
var x = mnist.Train.Labels[i];
counts[idx[i]] += x;
}

sw.Stop();
print($"Assign a label to each centroid took {sw.ElapsedMilliseconds}ms");

// Assign the most frequent label to the centroid
var labels_map_array = np.argmax(counts, 1);
var labels_map = tf.convert_to_tensor(labels_map_array);

// Evaluation ops
// Lookup: centroid_id -> label
var cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx);

// Compute accuracy
var correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32));
var cast = tf.cast(correct_prediction, tf.float32);
var accuracy_op = tf.reduce_mean(cast);

// Test Model
var (test_x, test_y) = (mnist.Test.Data, mnist.Test.Labels);
result = sess.run(accuracy_op, new FeedItem(X, test_x), new FeedItem(Y, test_y));
accuray_test = result;
print($"Test Accuracy: {accuray_test}");
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}
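The centroid-labeling step above, restated with made-up toy values: count each training label per assigned centroid, then give the centroid its most frequent class.

using System;

static class CentroidLabelDemo
{
    static void Main()
    {
        int k = 2, numClasses = 3;
        int[] idx = { 0, 0, 1, 1, 1 };    // closest centroid per sample (from the k-means graph)
        int[] labels = { 2, 2, 1, 1, 0 }; // true class per sample
        var counts = new int[k, numClasses];
        for (int i = 0; i < idx.Length; i++)
            counts[idx[i], labels[i]]++;  // same accumulation as counts[idx[i]] += x above
        for (int c = 0; c < k; c++)
        {
            int best = 0;                 // argmax over classes, as np.argmax(counts, 1) does
            for (int j = 1; j < numClasses; j++)
                if (counts[c, j] > counts[c, best]) best = j;
            Console.WriteLine($"centroid {c} -> class {best}");
        }
    }
}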

+0 -145  test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs

@@ -1,145 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// A linear regression learning algorithm example using TensorFlow library.
/// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py
/// </summary>
public class LinearRegression : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Linear Regression";
public bool IsImportingGraph { get; set; } = false;

public int training_epochs = 1000;

// Parameters
float learning_rate = 0.01f;
int display_step = 50;

NumPyRandom rng = np.random;
NDArray train_X, train_Y;
int n_samples;

public bool Run()
{
// Training Data
PrepareData();

// tf Graph Input
var X = tf.placeholder(tf.float32);
var Y = tf.placeholder(tf.float32);

// Set model weights
// We can set a fixed init value in order to debug
// var rnd1 = rng.randn<float>();
// var rnd2 = rng.randn<float>();
var W = tf.Variable(-0.06f, name: "weight");
var b = tf.Variable(-0.73f, name: "bias");

// Construct a linear model
var pred = tf.add(tf.multiply(X, W), b);

// Mean squared error
var cost = tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * n_samples);

// Gradient descent
// Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);

// Initialize the variables (i.e. assign their default value)
var init = tf.global_variables_initializer();

// Start training
using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);

// Fit all training data
for (int epoch = 0; epoch < training_epochs; epoch++)
{
foreach (var (x, y) in zip<float>(train_X, train_Y))
sess.run(optimizer, (X, x), (Y, y));

// Display logs per epoch step
if ((epoch + 1) % display_step == 0)
{
var c = sess.run(cost, (X, train_X), (Y, train_Y));
Console.WriteLine($"Epoch: {epoch + 1} cost={c} " + $"W={sess.run(W)} b={sess.run(b)}");
}
}

Console.WriteLine("Optimization Finished!");
var training_cost = sess.run(cost, (X, train_X), (Y, train_Y));
Console.WriteLine($"Training cost={training_cost} W={sess.run(W)} b={sess.run(b)}");

// Testing example
var test_X = np.array(6.83f, 4.668f, 8.9f, 7.91f, 5.7f, 8.7f, 3.1f, 2.1f);
var test_Y = np.array(1.84f, 2.273f, 3.2f, 2.831f, 2.92f, 3.24f, 1.35f, 1.03f);
Console.WriteLine("Testing... (Mean square loss Comparison)");
var testing_cost = sess.run(tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * test_X.shape[0]),
(X, test_X), (Y, test_Y));
Console.WriteLine($"Testing cost={testing_cost}");
var diff = Math.Abs((float)training_cost - (float)testing_cost);
Console.WriteLine($"Absolute mean square loss difference: {diff}");

return diff < 0.01;
}
}

public void PrepareData()
{
train_X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
train_Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
n_samples = train_X.shape[0];
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}
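For clarity, a by-hand equivalent of one step minimize() takes for the model above (pred = W*x + b, cost = sum((pred - y)^2) / (2n)); the three sample points are the first training points from PrepareData(), the rest is a sketch:

using System;

static class GradStepDemo
{
    static void Main()
    {
        float[] xs = { 3.3f, 4.4f, 5.5f }, ys = { 1.7f, 2.76f, 2.09f };
        float W = -0.06f, b = -0.73f, lr = 0.01f;
        int n = xs.Length;
        float dW = 0f, db = 0f;
        for (int i = 0; i < n; i++)
        {
            float err = (W * xs[i] + b) - ys[i];
            dW += err * xs[i] / n; // d(cost)/dW
            db += err / n;         // d(cost)/db
        }
        W -= lr * dW;              // gradient-descent update of the trainable variables
        b -= lr * db;
        Console.WriteLine($"W={W} b={b}");
    }
}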

+0 -190  test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs

@@ -1,190 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Diagnostics;
using System.IO;
using Tensorflow;
using Tensorflow.Hub;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// A logistic regression learning algorithm example using TensorFlow library.
/// This example is using the MNIST database of handwritten digits
/// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/logistic_regression.py
/// </summary>
public class LogisticRegression : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Logistic Regression";
public bool IsImportingGraph { get; set; } = false;


public int training_epochs = 10;
public int? train_size = null;
public int validation_size = 5000;
public int? test_size = null;
public int batch_size = 100;

private float learning_rate = 0.01f;
private int display_step = 1;

Datasets<MnistDataSet> mnist;

public bool Run()
{
PrepareData();

// tf Graph Input
var x = tf.placeholder(tf.float32, new TensorShape(-1, 784)); // mnist data image of shape 28*28=784
var y = tf.placeholder(tf.float32, new TensorShape(-1, 10)); // 0-9 digits recognition => 10 classes

// Set model weights
var W = tf.Variable(tf.zeros(new Shape(784, 10)));
var b = tf.Variable(tf.zeros(new Shape(10)));

// Construct model
var pred = tf.nn.softmax(tf.matmul(x, W) + b); // Softmax

// Minimize error using cross entropy
var cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));

// Gradient Descent
var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);

// Initialize the variables (i.e. assign their default value)
var init = tf.global_variables_initializer();

var sw = new Stopwatch();

using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);

// Training cycle
foreach (var epoch in range(training_epochs))
{
sw.Start();

var avg_cost = 0.0f;
var total_batch = mnist.Train.NumOfExamples / batch_size;
// Loop over all batches
foreach (var i in range(total_batch))
{
var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(batch_size);
// Run optimization op (backprop) and cost op (to get loss value)
(_, float c) = sess.run((optimizer, cost),
(x, batch_xs),
(y, batch_ys));

// Compute average loss
avg_cost += c / total_batch;
}

sw.Stop();

// Display logs per epoch step
if ((epoch + 1) % display_step == 0)
print($"Epoch: {(epoch + 1):D4} Cost: {avg_cost:G9} Elapse: {sw.ElapsedMilliseconds}ms");

sw.Reset();
}

print("Optimization Finished!");
// SaveModel(sess);

// Test model
var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1));
// Calculate accuracy
var accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32));
float acc = accuracy.eval(sess, (x, mnist.Test.Data), (y, mnist.Test.Labels));
print($"Accuracy: {acc:F4}");

return acc > 0.9;
}
}

public void PrepareData()
{
mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: train_size, validationSize: validation_size, testSize: test_size, showProgressInConsole: true).Result;
}

public void SaveModel(Session sess)
{
var saver = tf.train.Saver();
var save_path = saver.save(sess, ".resources/logistic_regression/model.ckpt");
tf.train.write_graph(sess.graph, ".resources/logistic_regression", "model.pbtxt", as_text: true);

FreezeGraph.freeze_graph(input_graph: ".resources/logistic_regression/model.pbtxt",
input_saver: "",
input_binary: false,
input_checkpoint: ".resources/logistic_regression/model.ckpt",
output_node_names: "Softmax",
restore_op_name: "save/restore_all",
filename_tensor_name: "save/Const:0",
output_graph: ".resources/logistic_regression/model.pb",
clear_devices: true,
initializer_nodes: "");
}

public void Predict(Session sess)
{
var graph = new Graph().as_default();
graph.Import(Path.Join(".resources/logistic_regression", "model.pb"));

// restoring the model
// var saver = tf.train.import_meta_graph("logistic_regression/tensorflowModel.ckpt.meta");
// saver.restore(sess, tf.train.latest_checkpoint('logistic_regression'));
var pred = graph.OperationByName("Softmax");
var output = pred.outputs[0];
var x = graph.OperationByName("Placeholder");
var input = x.outputs[0];

// predict
var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(10);
var results = sess.run(output, new FeedItem(input, batch_xs[np.arange(1)]));

if (results[0].argmax() == (batch_ys[0] as NDArray).argmax())
print("predicted OK!");
else
throw new ValueError("predict error, should be 90% accuracy");
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}
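The softmax and cross-entropy pair used above, computed by hand for one hypothetical sample (the logits stand in for xW + b, the one-hot vector for y):

using System;
using System.Linq;

static class SoftmaxDemo
{
    static void Main()
    {
        double[] logits = { 2.0, 1.0, 0.1 }; // hypothetical xW + b for one sample
        double[] y = { 1, 0, 0 };            // one-hot label
        double m = logits.Max();             // subtract the max for numerical stability
        double z = logits.Sum(l => Math.Exp(l - m));
        double[] pred = logits.Select(l => Math.Exp(l - m) / z).ToArray();
        // cost = -sum(y * log(pred)), the cross entropy minimized above
        double loss = -pred.Zip(y, (p, t) => t * Math.Log(p)).Sum();
        Console.WriteLine($"pred=[{string.Join(", ", pred.Select(p => p.ToString("F3")))}] loss={loss:F3}");
    }
}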

+0 -221  test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs

@@ -1,221 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Collections.Generic;
using Tensorflow;
using NumSharp;
using static Tensorflow.Binding;
using System.IO;
using TensorFlowNET.Examples.Utility;

namespace TensorFlowNET.Examples
{
/// <summary>
/// https://github.com/nicolov/naive_bayes_tensorflow
/// </summary>
public class NaiveBayesClassifier : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Naive Bayes Classifier";
public bool IsImportingGraph { get; set; } = false;

public NDArray X, y;
public Normal dist { get; set; }
public bool Run()
{
PrepareData();

fit(X, y);

// Create a regular grid and classify each point
float x_min = X.amin(0).Data<float>()[0] - 0.5f;
float y_min = X.amin(0).Data<float>()[1] - 0.5f;
float x_max = X.amax(0).Data<float>()[0] + 0.5f;
float y_max = X.amax(0).Data<float>()[1] + 0.5f;

var (xx, yy) = np.meshgrid(np.linspace(x_min, x_max, 30), np.linspace(y_min, y_max, 30));
using (var sess = tf.Session())
{
//var samples = np.vstack<float>(xx.ravel(), yy.ravel());
//samples = np.transpose(samples);
var array = np.Load<double[,]>(Path.Join("nb", "nb_example.npy"));
var samples = np.array(array).astype(np.float32);
var Z = sess.run(predict(samples));
}

return true;
}

public void fit(NDArray X, NDArray y)
{
var unique_y = np.unique(y);

var dic = new Dictionary<int, List<List<float>>>();
// Init uy in dic
foreach (int uy in unique_y.Data<int>())
{
dic.Add(uy, new List<List<float>>());
}
// Separate training points by class
// Shape : nb_classes * nb_samples * nb_features
int maxCount = 0;
for (int i = 0; i < y.size; i++)
{
var curClass = y[i];
var l = dic[curClass];
var pair = new List<float>();
pair.Add(X[i,0]);
pair.Add(X[i, 1]);
l.Add(pair);
if (l.Count > maxCount)
{
maxCount = l.Count;
}
dic[curClass] = l;
}
float[,,] points = new float[dic.Count, maxCount, X.shape[1]];
foreach (KeyValuePair<int, List<List<float>>> kv in dic)
{
int j = (int) kv.Key;
for (int i = 0; i < maxCount; i++)
{
for (int k = 0; k < X.shape[1]; k++)
{
points[j, i, k] = kv.Value[i][k];
}
}

}
var points_by_class = np.array(points);
// estimate mean and variance for each class / feature
// shape : nb_classes * nb_features
var cons = tf.constant(points_by_class);
var tup = tf.nn.moments(cons, new int[]{1});
var mean = tup.Item1;
var variance = tup.Item2;

// Create a 3x2 univariate normal distribution with the
// Known mean and variance
var dist = tf.distributions.Normal(mean, tf.sqrt(variance));
this.dist = dist;
}

public Tensor predict(NDArray X)
{
if (dist == null)
{
throw new ArgumentNullException("cant not find the model (normal distribution)!");
}
int nb_classes = (int) dist.scale().shape[0];
int nb_features = (int)dist.scale().shape[1];

// Conditional probabilities log P(x|c) with shape
// (nb_samples, nb_classes)
var t1= ops.convert_to_tensor(X, TF_DataType.TF_FLOAT);
var t2 = ops.convert_to_tensor(new int[] { 1, nb_classes });
Tensor tile = tf.tile(t1, t2);
var t3 = ops.convert_to_tensor(new int[] { -1, nb_classes, nb_features });
Tensor r = tf.reshape(tile, t3);
var cond_probs = tf.reduce_sum(dist.log_prob(r), 2);
// uniform priors
float[] tem = new float[nb_classes];
for (int i = 0; i < tem.Length; i++)
{
tem[i] = 1.0f / nb_classes;
}
var priors = np.log(np.array<float>(tem));

// posterior log probability, log P(c) + log P(x|c)
var joint_likelihood = tf.add(ops.convert_to_tensor(priors, TF_DataType.TF_FLOAT), cond_probs);
// normalize to get (log)-probabilities

var norm_factor = tf.reduce_logsumexp(joint_likelihood, new int[] { 1 }, keepdims: true);
var log_prob = joint_likelihood - norm_factor;
// exp to get the actual probabilities
return tf.exp(log_prob);
}

public void PrepareData()
{
#region Training data
X = np.array(new float[,] {
{5.1f, 3.5f}, {4.9f, 3.0f}, {4.7f, 3.2f}, {4.6f, 3.1f}, {5.0f, 3.6f}, {5.4f, 3.9f},
{4.6f, 3.4f}, {5.0f, 3.4f}, {4.4f, 2.9f}, {4.9f, 3.1f}, {5.4f, 3.7f}, {4.8f, 3.4f},
{4.8f, 3.0f}, {4.3f, 3.0f}, {5.8f, 4.0f}, {5.7f, 4.4f}, {5.4f, 3.9f}, {5.1f, 3.5f},
{5.7f, 3.8f}, {5.1f, 3.8f}, {5.4f, 3.4f}, {5.1f, 3.7f}, {5.1f, 3.3f}, {4.8f, 3.4f},
{5.0f, 3.0f}, {5.0f, 3.4f}, {5.2f, 3.5f}, {5.2f, 3.4f}, {4.7f, 3.2f}, {4.8f, 3.1f},
{5.4f, 3.4f}, {5.2f, 4.1f}, {5.5f, 4.2f}, {4.9f, 3.1f}, {5.0f, 3.2f}, {5.5f, 3.5f},
{4.9f, 3.6f}, {4.4f, 3.0f}, {5.1f, 3.4f}, {5.0f, 3.5f}, {4.5f, 2.3f}, {4.4f, 3.2f},
{5.0f, 3.5f}, {5.1f, 3.8f}, {4.8f, 3.0f}, {5.1f, 3.8f}, {4.6f, 3.2f}, {5.3f, 3.7f},
{5.0f, 3.3f}, {7.0f, 3.2f}, {6.4f, 3.2f}, {6.9f, 3.1f}, {5.5f, 2.3f}, {6.5f, 2.8f},
{5.7f, 2.8f}, {6.3f, 3.3f}, {4.9f, 2.4f}, {6.6f, 2.9f}, {5.2f, 2.7f}, {5.0f, 2.0f},
{5.9f, 3.0f}, {6.0f, 2.2f}, {6.1f, 2.9f}, {5.6f, 2.9f}, {6.7f, 3.1f}, {5.6f, 3.0f},
{5.8f, 2.7f}, {6.2f, 2.2f}, {5.6f, 2.5f}, {5.9f, 3.0f}, {6.1f, 2.8f}, {6.3f, 2.5f},
{6.1f, 2.8f}, {6.4f, 2.9f}, {6.6f, 3.0f}, {6.8f, 2.8f}, {6.7f, 3.0f}, {6.0f, 2.9f},
{5.7f, 2.6f}, {5.5f, 2.4f}, {5.5f, 2.4f}, {5.8f, 2.7f}, {6.0f, 2.7f}, {5.4f, 3.0f},
{6.0f, 3.4f}, {6.7f, 3.1f}, {6.3f, 2.3f}, {5.6f, 3.0f}, {5.5f, 2.5f}, {5.5f, 2.6f},
{6.1f, 3.0f}, {5.8f, 2.6f}, {5.0f, 2.3f}, {5.6f, 2.7f}, {5.7f, 3.0f}, {5.7f, 2.9f},
{6.2f, 2.9f}, {5.1f, 2.5f}, {5.7f, 2.8f}, {6.3f, 3.3f}, {5.8f, 2.7f}, {7.1f, 3.0f},
{6.3f, 2.9f}, {6.5f, 3.0f}, {7.6f, 3.0f}, {4.9f, 2.5f}, {7.3f, 2.9f}, {6.7f, 2.5f},
{7.2f, 3.6f}, {6.5f, 3.2f}, {6.4f, 2.7f}, {6.8f, 3.0f}, {5.7f, 2.5f}, {5.8f, 2.8f},
{6.4f, 3.2f}, {6.5f, 3.0f}, {7.7f, 3.8f}, {7.7f, 2.6f}, {6.0f, 2.2f}, {6.9f, 3.2f},
{5.6f, 2.8f}, {7.7f, 2.8f}, {6.3f, 2.7f}, {6.7f, 3.3f}, {7.2f, 3.2f}, {6.2f, 2.8f},
{6.1f, 3.0f}, {6.4f, 2.8f}, {7.2f, 3.0f}, {7.4f, 2.8f}, {7.9f, 3.8f}, {6.4f, 2.8f},
{6.3f, 2.8f}, {6.1f, 2.6f}, {7.7f, 3.0f}, {6.3f, 3.4f}, {6.4f, 3.1f}, {6.0f, 3.0f},
{6.9f, 3.1f}, {6.7f, 3.1f}, {6.9f, 3.1f}, {5.8f, 2.7f}, {6.8f, 3.2f}, {6.7f, 3.3f},
{6.7f, 3.0f}, {6.3f, 2.5f}, {6.5f, 3.0f}, {6.2f, 3.4f}, {5.9f, 3.0f}, {5.8f, 3.0f}});

y = np.array(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);


string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/data/nb_example.npy";
Web.Download(url, "nb", "nb_example.npy");
#endregion
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}
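A scalar sketch of the normalization predict() applies with reduce_logsumexp: subtract log(sum(exp(joint))) so the exponentiated posteriors sum to 1. The joint log-likelihood values below are hypothetical:

using System;
using System.Linq;

static class LogSumExpDemo
{
    // Stable log-sum-exp, the per-sample normalization used above.
    static double LogSumExp(double[] v)
    {
        double m = v.Max();
        return m + Math.Log(v.Sum(x => Math.Exp(x - m)));
    }

    static void Main()
    {
        // Hypothetical per-class joint log-likelihoods: log P(c) + log P(x|c).
        double[] joint = { -2.1, -0.3, -4.0 };
        double norm = LogSumExp(joint);
        // Posterior probabilities sum to 1 after exponentiation.
        double[] posterior = joint.Select(j => Math.Exp(j - norm)).ToArray();
        Console.WriteLine(string.Join(", ", posterior.Select(p => p.ToString("F3"))));
    }
}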

+0 -118  test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs

@@ -1,118 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using Tensorflow;
using Tensorflow.Hub;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// A nearest neighbor learning algorithm example
/// This example is using the MNIST database of handwritten digits
/// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/nearest_neighbor.py
/// </summary>
public class NearestNeighbor : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Nearest Neighbor";
Datasets<MnistDataSet> mnist;
NDArray Xtr, Ytr, Xte, Yte;
public int? TrainSize = null;
public int ValidationSize = 5000;
public int? TestSize = null;
public bool IsImportingGraph { get; set; } = false;


public bool Run()
{
// tf Graph Input
var xtr = tf.placeholder(tf.float32, new TensorShape(-1, 784));
var xte = tf.placeholder(tf.float32, new TensorShape(784));

// Nearest Neighbor calculation using L1 Distance
// Calculate L1 Distance
var distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices: 1);
// Prediction: Get min distance index (Nearest neighbor)
var pred = tf.arg_min(distance, 0);

float accuracy = 0f;
// Initialize the variables (i.e. assign their default value)
var init = tf.global_variables_initializer();
using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);

PrepareData();

foreach(int i in range(Xte.shape[0]))
{
// Get nearest neighbor
long nn_index = sess.run(pred, (xtr, Xtr), (xte, Xte[i]));
// Get nearest neighbor class label and compare it to its true label
int index = (int)nn_index;

if (i % 10 == 0 || i == 0)
print($"Test {i} Prediction: {np.argmax(Ytr[index])} True Class: {np.argmax(Yte[i])}");

// Calculate accuracy
if (np.argmax(Ytr[index]) == np.argmax(Yte[i]))
accuracy += 1f/ Xte.shape[0];
}

print($"Accuracy: {accuracy}");
}

return accuracy > 0.8;
}

public void PrepareData()
{
mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: TrainSize, validationSize: ValidationSize, testSize: TestSize, showProgressInConsole: true).Result;
// In this example, we limit mnist data
(Xtr, Ytr) = mnist.Train.GetNextBatch(TrainSize == null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates)
(Xte, Yte) = mnist.Test.GetNextBatch(TestSize == null ? 200 : TestSize.Value / 100); // 200 for testing
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}
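The L1-distance argmin the graph above evaluates per test image, restated over a toy two-point training set:

using System;

static class L1NearestDemo
{
    // Same computation as reduce_sum(abs(xtr - xte)) followed by arg_min.
    static int Nearest(float[][] train, float[] test)
    {
        int best = 0;
        float bestDist = float.MaxValue;
        for (int i = 0; i < train.Length; i++)
        {
            float d = 0f;
            for (int j = 0; j < test.Length; j++)
                d += Math.Abs(train[i][j] - test[j]); // L1 distance
            if (d < bestDist) { bestDist = d; best = i; }
        }
        return best;
    }

    static void Main()
    {
        var train = new[] { new[] { 0f, 0f }, new[] { 5f, 5f } };
        Console.WriteLine(Nearest(train, new[] { 4f, 6f })); // 1: (5,5) is closer
    }
}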

+0 -187  test/TensorFlowNET.Examples/BasicOperations.cs

@@ -1,187 +0,0 @@
using NumSharp;
using System;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Basic Operations example using TensorFlow library.
/// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/basic_operations.py
/// </summary>
public class BasicOperations : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Basic Operations";
public bool IsImportingGraph { get; set; } = false;

private Session sess;

public bool Run()
{
// Basic constant operations
// The value returned by the constructor represents the output
// of the Constant op.
var a = tf.constant(2);
var b = tf.constant(3);
// Launch the default graph.
using (sess = tf.Session())
{
Console.WriteLine("a=2, b=3");
Console.WriteLine($"Addition with constants: {sess.run(a + b)}");
Console.WriteLine($"Multiplication with constants: {sess.run(a * b)}");
}

// Basic Operations with variable as graph input
// The value returned by the constructor represents the output
// of the Variable op. (define as input when running session)
// tf Graph input
a = tf.placeholder(tf.int16);
b = tf.placeholder(tf.int16);

// Define some operations
var add = tf.add(a, b);
var mul = tf.multiply(a, b);

// Launch the default graph.
using(sess = tf.Session())
{
var feed_dict = new FeedItem[]
{
new FeedItem(a, (short)2),
new FeedItem(b, (short)3)
};
// Run every operation with variable input
Console.WriteLine($"Addition with variables: {sess.run(add, feed_dict)}");
Console.WriteLine($"Multiplication with variables: {sess.run(mul, feed_dict)}");
}

// ----------------
// More in details:
// Matrix Multiplication from TensorFlow official tutorial

// Create a Constant op that produces a 1x2 matrix. The op is
// added as a node to the default graph.
//
// The value returned by the constructor represents the output
// of the Constant op.
var nd1 = np.array(3, 3).reshape(1, 2);
var matrix1 = tf.constant(nd1);

// Create another Constant that produces a 2x1 matrix.
var nd2 = np.array(2, 2).reshape(2, 1);
var matrix2 = tf.constant(nd2);

// Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
// The returned value, 'product', represents the result of the matrix
// multiplication.
var product = tf.matmul(matrix1, matrix2);

// To run the matmul op we call the session 'run()' method, passing 'product'
// which represents the output of the matmul op. This indicates to the call
// that we want to get the output of the matmul op back.
//
// All inputs needed by the op are run automatically by the session. They
// typically are run in parallel.
//
// The call 'run(product)' thus causes the execution of three ops in the
// graph: the two constants and matmul.
//
// The output of the op is returned in 'result' as a numpy `ndarray` object.
using (sess = tf.Session())
{
var result = sess.run(product);
Console.WriteLine(result.ToString()); // ==> [[ 12.]]
};

// `BatchMatMul` is actually embedded into the `MatMul` operation on the tf.dll side. Every time we ask
// for a multiplication between matrices with rank > 2, the first rank - 2 dimensions are checked to be consistent
// across the two matrices and a common matrix multiplication is done on the residual 2 dimensions.
//
// np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9]).reshape(3, 3, 3)
// array([[[1, 2, 3],
// [4, 5, 6],
// [7, 8, 9]],
//
// [[1, 2, 3],
// [4, 5, 6],
// [7, 8, 9]],
//
// [[1, 2, 3],
// [4, 5, 6],
// [7, 8, 9]]])
var firstTensor = tf.convert_to_tensor(
np.reshape(
np.array<float>(1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9),
3, 3, 3));
//
// np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0]).reshape(3,3,2)
// array([[[0, 1],
// [0, 1],
// [0, 1]],
//
// [[0, 1],
// [0, 0],
// [1, 0]],
//
// [[1, 0],
// [1, 0],
// [1, 0]]])
var secondTensor = tf.convert_to_tensor(
np.reshape(
np.array<float>(0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0),
3, 3, 2));
var batchMul = tf.batch_matmul(firstTensor, secondTensor);
var checkTensor = np.array<float>(0, 6, 0, 15, 0, 24, 3, 1, 6, 4, 9, 7, 6, 0, 15, 0, 24, 0);
using (var sess = tf.Session())
{
var result = sess.run(batchMul);
Console.WriteLine(result.ToString());
//
// ==> array([[[0, 6],
// [0, 15],
// [0, 24]],
//
// [[ 3, 1],
// [ 6, 4],
// [ 9, 7]],
//
// [[ 6, 0],
// [15, 0],
// [24, 0]]])
return np.reshape(result, 18)
.array_equal(checkTensor);
}
}

public void PrepareData()
{
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+0 -66  test/TensorFlowNET.Examples/HelloWorld.cs

@@ -1,66 +0,0 @@
using System;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Simple hello world using TensorFlow
/// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/helloworld.py
/// </summary>
public class HelloWorld : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Hello World";
public bool IsImportingGraph { get; set; } = false;

public bool Run()
{
/* Create a Constant op
The op is added as a node to the default graph.
The value returned by the constructor represents the output
of the Constant op. */
var str = "Hello, TensorFlow.NET!";
var hello = tf.constant(str);

// Start tf session
using (var sess = tf.Session())
{
// Run the op
var result = sess.run(hello);
Console.WriteLine(result.ToString());
return result.ToString().Equals(str);
}
}

public void PrepareData()
{
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 59
test/TensorFlowNET.Examples/IExample.cs

@@ -1,59 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using Tensorflow;
namespace TensorFlowNET.Examples
{
/// <summary>
/// Interface for the example projects.
/// Every example should implement IExample so the entry program can discover and run it
/// (a minimal runner sketch follows this file).
/// </summary>
public interface IExample
{
/// <summary>
/// True to run example
/// </summary>
bool Enabled { get; set; }

/// <summary>
/// Set true to import the computation graph instead of building it.
/// </summary>
bool IsImportingGraph { get; set; }

string Name { get; }

bool Run();

/// <summary>
/// Build the dataflow graph and train the model.
/// </summary>
void Train(Session sess);
void Test(Session sess);

void Predict(Session sess);

Graph ImportGraph();

Graph BuildGraph();

/// <summary>
/// Prepare dataset
/// </summary>
void PrepareData();
}
}
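For context, here is a minimal sketch (not part of the repository) of how an entry program might discover and run every enabled IExample via standard .NET reflection:

using System;
using System.Linq;
using System.Reflection;

namespace TensorFlowNET.Examples
{
    public static class ExampleRunner
    {
        public static void RunAll()
        {
            // Find every concrete, enabled IExample implementation in this assembly.
            var examples = Assembly.GetExecutingAssembly().GetTypes()
                .Where(t => typeof(IExample).IsAssignableFrom(t) && t.IsClass && !t.IsAbstract)
                .Select(t => (IExample)Activator.CreateInstance(t))
                .Where(e => e.Enabled);

            foreach (var example in examples)
                Console.WriteLine($"{example.Name}: {(example.Run() ? "passed" : "failed")}");
        }
    }
}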

+ 0
- 73
test/TensorFlowNET.Examples/ImageProcessing/CIFAR10-CNN.cs

@@ -1,73 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Collections.Generic;
using System.Text;
using Tensorflow;
using TensorFlowDatasets;

namespace TensorFlowNET.Examples
{
/// <summary>
/// https://www.tensorflow.org/tutorials/images/deep_cnn
/// </summary>
public class CIFAR10_CNN : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = false;

public string Name => "CIFAR-10 CNN";

public bool Run()
{
PrepareData();

return true;
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void PrepareData()
{
var tfds = new DatasetBuilder();
tfds.download_and_prepare();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 338
test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs

@@ -1,338 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Diagnostics;
using Tensorflow;
using Tensorflow.Hub;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Convolutional neural network classifier for handwritten digits.
/// CNN architecture with two convolutional layers, followed by two fully-connected layers at the end.
/// Use Stochastic Gradient Descent (SGD) optimizer.
/// http://www.easy-tf.com/tf-tutorials/convolutional-neural-nets-cnns/cnn1
/// </summary>
public class DigitRecognitionCNN : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = false;

public string Name => "MNIST CNN";

string logs_path = "logs";

const int img_h = 28, img_w = 28; // MNIST images are 28x28
int n_classes = 10; // Number of classes, one class per digit
int n_channels = 1;

// Hyper-parameters
int epochs = 5; // accuracy > 98%
int batch_size = 100;
float learning_rate = 0.001f;
Datasets<MnistDataSet> mnist;

// Network configuration
// 1st Convolutional Layer
int filter_size1 = 5; // Convolution filters are 5 x 5 pixels.
int num_filters1 = 16; // There are 16 of these filters.
int stride1 = 1; // The stride of the sliding window

// 2nd Convolutional Layer
int filter_size2 = 5; // Convolution filters are 5 x 5 pixels.
int num_filters2 = 32; // There are 32 of these filters.
int stride2 = 1; // The stride of the sliding window

// Fully-connected layer.
int h1 = 128; // Number of neurons in fully-connected layer.

Tensor x, y;
Tensor loss, accuracy, cls_prediction;
Operation optimizer;

int display_freq = 100;
float accuracy_test = 0f;
float loss_test = 1f;

NDArray x_train, y_train;
NDArray x_valid, y_valid;
NDArray x_test, y_test;

public bool Run()
{
PrepareData();
BuildGraph();

using (var sess = tf.Session())
{
Train(sess);
Test(sess);
}

return loss_test < 0.05 && accuracy_test > 0.98;
}

public Graph BuildGraph()
{
var graph = new Graph().as_default();

tf_with(tf.name_scope("Input"), delegate
{
// Placeholders for inputs (x) and outputs(y)
x = tf.placeholder(tf.float32, shape: (-1, img_h, img_w, n_channels), name: "X");
y = tf.placeholder(tf.float32, shape: (-1, n_classes), name: "Y");
});

var conv1 = conv_layer(x, filter_size1, num_filters1, stride1, name: "conv1");
var pool1 = max_pool(conv1, ksize: 2, stride: 2, name: "pool1");
var conv2 = conv_layer(pool1, filter_size2, num_filters2, stride2, name: "conv2");
var pool2 = max_pool(conv2, ksize: 2, stride: 2, name: "pool2");
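// With SAME padding, the two stride-2 poolings reduce 28x28 to 14x14 and then 7x7,
// so pool2 has shape (-1, 7, 7, 32) and flattens to 7 * 7 * 32 = 1568 features.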
var layer_flat = flatten_layer(pool2);
var fc1 = fc_layer(layer_flat, h1, "FC1", use_relu: true);
var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false);

tf_with(tf.variable_scope("Train"), delegate
{
tf_with(tf.variable_scope("Loss"), delegate
{
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits), name: "loss");
});

tf_with(tf.variable_scope("Optimizer"), delegate
{
optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss);
});

tf_with(tf.variable_scope("Accuracy"), delegate
{
var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred");
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy");
});

tf_with(tf.variable_scope("Prediction"), delegate
{
cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions");
});
});

return graph;
}

public void Train(Session sess)
{
// Number of training iterations in each epoch
var num_tr_iter = y_train.shape[0] / batch_size;

var init = tf.global_variables_initializer();
sess.run(init);

float loss_val = 100.0f;
float accuracy_val = 0f;

var sw = new Stopwatch();
sw.Start();
foreach (var epoch in range(epochs))
{
print($"Training epoch: {epoch + 1}");
// Randomly shuffle the training data at the beginning of each epoch
(x_train, y_train) = mnist.Randomize(x_train, y_train);

foreach (var iteration in range(num_tr_iter))
{
var start = iteration * batch_size;
var end = (iteration + 1) * batch_size;
var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end);

// Run optimization op (backprop)
sess.run(optimizer, (x, x_batch), (y, y_batch));

if (iteration % display_freq == 0)
{
// Calculate and display the batch loss and accuracy
var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
loss_val = result[0];
accuracy_val = result[1];
print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")} {sw.ElapsedMilliseconds}ms");
sw.Restart();
}
}

// Run validation after every epoch
(loss_val, accuracy_val) = sess.run((loss, accuracy), (x, x_valid), (y, y_valid));
print("---------------------------------------------------------");
print($"Epoch: {epoch + 1}, validation loss: {loss_val.ToString("0.0000")}, validation accuracy: {accuracy_val.ToString("P")}");
print("---------------------------------------------------------");
}
}

public void Test(Session sess)
{
(loss_test, accuracy_test) = sess.run((loss, accuracy), (x, x_test), (y, y_test));
print("---------------------------------------------------------");
print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}");
print("---------------------------------------------------------");
}

/// <summary>
/// Create a 2D convolution layer
/// </summary>
/// <param name="x">input from previous layer</param>
/// <param name="filter_size">size of each filter</param>
/// <param name="num_filters">number of filters(or output feature maps)</param>
/// <param name="stride">filter stride</param>
/// <param name="name">layer name</param>
/// <returns>The output array</returns>
private Tensor conv_layer(Tensor x, int filter_size, int num_filters, int stride, string name)
{
return tf_with(tf.variable_scope(name), delegate {

var num_in_channel = x.shape[x.NDims - 1];
var shape = new[] { filter_size, filter_size, num_in_channel, num_filters };
var W = weight_variable("W", shape);
// tf.summary.histogram("weight", W);
var b = bias_variable("b", new[] { num_filters });
// tf.summary.histogram("bias", b);
var layer = tf.nn.conv2d(x, W,
strides: new[] { 1, stride, stride, 1 },
padding: "SAME");
layer += b;
return tf.nn.relu(layer);
});
}

/// <summary>
/// Create a max pooling layer
/// </summary>
/// <param name="x">input to max-pooling layer</param>
/// <param name="ksize">size of the max-pooling filter</param>
/// <param name="stride">stride of the max-pooling filter</param>
/// <param name="name">layer name</param>
/// <returns>The output array</returns>
private Tensor max_pool(Tensor x, int ksize, int stride, string name)
{
return tf.nn.max_pool(x,
ksize: new[] { 1, ksize, ksize, 1 },
strides: new[] { 1, stride, stride, 1 },
padding: "SAME",
name: name);
}

/// <summary>
/// Flattens the output of the convolutional layer so it can be fed into the fully-connected layer
/// </summary>
/// <param name="layer">input array</param>
/// <returns>flattened array</returns>
private Tensor flatten_layer(Tensor layer)
{
return tf_with(tf.variable_scope("Flatten_layer"), delegate
{
var layer_shape = layer.TensorShape;
var num_features = layer_shape[new Slice(1, 4)].size;
var layer_flat = tf.reshape(layer, new[] { -1, num_features });

return layer_flat;
});
}

/// <summary>
/// Create a weight variable with appropriate initialization
/// </summary>
/// <param name="name"></param>
/// <param name="shape"></param>
/// <returns></returns>
private RefVariable weight_variable(string name, int[] shape)
{
var initer = tf.truncated_normal_initializer(stddev: 0.01f);
return tf.get_variable(name,
dtype: tf.float32,
shape: shape,
initializer: initer);
}

/// <summary>
/// Create a bias variable with appropriate initialization
/// </summary>
/// <param name="name"></param>
/// <param name="shape"></param>
/// <returns></returns>
private RefVariable bias_variable(string name, int[] shape)
{
var initial = tf.constant(0f, shape: shape, dtype: tf.float32);
return tf.get_variable(name,
dtype: tf.float32,
initializer: initial);
}

/// <summary>
/// Create a fully-connected layer
/// </summary>
/// <param name="x">input from previous layer</param>
/// <param name="num_units">number of hidden units in the fully-connected layer</param>
/// <param name="name">layer name</param>
/// <param name="use_relu">boolean to add ReLU non-linearity (or not)</param>
/// <returns>The output array</returns>
private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true)
{
return tf_with(tf.variable_scope(name), delegate
{
var in_dim = x.shape[1];

var W = weight_variable("W_" + name, shape: new[] { in_dim, num_units });
var b = bias_variable("b_" + name, new[] { num_units });

var layer = tf.matmul(x, W) + b;
if (use_relu)
layer = tf.nn.relu(layer);

return layer;
});
}
public void PrepareData()
{
mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;
(x_train, y_train) = Reformat(mnist.Train.Data, mnist.Train.Labels);
(x_valid, y_valid) = Reformat(mnist.Validation.Data, mnist.Validation.Labels);
(x_test, y_test) = Reformat(mnist.Test.Data, mnist.Test.Labels);

print("Size of:");
print($"- Training-set:\t\t{len(mnist.Train.Data)}");
print($"- Validation-set:\t{len(mnist.Validation.Data)}");
}

/// <summary>
/// Reformats the data to the format acceptable for convolutional layers:
/// e.g. an input x of shape (N, 784) is reshaped to (N, 28, 28, 1).
/// </summary>
/// <param name="x">flattened image data</param>
/// <param name="y">one-hot labels</param>
/// <returns>the reshaped images and the original labels</returns>
private (NDArray, NDArray) Reformat(NDArray x, NDArray y)
{
var (img_size, num_ch, num_class) = (np.sqrt(x.shape[1]).astype(np.int32), 1, len(np.unique(np.argmax(y, 1))));
var dataset = x.reshape(x.shape[0], img_size, img_size, num_ch).astype(np.float32);
//y[0] = np.arange(num_class) == y[0];
//var labels = (np.arange(num_class) == y.reshape(y.shape[0], 1, y.shape[1])).astype(np.float32);
return (dataset, y);
}

public Graph ImportGraph() => throw new NotImplementedException();

public void Predict(Session sess) => throw new NotImplementedException();
}
}

+ 0
- 182
test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionNN.cs

@@ -1,182 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Diagnostics;
using Tensorflow;
using Tensorflow.Hub;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Neural network classifier for handwritten digits.
/// A sample two-layer neural network architecture for classifying MNIST digits.
/// Use Stochastic Gradient Descent (SGD) optimizer.
/// http://www.easy-tf.com/tf-tutorials/neural-networks
/// </summary>
public class DigitRecognitionNN : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = false;

public string Name => "Digits Recognition Neural Network";

const int img_h = 28;
const int img_w = 28;
int img_size_flat = img_h * img_w; // 784, the total number of pixels
int n_classes = 10; // Number of classes, one class per digit
// Hyper-parameters
int epochs = 10;
int batch_size = 100;
float learning_rate = 0.001f;
int h1 = 200; // number of nodes in the 1st hidden layer
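// Network topology: 784 inputs -> 200 hidden units (ReLU) -> 10 outputs (softmax).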
Datasets<MnistDataSet> mnist;

Tensor x, y;
Tensor loss, accuracy;
Operation optimizer;

int display_freq = 100;
float accuracy_test = 0f;
float loss_test = 1f;

public bool Run()
{
PrepareData();
BuildGraph();

using (var sess = tf.Session())
{
Train(sess);
Test(sess);
}

return loss_test < 0.09 && accuracy_test > 0.95;
}

public Graph BuildGraph()
{
var graph = new Graph().as_default();

// Placeholders for inputs (x) and outputs(y)
x = tf.placeholder(tf.float32, shape: (-1, img_size_flat), name: "X");
y = tf.placeholder(tf.float32, shape: (-1, n_classes), name: "Y");

// Create a fully-connected layer with h1 nodes as hidden layer
var fc1 = fc_layer(x, h1, "FC1", use_relu: true);
// Create a fully-connected layer with n_classes nodes as output layer
var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false);
// Define the loss function, optimizer, and accuracy
var cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits);
loss = tf.reduce_mean(cross_entropy, name: "loss");
optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss);
var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred");
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy");

// Network predictions
var cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions");

return graph;
}

private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true)
{
var in_dim = x.shape[1];

var initer = tf.truncated_normal_initializer(stddev: 0.01f);
var W = tf.get_variable("W_" + name,
dtype: tf.float32,
shape: (in_dim, num_units),
initializer: initer);

var initial = tf.constant(0f, shape: new[] { num_units }, dtype: tf.float32);
var b = tf.get_variable("b_" + name,
dtype: tf.float32,
initializer: initial);

var layer = tf.matmul(x, W) + b;
if (use_relu)
layer = tf.nn.relu(layer);

return layer;
}

public Graph ImportGraph() => throw new NotImplementedException();

public void Predict(Session sess) => throw new NotImplementedException();
public void PrepareData()
{
mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;
}

public void Train(Session sess)
{
// Number of training iterations in each epoch
var num_tr_iter = mnist.Train.Labels.shape[0] / batch_size;

var init = tf.global_variables_initializer();
sess.run(init);

float loss_val = 100.0f;
float accuracy_val = 0f;

var sw = new Stopwatch();
sw.Start();

foreach (var epoch in range(epochs))
{
print($"Training epoch: {epoch + 1}");
// Randomly shuffle the training data at the beginning of each epoch
var (x_train, y_train) = mnist.Randomize(mnist.Train.Data, mnist.Train.Labels);

foreach (var iteration in range(num_tr_iter))
{
var start = iteration * batch_size;
var end = (iteration + 1) * batch_size;
var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end);

// Run optimization op (backprop)
sess.run(optimizer, (x, x_batch), (y, y_batch));

if (iteration % display_freq == 0)
{
// Calculate and display the batch loss and accuracy
(loss_val, accuracy_val) = sess.run((loss, accuracy), (x, x_batch), (y, y_batch));
print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")} {sw.ElapsedMilliseconds}ms");
sw.Restart();
}
}

// Run validation after every epoch
(loss_val, accuracy_val) = sess.run((loss, accuracy), (x, mnist.Validation.Data), (y, mnist.Validation.Labels));
print("---------------------------------------------------------");
print($"Epoch: {epoch + 1}, validation loss: {loss_val.ToString("0.0000")}, validation accuracy: {accuracy_val.ToString("P")}");
print("---------------------------------------------------------");
}
}

public void Test(Session sess)
{
(loss_test, accuracy_test) = sess.run((loss, accuracy), (x, mnist.Test.Data), (y, mnist.Test.Labels));
print("---------------------------------------------------------");
print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}");
print("---------------------------------------------------------");
}
}
}

+ 0
- 161
test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionRNN.cs

@@ -1,161 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using Tensorflow;
using Tensorflow.Hub;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Recurrent neural network for handwritten MNIST digits.
/// https://medium.com/machine-learning-algorithms/mnist-using-recurrent-neural-network-2d070a5915a2
/// </summary>
public class DigitRecognitionRNN : IExample
{
public bool Enabled { get; set; } = false;
public bool IsImportingGraph { get; set; } = false;

public string Name => "MNIST RNN";

string logs_path = "logs";

// Hyper-parameters
int n_neurons = 128;
float learning_rate = 0.001f;
int batch_size = 128;
int epochs = 10;

int n_steps = 28;
int n_inputs = 28;
int n_outputs = 10;

Datasets<MnistDataSet> mnist;

Tensor x, y;
Tensor loss, accuracy, cls_prediction;
Operation optimizer;

int display_freq = 100;
float accuracy_test = 0f;
float loss_test = 1f;

NDArray x_train, y_train;
NDArray x_valid, y_valid;
NDArray x_test, y_test;

public bool Run()
{
PrepareData();
BuildGraph();

using (var sess = tf.Session())
{
Train(sess);
Test(sess);
}

return loss_test < 0.09 && accuracy_test > 0.95;
}

public Graph BuildGraph()
{
var graph = new Graph().as_default();

// Assign the placeholders to the class fields so Train/Test can feed them.
x = tf.placeholder(tf.float32, new[] { -1, n_steps, n_inputs });
y = tf.placeholder(tf.int32, new[] { -1 });
var cell = tf.nn.rnn_cell.BasicRNNCell(num_units: n_neurons);
var (output, state) = tf.nn.dynamic_rnn(cell, x, dtype: tf.float32);
// NOTE: the loss, optimizer and accuracy ops are not wired up yet, which is
// why this example ships with Enabled = false.

return graph;
}

public void Train(Session sess)
{
// Number of training iterations in each epoch
var num_tr_iter = y_train.shape[0] / batch_size;

var init = tf.global_variables_initializer();
sess.run(init);

float loss_val = 100.0f;
float accuracy_val = 0f;

foreach (var epoch in range(epochs))
{
print($"Training epoch: {epoch + 1}");
// Randomly shuffle the training data at the beginning of each epoch
(x_train, y_train) = mnist.Randomize(x_train, y_train);

foreach (var iteration in range(num_tr_iter))
{
var start = iteration * batch_size;
var end = (iteration + 1) * batch_size;
var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end);

// Run optimization op (backprop)
sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch));

if (iteration % display_freq == 0)
{
// Calculate and display the batch loss and accuracy
var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
loss_val = result[0];
accuracy_val = result[1];
print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}");
}
}

// Run validation after every epoch
var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_valid), new FeedItem(y, y_valid));
loss_val = results1[0];
accuracy_val = results1[1];
print("---------------------------------------------------------");
print($"Epoch: {epoch + 1}, validation loss: {loss_val.ToString("0.0000")}, validation accuracy: {accuracy_val.ToString("P")}");
print("---------------------------------------------------------");
}
}

public void Test(Session sess)
{
var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_test), new FeedItem(y, y_test));
loss_test = result[0];
accuracy_test = result[1];
print("---------------------------------------------------------");
print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}");
print("---------------------------------------------------------");
}

public void PrepareData()
{
mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;
(x_train, y_train) = (mnist.Train.Data, mnist.Train.Labels);
(x_valid, y_valid) = (mnist.Validation.Data, mnist.Validation.Labels);
(x_test, y_test) = (mnist.Test.Data, mnist.Test.Labels);

print("Size of:");
print($"- Training-set:\t\t{len(mnist.Train.Data)}");
print($"- Validation-set:\t{len(mnist.Validation.Data)}");
print($"- Test-set:\t\t{len(mnist.Test.Data)}");
}

public Graph ImportGraph() => throw new NotImplementedException();

public void Predict(Session sess) => throw new NotImplementedException();
}
}

+ 0
- 84
test/TensorFlowNET.Examples/ImageProcessing/ImageBackgroundRemoval.cs

@@ -1,84 +0,0 @@
using System;
using System.IO;
using Tensorflow;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// This example removes the background from an input image.
///
/// https://github.com/susheelsk/image-background-removal
/// </summary>
public class ImageBackgroundRemoval : IExample
{
public bool Enabled { get; set; } = false;
public bool IsImportingGraph { get; set; } = true;

public string Name => "Image Background Removal";

string dataDir = "deeplabv3";
string modelDir = "deeplabv3_mnv2_pascal_train_aug";
string modelName = "frozen_inference_graph.pb";

public bool Run()
{
PrepareData();

// import GraphDef from pb file
var graph = new Graph().as_default();
graph.Import(Path.Join(dataDir, modelDir, modelName));

Tensor output = graph.OperationByName("SemanticPredictions");

using (var sess = tf.Session(graph))
{
// Runs inference on a single image.
// NOTE: this is an unfinished port of the Python original. The call below feeds a
// placeholder string to the *output* tensor; it should instead feed the decoded,
// resized image to the graph's input tensor, e.g. (a sketch, assuming the DeepLab
// frozen graph's input op is named "ImageTensor"):
//   Tensor input = graph.OperationByName("ImageTensor");
//   NDArray segmentationMap = sess.run(output, new FeedItem(input, resizedImage));
// sess.run(output, new FeedItem(output, "[np.asarray(resized_image)]"));
}

return false;
}

public void PrepareData()
{
// get mobile_net_model file
string fileName = "deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz";
string url = $"http://download.tensorflow.org/models/{fileName}";
Web.Download(url, dataDir, fileName);
Compress.ExtractTGZ(Path.Join(dataDir, fileName), dataDir);

// xception_model, better accuracy
/*fileName = "deeplabv3_pascal_train_aug_2018_01_04.tar.gz";
url = $"http://download.tensorflow.org/models/{fileName}";
Web.Download(url, modelDir, fileName);
Compress.ExtractTGZ(Path.Join(modelDir, fileName), modelDir);*/
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 140
test/TensorFlowNET.Examples/ImageProcessing/ImageRecognitionInception.cs

@@ -1,140 +0,0 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using Console = Colorful.Console;
using Tensorflow;
using System.Drawing;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Inception v3 is a widely-used image recognition model
/// that has been shown to attain greater than 78.1% accuracy on the ImageNet dataset.
/// The model is the culmination of many ideas developed by multiple researchers over the years.
/// </summary>
public class ImageRecognitionInception : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Image Recognition Inception";
public bool IsImportingGraph { get; set; } = false;


string dir = "ImageRecognitionInception";
string pbFile = "tensorflow_inception_graph.pb";
string labelFile = "imagenet_comp_graph_label_strings.txt";
List<NDArray> file_ndarrays = new List<NDArray>();

public bool Run()
{
PrepareData();
var graph = new Graph();
//import GraphDef from pb file
graph.Import(Path.Join(dir, pbFile));

var input_name = "input";
var output_name = "output";

var input_operation = graph.OperationByName(input_name);
var output_operation = graph.OperationByName(output_name);

var labels = File.ReadAllLines(Path.Join(dir, labelFile));
var result_labels = new List<string>();
var sw = new Stopwatch();

using (var sess = tf.Session(graph))
{
foreach (var nd in file_ndarrays)
{
sw.Restart();

var results = sess.run(output_operation.outputs[0], (input_operation.outputs[0], nd));
results = np.squeeze(results);
int idx = np.argmax(results);

Console.WriteLine($"{labels[idx]} {results[idx]} in {sw.ElapsedMilliseconds}ms", Color.Tan);
result_labels.Add(labels[idx]);
}
}
return result_labels.Contains("military uniform");
}

private NDArray ReadTensorFromImageFile(string file_name,
int input_height = 224,
int input_width = 224,
int input_mean = 117,
int input_std = 1)
{
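// Builds a small graph that decodes the JPEG, resizes it to input_height x input_width,
// and normalizes pixel values as (x - input_mean) / input_std, then runs it once to
// produce the NDArray that is fed to the classifier.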
var graph = tf.Graph().as_default();

var file_reader = tf.read_file(file_name, "file_reader");
var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg");
var cast = tf.cast(decodeJpeg, tf.float32);
var dims_expander = tf.expand_dims(cast, 0);
var resize = tf.constant(new int[] { input_height, input_width });
var bilinear = tf.image.resize_bilinear(dims_expander, resize);
var sub = tf.subtract(bilinear, new float[] { input_mean });
var normalized = tf.divide(sub, new float[] { input_std });

using (var sess = tf.Session(graph))
return sess.run(normalized);
}

public void PrepareData()
{
Directory.CreateDirectory(dir);

// get model file
string url = "https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip";

Utility.Web.Download(url, dir, "inception5h.zip");

Utility.Compress.UnZip(Path.Join(dir, "inception5h.zip"), dir);

// download sample picture
Directory.CreateDirectory(Path.Join(dir, "img"));
url = $"https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/label_image/data/grace_hopper.jpg";
Utility.Web.Download(url, Path.Join(dir, "img"), "grace_hopper.jpg");

url = $"https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/data/shasta-daisy.jpg";
Utility.Web.Download(url, Path.Join(dir, "img"), "shasta-daisy.jpg");

// load image file
var files = Directory.GetFiles(Path.Join(dir, "img"));
for (int i = 0; i < files.Length; i++)
{
var nd = ReadTensorFromImageFile(files[i]);
file_ndarrays.Add(nd);
}
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 133
test/TensorFlowNET.Examples/ImageProcessing/InceptionArchGoogLeNet.cs

@@ -1,133 +0,0 @@
using NumSharp;
using System;
using System.IO;
using System.Linq;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Inception Architecture for Computer Vision
/// Port from tensorflow\examples\label_image\label_image.py
/// </summary>
public class InceptionArchGoogLeNet : IExample
{
public bool Enabled { get; set; } = false;
public string Name => "Inception Arch GoogLeNet";
public bool IsImportingGraph { get; set; } = false;


string dir = "label_image_data";
string pbFile = "inception_v3_2016_08_28_frozen.pb";
string labelFile = "imagenet_slim_labels.txt";
string picFile = "grace_hopper.jpg";
int input_height = 299;
int input_width = 299;
int input_mean = 0;
int input_std = 255;
string input_name = "import/input";
string output_name = "import/InceptionV3/Predictions/Reshape_1";

public bool Run()
{
PrepareData();

var labels = File.ReadAllLines(Path.Join(dir, labelFile));
var nd = ReadTensorFromImageFile(Path.Join(dir, picFile),
input_height: input_height,
input_width: input_width,
input_mean: input_mean,
input_std: input_std);

var graph = new Graph();
graph.Import(Path.Join(dir, pbFile));
var input_operation = graph.get_operation_by_name(input_name);
var output_operation = graph.get_operation_by_name(output_name);

NDArray results;
using (var sess = tf.Session(graph))
{
results = sess.run(output_operation.outputs[0],
new FeedItem(input_operation.outputs[0], nd));
}

results = np.squeeze(results);

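// argsort returns indices in ascending order of score, so the last five
// entries, reversed, are the top-5 predictions.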
var argsort = results.argsort<float>();
var top_k = argsort.Data<float>()
.Skip(results.size - 5)
.Reverse()
.ToArray();

foreach (float idx in top_k)
Console.WriteLine($"{picFile}: {idx} {labels[(int)idx]}, {results[(int)idx]}");

return true;
}

private NDArray ReadTensorFromImageFile(string file_name,
int input_height = 299,
int input_width = 299,
int input_mean = 0,
int input_std = 255)
{
var graph = tf.Graph().as_default();

var file_reader = tf.read_file(file_name, "file_reader");
var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
var caster = tf.cast(image_reader, tf.float32);
var dims_expander = tf.expand_dims(caster, 0);
var resize = tf.constant(new int[] { input_height, input_width });
var bilinear = tf.image.resize_bilinear(dims_expander, resize);
var sub = tf.subtract(bilinear, new float[] { input_mean });
var normalized = tf.divide(sub, new float[] { input_std });

using (var sess = tf.Session(graph))
return sess.run(normalized);
}

public void PrepareData()
{
Directory.CreateDirectory(dir);

// get model file
string url = "https://storage.googleapis.com/download.tensorflow.org/models/inception_v3_2016_08_28_frozen.pb.tar.gz";
Utility.Web.Download(url, dir, $"{pbFile}.tar.gz");

Utility.Compress.ExtractTGZ(Path.Join(dir, $"{pbFile}.tar.gz"), dir);

// download sample picture
string pic = "grace_hopper.jpg";
url = $"https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/label_image/data/{pic}";
Utility.Web.Download(url, dir, pic);
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 175
test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs

@@ -1,175 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.IO;
using Tensorflow;
using TensorFlowNET.Examples.Utility;
using System.Drawing;
using System.Drawing.Drawing2D;
using System.Linq;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
public class ObjectDetection : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Object Detection";
public bool IsImportingGraph { get; set; } = true;

public float MIN_SCORE = 0.5f;

string modelDir = "ssd_mobilenet_v1_coco_2018_01_28";
string imageDir = "images";
string pbFile = "frozen_inference_graph.pb";
string labelFile = "mscoco_label_map.pbtxt";
string picFile = "input.jpg";

NDArray imgArr;

public bool Run()
{
PrepareData();

// read in the input image
imgArr = ReadTensorFromImageFile(Path.Join(imageDir, "input.jpg"));

var graph = IsImportingGraph ? ImportGraph() : BuildGraph();

using (var sess = tf.Session(graph))
Predict(sess);

return true;
}

public Graph ImportGraph()
{
var graph = new Graph().as_default();
graph.Import(Path.Join(modelDir, pbFile));

return graph;
}

public void Predict(Session sess)
{
var graph = tf.get_default_graph();

Tensor tensorNum = graph.OperationByName("num_detections");
Tensor tensorBoxes = graph.OperationByName("detection_boxes");
Tensor tensorScores = graph.OperationByName("detection_scores");
Tensor tensorClasses = graph.OperationByName("detection_classes");
Tensor imgTensor = graph.OperationByName("image_tensor");
Tensor[] outTensorArr = new Tensor[] { tensorNum, tensorBoxes, tensorScores, tensorClasses };

var results = sess.run(outTensorArr, new FeedItem(imgTensor, imgArr));

buildOutputImage(results);
}

public void PrepareData()
{
// get model file
string url = "http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz";
Web.Download(url, modelDir, "ssd_mobilenet_v1_coco.tar.gz");

Compress.ExtractTGZ(Path.Join(modelDir, "ssd_mobilenet_v1_coco.tar.gz"), "./");

// download sample picture
url = $"https://github.com/tensorflow/models/raw/master/research/object_detection/test_images/image2.jpg";
Web.Download(url, imageDir, "input.jpg");

// download the pbtxt file
url = $"https://raw.githubusercontent.com/tensorflow/models/master/research/object_detection/data/mscoco_label_map.pbtxt";
Web.Download(url, modelDir, "mscoco_label_map.pbtxt");
}

private NDArray ReadTensorFromImageFile(string file_name)
{
var graph = tf.Graph().as_default();

var file_reader = tf.read_file(file_name, "file_reader");
var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg");
var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8);
var dims_expander = tf.expand_dims(casted, 0);

using (var sess = tf.Session(graph))
return sess.run(dims_expander);
}

private void buildOutputImage(NDArray[] resultArr)
{
// get pbtxt items
PbtxtItems pbTxtItems = PbtxtParser.ParsePbtxtFile(Path.Join(modelDir, "mscoco_label_map.pbtxt"));

// get bitmap
Bitmap bitmap = new Bitmap(Path.Join(imageDir, "input.jpg"));

var scores = resultArr[2].AsIterator<float>();
var boxes = resultArr[1].GetData<float>();
var id = np.squeeze(resultArr[3]).GetData<float>();
for (int i = 0; i < scores.size; i++)
{
float score = scores.MoveNext();
if (score > MIN_SCORE)
{
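// detection_boxes holds normalized [ymin, xmin, ymax, xmax] coordinates in [0, 1],
// so scale by the bitmap dimensions to recover pixel positions.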
float top = boxes[i * 4] * bitmap.Height;
float left = boxes[i * 4 + 1] * bitmap.Width;
float bottom = boxes[i * 4 + 2] * bitmap.Height;
float right = boxes[i * 4 + 3] * bitmap.Width;

Rectangle rect = new Rectangle()
{
X = (int)left,
Y = (int)top,
Width = (int)(right - left),
Height = (int)(bottom - top)
};

string name = pbTxtItems.items.Where(w => w.id == id[i]).Select(s => s.display_name).FirstOrDefault();

drawObjectOnBitmap(bitmap, rect, score, name);
}
}

string path = Path.Join(imageDir, "output.jpg");
bitmap.Save(path);
Console.WriteLine($"Processed image is saved as {path}");
}

private void drawObjectOnBitmap(Bitmap bmp, Rectangle rect, float score, string name)
{
using (Graphics graphic = Graphics.FromImage(bmp))
{
graphic.SmoothingMode = SmoothingMode.AntiAlias;
using (Pen pen = new Pen(Color.Red, 2))
{
graphic.DrawRectangle(pen, rect);

Point p = new Point(rect.Right + 5, rect.Top + 5);
string text = string.Format("{0}:{1}%", name, (int)(score * 100));
graphic.DrawString(text, new Font("Verdana", 8), Brushes.Red, p);
}
}
}

public Graph BuildGraph() => throw new NotImplementedException();
public void Train(Session sess) => throw new NotImplementedException();
public void Test(Session sess) => throw new NotImplementedException();
}
}

+ 0
- 88
test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection/Main.cs

@@ -1,88 +0,0 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using Tensorflow;
using Tensorflow.Contrib.Train;
using Tensorflow.Estimators;
using Tensorflow.Models.ObjectDetection;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.ImageProcessing.ObjectDetection
{
public class Main : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = true;

public string Name => "Object Detection API";

ModelLib model_lib = new ModelLib();

string model_dir = "D:/Projects/PythonLab/tf-models/research/object_detection/models/model";
string pipeline_config_path = "ObjectDetection/Models/faster_rcnn_resnet101_voc07.config";
int num_train_steps = 50;
int sample_1_of_n_eval_examples = 1;
int sample_1_of_n_eval_on_train_examples = 5;

public bool Run()
{
var config = tf.estimator.RunConfig(model_dir: model_dir);

var train_and_eval_dict = model_lib.create_estimator_and_inputs(run_config: config,
hparams: new HParams(true),
pipeline_config_path: pipeline_config_path,
train_steps: num_train_steps,
sample_1_of_n_eval_examples: sample_1_of_n_eval_examples,
sample_1_of_n_eval_on_train_examples: sample_1_of_n_eval_on_train_examples);

var estimator = train_and_eval_dict.estimator;
var train_input_fn = train_and_eval_dict.train_input_fn;
var eval_input_fns = train_and_eval_dict.eval_input_fns;
var eval_on_train_input_fn = train_and_eval_dict.eval_on_train_input_fn;
var predict_input_fn = train_and_eval_dict.predict_input_fn;
var train_steps = train_and_eval_dict.train_steps;

var (train_spec, eval_specs) = model_lib.create_train_and_eval_specs(train_input_fn,
eval_input_fns,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_on_train_data: false);

// Currently only a single Eval Spec is allowed.
tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0]);

return true;
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void PrepareData()
{
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

void IExample.Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 791
test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs

@@ -1,791 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using Google.Protobuf;
using NumSharp;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Tensorflow;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// In this tutorial, we reuse the feature extraction capabilities of powerful image classifiers trained on ImageNet
/// and simply train a new classification layer on top. Transfer learning shortcuts much of the cost of training a
/// full model from scratch by taking a piece of a model already trained on a related task and reusing it in a new model.
///
/// https://www.tensorflow.org/hub/tutorials/image_retraining
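///
/// The penultimate layer's output for each image (its "bottleneck") is computed once,
/// cached to disk, and reused across training steps, so only the small final layer is trained.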
/// </summary>
public class RetrainImageClassifier : IExample
{
public int Priority => 16;

public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = true;

public string Name => "Retrain Image Classifier";

const string data_dir = "retrain_images";
string summaries_dir = Path.Join(data_dir, "retrain_logs");
string image_dir = Path.Join(data_dir, "flower_photos");
string bottleneck_dir = Path.Join(data_dir, "bottleneck");
string output_graph = Path.Join(data_dir, "output_graph.pb");
string output_labels = Path.Join(data_dir, "output_labels.txt");
// The location where variable checkpoints will be stored.
string CHECKPOINT_NAME = Path.Join(data_dir, "_retrain_checkpoint");
string tfhub_module = "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/3";
string input_tensor_name = "Placeholder";
string final_tensor_name = "Score";
float testing_percentage = 0.1f;
float validation_percentage = 0.1f;
float learning_rate = 0.01f;
Tensor resized_image_tensor;
Dictionary<string, Dictionary<string, string[]>> image_lists;
int how_many_training_steps = 100;
int eval_step_interval = 10;
int train_batch_size = 100;
int test_batch_size = -1;
int validation_batch_size = 100;
int intermediate_store_frequency = 0;
int class_count = 0;
const int MAX_NUM_IMAGES_PER_CLASS = 134217727;
Operation train_step;
Tensor final_tensor;
Tensor bottleneck_input;
Tensor cross_entropy;
Tensor ground_truth_input;
Tensor bottleneck_tensor;
bool wants_quantization;
float test_accuracy;
NDArray predictions;

public bool Run()
{
PrepareData();

#region For debug purpose

// predict images
// Predict(null);

// load saved pb and test new images.
// Test(null);

#endregion

var graph = IsImportingGraph ? ImportGraph() : BuildGraph();

using (var sess = tf.Session(graph))
{
Train(sess);
}

return test_accuracy > 0.75f;
}

/// <summary>
/// Runs a final evaluation on an eval graph using the test data set.
/// </summary>
/// <param name="train_session"></param>
/// <param name="module_spec"></param>
/// <param name="class_count"></param>
/// <param name="image_lists"></param>
/// <param name="jpeg_data_tensor"></param>
/// <param name="decoded_image_tensor"></param>
/// <param name="resized_image_tensor"></param>
/// <param name="bottleneck_tensor"></param>
private (float, NDArray) run_final_eval(Session train_session, object module_spec, int class_count,
Dictionary<string, Dictionary<string, string[]>> image_lists,
Tensor jpeg_data_tensor, Tensor decoded_image_tensor,
Tensor resized_image_tensor, Tensor bottleneck_tensor)
{
var (test_bottlenecks, test_ground_truth, test_filenames) = get_random_cached_bottlenecks(train_session, image_lists,
test_batch_size, "testing", bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor, tfhub_module);

var (eval_session, _, bottleneck_input, ground_truth_input, evaluation_step,
prediction) = build_eval_session(class_count);

(float accuracy, NDArray prediction1) = eval_session.run((evaluation_step, prediction),
(bottleneck_input, test_bottlenecks),
(ground_truth_input, test_ground_truth));

print($"final test accuracy: {(accuracy * 100).ToString("G4")}% (N={len(test_bottlenecks)})");

return (accuracy, prediction1);
}

private (Session, Tensor, Tensor, Tensor, Tensor, Tensor)
build_eval_session(int class_count)
{
// If quantized, we need to create the correct eval graph for exporting.
var (eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization) = create_module_graph();
var eval_sess = tf.Session(graph: eval_graph);
Tensor evaluation_step = null;
Tensor prediction = null;

var graph = eval_graph.as_default();
// Add the new layer for exporting.
var (_, _, bottleneck_input, ground_truth_input, final_tensor) =
add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor,
wants_quantization, is_training: false);

// Now we need to restore the values from the training graph to the eval
// graph.
tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME);

(evaluation_step, prediction) = add_evaluation_step(final_tensor,
ground_truth_input);

return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input,
evaluation_step, prediction);
}

/// <summary>
/// Adds a new softmax and fully-connected layer for training and eval.
///
/// We need to retrain the top layer to identify our new classes, so this function
/// adds the right operations to the graph, along with some variables to hold the
/// weights, and then sets up all the gradients for the backward pass.
///
/// The set up for the softmax and fully-connected layers is based on:
/// https://www.tensorflow.org/tutorials/mnist/beginners/index.html
/// </summary>
/// <param name="class_count"></param>
/// <param name="final_tensor_name"></param>
/// <param name="bottleneck_tensor"></param>
/// <param name="quantize_layer"></param>
/// <param name="is_training"></param>
/// <returns></returns>
private (Operation, Tensor, Tensor, Tensor, Tensor) add_final_retrain_ops(int class_count, string final_tensor_name,
Tensor bottleneck_tensor, bool quantize_layer, bool is_training)
{
var (batch_size, bottleneck_tensor_size) = (bottleneck_tensor.TensorShape.dims[0], bottleneck_tensor.TensorShape.dims[1]);
tf_with(tf.name_scope("input"), scope =>
{
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
shape: bottleneck_tensor.TensorShape.dims,
name: "BottleneckInputPlaceholder");

ground_truth_input = tf.placeholder(tf.int64, new TensorShape(batch_size), name: "GroundTruthInput");
});

// Organizing the following ops so they are easier to see in TensorBoard.
string layer_name = "final_retrain_ops";
Tensor logits = null;
tf_with(tf.name_scope(layer_name), scope =>
{
RefVariable layer_weights = null;
tf_with(tf.name_scope("weights"), delegate
{
var initial_value = tf.truncated_normal(new int[] { bottleneck_tensor_size, class_count }, stddev: 0.001f);
layer_weights = tf.Variable(initial_value, name: "final_weights");
variable_summaries(layer_weights);
});

RefVariable layer_biases = null;
tf_with(tf.name_scope("biases"), delegate
{
layer_biases = tf.Variable(tf.zeros(new TensorShape(class_count)), name: "final_biases");
variable_summaries(layer_biases);
});

tf_with(tf.name_scope("Wx_plus_b"), delegate
{
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases;
tf.summary.histogram("pre_activations", logits);
});
});

final_tensor = tf.nn.softmax(logits, name: final_tensor_name);

// The tf.contrib.quantize functions rewrite the graph in place for
// quantization. The imported model graph has already been rewritten, so upon
// calling these rewrites, only the newly added final layer will be
// transformed.
if (quantize_layer)
{
throw new NotImplementedException("quantize_layer");
/*if (is_training)
tf.contrib.quantize.create_training_graph();
else
tf.contrib.quantize.create_eval_graph();*/
}

tf.summary.histogram("activations", final_tensor);

// If this is an eval graph, we don't need to add loss ops or an optimizer.
if (!is_training)
return (null, null, bottleneck_input, ground_truth_input, final_tensor);

Tensor cross_entropy_mean = null;
tf_with(tf.name_scope("cross_entropy"), delegate
{
cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
labels: ground_truth_input, logits: logits);
});

tf.summary.scalar("cross_entropy", cross_entropy_mean);

tf_with(tf.name_scope("train"), delegate
{
var optimizer = tf.train.GradientDescentOptimizer(learning_rate);
train_step = optimizer.minimize(cross_entropy_mean);
});

return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor);
}

private void variable_summaries(RefVariable var)
{
tf_with(tf.name_scope("summaries"), delegate
{
var mean = tf.reduce_mean(var);
tf.summary.scalar("mean", mean);
Tensor stddev = null;
tf_with(tf.name_scope("stddev"), delegate
{
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)));
});
tf.summary.scalar("stddev", stddev);
tf.summary.scalar("max", tf.reduce_max(var));
tf.summary.scalar("min", tf.reduce_min(var));
tf.summary.histogram("histogram", var);
});
}

private (Graph, Tensor, Tensor, bool) create_module_graph()
{
var (height, width) = (299, 299);
var graph = tf.Graph().as_default();
tf.train.import_meta_graph("graph/InceptionV3.meta");
Tensor resized_input_tensor = graph.OperationByName(input_tensor_name); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3));
// var m = hub.Module(module_spec);
Tensor bottleneck_tensor = graph.OperationByName("module_apply_default/hub_output/feature_vector/SpatialSqueeze");// m(resized_input_tensor);
var wants_quantization = false;
return (graph, bottleneck_tensor, resized_input_tensor, wants_quantization);
}

private (NDArray, long[], string[]) get_random_cached_bottlenecks(Session sess, Dictionary<string, Dictionary<string, string[]>> image_lists,
int how_many, string category, string bottleneck_dir, string image_dir,
Tensor jpeg_data_tensor, Tensor decoded_image_tensor, Tensor resized_input_tensor,
Tensor bottleneck_tensor, string module_name)
{
var bottlenecks = new List<float[]>();
var ground_truths = new List<long>();
var filenames = new List<string>();
class_count = image_lists.Keys.Count;
if (how_many >= 0)
{
// Retrieve a random sample of bottlenecks.
// Reuse a single Random instance; creating one per draw can produce
// correlated values on runtimes that seed from the system clock.
var rnd = new Random();
foreach (var unused_i in range(how_many))
{
int label_index = rnd.Next(class_count);
string label_name = image_lists.Keys.ToArray()[label_index];
int image_index = rnd.Next(MAX_NUM_IMAGES_PER_CLASS);
string image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category);
var bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name);
bottlenecks.Add(bottleneck);
ground_truths.Add(label_index);
filenames.Add(image_name);
}
}
else
{
// Retrieve all bottlenecks.
foreach (var (label_index, label_name) in enumerate(image_lists.Keys.ToArray()))
{
foreach (var (image_index, image_name) in enumerate(image_lists[label_name][category]))
{
var bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name);

bottlenecks.Add(bottleneck);
ground_truths.Add(label_index);
filenames.Add(image_name);
}
}
}

return (bottlenecks.ToArray(), ground_truths.ToArray(), filenames.ToArray());
}

/// <summary>
/// Inserts the operations we need to evaluate the accuracy of our results.
/// </summary>
/// <param name="result_tensor"></param>
/// <param name="ground_truth_tensor"></param>
/// <returns></returns>
private (Tensor, Tensor) add_evaluation_step(Tensor result_tensor, Tensor ground_truth_tensor)
{
Tensor evaluation_step = null, correct_prediction = null, prediction = null;

tf_with(tf.name_scope("accuracy"), scope =>
{
tf_with(tf.name_scope("correct_prediction"), delegate
{
prediction = tf.argmax(result_tensor, 1);
correct_prediction = tf.equal(prediction, ground_truth_tensor);
});

tf_with(tf.name_scope("accuracy"), delegate
{
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32));
});
});

tf.summary.scalar("accuracy", evaluation_step);
return (evaluation_step, prediction);
}

/// <summary>
/// Ensures all the training, testing, and validation bottlenecks are cached.
/// </summary>
/// <param name="sess"></param>
/// <param name="image_lists"></param>
/// <param name="image_dir"></param>
/// <param name="bottleneck_dir"></param>
/// <param name="jpeg_data_tensor"></param>
/// <param name="decoded_image_tensor"></param>
/// <param name="resized_image_tensor"></param>
/// <param name="bottleneck_tensor"></param>
/// <param name="tfhub_module"></param>
private void cache_bottlenecks(Session sess, Dictionary<string, Dictionary<string, string[]>> image_lists,
string image_dir, string bottleneck_dir, Tensor jpeg_data_tensor, Tensor decoded_image_tensor,
Tensor resized_input_tensor, Tensor bottleneck_tensor, string module_name)
{
int how_many_bottlenecks = 0;
var kvs = image_lists.ToArray();
var categories = new string[] {"training", "testing", "validation"};
Parallel.For(0, kvs.Length, i =>
{
var (label_name, label_lists) = kvs[i];

Parallel.For(0, categories.Length, j =>
{
var category = categories[j];
var category_list = label_lists[category];
foreach (var (index, unused_base_name) in enumerate(category_list))
{
get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name);
// Increment atomically; this loop body runs on multiple threads.
Interlocked.Increment(ref how_many_bottlenecks);
if (how_many_bottlenecks % 300 == 0)
print($"{how_many_bottlenecks} bottleneck files created.");
}
});
});
}

private float[] get_or_create_bottleneck(Session sess, Dictionary<string, Dictionary<string, string[]>> image_lists,
string label_name, int index, string image_dir, string category, string bottleneck_dir,
Tensor jpeg_data_tensor, Tensor decoded_image_tensor, Tensor resized_input_tensor,
Tensor bottleneck_tensor, string module_name)
{
var label_lists = image_lists[label_name];
var sub_dir_path = Path.Join(bottleneck_dir, label_name);
Directory.CreateDirectory(sub_dir_path);
string bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category, module_name);
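// Lazily populate the cache: compute the bottleneck only on a miss, then
// always read it back from the text file so both paths return identical data.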

if (!File.Exists(bottleneck_path))
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor);
var bottleneck_string = File.ReadAllText(bottleneck_path);
var bottleneck_values = Array.ConvertAll(bottleneck_string.Split(','), x => float.Parse(x));
return bottleneck_values;
}

private void create_bottleneck_file(string bottleneck_path, Dictionary<string, Dictionary<string, string[]>> image_lists,
string label_name, int index, string image_dir, string category, Session sess,
Tensor jpeg_data_tensor, Tensor decoded_image_tensor, Tensor resized_input_tensor, Tensor bottleneck_tensor)
{
// Create a single bottleneck file.
print("Creating bottleneck at " + bottleneck_path);
var image_path = get_image_path(image_lists, label_name, index, image_dir, category);
if (!File.Exists(image_path))
print($"File does not exist {image_path}");

var image_data = File.ReadAllBytes(image_path);
var bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor);
var values = bottleneck_values.Data<float>();
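// Serialize as a single line of comma-separated floats; this is the exact
// format get_or_create_bottleneck parses back with float.Parse.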
var bottleneck_string = string.Join(",", values);
File.WriteAllText(bottleneck_path, bottleneck_string);
}

/// <summary>
/// Runs inference on an image to extract the 'bottleneck' summary layer.
/// </summary>
/// <param name="sess">Current active TensorFlow Session.</param>
/// <param name="image_data">Data of raw JPEG data.</param>
/// <param name="image_data_tensor">Input data layer in the graph.</param>
/// <param name="decoded_image_tensor">Output of initial image resizing and preprocessing.</param>
/// <param name="resized_input_tensor">The input node of the recognition graph.</param>
/// <param name="bottleneck_tensor">Layer before the final softmax.</param>
/// <returns></returns>
private NDArray run_bottleneck_on_image(Session sess, byte[] image_data, Tensor image_data_tensor,
Tensor decoded_image_tensor, Tensor resized_input_tensor, Tensor bottleneck_tensor)
{
// First decode the JPEG image, resize it, and rescale the pixel values.
var resized_input_values = sess.run(decoded_image_tensor, new FeedItem(image_data_tensor, new Tensor(image_data, TF_DataType.TF_STRING)));
// Then run it through the recognition network.
var bottleneck_values = sess.run(bottleneck_tensor, new FeedItem(resized_input_tensor, resized_input_values))[0];
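// squeeze drops the leading batch dimension, leaving a flat feature vector
// (2048 values for an Inception V3 bottleneck).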
bottleneck_values = np.squeeze(bottleneck_values);
return bottleneck_values;
}

private string get_bottleneck_path(Dictionary<string, Dictionary<string, string[]>> image_lists, string label_name, int index,
string bottleneck_dir, string category, string module_name)
{
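// Flatten the module URL/path into a filesystem-safe suffix. A hypothetical
// module "https://tfhub.dev/google/inception_v3" would become
// "https~tfhub.dev~google~inception_v3".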
module_name = (module_name.Replace("://", "~") // URL scheme.
.Replace('/', '~') // URL and Unix paths.
.Replace(':', '~').Replace('\\', '~')); // Windows paths.
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + "_" + module_name + ".txt";
}

private string get_image_path(Dictionary<string, Dictionary<string, string[]>> image_lists, string label_name,
int index, string image_dir, string category)
{
if (!image_lists.ContainsKey(label_name))
print($"Label does not exist {label_name}");

var label_lists = image_lists[label_name];
if (!label_lists.ContainsKey(category))
print($"Category does not exist {category}");
var category_list = label_lists[category];
if (category_list.Length == 0)
print($"Label {label_name} has no images in the category {category}.");

var mod_index = index % len(category_list);
var base_name = category_list[mod_index].Split(Path.DirectorySeparatorChar).Last();
var sub_dir = label_name;
var full_path = Path.Join(image_dir, sub_dir, base_name);
return full_path;
}

/// <summary>
/// Saves a graph to file, creating a valid quantized one if necessary.
/// </summary>
/// <param name="graph_file_name"></param>
/// <param name="class_count"></param>
private void save_graph_to_file(string graph_file_name, int class_count)
{
var (sess, _, _, _, _, _) = build_eval_session(class_count);
var graph = sess.graph;
var output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), new string[] { final_tensor_name });
File.WriteAllBytes(graph_file_name, output_graph_def.ToByteArray());
}

public void PrepareData()
{
// get a set of images to teach the network about the new classes
string fileName = "flower_photos.tgz";
string url = $"http://download.tensorflow.org/example_images/{fileName}";
Web.Download(url, data_dir, fileName);
Compress.ExtractTGZ(Path.Join(data_dir, fileName), data_dir);

// download graph meta data
url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/InceptionV3.meta";
Web.Download(url, "graph", "InceptionV3.meta");

// download the pre-trained TF-Hub module weights (checkpoint variables).
url = "https://github.com/SciSharp/TensorFlow.NET/raw/master/data/tfhub_modules.zip";
Web.Download(url, data_dir, "tfhub_modules.zip");
Compress.UnZip(Path.Join(data_dir, "tfhub_modules.zip"), "tfhub_modules");

// Prepare necessary directories that can be used during training
Directory.CreateDirectory(summaries_dir);
Directory.CreateDirectory(bottleneck_dir);

// Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists();
class_count = len(image_lists);
if (class_count == 0)
print($"No valid folders of images found at {image_dir}");
if (class_count == 1)
print("Only one valid folder of images found at " +
image_dir +
" - multiple classes are needed for classification.");
}

private (Tensor, Tensor) add_jpeg_decoding()
{
// height, width, depth
var input_dim = (299, 299, 3);
var jpeg_data = tf.placeholder(tf.@string, name: "DecodeJPGInput");
var decoded_image = tf.image.decode_jpeg(jpeg_data, channels: input_dim.Item3);
// Convert from full range of uint8 to range [0,1] of float32.
var decoded_image_as_float = tf.image.convert_image_dtype(decoded_image, tf.float32);
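// resize_bilinear expects a 4-D [batch, height, width, channels] tensor,
// so insert a leading batch dimension of 1 before resizing.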
var decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0);
var resize_shape = tf.stack(new int[] { input_dim.Item1, input_dim.Item2 });
var resize_shape_as_int = tf.cast(resize_shape, dtype: tf.int32);
var resized_image = tf.image.resize_bilinear(decoded_image_4d, resize_shape_as_int);
return (jpeg_data, resized_image);
}

/// <summary>
/// Builds a list of training images from the file system.
/// </summary>
private Dictionary<string, Dictionary<string, string[]>> create_image_lists()
{
var sub_dirs = tf.gfile.Walk(image_dir)
.Select(x => x.Item1)
.OrderBy(x => x)
.ToArray();

var result = new Dictionary<string, Dictionary<string, string[]>>();

foreach (var sub_dir in sub_dirs)
{
var dir_name = sub_dir.Split(Path.DirectorySeparatorChar).Last();
print($"Looking for images in '{dir_name}'");
var file_list = Directory.GetFiles(sub_dir);
if (len(file_list) < 20)
print($"WARNING: Folder has less than 20 images, which may cause issues.");

var label_name = dir_name.ToLower();
result[label_name] = new Dictionary<string, string[]>();
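// Deterministic split: the first testing_percentage of files go to testing,
// the next validation_percentage to validation, and the rest to training.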
int testing_count = (int)Math.Floor(file_list.Length * testing_percentage);
int validation_count = (int)Math.Floor(file_list.Length * validation_percentage);
result[label_name]["testing"] = file_list.Take(testing_count).ToArray();
result[label_name]["validation"] = file_list.Skip(testing_count).Take(validation_count).ToArray();
result[label_name]["training"] = file_list.Skip(testing_count + validation_count).ToArray();
}

return result;
}

public Graph ImportGraph()
{
Graph graph;

// Set up the pre-trained graph.
(graph, bottleneck_tensor, resized_image_tensor, wants_quantization) =
create_module_graph();

// Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input,
ground_truth_input, final_tensor) = add_final_retrain_ops(
class_count, final_tensor_name, bottleneck_tensor,
wants_quantization, is_training: true);

return graph;
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
var sw = new Stopwatch();

// Initialize all weights: for the module to their pretrained values,
// and for the newly added retraining layer to random initial values.
var init = tf.global_variables_initializer();
sess.run(init);

var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding();

// We'll make sure we've calculated the 'bottleneck' image summaries and
// cached them on disk.
cache_bottlenecks(sess, image_lists, image_dir,
bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, tfhub_module);

// Create the operations we need to evaluate the accuracy of our new layer.
var (evaluation_step, _) = add_evaluation_step(final_tensor, ground_truth_input);

// Merge all the summaries and write them out to the summaries_dir
var merged = tf.summary.merge_all();
var train_writer = tf.summary.FileWriter(summaries_dir + "/train", sess.graph);
var validation_writer = tf.summary.FileWriter(summaries_dir + "/validation", sess.graph);

// Create a train saver that is used to restore values into an eval graph
// when exporting models.
var train_saver = tf.train.Saver();
train_saver.save(sess, CHECKPOINT_NAME);

sw.Restart();

for (int i = 0; i < how_many_training_steps; i++)
{
var (train_bottlenecks, train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, train_batch_size, "training",
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
tfhub_module);

// Feed the bottlenecks and ground truth into the graph, and run a training
// step. Capture training summaries for TensorBoard with the `merged` op.
var results = sess.run(
new ITensorOrOperation[] { merged, train_step },
new FeedItem(bottleneck_input, train_bottlenecks),
new FeedItem(ground_truth_input, train_ground_truth));
var train_summary = results[0];

// TODO
// train_writer.add_summary(train_summary, i);

// Every so often, print out how well the graph is training.
bool is_last_step = (i + 1 == how_many_training_steps);
if ((i % eval_step_interval) == 0 || is_last_step)
{
(float train_accuracy, float cross_entropy_value) = sess.run((evaluation_step, cross_entropy),
(bottleneck_input, train_bottlenecks),
(ground_truth_input, train_ground_truth));
print($"{DateTime.Now}: Step {i + 1}: Train accuracy = {train_accuracy * 100}%, Cross entropy = {cross_entropy_value.ToString("G4")}");

var (validation_bottlenecks, validation_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, validation_batch_size, "validation",
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
tfhub_module);

// Run a validation step and capture training summaries for TensorBoard
// with the `merged` op.
(_, float validation_accuracy) = sess.run((merged, evaluation_step),
(bottleneck_input, validation_bottlenecks),
(ground_truth_input, validation_ground_truth));

// validation_writer.add_summary(validation_summary, i);
print($"{DateTime.Now}: Step {i + 1}: Validation accuracy = {validation_accuracy * 100}% (N={len(validation_bottlenecks)}) {sw.ElapsedMilliseconds}ms");
sw.Restart();
}

// Store intermediate results
int intermediate_frequency = intermediate_store_frequency;
if (intermediate_frequency > 0 && i % intermediate_frequency == 0 && i > 0)
{
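// Intermediate graph export (as in the original retrain script) is not
// ported yet.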

}
}

// After training is complete, force one last save of the train checkpoint.
train_saver.save(sess, CHECKPOINT_NAME);

// We've completed all our training, so run a final test evaluation on
// some new images we haven't used before.
(test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor, resized_image_tensor,
bottleneck_tensor);

// Write out the trained graph and labels with the weights stored as
// constants.
print($"Save final result to : {output_graph}");
save_graph_to_file(output_graph, class_count);
File.WriteAllText(output_labels, string.Join("\n", image_lists.Keys));
}

/// <summary>
/// Prediction.
/// Label-to-index mapping, from output_labels.txt:
/// 0 - daisy
/// 1 - dandelion
/// 2 - roses
/// 3 - sunflowers
/// 4 - tulips
/// </summary>
/// <param name="sess_"></param>
public void Predict(Session sess_)
{
if (!File.Exists(output_graph))
return;

var labels = File.ReadAllLines(output_labels);

// predict image
var img_path = Path.Join(image_dir, "daisy", "5547758_eea9edfd54_n.jpg");
var fileBytes = ReadTensorFromImageFile(img_path);

// import graph and variables
var graph = new Graph();
graph.Import(output_graph, "");

Tensor input = graph.OperationByName(input_tensor_name);
Tensor output = graph.OperationByName(final_tensor_name);

using (var sess = tf.Session(graph))
{
var result = sess.run(output, (input, fileBytes));
var prob = np.squeeze(result);
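// The output is a softmax distribution over the labels; argmax selects the
// most probable class index.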
var idx = np.argmax(prob);
print($"Prediction result: [{labels[idx]} {prob[idx]}] for {img_path}.");
}
}

private NDArray ReadTensorFromImageFile(string file_name,
int input_height = 299,
int input_width = 299,
int input_mean = 0,
int input_std = 255)
{
var graph = tf.Graph().as_default();

var file_reader = tf.read_file(file_name, "file_reader");
var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
var caster = tf.cast(image_reader, tf.float32);
var dims_expander = tf.expand_dims(caster, 0);
var resize = tf.constant(new int[] { input_height, input_width });
var bilinear = tf.image.resize_bilinear(dims_expander, resize);
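// Normalize pixels via (x - mean) / std; with mean 0 and std 255 this maps
// the uint8 range onto [0, 1].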
var sub = tf.subtract(bilinear, new float[] { input_mean });
var normalized = tf.divide(sub, new float[] { input_std });

using (var sess = tf.Session(graph))
return sess.run(normalized);
}

public void Test(Session sess_)
{
if (!File.Exists(output_graph))
return;

var graph = new Graph();
graph.Import(output_graph);
var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding();

tf_with(tf.Session(graph), sess =>
{
(test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor, resized_image_tensor,
bottleneck_tensor);
});
}
}
}

+ 0
- 54
test/TensorFlowNET.Examples/ImageProcessing/YOLO/Dataset.cs

@@ -1,54 +0,0 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.ImageProcessing.YOLO
{
public class Dataset
{
string annot_path;
int[] input_sizes;
int batch_size;
bool data_aug;
int[] train_input_sizes;
NDArray strides;
NDArray anchors;
Dictionary<int, string> classes;
int num_classes;
int anchor_per_scale;
int max_bbox_per_scale;
string[] annotations;
int num_samples;
int batch_count;

public int Length = 0;

public Dataset(string dataset_type, Config cfg)
{
annot_path = dataset_type == "train" ? cfg.TRAIN.ANNOT_PATH : cfg.TEST.ANNOT_PATH;
input_sizes = dataset_type == "train" ? cfg.TRAIN.INPUT_SIZE : cfg.TEST.INPUT_SIZE;
batch_size = dataset_type == "train" ? cfg.TRAIN.BATCH_SIZE : cfg.TEST.BATCH_SIZE;
data_aug = dataset_type == "train" ? cfg.TRAIN.DATA_AUG : cfg.TEST.DATA_AUG;
train_input_sizes = cfg.TRAIN.INPUT_SIZE;
strides = np.array(cfg.YOLO.STRIDES);

classes = Utils.read_class_names(cfg.YOLO.CLASSES);
num_classes = classes.Count;
anchors = np.array(Utils.get_anchors(cfg.YOLO.ANCHORS));
anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE;
max_bbox_per_scale = 150;

annotations = load_annotations();
num_samples = len(annotations);
batch_count = 0;
// Length is the number of batches per epoch; Main.cs reads it as steps_per_period.
Length = (int)Math.Ceiling(num_samples / (double)batch_size);
}

string[] load_annotations()
{
return File.ReadAllLines(annot_path);
}
}
}

+ 0
- 243
test/TensorFlowNET.Examples/ImageProcessing/YOLO/Main.cs

@@ -1,243 +0,0 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.ImageProcessing.YOLO
{
/// <summary>
/// Implementation of YOLO v3 object detector in Tensorflow
/// https://github.com/YunYang1994/tensorflow-yolov3
/// </summary>
public class Main : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = false;
public string Name => "YOLOv3";

#region args
Dictionary<int, string> classes;
int num_classes;
float learn_rate_init;
float learn_rate_end;
int first_stage_epochs;
int second_stage_epochs;
int warmup_periods;
string time;
float moving_ave_decay;
int max_bbox_per_scale;
int steps_per_period;

Dataset trainset, testset;

Config cfg;

Tensor input_data;
Tensor label_sbbox;
Tensor label_mbbox;
Tensor label_lbbox;
Tensor true_sbboxes;
Tensor true_mbboxes;
Tensor true_lbboxes;
Tensor trainable;

Session sess;
YOLOv3 model;
VariableV1[] net_var;
Tensor giou_loss, conf_loss, prob_loss;
RefVariable global_step;
Tensor learn_rate;
Tensor loss;
List<RefVariable> first_stage_trainable_var_list;
Operation train_op_with_frozen_variables;
Operation train_op_with_all_variables;
Saver loader;
Saver saver;
#endregion

public bool Run()
{
PrepareData();

var graph = IsImportingGraph ? ImportGraph() : BuildGraph();

var options = new SessionOptions();
options.SetConfig(new ConfigProto { AllowSoftPlacement = true });
using (var sess = tf.Session(graph, opts: options))
{
Train(sess);
}

return true;
}

public void Train(Session sess)
{
sess.run(tf.global_variables_initializer());
print($"=> Restoring weights from: {cfg.TRAIN.INITIAL_WEIGHT} ... ");
loader.restore(sess, cfg.TRAIN.INITIAL_WEIGHT);
}

public void Test(Session sess)
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
var graph = new Graph().as_default();

tf_with(tf.name_scope("define_input"), scope =>
{
input_data = tf.placeholder(dtype: tf.float32, name: "input_data");
label_sbbox = tf.placeholder(dtype: tf.float32, name: "label_sbbox");
label_mbbox = tf.placeholder(dtype: tf.float32, name: "label_mbbox");
label_lbbox = tf.placeholder(dtype: tf.float32, name: "label_lbbox");
true_sbboxes = tf.placeholder(dtype: tf.float32, name: "sbboxes");
true_mbboxes = tf.placeholder(dtype: tf.float32, name: "mbboxes");
true_lbboxes = tf.placeholder(dtype: tf.float32, name: "lbboxes");
trainable = tf.placeholder(dtype: tf.@bool, name: "training");
});

tf_with(tf.name_scope("define_loss"), scope =>
{
model = new YOLOv3(cfg, input_data, trainable);
net_var = tf.global_variables();
(giou_loss, conf_loss, prob_loss) = model.compute_loss(
label_sbbox, label_mbbox, label_lbbox,
true_sbboxes, true_mbboxes, true_lbboxes);
loss = giou_loss + conf_loss + prob_loss;
});

Tensor global_step_update = null;
tf_with(tf.name_scope("learn_rate"), scope =>
{
global_step = tf.Variable(1.0, dtype: tf.float64, trainable: false, name: "global_step");
var warmup_steps = tf.constant(warmup_periods * steps_per_period,
dtype: tf.float64, name: "warmup_steps");
var train_steps = tf.constant((first_stage_epochs + second_stage_epochs) * steps_per_period,
dtype: tf.float64, name: "train_steps");
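// Piecewise schedule: linear warmup, lr = step / warmup_steps * lr_init,
// followed by cosine decay from lr_init down to lr_end:
// lr = lr_end + 0.5 * (lr_init - lr_end) * (1 + cos(pi * (step - warmup) / (train - warmup))).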

learn_rate = tf.cond(
pred: global_step < warmup_steps,
true_fn: delegate
{
return global_step / warmup_steps * learn_rate_init;
},
false_fn: delegate
{
return learn_rate_end + 0.5 * (learn_rate_init - learn_rate_end) *
(1 + tf.cos(
(global_step - warmup_steps) / (train_steps - warmup_steps) * Math.PI));
}
);

global_step_update = tf.assign_add(global_step, 1.0f);
});

Operation moving_ave = null;
tf_with(tf.name_scope("define_weight_decay"), scope =>
{
var emv = tf.train.ExponentialMovingAverage(moving_ave_decay);
var vars = tf.trainable_variables().Select(x => (RefVariable)x).ToArray();
moving_ave = emv.apply(vars);
});

tf_with(tf.name_scope("define_first_stage_train"), scope =>
{
first_stage_trainable_var_list = new List<RefVariable>();
foreach (var var in tf.trainable_variables())
{
var var_name = var.op.name;
var var_name_mess = var_name.Split('/');
if (new[] { "conv_sbbox", "conv_mbbox", "conv_lbbox" }.Contains(var_name_mess[0]))
first_stage_trainable_var_list.Add(var as RefVariable);
}

var adam = tf.train.AdamOptimizer(learn_rate);
var first_stage_optimizer = adam.minimize(loss, var_list: first_stage_trainable_var_list);
tf_with(tf.control_dependencies(tf.get_collection<Operation>(tf.GraphKeys.UPDATE_OPS).ToArray()), delegate
{
tf_with(tf.control_dependencies(new ITensorOrOperation[] { first_stage_optimizer, global_step_update }), delegate
{
tf_with(tf.control_dependencies(new[] { moving_ave }), delegate
{
train_op_with_frozen_variables = tf.no_op();
});
});
});
});

tf_with(tf.name_scope("define_second_stage_train"), delegate
{
var second_stage_trainable_var_list = tf.trainable_variables().Select(x => x as RefVariable).ToList();
var adam = tf.train.AdamOptimizer(learn_rate);
var second_stage_optimizer = adam.minimize(loss, var_list: second_stage_trainable_var_list);
tf_with(tf.control_dependencies(tf.get_collection<Operation>(tf.GraphKeys.UPDATE_OPS).ToArray()), delegate
{
tf_with(tf.control_dependencies(new ITensorOrOperation[] { second_stage_optimizer, global_step_update }), delegate
{
tf_with(tf.control_dependencies(new[] { moving_ave }), delegate
{
train_op_with_all_variables = tf.no_op();
});
});
});
});

tf_with(tf.name_scope("loader_and_saver"), delegate
{
loader = tf.train.Saver(net_var);
saver = tf.train.Saver(tf.global_variables(), max_to_keep: 10);
});

tf_with(tf.name_scope("summary"), delegate
{
tf.summary.scalar("learn_rate", learn_rate);
tf.summary.scalar("giou_loss", giou_loss);
tf.summary.scalar("conf_loss", conf_loss);
tf.summary.scalar("prob_loss", prob_loss);
tf.summary.scalar("total_loss", loss);
});

return graph;
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void PrepareData()
{
cfg = new Config(Name);

string dataDir = Path.Combine(Name, "data");
Directory.CreateDirectory(dataDir);

classes = Utils.read_class_names(cfg.YOLO.CLASSES);
num_classes = classes.Count;

learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT;
learn_rate_end = cfg.TRAIN.LEARN_RATE_END;
first_stage_epochs = cfg.TRAIN.FIRST_STAGE_EPOCHS;
second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS;
warmup_periods = cfg.TRAIN.WARMUP_EPOCHS;
DateTime now = DateTime.Now;
time = $"{now.Year}-{now.Month}-{now.Day}-{now.Hour}-{now.Minute}-{now.Minute}";
moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY;
max_bbox_per_scale = 150;
trainset = new Dataset("train", cfg);
testset = new Dataset("test", cfg);
steps_per_period = trainset.Length;
}
}
}

+ 0
- 27
test/TensorFlowNET.Examples/ImageProcessing/YOLO/Utils.cs

@@ -1,27 +0,0 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;

namespace TensorFlowNET.Examples.ImageProcessing.YOLO
{
class Utils
{
public static Dictionary<int, string> read_class_names(string file)
{
var classes = new Dictionary<int, string>();
foreach (var line in File.ReadAllLines(file))
classes[classes.Count] = line;
return classes;
}

public static NDArray get_anchors(string file)
{
return np.array(File.ReadAllText(file).Split(',')
.Select(x => float.Parse(x))
.ToArray()).reshape(3, 3, 2);
}
}
}

+ 0
- 291
test/TensorFlowNET.Examples/ImageProcessing/YOLO/YOLOv3.cs

@@ -1,291 +0,0 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.Text;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.ImageProcessing.YOLO
{
public class YOLOv3
{
Config cfg;
Tensor trainable;
Tensor input_data;
Dictionary<int, string> classes;
int num_class;
NDArray strides;
NDArray anchors;
int anchor_per_scale;
float iou_loss_thresh;
string upsample_method;
Tensor conv_lbbox;
Tensor conv_mbbox;
Tensor conv_sbbox;
Tensor pred_sbbox;
Tensor pred_mbbox;
Tensor pred_lbbox;

public YOLOv3(Config cfg_, Tensor input_data_, Tensor trainable_)
{
cfg = cfg_;
input_data = input_data_;
trainable = trainable_;
classes = Utils.read_class_names(cfg.YOLO.CLASSES);
num_class = len(classes);
strides = np.array(cfg.YOLO.STRIDES);
anchors = Utils.get_anchors(cfg.YOLO.ANCHORS);
anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE;
iou_loss_thresh = cfg.YOLO.IOU_LOSS_THRESH;
upsample_method = cfg.YOLO.UPSAMPLE_METHOD;

(conv_lbbox, conv_mbbox, conv_sbbox) = __build_network(input_data);

tf_with(tf.variable_scope("pred_sbbox"), scope =>
{
pred_sbbox = decode(conv_sbbox, anchors[0], strides[0]);
});

tf_with(tf.variable_scope("pred_mbbox"), scope =>
{
pred_mbbox = decode(conv_mbbox, anchors[1], strides[1]);
});

tf_with(tf.variable_scope("pred_lbbox"), scope =>
{
pred_lbbox = decode(conv_lbbox, anchors[2], strides[2]);
});
}

private (Tensor, Tensor, Tensor) __build_network(Tensor input_data)
{
Tensor route_1, route_2;
(route_1, route_2, input_data) = backbone.darknet53(input_data, trainable);
input_data = common.convolutional(input_data, new[] { 1, 1, 1024, 512 }, trainable, "conv52");
input_data = common.convolutional(input_data, new[] { 3, 3, 512, 1024 }, trainable, "conv53");
input_data = common.convolutional(input_data, new[] { 1, 1, 1024, 512 }, trainable, "conv54");
input_data = common.convolutional(input_data, new[] { 3, 3, 512, 1024 }, trainable, "conv55");
input_data = common.convolutional(input_data, new[] { 1, 1, 1024, 512 }, trainable, "conv56");

var conv_lobj_branch = common.convolutional(input_data, new[] { 3, 3, 512, 1024 }, trainable, name: "conv_lobj_branch");
var conv_lbbox = common.convolutional(conv_lobj_branch, new[] { 1, 1, 1024, 3 * (num_class + 5) },
trainable: trainable, name: "conv_lbbox", activate: false, bn: false);

input_data = common.convolutional(input_data, new[] { 1, 1, 512, 256 }, trainable, "conv57");
input_data = common.upsample(input_data, name: "upsample0", method: upsample_method);

tf_with(tf.variable_scope("route_1"), delegate
{
input_data = tf.concat(new[] { input_data, route_2 }, axis: -1);
});

input_data = common.convolutional(input_data, new[] { 1, 1, 768, 256 }, trainable, "conv58");
input_data = common.convolutional(input_data, new[] { 3, 3, 256, 512 }, trainable, "conv59");
input_data = common.convolutional(input_data, new[] { 1, 1, 512, 256 }, trainable, "conv60");
input_data = common.convolutional(input_data, new[] { 3, 3, 256, 512 }, trainable, "conv61");
input_data = common.convolutional(input_data, new[] { 1, 1, 512, 256 }, trainable, "conv62");

var conv_mobj_branch = common.convolutional(input_data, new[] { 3, 3, 256, 512 }, trainable, name: "conv_mobj_branch");
conv_mbbox = common.convolutional(conv_mobj_branch, new[] { 1, 1, 512, 3 * (num_class + 5) },
trainable: trainable, name: "conv_mbbox", activate: false, bn: false);

input_data = common.convolutional(input_data, new[] { 1, 1, 256, 128 }, trainable, "conv63");
input_data = common.upsample(input_data, name: "upsample1", method: upsample_method);

tf_with(tf.variable_scope("route_2"), delegate
{
input_data = tf.concat(new[] { input_data, route_1 }, axis: -1);
});

input_data = common.convolutional(input_data, new[] { 1, 1, 384, 128 }, trainable, "conv64");
input_data = common.convolutional(input_data, new[] { 3, 3, 128, 256 }, trainable, "conv65");
input_data = common.convolutional(input_data, new[] { 1, 1, 256, 128 }, trainable, "conv66");
input_data = common.convolutional(input_data, new[] { 3, 3, 128, 256 }, trainable, "conv67");
input_data = common.convolutional(input_data, new[] { 1, 1, 256, 128 }, trainable, "conv68");

var conv_sobj_branch = common.convolutional(input_data, new[] { 3, 3, 128, 256 }, trainable, name: "conv_sobj_branch");
conv_sbbox = common.convolutional(conv_sobj_branch, new[] { 1, 1, 256, 3 * (num_class + 5) },
trainable: trainable, name: "conv_sbbox", activate: false, bn: false);
return (conv_lbbox, conv_mbbox, conv_sbbox);
}

private Tensor decode(Tensor conv_output, NDArray anchors, int stride)
{
var conv_shape = tf.shape(conv_output);
var batch_size = conv_shape[0];
var output_size = conv_shape[1];
anchor_per_scale = len(anchors);

conv_output = tf.reshape(conv_output, new object[] { batch_size, output_size, output_size, anchor_per_scale, 5 + num_class });
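// The last axis packs (dx, dy, dw, dh, objectness, class logits):
// 0:2 are the box-center offsets, 2:4 the width/height offsets,
// 4:5 the confidence logit and 5: the per-class logits.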

var conv_raw_dxdy = conv_output[":", ":", ":", ":", "0:2"];
var conv_raw_dwdh = conv_output[":", ":", ":", ":", "2:4"];
var conv_raw_conf = conv_output[":", ":", ":", ":", "4:5"];
var conv_raw_prob = conv_output[":", ":", ":", ":", "5:"];

var y = tf.tile(tf.range(output_size, dtype: tf.int32)[":", tf.newaxis], new object[] { 1, output_size });
var x = tf.tile(tf.range(output_size, dtype: tf.int32)[tf.newaxis, ":"], new object[] { output_size, 1 });

var xy_grid = tf.concat(new[] { x[":", ":", tf.newaxis], y[":", ":", tf.newaxis] }, axis: -1);
xy_grid = tf.tile(xy_grid[tf.newaxis, ":", ":", tf.newaxis, ":"], new object[] { batch_size, 1, 1, anchor_per_scale, 1 });
xy_grid = tf.cast(xy_grid, tf.float32);

var pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * stride;
var pred_wh = (tf.exp(conv_raw_dwdh) * anchors) * stride;
var pred_xywh = tf.concat(new[] { pred_xy, pred_wh }, axis: -1);

var pred_conf = tf.sigmoid(conv_raw_conf);
var pred_prob = tf.sigmoid(conv_raw_prob);

return tf.concat(new[] { pred_xywh, pred_conf, pred_prob }, axis: -1);
}

public (Tensor, Tensor, Tensor) compute_loss(Tensor label_sbbox, Tensor label_mbbox, Tensor label_lbbox,
Tensor true_sbbox, Tensor true_mbbox, Tensor true_lbbox)
{
Tensor giou_loss = null, conf_loss = null, prob_loss = null;
(Tensor, Tensor, Tensor) loss_sbbox = (null, null, null);
(Tensor, Tensor, Tensor) loss_mbbox = (null, null, null);
(Tensor, Tensor, Tensor) loss_lbbox = (null, null, null);

tf_with(tf.name_scope("smaller_box_loss"), delegate
{
loss_sbbox = loss_layer(conv_sbbox, pred_sbbox, label_sbbox, true_sbbox,
anchors: anchors[0], stride: strides[0]);
});

tf_with(tf.name_scope("medium_box_loss"), delegate
{
loss_mbbox = loss_layer(conv_mbbox, pred_mbbox, label_mbbox, true_mbbox,
anchors: anchors[1], stride: strides[1]);
});

tf_with(tf.name_scope("bigger_box_loss"), delegate
{
loss_lbbox = loss_layer(conv_lbbox, pred_lbbox, label_lbbox, true_lbbox,
anchors: anchors[2], stride: strides[2]);
});

tf_with(tf.name_scope("giou_loss"), delegate
{
giou_loss = loss_sbbox.Item1 + loss_mbbox.Item1 + loss_lbbox.Item1;
});

tf_with(tf.name_scope("conf_loss"), delegate
{
conf_loss = loss_sbbox.Item2 + loss_mbbox.Item2 + loss_lbbox.Item2;
});

tf_with(tf.name_scope("prob_loss"), delegate
{
prob_loss = loss_sbbox.Item3 + loss_mbbox.Item3 + loss_lbbox.Item3;
});

return (giou_loss, conf_loss, prob_loss);
}

public (Tensor, Tensor, Tensor) loss_layer(Tensor conv, Tensor pred, Tensor label, Tensor bboxes, NDArray anchors, int stride)
{
var conv_shape = tf.shape(conv);
var batch_size = conv_shape[0];
var output_size = conv_shape[1];
var input_size = stride * output_size;
conv = tf.reshape(conv, new object[] {batch_size, output_size, output_size,
anchor_per_scale, 5 + num_class });
var conv_raw_conf = conv[":", ":", ":", ":", "4:5"];
var conv_raw_prob = conv[":", ":", ":", ":", "5:"];

var pred_xywh = pred[":", ":", ":", ":", "0:4"];
var pred_conf = pred[":", ":", ":", ":", "4:5"];

var label_xywh = label[":", ":", ":", ":", "0:4"];
var respond_bbox = label[":", ":", ":", ":", "4:5"];
var label_prob = label[":", ":", ":", ":", "5:"];

var giou = tf.expand_dims(bbox_giou(pred_xywh, label_xywh), axis: -1);
input_size = tf.cast(input_size, tf.float32);

// Weight small boxes more heavily: 2 - area(box) / area(input). The reference
// implementation divides by input_size squared (not its square root).
var bbox_loss_scale = 2.0 - 1.0 * label_xywh[":", ":", ":", ":", "2:3"] * label_xywh[":", ":", ":", ":", "3:4"] / (input_size * input_size);
var giou_loss = respond_bbox * bbox_loss_scale * (1 - giou);

var iou = bbox_iou(pred_xywh[":", ":", ":", ":", tf.newaxis, ":"], bboxes[":", tf.newaxis, tf.newaxis, tf.newaxis, ":", ":"]);
var max_iou = tf.expand_dims(tf.reduce_max(iou, axis: new[] { -1 }), axis: -1);

var respond_bgd = (1.0 - respond_bbox) * tf.cast(max_iou < iou_loss_thresh, tf.float32);

var conf_focal = focal(respond_bbox, pred_conf);

var conf_loss = conf_focal * (
respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels: respond_bbox, logits: conv_raw_conf) +
respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels: respond_bbox, logits: conv_raw_conf));

var prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels: label_prob, logits: conv_raw_prob);

giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis: new[] { 1, 2, 3, 4 }));
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis: new[] { 1, 2, 3, 4 }));
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis: new[] { 1, 2, 3, 4 }));

return (giou_loss, conf_loss, prob_loss);
}

public Tensor focal(Tensor target, Tensor actual, int alpha = 1, int gamma = 2)
{
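// A simplified focal-style modulation |target - actual|^gamma (without the
// log term), used to down-weight easy examples in the confidence loss.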
var focal_loss = alpha * tf.pow(tf.abs(target - actual), gamma);
return focal_loss;
}

public Tensor bbox_giou(Tensor boxes1, Tensor boxes2)
{
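// GIoU = IoU - (area(enclosing box) - area(union)) / area(enclosing box).
// Boxes arrive as (cx, cy, w, h) and are converted to corner form first.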
boxes1 = tf.concat(new[] { boxes1["...", ":2"] - boxes1["...", "2:"] * 0.5,
boxes1["...", ":2"] + boxes1["...", "2:"] * 0.5}, axis: -1);
boxes2 = tf.concat(new[] { boxes2["...", ":2"] - boxes2["...", "2:"] * 0.5,
boxes2["...", ":2"] + boxes2["...", "2:"] * 0.5}, axis: -1);

boxes1 = tf.concat(new[] { tf.minimum(boxes1["...", ":2"], boxes1["...", "2:"]),
tf.maximum(boxes1["...", ":2"], boxes1["...", "2:"])}, axis: -1);
boxes2 = tf.concat(new[] { tf.minimum(boxes2["...", ":2"], boxes2["...", "2:"]),
tf.maximum(boxes2["...", ":2"], boxes2["...", "2:"])}, axis: -1);

var boxes1_area = (boxes1["...", "2"] - boxes1["...", "0"]) * (boxes1["...", "3"] - boxes1["...", "1"]);
var boxes2_area = (boxes2["...", "2"] - boxes2["...", "0"]) * (boxes2["...", "3"] - boxes2["...", "1"]);

var left_up = tf.maximum(boxes1["...", ":2"], boxes2["...", ":2"]);
var right_down = tf.minimum(boxes1["...", "2:"], boxes2["...", "2:"]);

var inter_section = tf.maximum(right_down - left_up, 0.0f);
var inter_area = inter_section["...", "0"] * inter_section["...", "1"];
var union_area = boxes1_area + boxes2_area - inter_area;
var iou = inter_area / union_area;

var enclose_left_up = tf.minimum(boxes1["...", ":2"], boxes2["...", ":2"]);
var enclose_right_down = tf.maximum(boxes1["...", "2:"], boxes2["...", "2:"]);
var enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0);
var enclose_area = enclose["...", "0"] * enclose["...", "1"];
var giou = iou - 1.0 * (enclose_area - union_area) / enclose_area;

return giou;
}

public Tensor bbox_iou(Tensor boxes1, Tensor boxes2)
{
var boxes1_area = boxes1["...", "2"] * boxes1["...", "3"];
var boxes2_area = boxes2["...", "2"] * boxes2["...", "3"];

boxes1 = tf.concat(new[] { boxes1["...", ":2"] - boxes1["...", "2:"] * 0.5,
boxes1["...", ":2"] + boxes1["...", "2:"] * 0.5}, axis: -1);
boxes2 = tf.concat(new[] { boxes2["...", ":2"] - boxes2["...", "2:"] * 0.5,
boxes2["...", ":2"] + boxes2["...", "2:"] * 0.5}, axis: -1);

var left_up = tf.maximum(boxes1["...", ":2"], boxes2["...", ":2"]);
var right_down = tf.minimum(boxes1["...", "2:"], boxes2["...", "2:"]);

var inter_section = tf.maximum(right_down - left_up, 0.0);
var inter_area = inter_section["...", "0"] * inter_section["...", "1"];
var union_area = boxes1_area + boxes2_area - inter_area;
var iou = 1.0 * inter_area / union_area;

return iou;
}
}
}

+ 0
- 51
test/TensorFlowNET.Examples/ImageProcessing/YOLO/backbone.cs

@@ -1,51 +0,0 @@
using System;
using System.Collections.Generic;
using System.Text;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.ImageProcessing.YOLO
{
class backbone
{
public static (Tensor, Tensor, Tensor) darknet53(Tensor input_data, Tensor trainable)
{
return tf_with(tf.variable_scope("darknet"), scope =>
{
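// Darknet-53 feature extractor. route_1 and route_2 are intermediate feature
// maps (strides 8 and 16) that are later fused with the stride-32 output to
// form the three YOLO detection scales.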
input_data = common.convolutional(input_data, filters_shape: new int[] { 3, 3, 3, 32 }, trainable: trainable, name: "conv0");
input_data = common.convolutional(input_data, filters_shape: new int[] { 3, 3, 32, 64 }, trainable: trainable, name: "conv1", downsample: true);

foreach (var i in range(1))
input_data = common.residual_block(input_data, 64, 32, 64, trainable: trainable, name: $"residual{i + 0}");

input_data = common.convolutional(input_data, filters_shape: new[] { 3, 3, 64, 128 },
trainable: trainable, name: "conv4", downsample: true);

foreach (var i in range(2))
input_data = common.residual_block(input_data, 128, 64, 128, trainable: trainable, name: $"residual{i + 1}");

input_data = common.convolutional(input_data, filters_shape: new[] { 3, 3, 128, 256 },
trainable: trainable, name: "conv9", downsample: true);

foreach (var i in range(8))
input_data = common.residual_block(input_data, 256, 128, 256, trainable: trainable, name: $"residual{i + 3}");

var route_1 = input_data;
input_data = common.convolutional(input_data, filters_shape: new int[] { 3, 3, 256, 512 },
trainable: trainable, name: "conv26", downsample: true);

foreach (var i in range(8))
input_data = common.residual_block(input_data, 512, 256, 512, trainable: trainable, name: $"residual{i + 11}");

var route_2 = input_data;
input_data = common.convolutional(input_data, filters_shape: new[] { 3, 3, 512, 1024 },
trainable: trainable, name: "conv43", downsample: true);

foreach (var i in range(4))
input_data = common.residual_block(input_data, 1024, 512, 1024, trainable: trainable, name: $"residual{i + 19}");

return (route_1, route_2, input_data);
});
}
}
}

+ 0
- 100
test/TensorFlowNET.Examples/ImageProcessing/YOLO/common.cs

@@ -1,100 +0,0 @@
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.ImageProcessing.YOLO
{
class common
{
public static Tensor convolutional(Tensor input_data, int[] filters_shape, Tensor trainable,
string name, bool downsample = false, bool activate = true,
bool bn = true)
{
return tf_with(tf.variable_scope(name), scope =>
{
int[] strides;
string padding;

if (downsample)
{
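// For stride-2 downsampling, pad explicitly and convolve with VALID padding
// so the output is exactly half the input resolution.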
(int pad_h, int pad_w) = ((int)Math.Floor((filters_shape[0] - 2) / 2.0f) + 1, (int)Math.Floor((filters_shape[1] - 2) / 2.0f) + 1);
var paddings = tf.constant(new int[,] { { 0, 0 }, { pad_h, pad_h }, { pad_w, pad_w }, { 0, 0 } });
input_data = tf.pad(input_data, paddings, "CONSTANT");
strides = new[] { 1, 2, 2, 1 };
padding = "VALID";
}
else
{
strides = new int[] { 1, 1, 1, 1 };
padding = "SAME";
}

var weight = tf.get_variable(name: "weight", dtype: tf.float32, trainable: true,
shape: filters_shape, initializer: tf.random_normal_initializer(stddev: 0.01f));

var conv = tf.nn.conv2d(input: input_data, filter: weight, strides: strides, padding: padding);

if (bn)
{
conv = tf.layers.batch_normalization(conv, beta_initializer: tf.zeros_initializer,
gamma_initializer: tf.ones_initializer,
moving_mean_initializer: tf.zeros_initializer,
moving_variance_initializer: tf.ones_initializer, training: trainable);
}
else
{
var bias = tf.get_variable(name: "bias", shape: filters_shape.Last(), trainable: true,
dtype: tf.float32, initializer: tf.constant_initializer(0.0f));
conv = tf.nn.bias_add(conv, bias);
}

if (activate)
conv = tf.nn.leaky_relu(conv, alpha: 0.1f);

return conv;
});
}

public static Tensor upsample(Tensor input_data, string name, string method = "deconv")
{
Debug.Assert(new[] { "resize", "deconv" }.Contains(method));
Tensor output = null;
if (method == "resize")
{
tf_with(tf.variable_scope(name), delegate
{
var input_shape = tf.shape(input_data);
output = tf.image.resize_nearest_neighbor(input_data, new Tensor[] { input_shape[1] * 2, input_shape[2] * 2 });
});
}
else if(method == "deconv")
{
throw new NotImplementedException("upsample.deconv");
}

return output;
}

public static Tensor residual_block(Tensor input_data, int input_channel, int filter_num1,
int filter_num2, Tensor trainable, string name)
{
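// Standard residual block: a 1x1 bottleneck conv followed by a 3x3 conv,
// added back onto the untouched input (the shortcut).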
var short_cut = input_data;

return tf_with(tf.variable_scope(name), scope =>
{
input_data = convolutional(input_data, filters_shape: new int[] { 1, 1, input_channel, filter_num1 },
trainable: trainable, name: "conv1");
input_data = convolutional(input_data, filters_shape: new int[] { 3, 3, filter_num1, filter_num2 },
trainable: trainable, name: "conv2");

var residual_output = input_data + short_cut;

return residual_output;
});
}
}
}

+ 0
- 94
test/TensorFlowNET.Examples/ImageProcessing/YOLO/config.cs

@@ -1,94 +0,0 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;

namespace TensorFlowNET.Examples.ImageProcessing.YOLO
{
public class Config
{
public YoloConfig YOLO;
public TrainConfig TRAIN;
public TestConfig TEST;

public Config(string root)
{
YOLO = new YoloConfig(root);
TRAIN = new TrainConfig(root);
TEST = new TestConfig(root);
}

public class YoloConfig
{
string _root;

public string CLASSES;
public string ANCHORS;
public float MOVING_AVE_DECAY = 0.9995f;
public int[] STRIDES = new int[] { 8, 16, 32 };
public int ANCHOR_PER_SCALE = 3;
public float IOU_LOSS_THRESH = 0.5f;
public string UPSAMPLE_METHOD = "resize";
public string ORIGINAL_WEIGHT;
public string DEMO_WEIGHT;

public YoloConfig(string root)
{
_root = root;
CLASSES = Path.Combine(_root, "data", "classes", "coco.names");
ANCHORS = Path.Combine(_root, "data", "anchors", "basline_anchors.txt");
ORIGINAL_WEIGHT = Path.Combine(_root, "checkpoint", "yolov3_coco.ckpt");
DEMO_WEIGHT = Path.Combine(_root, "checkpoint", "yolov3_coco_demo.ckpt");
}
}

public class TrainConfig
{
string _root;

public int BATCH_SIZE = 6;
public int[] INPUT_SIZE = new int[] { 320, 352, 384, 416, 448, 480, 512, 544, 576, 608 };
public bool DATA_AUG = true;
public float LEARN_RATE_INIT = 1e-4f;
public float LEARN_RATE_END = 1e-6f;
public int WARMUP_EPOCHS = 2;
public int FIRST_STAGE_EPOCHS = 20;
public int SECOND_STAGE_EPOCHS = 30;
public string INITIAL_WEIGHT;
public string ANNOT_PATH;

public TrainConfig(string root)
{
_root = root;
INITIAL_WEIGHT = Path.Combine(_root, "checkpoint", "yolov3_coco_demo.ckpt");
ANNOT_PATH = Path.Combine(_root, "data", "dataset", "voc_train.txt");
}
}

public class TestConfig
{
string _root;

public int BATCH_SIZE = 2;
public int[] INPUT_SIZE = new int[] { 544 };
public bool DATA_AUG = false;
public bool WRITE_IMAGE = true;
public string WRITE_IMAGE_PATH;
public string WEIGHT_FILE;
public bool WRITE_IMAGE_SHOW_LABEL = true;
public bool SHOW_LABEL = true;
public int SECOND_STAGE_EPOCHS = 30;
public float SCORE_THRESHOLD = 0.3f;
public float IOU_THRESHOLD = 0.45f;
public string ANNOT_PATH;

public TestConfig(string root)
{
_root = root;
ANNOT_PATH = Path.Combine(_root, "data", "dataset", "voc_test.txt");
WRITE_IMAGE_PATH = Path.Combine(_root, "data", "detection");
WEIGHT_FILE = Path.Combine(_root, "checkpoint", "yolov3_test_loss=9.2099.ckpt-5");
}
}
}
}

+ 0
- 99
test/TensorFlowNET.Examples/Keras.cs

@@ -1,99 +0,0 @@
using System;
using System.Collections.Generic;
using Tensorflow;
using Keras.Layers;
using NumSharp;
using Keras;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
public class Keras : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = false;

public string Name => "Keras";

public bool Run()
{
Console.WriteLine("================================== Keras ==================================");

#region data
var batch_size = 1000;
var (X, Y) = XOR(batch_size);
//var (X, Y, batch_size) = (np.array(new float[,]{{1, 0 },{1, 1 },{0, 0 },{0, 1 }}), np.array(new int[] { 1, 0, 0, 1 }), 4);
#endregion

#region features
var (features, labels) = (new Tensor(X), new Tensor(Y));
var num_steps = 10000;
#endregion

#region model
var m = new Model();

//m.Add(new Dense(8, name: "Hidden", activation: tf.nn.relu())).Add(new Dense(1, name:"Output"));

m.Add(
new ILayer[] {
new Dense(8, name: "Hidden_1", activation: tf.nn.relu()),
new Dense(1, name: "Output")
});

m.train(num_steps, (X, Y));
#endregion

return true;
}

static (NDArray, NDArray) XOR(int samples)
{
var X = new List<float[]>();
var Y = new List<float>();
var r = new Random();
for (int i = 0; i < samples; i++)
{
var x1 = (float)r.Next(0, 2);
var x2 = (float)r.Next(0, 2);
var y = 0.0f;
// XOR: the label is 1 exactly when the two inputs differ.
if (x1 != x2)
y = 1.0f;
X.Add(new float[] { x1, x2 });
Y.Add(y);
}
return (np.array(X.ToArray()), np.array(Y.ToArray()));
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void PrepareData()
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 153
test/TensorFlowNET.Examples/NeuralNetworks/FullyConnected.cs

@@ -1,153 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Text;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// How to optimise your input pipeline with queues and multi-threading
/// https://blog.metaflow.fr/tensorflow-how-to-optimise-your-input-pipeline-with-queues-and-multi-threading-e7c3874157e0
/// </summary>
public class FullyConnected : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; }

public string Name => "Fully Connected Neural Network";

Tensor input = null;
Tensor x_inputs_data = null;
Tensor y_inputs_data = null;
Tensor accuracy = null;
Tensor y_true = null;
Tensor loss_op = null;
Operation train_op = null;

public Graph BuildGraph()
{
var g = tf.get_default_graph();
Tensor z = null;
tf_with(tf.variable_scope("placeholder"), delegate
{
input = tf.placeholder(tf.float32, shape: (-1, 1024));
y_true = tf.placeholder(tf.int32, shape: (-1, 1));
});

tf_with(tf.variable_scope("FullyConnected"), delegate
{
var w = tf.get_variable("w", shape: (1024, 1024), initializer: tf.random_normal_initializer(stddev: 0.1f));
var b = tf.get_variable("b", shape: 1024, initializer: tf.constant_initializer(0.1));
z = tf.matmul(input, w) + b;
var y = tf.nn.relu(z);

var w2 = tf.get_variable("w2", shape: (1024, 1), initializer: tf.random_normal_initializer(stddev: 0.1f));
var b2 = tf.get_variable("b2", shape: 1, initializer: tf.constant_initializer(0.1));
z = tf.matmul(y, w2) + b2;
});

tf_with(tf.variable_scope("Loss"), delegate
{
var losses = tf.nn.sigmoid_cross_entropy_with_logits(tf.cast(y_true, tf.float32), z);
loss_op = tf.reduce_mean(losses);
});

tf_with(tf.variable_scope("Accuracy"), delegate
{
var y_pred = tf.cast(z > 0, tf.int32);
accuracy = tf.reduce_mean(tf.cast(tf.equal(y_pred, y_true), tf.float32));
// accuracy = tf.Print(accuracy, data =[accuracy], message = "accuracy:")
});

// We add the training operation, ...
var adam = tf.train.AdamOptimizer(0.01f);
train_op = adam.minimize(loss_op, name: "train_op");

return g;
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void PrepareData()
{
// batches of 128 samples, each containing 1024 data points
x_inputs_data = tf.random_normal(new[] { 128, 1024 }, mean: 0, stddev: 1);
// We will try to predict this law:
// predict 1 if the sum of the elements is positive and 0 otherwise
y_inputs_data = tf.cast(tf.reduce_sum(x_inputs_data, axis: 1, keepdims: true) > 0, tf.int32);
}

public bool Run()
{
PrepareData();
var g = BuildGraph();
using (var sess = tf.Session())
Train(sess);
return true;
}

public void Test(Session sess)
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
var sw = new Stopwatch();
sw.Start();
// init variables
sess.run(tf.global_variables_initializer());

// check the accuracy before training
var (x_input, y_input) = sess.run((x_inputs_data, y_inputs_data));
sess.run(accuracy, (input, x_input), (y_true, y_input));

// training
foreach (var i in range(5000))
{
// by sampling some input data (fetching)
(x_input, y_input) = sess.run((x_inputs_data, y_inputs_data));
var (_, loss) = sess.run((train_op, loss_op), (input, x_input), (y_true, y_input));

// We regularly check the loss
if (i % 500 == 0)
print($"iter:{i} - loss:{loss}");
}

// Finally, we check our final accuracy
(x_input, y_input) = sess.run((x_inputs_data, y_inputs_data));
sess.run(accuracy, (input, x_input), (y_true, y_input));

print($"Time taken: {sw.Elapsed.TotalSeconds}s");
}
}
}

+ 0
- 129
test/TensorFlowNET.Examples/NeuralNetworks/FullyConnectedInQueue.cs

@@ -1,129 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using NumSharp;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Text;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// How to optimise your input pipeline with queues and multi-threading
/// https://blog.metaflow.fr/tensorflow-how-to-optimise-your-input-pipeline-with-queues-and-multi-threading-e7c3874157e0
/// </summary>
public class FullyConnectedInQueue : IExample
{
public bool Enabled { get; set; } = false;
public bool IsImportingGraph { get; set; }

public string Name => "Fully Connected Neural Network In Queue";

Tensor input = null;
Tensor x_inputs_data = null;
Tensor y_inputs_data = null;
Tensor accuracy = null;
Tensor y_true = null;
Tensor loss_op = null;
Operation train_op = null;

public Graph BuildGraph()
{
var g = tf.get_default_graph();

Tensor z = null;

// We build our small model: a basic two-layer neural net with ReLU
tf_with(tf.variable_scope("queue"), delegate
{
// enqueue 5 batches
var q = tf.FIFOQueue(capacity: 5, dtype: tf.float32);
// We use the "enqueue" operation so 1 element of the queue is the full batch
var enqueue_op = q.enqueue(x_inputs_data);
var numberOfThreads = 1;
// var qr = tf.train.QueueRunner(q, [enqueue_op] * numberOfThreads);
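// NOTE: the QueueRunner-based feeding from the referenced blog post is not
// ported here, so this graph only builds the queue itself.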
});

return g;
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void PrepareData()
{
// batches of 128 samples, each containing 1024 data points
x_inputs_data = tf.random_normal(new[] { 128, 1024 }, mean: 0, stddev: 1);
// We will try to predict this law:
// predict 1 if the sum of the elements is positive and 0 otherwise
y_inputs_data = tf.cast(tf.reduce_sum(x_inputs_data, axis: 1, keepdims: true) > 0, tf.int32);
}

public bool Run()
{
PrepareData();
var g = BuildGraph();
using (var sess = tf.Session())
Train(sess);
return true;
}

public void Test(Session sess)
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
var sw = new Stopwatch();
sw.Start();
// init variables
sess.run(tf.global_variables_initializer());

// check the accuracy before training
var (x_input, y_input) = sess.run((x_inputs_data, y_inputs_data));
sess.run(accuracy, (input, x_input), (y_true, y_input));

// training
foreach (var i in range(5000))
{
// by sampling some input data (fetching)
(x_input, y_input) = sess.run((x_inputs_data, y_inputs_data));
var (_, loss) = sess.run((train_op, loss_op), (input, x_input), (y_true, y_input));

// We regularly check the loss
if (i % 500 == 0)
print($"iter:{i} - loss:{loss}");
}

// Finally, we check our final accuracy
(x_input, y_input) = sess.run((x_inputs_data, y_inputs_data));
sess.run(accuracy, (input, x_input), (y_true, y_input));

print($"Time taken: {sw.Elapsed.TotalSeconds}s");
}
}
}

+ 0
- 190
test/TensorFlowNET.Examples/NeuralNetworks/NeuralNetXor.cs

@@ -1,190 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using NumSharp;
using Tensorflow;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Simple vanilla neural net solving the famous XOR problem
/// https://github.com/amygdala/tensorflow-workshop/blob/master/workshop_sections/getting_started/xor/README.md
/// </summary>
public class NeuralNetXor : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "NN XOR";
public bool IsImportingGraph { get; set; } = false;

public int num_steps = 10000;

private NDArray data;

private (Operation, Tensor, Tensor) make_graph(Tensor features, Tensor labels, int num_hidden = 8)
{
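// Scaling the initial weights by 1/sqrt(fan_in) (fan_in = 2 inputs here)
// keeps the hidden activations at roughly unit variance.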
var stddev = 1 / Math.Sqrt(2);
var hidden_weights = tf.Variable(tf.truncated_normal(new int []{2, num_hidden}, seed:1, stddev: (float) stddev ));

// Shape [4, num_hidden]
var hidden_activations = tf.nn.relu(tf.matmul(features, hidden_weights));

var output_weights = tf.Variable(tf.truncated_normal(
new[] {num_hidden, 1},
seed: 17,
stddev: (float) (1 / Math.Sqrt(num_hidden))
));

// Shape [4, 1]
var logits = tf.matmul(hidden_activations, output_weights);

// Shape [4]
var predictions = tf.tanh(tf.squeeze(logits));
var loss = tf.reduce_mean(tf.square(predictions - tf.cast(labels, tf.float32)), name:"loss");

var gs = tf.Variable(0, trainable: false, name: "global_step");
var train_op = tf.train.GradientDescentOptimizer(0.2f).minimize(loss, global_step: gs);

return (train_op, loss, gs);
}

public bool Run()
{
PrepareData();
float loss_value = 0;
if (IsImportingGraph)
loss_value = RunWithImportedGraph();
else
loss_value = RunWithBuiltGraph();

return loss_value < 0.0628;
}

private float RunWithImportedGraph()
{
var graph = tf.Graph().as_default();

tf.train.import_meta_graph("graph/xor.meta");

Tensor features = graph.get_operation_by_name("Placeholder");
Tensor labels = graph.get_operation_by_name("Placeholder_1");
Tensor loss = graph.get_operation_by_name("loss");
Tensor train_op = graph.get_operation_by_name("train_op");
Tensor global_step = graph.get_operation_by_name("global_step");

var init = tf.global_variables_initializer();
float loss_value = 0;
// Start tf session
using (var sess = tf.Session(graph))
{
sess.run(init);
var step = 0;

var y_ = np.array(new int[] { 1, 0, 0, 1 }, dtype: np.int32);
while (step < num_steps)
{
// original python:
//_, step, loss_value = sess.run(
// [train_op, gs, loss],
// feed_dict={features: xy, labels: y_}
// )
(_, step, loss_value) = sess.run((train_op, global_step, loss), (features, data), (labels, y_));
if (step == 1 || step % 1000 == 0)
Console.WriteLine($"Step {step} loss: {loss_value}");
}
Console.WriteLine($"Final loss: {loss_value}");
}

return loss_value;
}

private float RunWithBuiltGraph()
{
var graph = tf.Graph().as_default();

var features = tf.placeholder(tf.float32, new TensorShape(4, 2));
var labels = tf.placeholder(tf.int32, new TensorShape(4));

var (train_op, loss, gs) = make_graph(features, labels);

var init = tf.global_variables_initializer();

float loss_value = 0;
// Start tf session
using (var sess = tf.Session(graph))
{
sess.run(init);
var step = 0;

var y_ = np.array(new int[] { 1, 0, 0, 1 }, dtype: np.int32);
while (step < num_steps)
{
(_, step, loss_value) = sess.run((train_op, gs, loss), (features, data), (labels, y_));
if (step == 1 || step % 1000 == 0)
Console.WriteLine($"Step {step} loss: {loss_value}");
}
Console.WriteLine($"Final loss: {loss_value}");
}

return loss_value;
}

public void PrepareData()
{
data = new float[,]
{
{1, 0 },
{1, 1 },
{0, 0 },
{0, 1 }
};

if (IsImportingGraph)
{
// download graph meta data
string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/xor.meta";
Web.Download(url, "graph", "xor.meta");
}
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 120
test/TensorFlowNET.Examples/Program.cs

@@ -1,120 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Linq;
using System.Reflection;
using Tensorflow;
using Console = Colorful.Console;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
class Program
{
static void Main(string[] args)
{
int finished = 0;
var errors = new List<string>();
var success = new List<string>();

var parsedArgs = ParseArgs(args);

var examples = Assembly.GetEntryAssembly().GetTypes()
.Where(x => x.GetInterfaces().Contains(typeof(IExample)))
.Select(x => (IExample)Activator.CreateInstance(x))
.Where(x => x.Enabled)
.OrderBy(x => x.Name)
.ToArray();

if (parsedArgs.ContainsKey("ex"))
examples = examples.Where(x => x.Name == parsedArgs["ex"]).ToArray();

Console.WriteLine(Environment.OSVersion.ToString(), Color.Yellow);
Console.WriteLine($"TensorFlow Binary v{tf.VERSION}", Color.Yellow);
Console.WriteLine($"TensorFlow.NET v{Assembly.GetAssembly(typeof(TF_DataType)).GetName().Version}", Color.Yellow);

for (var i = 0; i < examples.Length; i++)
Console.WriteLine($"[{i}]: {examples[i].Name}");

var key = "0";
if (examples.Length > 1)
{
Console.Write($"Choose one example to run, hit [Enter] to run all: ", Color.Yellow);
key = Console.ReadLine();
}

var sw = new Stopwatch();
for (var i = 0; i < examples.Length; i++)
{
if (i.ToString() != key && key != "") continue;

var example = examples[i];
Console.WriteLine($"{DateTime.UtcNow} Starting {example.Name}", Color.White);

try
{
sw.Restart();
bool isSuccess = example.Run();
sw.Stop();

if (isSuccess)
success.Add($"Example: {example.Name} in {sw.Elapsed.TotalSeconds}s");
else
errors.Add($"Example: {example.Name} in {sw.Elapsed.TotalSeconds}s");
}
catch (Exception ex)
{
errors.Add($"Example: {example.Name}");
Console.WriteLine(ex);
}

finished++;
Console.WriteLine($"{DateTime.UtcNow} Completed {example.Name}", Color.White);
}

success.ForEach(x => Console.WriteLine($"{x} is OK!", Color.Green));
errors.ForEach(x => Console.WriteLine($"{x} is Failed!", Color.Red));

Console.WriteLine($"{finished} of {examples.Length} example(s) are completed.");
Console.ReadLine();
}

private static Dictionary<string, string> ParseArgs(string[] args)
{
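// Only the "-ex <name>" switch is recognized; it filters the examples list by
// the Name property, e.g. -ex "NN XOR" runs just that example.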
var parsed = new Dictionary<string, string>();

for (int i = 0; i < args.Length; i++)
{
string key = args[i].Substring(1);
switch (key)
{
case "ex":
parsed.Add(key, args[++i]);
break;
default:
break;
}
}

return parsed;
}
}
}

+ 0
- 32
test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj

@@ -1,32 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">

<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp3.0</TargetFramework>
<GeneratePackageOnBuild>false</GeneratePackageOnBuild>
</PropertyGroup>

<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'">
<DefineConstants>DEBUG;TRACE</DefineConstants>
</PropertyGroup>

<ItemGroup>
<Compile Remove="Keras.cs" />
</ItemGroup>

<ItemGroup>
<PackageReference Include="Colorful.Console" Version="1.2.9" />
<PackageReference Include="Newtonsoft.Json" Version="12.0.2" />
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.14.0" />
<PackageReference Include="SharpZipLib" Version="1.1.0" />
<PackageReference Include="System.Drawing.Common" Version="4.5.1" />
</ItemGroup>

<ItemGroup>
<ProjectReference Include="..\..\src\TensorFlowDatasets\TensorFlowDatasets.csproj" />
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
<ProjectReference Include="..\..\src\TensorFlowNET.Models\TensorFlowNET.Models.csproj" />
<ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" />
<ProjectReference Include="..\..\src\TensorFlowHub\TensorFlowHub.csproj" />
</ItemGroup>
</Project>

+ 0
- 164
test/TensorFlowNET.Examples/TextProcessing/BinaryTextClassification.cs

@@ -1,164 +0,0 @@
using System;
using System.Collections.Generic;
using System.IO;
using Tensorflow;
using Newtonsoft.Json;
using System.Linq;
using NumSharp;

namespace TensorFlowNET.Examples
{
/// <summary>
/// This example classifies movie reviews as positive or negative using the text of the review.
/// This is a binary—or two-class—classification, an important and widely applicable kind of machine learning problem.
/// https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_text_classification.ipynb
/// </summary>
public class BinaryTextClassification : IExample
{
public bool Enabled { get; set; } = false;
public string Name => "Binary Text Classification";
public bool IsImportingGraph { get; set; } = true;

string dir = "binary_text_classification";
string dataFile = "imdb.zip";
NDArray train_data, train_labels, test_data, test_labels;

public bool Run()
{
PrepareData();

Console.WriteLine($"Training entries: {train_data.shape[0]}, labels: {train_labels.shape[0]}");

// A dictionary mapping words to an integer index
var word_index = GetWordIndex();
/*train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value: word_index["<PAD>"],
padding: "post",
maxlen: 256);

test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value: word_index["<PAD>"],
padding: "post",
maxlen: 256);*/
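
// keras.preprocessing.sequence has no binding here yet; a minimal stand-in
// (a sketch, assuming each review is an int[] of word ids) that post-pads or
// truncates a sequence to a fixed length, e.g. PadSequence(review, 256, word_index["<PAD>"]):
static int[] PadSequence(int[] seq, int maxlen, int padValue)
{
var padded = new int[maxlen];
for (int i = 0; i < maxlen; i++)
padded[i] = i < seq.Length ? seq[i] : padValue;
return padded;
}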

// input shape is the vocabulary count used for the movie reviews (10,000 words)
int vocab_size = 10000;

var model = keras.Sequential();
var layer = keras.layers.Embedding(vocab_size, 16);
model.add(layer);

return false;
}

public void PrepareData()
{
Directory.CreateDirectory(dir);

// get model file
string url = $"https://github.com/SciSharp/TensorFlow.NET/raw/master/data/{dataFile}";

Utility.Web.Download(url, dir, "imdb.zip");
Utility.Compress.UnZip(Path.Join(dir, $"imdb.zip"), dir);

// prepare training dataset
var x_train = ReadData(Path.Join(dir, "x_train.txt"));
var labels_train = ReadData(Path.Join(dir, "y_train.txt"));
var indices_train = ReadData(Path.Join(dir, "indices_train.txt"));
x_train = x_train[indices_train];
labels_train = labels_train[indices_train];

var x_test = ReadData(Path.Join(dir, "x_test.txt"));
var labels_test = ReadData(Path.Join(dir, "y_test.txt"));
var indices_test = ReadData(Path.Join(dir, "indices_test.txt"));
x_test = x_test[indices_test];
labels_test = labels_test[indices_test];

// not completed
var xs = x_train.hstack(x_test);
var labels = labels_train.hstack(labels_test);

var idx = x_train.size;
var y_train = labels_train;
var y_test = labels_test;

// convert x_train
train_data = new NDArray(np.int32, (x_train.size, 256));
/*for (int i = 0; i < x_train.size; i++)
train_data[i] = x_train[i].Data<string>()[1].Split(',').Select(x => int.Parse(x)).ToArray();*/

test_data = new NDArray(np.int32, (x_test.size, 256));
/*for (int i = 0; i < x_test.size; i++)
test_data[i] = x_test[i].Data<string>()[1].Split(',').Select(x => int.Parse(x)).ToArray();*/

train_labels = y_train;
test_labels = y_test;
}

private NDArray ReadData(string file)
{
var lines = File.ReadAllLines(file);
var nd = new NDArray(lines[0].StartsWith("[") ? typeof(string) : np.int32, new Shape(lines.Length));

if (lines[0].StartsWith("["))
{
for (int i = 0; i < lines.Length; i++)
{
/*var matches = Regex.Matches(lines[i], @"\d+\s*");
var data = new int[matches.Count];
for (int j = 0; j < data.Length; j++)
data[j] = Convert.ToInt32(matches[j].Value);
nd[i] = data.ToArray();*/
nd[i] = lines[i].Substring(1, lines[i].Length - 2).Replace(" ", string.Empty);
}
}
else
{
for (int i = 0; i < lines.Length; i++)
nd[i] = Convert.ToInt32(lines[i]);
}
return nd;
}

private Dictionary<string, int> GetWordIndex()
{
var result = new Dictionary<string, int>();
var json = File.ReadAllText(Path.Join(dir, "imdb_word_index.json"));
var dict = JsonConvert.DeserializeObject<Dictionary<string, int>>(json);

foreach (var kv in dict)
result[kv.Key] = kv.Value + 3; // shift ids to make room for the special tokens below
result["<PAD>"] = 0;
result["<START>"] = 1;
result["<UNK>"] = 2; // unknown
result["<UNUSED>"] = 3;

return result;
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 272
test/TensorFlowNET.Examples/TextProcessing/CnnTextClassification.cs

@@ -1,272 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using NumSharp;
using Tensorflow;
using Tensorflow.Sessions;
using TensorFlowNET.Examples.Text;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// https://github.com/dongjun-Lee/text-classification-models-tf
/// </summary>
public class CnnTextClassification : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "CNN Text Classification";
public int? DataLimit = null;
public bool IsImportingGraph { get; set; } = false;

const string dataDir = "cnn_text";
string dataFileName = "dbpedia_csv.tar.gz";

string TRAIN_PATH = $"{dataDir}/dbpedia_csv/train.csv";
string TEST_PATH = $"{dataDir}/dbpedia_csv/test.csv";
int NUM_CLASS = 14;
int BATCH_SIZE = 64;
int NUM_EPOCHS = 10;
int WORD_MAX_LEN = 100;
int CHAR_MAX_LEN = 1014;
float loss_value = 0;
double max_accuracy = 0;

int alphabet_size = -1;
int vocabulary_size = -1;
NDArray train_x, valid_x, train_y, valid_y;

ITextModel textModel;
public string ModelName = "word_cnn"; // word_cnn | char_cnn | vd_cnn | word_rnn | att_rnn | rcnn

public bool Run()
{
PrepareData();
var graph = IsImportingGraph ? ImportGraph() : BuildGraph();

using (var sess = tf.Session(graph))
Train(sess);

return max_accuracy > 0.9;
}

// TODO: this is originally an SKLearn utility function; it randomizes the train/test split, which we don't do here
private (NDArray, NDArray, NDArray, NDArray) train_test_split(NDArray x, NDArray y, float test_size = 0.3f)
{
Console.WriteLine("Splitting in Training and Testing data...");
int len = x.shape[0];
//int classes = y.Data<int>().Distinct().Count();
//int samples = len / classes;
int train_size = (int)Math.Round(len * (1 - test_size));
train_x = x[new Slice(stop: train_size), new Slice()];
valid_x = x[new Slice(start: train_size), new Slice()];
train_y = y[new Slice(stop: train_size)];
valid_y = y[new Slice(start: train_size)];
Console.WriteLine("\tDONE");

return (train_x, valid_x, train_y, valid_y);
}

private void FillWithShuffledLabels(int[][] x, int[] y, int[][] shuffled_x, int[] shuffled_y, Random random, Dictionary<int, HashSet<int>> labels)
{
int i = 0;
var label_keys = labels.Keys.ToArray();
while (i < shuffled_x.Length)
{
if (label_keys.Length == 0)
break; // every label set has been exhausted
var key = label_keys[random.Next(label_keys.Length)];
var set = labels[key];
if (set.Count == 0)
{
labels.Remove(key); // this label is exhausted; drop it and redraw
label_keys = labels.Keys.ToArray();
continue;
}
var index = set.First();
set.Remove(index); // consume the sample so it cannot be drawn twice
shuffled_x[i] = x[index];
shuffled_y[i] = y[index];
i++;
}
}

private IEnumerable<(NDArray, NDArray, int)> batch_iter(NDArray inputs, NDArray outputs, int batch_size, int num_epochs)
{
var num_batches_per_epoch = (len(inputs) - 1) / batch_size + 1;
var total_batches = num_batches_per_epoch * num_epochs;
foreach (var epoch in range(num_epochs))
{
foreach (var batch_num in range(num_batches_per_epoch))
{
var start_index = batch_num * batch_size;
var end_index = Math.Min((batch_num + 1) * batch_size, len(inputs));
if (end_index <= start_index)
break;
yield return (inputs[new Slice(start_index, end_index)], outputs[new Slice(start_index, end_index)], total_batches);
}
}
}

public void PrepareData()
{
// full dataset https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz
var url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/data/dbpedia_subset.zip";
Web.Download(url, dataDir, "dbpedia_subset.zip");
Compress.UnZip(Path.Combine(dataDir, "dbpedia_subset.zip"), Path.Combine(dataDir, "dbpedia_csv"));

Console.WriteLine("Building dataset...");
var (x, y) = (new int[0][], new int[0]);

if(ModelName == "char_cnn")
{
(x, y, alphabet_size) = DataHelpers.build_char_dataset(TRAIN_PATH, "char_cnn", CHAR_MAX_LEN);
}
else
{
var word_dict = DataHelpers.build_word_dict(TRAIN_PATH);
vocabulary_size = len(word_dict);
(x, y) = DataHelpers.build_word_dataset(TRAIN_PATH, word_dict, WORD_MAX_LEN);
}

Console.WriteLine("\tDONE ");

var (train_x, valid_x, train_y, valid_y) = train_test_split(x, y, test_size: 0.15f);
Console.WriteLine("Training set size: " + train_x.shape[0]);
Console.WriteLine("Test set size: " + valid_x.shape[0]);
}

public Graph ImportGraph()
{
var graph = tf.Graph().as_default();

// download graph meta data
var meta_file = "word_cnn.meta";
var meta_path = Path.Combine("graph", meta_file);
if (File.GetLastWriteTime(meta_path) < new DateTime(2019, 05, 11))
{
// delete old cached file which contains errors
Console.WriteLine("Discarding cached file: " + meta_path);
if(File.Exists(meta_path))
File.Delete(meta_path);
}
var url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/" + meta_file;
Web.Download(url, "graph", meta_file);

Console.WriteLine("Import graph...");
tf.train.import_meta_graph(Path.Join("graph", meta_file));
Console.WriteLine("\tDONE ");

return graph;
}

public Graph BuildGraph()
{
var graph = tf.Graph().as_default();

switch (ModelName)
{
case "word_cnn":
textModel = new WordCnn(vocabulary_size, WORD_MAX_LEN, NUM_CLASS);
break;
case "char_cnn":
textModel = new CharCnn(alphabet_size, CHAR_MAX_LEN, NUM_CLASS);
break;
}

return graph;
}

public void Train(Session sess)
{
var graph = tf.get_default_graph();

sess.run(tf.global_variables_initializer());
var saver = tf.train.Saver(tf.global_variables());

var train_batches = batch_iter(train_x, train_y, BATCH_SIZE, NUM_EPOCHS);
var num_batches_per_epoch = (len(train_x) - 1) / BATCH_SIZE + 1;

Tensor is_training = graph.OperationByName("is_training");
Tensor model_x = graph.OperationByName("x");
Tensor model_y = graph.OperationByName("y");
Tensor loss = graph.OperationByName("loss/Mean");
Operation optimizer = graph.OperationByName("loss/Adam");
Tensor global_step = graph.OperationByName("Variable");
Tensor accuracy = graph.OperationByName("accuracy/accuracy");

var sw = new Stopwatch();
sw.Start();

int step = 0;
foreach (var (x_batch, y_batch, total) in train_batches)
{
(_, step, loss_value) = sess.run((optimizer, global_step, loss),
(model_x, x_batch), (model_y, y_batch), (is_training, true));
if (step % 10 == 0)
{
Console.WriteLine($"Training on batch {step}/{total} loss: {loss_value.ToString("0.0000")} {sw.ElapsedMilliseconds}ms.");
sw.Restart();
}

if (step % 100 == 0)
{
// Test accuracy with validation data for each epoch.
var valid_batches = batch_iter(valid_x, valid_y, BATCH_SIZE, 1);
var (sum_accuracy, cnt) = (0.0f, 0);
foreach (var (valid_x_batch, valid_y_batch, total_validation_batches) in valid_batches)
{
var valid_feed_dict = new FeedDict
{
[model_x] = valid_x_batch,
[model_y] = valid_y_batch,
[is_training] = false
};
float accuracy_value = sess.run(accuracy, (model_x, valid_x_batch), (model_y, valid_y_batch), (is_training, false));
sum_accuracy += accuracy_value;
cnt += 1;
}

var valid_accuracy = sum_accuracy / cnt;

print($"\nValidation Accuracy = {valid_accuracy.ToString("P")}\n");
// Save model
if (valid_accuracy > max_accuracy)
{
max_accuracy = valid_accuracy;
saver.save(sess, $"{dataDir}/word_cnn.ckpt", global_step: step);
print("Model is saved.\n");
}
}
}
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 218
test/TensorFlowNET.Examples/TextProcessing/DataHelpers.cs

@@ -1,218 +0,0 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Security.Cryptography;
using System.Text;
using System.Text.RegularExpressions;
using TensorFlowNET.Examples.Utility;

namespace TensorFlowNET.Examples
{
public class DataHelpers
{
public static Dictionary<string, int> build_word_dict(string path)
{
var contents = File.ReadAllLines(path);
var words = new List<string>();
foreach (var content in contents)
words.AddRange(clean_str(content).Split(' ').Where(x => x.Length > 1));
var word_counter = words.GroupBy(x => x)
.Select(x => new { Word = x.Key, Count = x.Count() })
.OrderByDescending(x => x.Count)
.ToArray();
var word_dict = new Dictionary<string, int>();
word_dict["<pad>"] = 0;
word_dict["<unk>"] = 1;
word_dict["<eos>"] = 2;
foreach (var word in word_counter)
word_dict[word.Word] = word_dict.Count;
return word_dict;
}

public static (int[][], int[]) build_word_dataset(string path, Dictionary<string, int> word_dict, int document_max_len)
{
var contents = File.ReadAllLines(path);
var x = contents.Select(c => (clean_str(c) + " <eos>")
.Split(' ').Take(document_max_len)
.Select(w => word_dict.ContainsKey(w) ? word_dict[w] : word_dict["<unk>"]).ToArray())
.ToArray();
for (int i = 0; i < x.Length; i++)
if (x[i].Length == document_max_len)
x[i][document_max_len - 1] = word_dict["<eos>"];
else
Array.Resize(ref x[i], document_max_len);
var y = contents.Select(c => int.Parse(c.Substring(0, c.IndexOf(','))) - 1).ToArray();
return (x, y);
}

public static (int[][], int[], int) build_char_dataset(string path, string model, int document_max_len, int? limit = null, bool shuffle=true)
{
string alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:’'\"/|_#$%ˆ&*˜‘+=<>()[]{} ";
/*if (step == "train")
df = pd.read_csv(TRAIN_PATH, names =["class", "title", "content"]);*/
var char_dict = new Dictionary<string, int>();
char_dict["<pad>"] = 0;
char_dict["<unk>"] = 1;
foreach (char c in alphabet)
char_dict[c.ToString()] = char_dict.Count;
var contents = File.ReadAllLines(path);
if (shuffle)
new Random(17).Shuffle(contents);
//File.WriteAllLines("text_classification/dbpedia_csv/train_6400.csv", contents.Take(6400));
var size = limit == null ? contents.Length : limit.Value;

var x = new int[size][];
var y = new int[size];
var tenth = size / 10;
var percent = 0;
for (int i = 0; i < size; i++)
{
if ((i + 1) % tenth == 0)
{
percent += 10;
Console.WriteLine($"\t{percent}%");
}

string[] parts = contents[i].ToLower().Split(",\"").ToArray();
string content = parts[2];
content = content.Substring(0, content.Length - 1);
var a = new int[document_max_len];
for (int j = 0; j < document_max_len; j++)
{
if (j >= content.Length)
a[j] = char_dict["<pad>"];
else
a[j] = char_dict.ContainsKey(content[j].ToString()) ? char_dict[content[j].ToString()] : char_dict["<unk>"];
}
x[i] = a;
y[i] = int.Parse(parts[0]);
}

return (x, y, alphabet.Length + 2);
}

/// <summary>
/// Loads MR polarity data from files, splits the data into words and generates labels.
/// Returns split sentences and labels.
/// </summary>
/// <param name="positive_data_file"></param>
/// <param name="negative_data_file"></param>
/// <returns></returns>
public static (string[], NDArray) load_data_and_labels(string positive_data_file, string negative_data_file)
{
Directory.CreateDirectory("CnnTextClassification");
Utility.Web.Download(positive_data_file, "CnnTextClassification", "rt-polarity.pos");
Utility.Web.Download(negative_data_file, "CnnTextClassification", "rt-polarity.neg");

// Load data from files
var positive_examples = File.ReadAllLines("CnnTextClassification/rt-polarity.pos")
.Select(x => x.Trim())
.ToArray();

var negative_examples = File.ReadAllLines("CnnTextClassification/rt-polarity.neg")
.Select(x => x.Trim())
.ToArray();

var x_text = new List<string>();
x_text.AddRange(positive_examples);
x_text.AddRange(negative_examples);
x_text = x_text.Select(x => clean_str(x)).ToList();

var positive_labels = positive_examples.Select(x => new int[2] { 0, 1 }).ToArray();
var negative_labels = negative_examples.Select(x => new int[2] { 1, 0 }).ToArray();
var y = np.concatenate(new NDArray[] { new int[][][] { positive_labels, negative_labels } });
return (x_text.ToArray(), y);
}

private static string clean_str(string str)
{
str = Regex.Replace(str, "[^A-Za-z0-9(),!?]", " ");
str = Regex.Replace(str, ",", " ");
return str;
}

/// <summary>
/// Padding
/// </summary>
/// <param name="sequences"></param>
/// <param name="pad_tok">the char to pad with</param>
/// <returns>a list of list where each sublist has same length</returns>
public static (int[][], int[]) pad_sequences(int[][] sequences, int pad_tok = 0)
{
int max_length = sequences.Select(x => x.Length).Max();
return _pad_sequences(sequences, pad_tok, max_length);
}
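
// e.g. pad_sequences(new[] { new[] { 1, 2 }, new[] { 3 } })
// yields sequences { {1, 2}, {3, 0} } and lengths { 2, 1 }.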

public static (int[][][], int[][]) pad_sequences(int[][][] sequences, int pad_tok = 0)
{
int max_length_word = sequences.Select(x => x.Select(w => w.Length).Max()).Max();
int[][][] sequence_padded;
var sequence_length = new int[sequences.Length][];
for (int i = 0; i < sequences.Length; i++)
{
// all words are same length now
var (sp, sl) = _pad_sequences(sequences[i], pad_tok, max_length_word);
sequence_length[i] = sl;
}

int max_length_sentence = sequences.Select(x => x.Length).Max();
(sequence_padded, _) = _pad_sequences(sequences, np.repeat(pad_tok, max_length_word).GetData<int>().ToArray(), max_length_sentence);
(sequence_length, _) = _pad_sequences(sequence_length, 0, max_length_sentence);

return (sequence_padded, sequence_length);
}

private static (int[][], int[]) _pad_sequences(int[][] sequences, int pad_tok, int max_length)
{
var sequence_length = new int[sequences.Length];
for (int i = 0; i < sequences.Length; i++)
{
sequence_length[i] = sequences[i].Length;
Array.Resize(ref sequences[i], max_length);
}

return (sequences, sequence_length);
}

private static (int[][][], int[]) _pad_sequences(int[][][] sequences, int[] pad_tok, int max_length)
{
var sequence_length = new int[sequences.Length];
for (int i = 0; i < sequences.Length; i++)
{
sequence_length[i] = sequences[i].Length;
Array.Resize(ref sequences[i], max_length);
for (int j = 0; j < max_length - sequence_length[i]; j++)
{
sequences[i][max_length - j - 1] = new int[pad_tok.Length];
Array.Copy(pad_tok, sequences[i][max_length - j - 1], pad_tok.Length);
}
}

return (sequences, sequence_length);
}

public static string CalculateMD5Hash(string input)
{
// step 1, calculate MD5 hash from input
using MD5 md5 = MD5.Create();
byte[] inputBytes = Encoding.ASCII.GetBytes(input);
byte[] hash = md5.ComputeHash(inputBytes);
// step 2, convert byte array to hex string
StringBuilder sb = new StringBuilder();
for (int i = 0; i < hash.Length; i++)
{
sb.Append(hash[i].ToString("X2"));
}
return sb.ToString();
}
}
}

+ 0
- 59
test/TensorFlowNET.Examples/TextProcessing/NER/BiLstmCrfNer.cs

@@ -1,59 +0,0 @@
using System;
using System.IO;
using Tensorflow;
using Tensorflow.Estimators;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Bidirectional LSTM-CRF Models for Sequence Tagging
/// https://github.com/guillaumegenthial/tf_ner/tree/master/models/lstm_crf
/// </summary>
public class BiLstmCrfNer : IExample
{
public bool Enabled { get; set; } = false;
public bool IsImportingGraph { get; set; } = false;

public string Name => "bi-LSTM + CRF NER";

public bool Run()
{
PrepareData();
return false;
}

public void PrepareData()
{
var hp = new HyperParams("BiLstmCrfNer");
hp.filepath_words = Path.Combine(hp.data_root_dir, "vocab.words.txt");
hp.filepath_chars = Path.Combine(hp.data_root_dir, "vocab.chars.txt");
hp.filepath_tags = Path.Combine(hp.data_root_dir, "vocab.tags.txt");
hp.filepath_glove = Path.Combine(hp.data_root_dir, "glove.npz");
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 55
test/TensorFlowNET.Examples/TextProcessing/NER/CRF.cs

@@ -1,55 +0,0 @@
using System;
using Tensorflow;

namespace TensorFlowNET.Examples
{
/// <summary>
/// The CRF module implements a linear-chain CRF layer for learning to predict tag sequences.
/// This variant of the CRF is factored into unary potentials for every element
/// in the sequence and binary potentials for every transition between output tags.
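/// Concretely, a linear-chain CRF scores a tag sequence y for inputs x as
/// score(x, y) = sum_t unary(t, y_t) + sum_t transition(y_{t-1}, y_t)
/// and normalizes over all possible tag sequences to obtain p(y|x).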
///
/// tensorflow\contrib\crf\python\ops\crf.py
/// </summary>
public class CRF : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = false;

public string Name => "CRF";

public bool Run()
{
return true;
}

public void PrepareData()
{

}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 233
test/TensorFlowNET.Examples/TextProcessing/NER/LstmCrfNer.cs

@@ -1,233 +0,0 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using Tensorflow;
using Tensorflow.Estimators;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Binding;
using static TensorFlowNET.Examples.DataHelpers;

namespace TensorFlowNET.Examples.Text.NER
{
/// <summary>
/// A NER model using Tensorflow (LSTM + CRF + chars embeddings).
/// State-of-the-art performance (F1 score between 90 and 91).
///
/// https://github.com/guillaumegenthial/sequence_tagging
/// </summary>
public class LstmCrfNer : IExample
{
public bool Enabled { get; set; } = true;
public bool IsImportingGraph { get; set; } = true;

public string Name => "LSTM + CRF NER";

HyperParams hp;
int nwords, nchars, ntags;
CoNLLDataset dev, train;

Tensor word_ids_tensor;
Tensor sequence_lengths_tensor;
Tensor char_ids_tensor;
Tensor word_lengths_tensor;
Tensor labels_tensor;
Tensor dropout_tensor;
Tensor lr_tensor;
Operation train_op;
Tensor loss;
Tensor merged;

public bool Run()
{
PrepareData();
var graph = tf.Graph().as_default();

tf.train.import_meta_graph("graph/lstm_crf_ner.meta");

float loss_value = 0f;

//add_summary();
word_ids_tensor = graph.OperationByName("word_ids");
sequence_lengths_tensor = graph.OperationByName("sequence_lengths");
char_ids_tensor = graph.OperationByName("char_ids");
word_lengths_tensor = graph.OperationByName("word_lengths");
labels_tensor = graph.OperationByName("labels");
dropout_tensor = graph.OperationByName("dropout");
lr_tensor = graph.OperationByName("lr");
train_op = graph.OperationByName("train_step/Adam");
loss = graph.OperationByName("Mean");
//merged = graph.OperationByName("Merge/MergeSummary");

var init = tf.global_variables_initializer();

using (var sess = tf.Session())
{
sess.run(init);

foreach (var epoch in range(hp.epochs))
{
Console.Write($"Epoch {epoch + 1} out of {hp.epochs}, ");
loss_value = run_epoch(sess, train, dev, epoch);
print($"train loss: {loss_value}");
}
}

return loss_value < 0.1;
}

private float run_epoch(Session sess, CoNLLDataset train, CoNLLDataset dev, int epoch)
{
float loss_value = 0;
// iterate over the dataset in minibatches; the fetched "loss" tensor is the mean loss
var batches = minibatches(train, hp.batch_size);
foreach (var (words, labels) in batches)
{
var (fd, _) = get_feed_dict(words, labels, hp.lr, hp.dropout);
(_, loss_value) = sess.run((train_op, loss), feed_dict: fd);
}

return loss_value;
}

private IEnumerable<((int[][], int[])[], int[][])> minibatches(CoNLLDataset data, int minibatch_size)
{
var x_batch = new List<(int[][], int[])>();
var y_batch = new List<int[]>();
foreach(var (x, y) in data.GetItems())
{
if (len(y_batch) == minibatch_size)
{
yield return (x_batch.ToArray(), y_batch.ToArray());
x_batch.Clear();
y_batch.Clear();
}

var x3 = (x.Select(x1 => x1.Item1).ToArray(), x.Select(x2 => x2.Item2).ToArray());
x_batch.Add(x3);
y_batch.Add(y);
}

if (len(y_batch) > 0)
yield return (x_batch.ToArray(), y_batch.ToArray());
}

/// <summary>
/// Given some data, pad it and build a feed dictionary
/// </summary>
/// <param name="words">
/// list of sentences; each sentence is a pair of (char ids per word, word ids)
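/// e.g. a two-word sentence: ([[3,1,4],[1,5]], [12, 7])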
/// </param>
/// <param name="labels">list of ids</param>
/// <param name="lr">learning rate</param>
/// <param name="dropout">keep prob</param>
private (FeedItem[], int[]) get_feed_dict((int[][], int[])[] words, int[][] labels, float lr = 0f, float dropout = 0f)
{
int[] sequence_lengths;
int[][] word_lengths;
int[][] word_ids;
int[][][] char_ids;

if (true) // use_chars
{
(char_ids, word_ids) = (words.Select(x => x.Item1).ToArray(), words.Select(x => x.Item2).ToArray());
(word_ids, sequence_lengths) = pad_sequences(word_ids, pad_tok: 0);
(char_ids, word_lengths) = pad_sequences(char_ids, pad_tok: 0);
}

// build feed dictionary
var feeds = new List<FeedItem>();
feeds.Add(new FeedItem(word_ids_tensor, np.array(word_ids)));
feeds.Add(new FeedItem(sequence_lengths_tensor, np.array(sequence_lengths)));
if(true) // use_chars
{
feeds.Add(new FeedItem(char_ids_tensor, np.array(char_ids)));
feeds.Add(new FeedItem(word_lengths_tensor, np.array(word_lengths)));
}

(labels, _) = pad_sequences(labels, 0);
feeds.Add(new FeedItem(labels_tensor, np.array(labels)));

feeds.Add(new FeedItem(lr_tensor, lr));

feeds.Add(new FeedItem(dropout_tensor, dropout));

return (feeds.ToArray(), sequence_lengths);
}

public void PrepareData()
{
hp = new HyperParams("LstmCrfNer")
{
epochs = 50,
dropout = 0.5f,
batch_size = 20,
lr_method = "adam",
lr = 0.001f,
lr_decay = 0.9f,
clip = false,
epoch_no_imprv = 3,
hidden_size_char = 100,
hidden_size_lstm = 300
};
hp.filepath_dev = hp.filepath_test = hp.filepath_train = Path.Combine(hp.data_root_dir, "test.txt");

// Loads vocabulary, processing functions and embeddings
hp.filepath_words = Path.Combine(hp.data_root_dir, "words.txt");
hp.filepath_tags = Path.Combine(hp.data_root_dir, "tags.txt");
hp.filepath_chars = Path.Combine(hp.data_root_dir, "chars.txt");

string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/data/lstm_crf_ner.zip";
Web.Download(url, hp.data_root_dir, "lstm_crf_ner.zip");
Compress.UnZip(Path.Combine(hp.data_root_dir, "lstm_crf_ner.zip"), hp.data_root_dir);

// 1. vocabulary
/*vocab_tags = load_vocab(hp.filepath_tags);

nwords = vocab_words.Count;
nchars = vocab_chars.Count;
ntags = vocab_tags.Count;*/

// 2. get processing functions that map str -> id
dev = new CoNLLDataset(hp.filepath_dev, hp);
train = new CoNLLDataset(hp.filepath_train, hp);

// download graph meta data
var meta_file = "lstm_crf_ner.meta";
var meta_path = Path.Combine("graph", meta_file);
url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/" + meta_file;
Web.Download(url, "graph", meta_file);

}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 51
test/TensorFlowNET.Examples/TextProcessing/NamedEntityRecognition.cs

@@ -1,51 +0,0 @@
using System;
using Tensorflow;

namespace TensorFlowNET.Examples
{
/// <summary>
/// https://github.com/guillaumegenthial/tf_ner
/// </summary>
public class NamedEntityRecognition : IExample
{
public bool Enabled { get; set; } = false;
public string Name => "NER";
public bool IsImportingGraph { get; set; } = false;


public void Train(Session sess)
{
throw new NotImplementedException();
}

public void PrepareData()
{
throw new NotImplementedException();
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public bool Run()
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}
}
}

+ 0
- 243
test/TensorFlowNET.Examples/TextProcessing/Word2Vec.cs

@@ -1,243 +0,0 @@
using NumSharp;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using Tensorflow;
using TensorFlowNET.Examples.Utility;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples
{
/// <summary>
/// Implement Word2Vec algorithm to compute vector representations of words.
/// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/word2vec.py
/// </summary>
public class Word2Vec : IExample
{
public bool Enabled { get; set; } = true;
public string Name => "Word2Vec";
public bool IsImportingGraph { get; set; } = true;

// Training Parameters
float learning_rate = 0.1f;
int batch_size = 128;
int num_steps = 30000; //3000000;
int display_step = 1000; //10000;
int eval_step = 5000;//200000;

// Evaluation Parameters
string[] eval_words = new string[] { "five", "of", "going", "hardware", "american", "britain" };
string[] text_words;
List<WordId> word2id;
int[] data;

// Word2Vec Parameters
int embedding_size = 200; // Dimension of the embedding vector
int max_vocabulary_size = 50000; // Total number of different words in the vocabulary
int min_occurrence = 10; // Remove all words that do not appear at least n times
int skip_window = 3; // How many words to consider left and right
int num_skips = 2; // How many times to reuse an input to generate a label
int num_sampled = 64; // Number of negative examples to sample

int data_index = 0;
int top_k = 8; // number of nearest neighbors
float average_loss = 0;

public bool Run()
{
PrepareData();

var graph = tf.Graph().as_default();

tf.train.import_meta_graph($"graph{Path.DirectorySeparatorChar}word2vec.meta");

// Input data
Tensor X = graph.OperationByName("Placeholder");
// Input label
Tensor Y = graph.OperationByName("Placeholder_1");

// Compute the average NCE loss for the batch
Tensor loss_op = graph.OperationByName("Mean");
// Define the optimizer
var train_op = graph.OperationByName("GradientDescent");
Tensor cosine_sim_op = graph.OperationByName("MatMul_1");

// Initialize the variables (i.e. assign their default value)
var init = tf.global_variables_initializer();

using (var sess = tf.Session(graph))
{
// Run the initializer
sess.run(init);

var x_test = (from word in eval_words
join id in word2id on word equals id.Word into wi
from wi2 in wi.DefaultIfEmpty()
select wi2 == null ? 0 : wi2.Id).ToArray();

foreach (var step in range(1, num_steps + 1))
{
// Get a new batch of data
var (batch_x, batch_y) = next_batch(batch_size, num_skips, skip_window);

(_, float loss) = sess.run((train_op, loss_op), (X, batch_x), (Y, batch_y));
average_loss += loss;

if (step % display_step == 0 || step == 1)
{
if (step > 1)
average_loss /= display_step;

print($"Step {step}, Average Loss= {average_loss.ToString("F4")}");
average_loss = 0;
}

// Evaluation
if (step % eval_step == 0 || step == 1)
{
print("Evaluation...");
var sim = sess.run(cosine_sim_op, (X, x_test));
foreach(var i in range(len(eval_words)))
{
var nearest = (0f - sim[i]).argsort<float>()
.Data<int>()
.Skip(1)
.Take(top_k)
.ToArray();
string log_str = $"\"{eval_words[i]}\" nearest neighbors:";
foreach (var k in range(top_k))
log_str = $"{log_str} {word2id.First(x => x.Id == nearest[k]).Word},";
print(log_str);
}
}
}
}

return average_loss < 100;
}

// Generate training batch for the skip-gram model
private (NDArray, NDArray) next_batch(int batch_size, int num_skips, int skip_window)
{
var batch = np.ndarray(new Shape(batch_size), dtype: np.int32);
var labels = np.ndarray((batch_size, 1), dtype: np.int32);
// get window size (words left and right + current one)
int span = 2 * skip_window + 1;
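// e.g. skip_window = 3 gives span = 7: three context words on each side plus the center word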
var buffer = new Queue<int>(span);
if (data_index + span > data.Length)
data_index = 0;
data.Skip(data_index).Take(span).ToList().ForEach(x => buffer.Enqueue(x));
data_index += span;

var rnd = new Random(17); // fixed seed for reproducible batches
foreach (var i in range(batch_size / num_skips))
{
// randomly sample num_skips distinct positions from the context window,
// as the original python implementation does with random.sample
var context_words = range(span).Where(x => x != skip_window).ToArray();
var words_to_use = context_words.OrderBy(_ => rnd.Next()).Take(num_skips).ToArray();
foreach (var (j, context_word) in enumerate(words_to_use))
{
batch[i * num_skips + j] = buffer.ElementAt(skip_window);
labels[i * num_skips + j, 0] = buffer.ElementAt(context_word);
}

if (data_index == len(data))
{
// wrap around: refill the sliding window from the start of the corpus
// (the python original does buffer.extend(data[0:span]))
buffer.Clear();
data.Take(span).ToList().ForEach(w => buffer.Enqueue(w));
data_index = span;
}
else
{
buffer.Dequeue(); // emulate python's deque(maxlen=span): drop the oldest word
buffer.Enqueue(data[data_index]);
data_index += 1;
}
}

// Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data);

return (batch, labels);
}

public void PrepareData()
{
// Download graph meta
var url = "https://github.com/SciSharp/TensorFlow.NET/raw/master/graph/word2vec.meta";
Web.Download(url, "graph", "word2vec.meta");

// Download a small chunk of Wikipedia articles collection
url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/data/text8.zip";
Web.Download(url, "word2vec", "text8.zip");
// Unzip the dataset file. Text has already been processed
Compress.UnZip($"word2vec{Path.DirectorySeparatorChar}text8.zip", "word2vec");

int wordId = 0;
text_words = File.ReadAllText($"word2vec{Path.DirectorySeparatorChar}text8").Trim().ToLower().Split();
// Build the dictionary and replace rare words with UNK token
word2id = text_words.GroupBy(x => x)
.Select(x => new WordId
{
Word = x.Key,
Occurrence = x.Count()
})
.Where(x => x.Occurrence >= min_occurrence) // Remove samples with less than 'min_occurrence' occurrences
.OrderByDescending(x => x.Occurrence) // Retrieve the most common words
.Select(x => new WordId
{
Word = x.Word,
Id = ++wordId, // Assign an id to each word
Occurrence = x.Occurrence
})
.ToList();

// Retrieve a word id, or assign it index 0 ('UNK') if not in dictionary
data = (from word in text_words
join id in word2id on word equals id.Word into wi
from wi2 in wi.DefaultIfEmpty()
select wi2 == null ? 0 : wi2.Id).ToArray();

word2id.Insert(0, new WordId { Word = "UNK", Id = 0, Occurrence = data.Count(x => x == 0) });

print($"Words count: {text_words.Length}");
print($"Unique words: {text_words.Distinct().Count()}");
print($"Vocabulary size: {word2id.Count}");
print($"Most common words: {string.Join(", ", word2id.Take(10))}");
}

public Graph ImportGraph()
{
throw new NotImplementedException();
}

public Graph BuildGraph()
{
throw new NotImplementedException();
}

public void Train(Session sess)
{
throw new NotImplementedException();
}

public void Predict(Session sess)
{
throw new NotImplementedException();
}

public void Test(Session sess)
{
throw new NotImplementedException();
}

private class WordId
{
public string Word { get; set; }
public int Id { get; set; }
public int Occurrence { get; set; }

public override string ToString()
{
return Word + " " + Id + " " + Occurrence;
}
}
}
}

+ 0
- 147
test/TensorFlowNET.Examples/TextProcessing/cnn_models/CharCnn.cs

@@ -1,147 +0,0 @@
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.Text
{
public class CharCnn : ITextModel
{
public CharCnn(int alphabet_size, int document_max_len, int num_class)
{
var learning_rate = 0.001f;
var filter_sizes = new int[] { 7, 7, 3, 3, 3, 3 };
var num_filters = 256;
var kernel_initializer = tf.truncated_normal_initializer(stddev: 0.05f);

var x = tf.placeholder(tf.int32, new TensorShape(-1, document_max_len), name: "x");
var y = tf.placeholder(tf.int32, new TensorShape(-1), name: "y");
var is_training = tf.placeholder(tf.@bool, new TensorShape(), name: "is_training");
var global_step = tf.Variable(0, trainable: false);
var keep_prob = tf.where(is_training, 0.5f, 1.0f);

var x_one_hot = tf.one_hot(x, alphabet_size);
var x_expanded = tf.expand_dims(x_one_hot, -1);

// ============= Convolutional Layers =============
Tensor pool1 = null, pool2 = null;
Tensor conv3 = null, conv4 = null, conv5 = null, conv6 = null;
Tensor h_pool = null;

tf_with(tf.name_scope("conv-maxpool-1"), delegate
{
var conv1 = tf.layers.conv2d(x_expanded,
filters: num_filters,
kernel_size: new[] { filter_sizes[0], alphabet_size },
kernel_initializer: kernel_initializer,
activation: tf.nn.relu());

pool1 = tf.layers.max_pooling2d(conv1,
pool_size: new[] { 3, 1 },
strides: new[] { 3, 1 });
pool1 = tf.transpose(pool1, new[] { 0, 1, 3, 2 });
});

tf_with(tf.name_scope("conv-maxpool-2"), delegate
{
var conv2 = tf.layers.conv2d(pool1,
filters: num_filters,
kernel_size: new[] {filter_sizes[1], num_filters },
kernel_initializer: kernel_initializer,
activation: tf.nn.relu());

pool2 = tf.layers.max_pooling2d(conv2,
pool_size: new[] { 3, 1 },
strides: new[] { 3, 1 });
pool2 = tf.transpose(pool2, new[] { 0, 1, 3, 2 });
});

tf_with(tf.name_scope("conv-3"), delegate
{
conv3 = tf.layers.conv2d(pool2,
filters: num_filters,
kernel_size: new[] { filter_sizes[2], num_filters },
kernel_initializer: kernel_initializer,
activation: tf.nn.relu());
conv3 = tf.transpose(conv3, new[] { 0, 1, 3, 2 });
});

tf_with(tf.name_scope("conv-4"), delegate
{
conv4 = tf.layers.conv2d(conv3,
filters: num_filters,
kernel_size: new[] { filter_sizes[3], num_filters },
kernel_initializer: kernel_initializer,
activation: tf.nn.relu());
conv4 = tf.transpose(conv4, new[] { 0, 1, 3, 2 });
});

tf_with(tf.name_scope("conv-5"), delegate
{
conv5 = tf.layers.conv2d(conv4,
filters: num_filters,
kernel_size: new[] { filter_sizes[4], num_filters },
kernel_initializer: kernel_initializer,
activation: tf.nn.relu());
conv5 = tf.transpose(conv5, new[] { 0, 1, 3, 2 });
});

tf_with(tf.name_scope("conv-maxpool-6"), delegate
{
conv6 = tf.layers.conv2d(conv5,
filters: num_filters,
kernel_size: new[] { filter_sizes[5], num_filters },
kernel_initializer: kernel_initializer,
activation: tf.nn.relu());

var pool6 = tf.layers.max_pooling2d(conv6,
pool_size: new[] { 3, 1 },
strides: new[] { 3, 1 });
pool6 = tf.transpose(pool6, new[] { 0, 2, 1, 3 });
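
// Why 34 below: with "valid" convolutions and stride-3 max-pools over the
// 1014-char input, the time dimension shrinks
// 1014 -> 1008 -> 336 -> 330 -> 110 -> 108 -> 106 -> 104 -> 102 -> 34.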

h_pool = tf.reshape(pool6, new[] { -1, 34 * num_filters });
});

// ============= Fully Connected Layers =============
Tensor fc1_out = null, fc2_out = null;
Tensor logits = null;
Tensor predictions = null;

tf_with(tf.name_scope("fc-1"), delegate
{
fc1_out = tf.layers.dense(h_pool,
1024,
activation: tf.nn.relu(),
kernel_initializer: kernel_initializer);
});

tf_with(tf.name_scope("fc-2"), delegate
{
fc2_out = tf.layers.dense(fc1_out,
1024,
activation: tf.nn.relu(),
kernel_initializer: kernel_initializer);
});

tf_with(tf.name_scope("fc-3"), delegate
{
logits = tf.layers.dense(fc2_out,
num_class,
kernel_initializer: kernel_initializer);
predictions = tf.argmax(logits, -1, output_type: tf.int32);
});

tf_with(tf.name_scope("loss"), delegate
{
var y_one_hot = tf.one_hot(y, num_class);
var loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits: logits, labels: y_one_hot));
var optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step: global_step);
});

tf_with(tf.name_scope("accuracy"), delegate
{
var correct_predictions = tf.equal(predictions, y);
var accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name: "accuracy");
});
}
}
}

+ 0
- 6
test/TensorFlowNET.Examples/TextProcessing/cnn_models/ITextModel.cs

@@ -1,6 +0,0 @@
namespace TensorFlowNET.Examples.Text
{
interface ITextModel
{
}
}

+ 0
- 171
test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs

@@ -1,171 +0,0 @@
using System.Collections.Generic;
using System.Linq;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.Text
{
public class VdCnn : ITextModel
{
private int embedding_size;
private int[] filter_sizes;
private int[] num_filters;
private int[] num_blocks;
private float learning_rate;
private IInitializer cnn_initializer;
private IInitializer fc_initializer;
public Tensor x { get; private set; }
public Tensor y { get; private set; }
public Tensor is_training { get; private set; }
private RefVariable global_step;
private RefVariable embeddings;
private Tensor x_emb;
private Tensor x_expanded;
private Tensor logits;
private Tensor predictions;
private Tensor loss;

public VdCnn(int alphabet_size, int document_max_len, int num_class)
{
embedding_size = 16;
filter_sizes = new int[] { 3, 3, 3, 3, 3 };
num_filters = new int[] { 64, 64, 128, 256, 512 };
num_blocks = new int[] { 2, 2, 2, 2 };
learning_rate = 0.001f;
cnn_initializer = tensorflow.keras.initializers.he_normal();
fc_initializer = tf.truncated_normal_initializer(stddev: 0.05f);

x = tf.placeholder(tf.int32, new TensorShape(-1, document_max_len), name: "x");
y = tf.placeholder(tf.int32, new TensorShape(-1), name: "y");
is_training = tf.placeholder(tf.@bool, new TensorShape(), name: "is_training");
global_step = tf.Variable(0, trainable: false);

// Embedding Layer
tf_with(tf.name_scope("embedding"), delegate
{
var init_embeddings = tf.random_uniform(new int[] { alphabet_size, embedding_size }, -1.0f, 1.0f);
embeddings = tf.get_variable("embeddings", initializer: init_embeddings);
x_emb = tf.nn.embedding_lookup(embeddings, x);
x_expanded = tf.expand_dims(x_emb, -1);
});

Tensor conv0 = null;
Tensor conv1 = null;
Tensor conv2 = null;
Tensor conv3 = null;
Tensor conv4 = null;
Tensor h_flat = null;
Tensor fc1_out = null;
Tensor fc2_out = null;

// First Convolution Layer
tf_with(tf.variable_scope("conv-0"), delegate
{
conv0 = tf.layers.conv2d(x_expanded,
filters: num_filters[0],
kernel_size: new int[] { filter_sizes[0], embedding_size },
kernel_initializer: cnn_initializer,
activation: tf.nn.relu());

conv0 = tf.transpose(conv0, new int[] { 0, 1, 3, 2 });
});

tf_with(tf.name_scope("conv-block-1"), delegate {
conv1 = conv_block(conv0, 1);
});

tf_with(tf.name_scope("conv-block-2"), delegate {
conv2 = conv_block(conv1, 2);
});

tf_with(tf.name_scope("conv-block-3"), delegate {
conv3 = conv_block(conv2, 3);
});

tf_with(tf.name_scope("conv-block-4"), delegate
{
conv4 = conv_block(conv3, 4, max_pool: false);
});

// ============= k-max Pooling =============
tf_with(tf.name_scope("k-max-pooling"), delegate
{
var h = tf.transpose(tf.squeeze(conv4, new int[] { -1 }), new int[] { 0, 2, 1 });
var top_k = tf.nn.top_k(h, k: 8, sorted: false)[0];
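// 512 * 8 below = num_filters of the last conv block times the k of this k-max pooling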
h_flat = tf.reshape(top_k, new int[] { -1, 512 * 8 });
});

// ============= Fully Connected Layers =============
tf_with(tf.name_scope("fc-1"), scope =>
{
fc1_out = tf.layers.dense(h_flat, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer);
});

tf_with(tf.name_scope("fc-2"), scope =>
{
fc2_out = tf.layers.dense(fc1_out, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer);
});

tf_with(tf.name_scope("fc-3"), scope =>
{
logits = tf.layers.dense(fc2_out, num_class, activation: null, kernel_initializer: fc_initializer);
predictions = tf.argmax(logits, -1, output_type: tf.int32);
});

// ============= Loss and Accuracy =============
tf_with(tf.name_scope("loss"), delegate
{
var y_one_hot = tf.one_hot(y, num_class);
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits: logits, labels: y_one_hot));

var update_ops = tf.get_collection<object>(tf.GraphKeys.UPDATE_OPS);
tf_with(tf.control_dependencies(update_ops.Select(x => (Operation)x).ToArray()), delegate
{
var adam = tf.train.AdamOptimizer(learning_rate);
adam.minimize(loss, global_step: global_step);
});
});
}

private Tensor conv_block(Tensor input, int i, bool max_pool = true)
{
return tf_with(tf.variable_scope($"conv-block-{i}"), delegate
{
Tensor conv = null;
// Two "conv-batch_norm-relu" layers.
foreach (var j in Enumerable.Range(0, 2))
{
tf_with(tf.variable_scope($"conv-{j}"), delegate
{
// convolution
conv = tf.layers.conv2d(
input,
filters: num_filters[i],
kernel_size: new int[] { filter_sizes[i], num_filters[i - 1] },
kernel_initializer: cnn_initializer,
activation: null);
// batch normalization
conv = tf.layers.batch_normalization(conv, training: is_training);
// relu
conv = tf.nn.relu(conv);
conv = tf.transpose(conv, new int[] { 0, 1, 3, 2 });
});
}
if (max_pool)
{
// Max pooling
return tf.layers.max_pooling2d(
conv,
pool_size: new int[] { 3, 1 },
strides: new int[] { 2, 1 },
padding: "SAME");
}
else
{
return conv;
}
});
}
}
}

+ 0
- 99
test/TensorFlowNET.Examples/TextProcessing/cnn_models/WordCnn.cs

@@ -1,99 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System.Collections.Generic;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.Examples.Text
{
public class WordCnn : ITextModel
{
public WordCnn(int vocabulary_size, int document_max_len, int num_class)
{
var embedding_size = 128;
var learning_rate = 0.001f;
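// NOTE: the three dimensions of this rank-3 array intentionally encode the
// filter sizes 3, 4 and 5; the loop below reads them back via GetLength(len).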
var filter_sizes = new int[3, 4, 5];
var num_filters = 100;

var x = tf.placeholder(tf.int32, new TensorShape(-1, document_max_len), name: "x");
var y = tf.placeholder(tf.int32, new TensorShape(-1), name: "y");
var is_training = tf.placeholder(tf.@bool, new TensorShape(), name: "is_training");
var global_step = tf.Variable(0, trainable: false);
var keep_prob = tf.where(is_training, 0.5f, 1.0f);
Tensor x_emb = null;

tf_with(tf.name_scope("embedding"), scope =>
{
var init_embeddings = tf.random_uniform(new int[] { vocabulary_size, embedding_size });
var embeddings = tf.get_variable("embeddings", initializer: init_embeddings);
x_emb = tf.nn.embedding_lookup(embeddings, x);
x_emb = tf.expand_dims(x_emb, -1);
});

var pooled_outputs = new List<Tensor>();
for (int len = 0; len < filter_sizes.Rank; len++)
{
int filter_size = filter_sizes.GetLength(len);
var conv = tf.layers.conv2d(
x_emb,
filters: num_filters,
kernel_size: new int[] { filter_size, embedding_size },
strides: new int[] { 1, 1 },
padding: "VALID",
activation: tf.nn.relu());

var pool = tf.layers.max_pooling2d(
conv,
pool_size: new[] { document_max_len - filter_size + 1, 1 },
strides: new[] { 1, 1 },
padding: "VALID");

pooled_outputs.Add(pool);
}

var h_pool = tf.concat(pooled_outputs, 3);
var h_pool_flat = tf.reshape(h_pool, new TensorShape(-1, num_filters * filter_sizes.Rank));
Tensor h_drop = null;
tf_with(tf.name_scope("dropout"), delegate
{
h_drop = tf.nn.dropout(h_pool_flat, keep_prob);
});

Tensor logits = null;
Tensor predictions = null;
tf_with(tf.name_scope("output"), delegate
{
logits = tf.layers.dense(h_drop, num_class);
predictions = tf.argmax(logits, -1, output_type: tf.int32);
});

tf_with(tf.name_scope("loss"), delegate
{
var sscel = tf.nn.sparse_softmax_cross_entropy_with_logits(logits: logits, labels: y);
var loss = tf.reduce_mean(sscel);
var adam = tf.train.AdamOptimizer(learning_rate);
var optimizer = adam.minimize(loss, global_step: global_step);
});

tf_with(tf.name_scope("accuracy"), delegate
{
var correct_predictions = tf.equal(predictions, y);
var accuracy = tf.reduce_mean(tf.cast(correct_predictions, TF_DataType.TF_FLOAT), name: "accuracy");
});
}
}
}

+ 0
- 20
test/TensorFlowNET.Examples/Utility/ArrayShuffling.cs

@@ -1,20 +0,0 @@
using System;
namespace TensorFlowNET.Examples.Utility
{
public static class ArrayShuffling
{
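/// <summary>
/// In-place Fisher-Yates shuffle; returns the same array so calls can chain,
/// e.g. new Random(17).Shuffle(contents) as used by build_char_dataset.
/// </summary>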
public static T[] Shuffle<T>(this Random rng, T[] array)
{
int n = array.Length;
while (n > 1)
{
int k = rng.Next(n--);
T temp = array[n];
array[n] = array[k];
array[k] = temp;
}
return array;
}
}
}

+ 0
- 105
test/TensorFlowNET.Examples/Utility/CoNLLDataset.cs

@@ -1,105 +0,0 @@
using System.Collections.Generic;
using System.IO;
using System.Linq;
using Tensorflow.Estimators;

namespace TensorFlowNET.Examples.Utility
{
public class CoNLLDataset
{
static Dictionary<string, int> vocab_chars;
static Dictionary<string, int> vocab_words;
static Dictionary<string, int> vocab_tags;

HyperParams _hp;
string _path;

public CoNLLDataset(string path, HyperParams hp)
{
if (vocab_chars == null)
vocab_chars = load_vocab(hp.filepath_chars);

if (vocab_words == null)
vocab_words = load_vocab(hp.filepath_words);

if (vocab_tags == null)
vocab_tags = load_vocab(hp.filepath_tags);

_path = path;
}

private (int[], int) processing_word(string word)
{
var char_ids = word.ToCharArray().Select(x => vocab_chars[x.ToString()]).ToArray();

// 1. preprocess word
if (true) // lowercase
word = word.ToLower();
if (false) // isdigit
word = "$NUM$";

// 2. get id of word
int id = vocab_words.GetValueOrDefault(word, vocab_words["$UNK$"]);
return (char_ids, id);
}

private int processing_tag(string word)
{
// 1. preprocess word
if (false) // lowercase
word = word.ToLower();
if (false) // isdigit
word = "$NUM$";

// 2. get id of word
int id = vocab_tags.GetValueOrDefault(word, -1);

return id;
}

private Dictionary<string, int> load_vocab(string filename)
{
var dict = new Dictionary<string, int>();
int i = 0;
foreach (var line in File.ReadAllLines(filename))
dict[line] = i++;
return dict;
}

public IEnumerable<((int[], int)[], int[])> GetItems()
{
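// Input uses the CoNLL column format: one "word tag" pair per line; blank
// lines and -DOCSTART- markers terminate the current sentence.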
var lines = File.ReadAllLines(_path);

int niter = 0;
var words = new List<(int[], int)>();
var tags = new List<int>();

foreach (var l in lines)
{
string line = l.Trim();
if (string.IsNullOrEmpty(line) || line.StartsWith("-DOCSTART-"))
{
if (words.Count > 0)
{
niter++;
yield return (words.ToArray(), tags.ToArray());
words.Clear();
tags.Clear();
}
}
else
{
var ls = line.Split(' ');
// process word
var word = processing_word(ls[0]);
var tag = processing_tag(ls[1]);

words.Add(word);
tags.Add(tag);
}
}
}
}
}

+ 0
- 102
test/TensorFlowNET.Examples/Utility/Compress.cs

@@ -1,102 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using ICSharpCode.SharpZipLib.Core;
using ICSharpCode.SharpZipLib.GZip;
using ICSharpCode.SharpZipLib.Tar;
using System;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;

namespace TensorFlowNET.Examples.Utility
{
public class Compress
{
public static void ExtractGZip(string gzipFileName, string targetDir)
{
// Use a 4K buffer. Any larger is a waste.
byte[] dataBuffer = new byte[4096];

using (System.IO.Stream fs = new FileStream(gzipFileName, FileMode.Open, FileAccess.Read))
{
using (GZipInputStream gzipStream = new GZipInputStream(fs))
{
// Change this to your needs
string fnOut = Path.Combine(targetDir, Path.GetFileNameWithoutExtension(gzipFileName));

using (FileStream fsOut = File.Create(fnOut))
{
StreamUtils.Copy(gzipStream, fsOut, dataBuffer);
}
}
}
}

public static void UnZip(String gzArchiveName, String destFolder)
{
var flag = gzArchiveName.Split(Path.DirectorySeparatorChar).Last().Split('.').First() + ".bin";
if (File.Exists(Path.Combine(destFolder, flag))) return;

Console.WriteLine("Extracting.");
var task = Task.Run(() =>
{
ZipFile.ExtractToDirectory(gzArchiveName, destFolder);
});

while (!task.IsCompleted)
{
Thread.Sleep(200);
Console.Write(".");
}

File.Create(Path.Combine(destFolder, flag)).Dispose(); // close the handle so the flag file isn't left locked
Console.WriteLine("");
Console.WriteLine("Extracting is completed.");
}

public static void ExtractTGZ(String gzArchiveName, String destFolder)
{
var flag = gzArchiveName.Split(Path.DirectorySeparatorChar).Last().Split('.').First() + ".bin";
if (File.Exists(Path.Combine(destFolder, flag))) return;

Console.WriteLine("Extracting.");
var task = Task.Run(() =>
{
using (var inStream = File.OpenRead(gzArchiveName))
{
using (var gzipStream = new GZipInputStream(inStream))
{
using (TarArchive tarArchive = TarArchive.CreateInputTarArchive(gzipStream))
tarArchive.ExtractContents(destFolder);
}
}
});

while (!task.IsCompleted)
{
Thread.Sleep(200);
Console.Write(".");
}

File.Create(Path.Combine(destFolder, flag)).Dispose(); // close the handle so the flag file isn't left locked
Console.WriteLine("");
Console.WriteLine("Extracting is completed.");
}
}
}
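
Both extractors drop an "<archive-name>.bin" flag file into the destination and skip the whole extraction when it already exists, so repeated runs are cheap. A typical call (paths hypothetical):

// no-op on the second run because models/inception_v3.bin already exists
Compress.ExtractTGZ(Path.Combine("models", "inception_v3.tgz"), "models");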

+0 -81 test/TensorFlowNET.Examples/Utility/PbtxtParser.cs

@@ -1,81 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using Newtonsoft.Json;
using System.Collections.Generic;

namespace TensorFlowNET.Examples.Utility
{
public class PbtxtItem
{
public string name { get; set; }
public int id { get; set; }
public string display_name { get; set; }
}
public class PbtxtItems
{
public List<PbtxtItem> items { get; set; }
}

public class PbtxtParser
{
public static PbtxtItems ParsePbtxtFile(string filePath)
{
string line;
string newText = "{\"items\":[";

using (System.IO.StreamReader reader = new System.IO.StreamReader(filePath))
{

while ((line = reader.ReadLine()) != null)
{
string newline = string.Empty;

if (line.Contains("{"))
{
newline = line.Replace("item", "").Trim();
//newText += line.Insert(line.IndexOf("=") + 1, "\"") + "\",";
newText += newline;
}
else if (line.Contains("}"))
{
newText = newText.Remove(newText.Length - 1);
newText += line;
newText += ",";
}
else
{
newline = line.Replace(":", "\":").Trim();
newline = "\"" + newline;// newline.Insert(0, "\"");
newline += ",";

newText += newline;
}

}

newText = newText.Remove(newText.Length - 1);
newText += "]}";

reader.Close();
}

PbtxtItems items = JsonConvert.DeserializeObject<PbtxtItems>(newText);

return items;
}
}
}
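
For reference, the parser targets the standard object-detection label-map format; a sample entry (not from this repo) such as

item {
  name: "/m/01g317"
  id: 1
  display_name: "person"
}

is rewritten into the JSON fragment {"name": "/m/01g317","id": 1,"display_name": "person"} inside a top-level "items" array, which Json.NET then binds to the PbtxtItem objects above.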

+0 -57 test/TensorFlowNET.Examples/Utility/Web.cs

@@ -1,57 +0,0 @@
/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using System;
using System.IO;
using System.Linq;
using System.Net;
using System.Threading;
using System.Threading.Tasks;

namespace TensorFlowNET.Examples.Utility
{
public class Web
{
public static bool Download(string url, string destDir, string destFileName)
{
if (destFileName == null)
destFileName = url.Split('/').Last(); // URLs use forward slashes on every OS, so don't split on Path.DirectorySeparatorChar

Directory.CreateDirectory(destDir);

string relativeFilePath = Path.Combine(destDir, destFileName);

if (File.Exists(relativeFilePath))
{
Console.WriteLine($"{relativeFilePath} already exists.");
return false;
}

var wc = new WebClient();
Console.WriteLine($"Downloading {relativeFilePath}");
var download = Task.Run(() => wc.DownloadFile(url, relativeFilePath));
while (!download.IsCompleted)
{
Thread.Sleep(1000);
Console.Write(".");
}
Console.WriteLine("");
Console.WriteLine($"Downloaded {relativeFilePath}");

return true;
}
}
}
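
Download blocks until the transfer finishes (the Task only keeps the console dots ticking) and returns false when the target file is already on disk. A usage sketch chaining it with Compress (URL and paths hypothetical):

Web.Download("http://example.org/data/mnist.tgz", "data", "mnist.tgz"); // skipped if already cached
Compress.ExtractTGZ(Path.Combine("data", "mnist.tgz"), "data"); // skipped if its flag file exists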

+0 -115 test/TensorFlowNET.Examples/python/binary_text_classification.py

@@ -1,115 +0,0 @@

from __future__ import absolute_import, division, print_function

import tensorflow as tf
from tensorflow import keras

import numpy as np

print(tf.__version__)

imdb = keras.datasets.imdb

(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
print(train_data[0])
print(len(train_data[0]), len(train_data[1]))  # reviews have different lengths before padding

# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()

# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3

reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])

print(decode_review(train_data[0]))


train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)

test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)


print(train_data[0])

# input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000

model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))

model.summary()

model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])


x_val = train_data[:10000]
partial_x_train = train_data[10000:]

y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]

history = model.fit(partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)

results = model.evaluate(test_data, test_labels)
print(results)

history_dict = history.history
print(history_dict.keys())

import matplotlib.pyplot as plt

acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']

epochs = range(1, len(acc) + 1)

# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()


plt.clf() # clear figure

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.show()

+0 -107 test/TensorFlowNET.Examples/python/linear_regression.py

@@ -1,107 +0,0 @@
'''
A linear regression learning algorithm example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

from __future__ import print_function

import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random

# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 10

# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]

if False:
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Set model weights
W = tf.Variable(-0.06, name="weight")
b = tf.Variable(-0.73, name="bias")

# Construct a linear model
mul = tf.multiply(X, W)
pred = tf.add(mul, b)

# Mean squared error
sub = pred-Y
pow = tf.pow(sub, 2)

reduce = tf.reduce_sum(pow)
cost = reduce/(2*n_samples)
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
grad = tf.train.GradientDescentOptimizer(learning_rate)
optimizer = grad.minimize(cost)
# tf.train.export_meta_graph(filename='save_model.meta');
else:
# tf Graph Input
new_saver = tf.train.import_meta_graph("linear_regression.meta")
nodes = tf.get_default_graph()._nodes_by_name
optimizer = nodes["GradientDescent"]
cost = nodes["truediv"].outputs[0]
X = nodes["Placeholder"].outputs[0]
Y = nodes["Placeholder_1"].outputs[0]
W = nodes["weight"].outputs[0]
b = nodes["bias"].outputs[0]
pred = nodes["Add"].outputs[0]

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

# Run the initializer
sess.run(init)

# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})

# Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"W=", sess.run(W), "b=", sess.run(b))

print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

# Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()

# Testing example, as requested (Issue #2)
test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])

print("Testing... (Mean square loss Comparison)")
testing_cost = sess.run(
tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
feed_dict={X: test_X, Y: test_Y}) # same function as cost above
print("Testing cost=", testing_cost)
print("Absolute mean square loss difference:", abs(
training_cost - testing_cost))

plt.plot(test_X, test_Y, 'bo', label='Testing data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()

+0 -100 test/TensorFlowNET.Examples/python/logistic_regression.py

@@ -1,100 +0,0 @@
'''
A logistic regression learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

from __future__ import print_function

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct model
pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

# Run the initializer
sess.run(init)

# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

print("Optimization Finished!")

# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

# predict
# results = sess.run(pred, feed_dict={x: batch_xs[:1]})

# save model
saver = tf.train.Saver()
save_path = saver.save(sess, "logistic_regression/model.ckpt")
tf.train.write_graph(sess.graph.as_graph_def(),'logistic_regression','model.pbtxt', as_text=True)

from tensorflow.python.tools import freeze_graph  # provides freeze_graph for the call below

freeze_graph.freeze_graph(input_graph = 'logistic_regression/model.pbtxt',
input_saver = "",
input_binary = False,
input_checkpoint = 'logistic_regression/model.ckpt',
output_node_names = "Softmax",
restore_op_name = "save/restore_all",
filename_tensor_name = "save/Const:0",
output_graph = 'logistic_regression/model.pb',
clear_devices = True,
initializer_nodes = "")

# restoring the model
saver = tf.train.import_meta_graph('logistic_regression/model.ckpt.meta')  # matches the checkpoint saved above
saver.restore(sess,tf.train.latest_checkpoint('logistic_regression'))

# predict
# pred = graph._nodes_by_name["Softmax"]
# output = pred.outputs[0]
# x = graph._nodes_by_name["Placeholder"]
# input = x.outputs[0]
# results = sess.run(output, feed_dict={input: batch_xs[:1]})

+0 -67 test/TensorFlowNET.Examples/python/meta_graph.py

@@ -1,67 +0,0 @@

import tensorflow as tf
import math

# Creates an inference graph.
# Hidden 1
images = tf.constant(1.2, tf.float32, shape=[100, 28])
with tf.name_scope("hidden1"):
weights = tf.Variable(
tf.truncated_normal([28, 128],
stddev=1.0 / math.sqrt(float(28))),
name="weights")
biases = tf.Variable(tf.zeros([128]),
name="biases")
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope("hidden2"):
weights = tf.Variable(
tf.truncated_normal([128, 32],
stddev=1.0 / math.sqrt(float(128))),
name="weights")
biases = tf.Variable(tf.zeros([32]),
name="biases")
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
# Linear
with tf.name_scope("softmax_linear"):
weights = tf.Variable(
tf.truncated_normal([32, 10],
stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = tf.Variable(tf.zeros([10]),
name="biases")
logits = tf.matmul(hidden2, weights) + biases
tf.add_to_collection("logits", logits)

init_all_op = tf.global_variables_initializer()

with tf.Session() as sess:
# Initializes all the variables.
sess.run(init_all_op)
# Runs to logit.
sess.run(logits)
# Creates a saver.
saver0 = tf.train.Saver()
saver0.save(sess, 'my-save-dir/my-model-10000')
# Generates MetaGraphDef.
saver0.export_meta_graph('my-save-dir/my-model-10000.meta')


# Then later import it and extend it to a training graph.
with tf.Session() as sess:
new_saver = tf.train.import_meta_graph('my-save-dir/my-model-10000.meta')
new_saver.restore(sess, 'my-save-dir/my-model-10000')
# Adds loss and train ops.
labels = tf.constant(0, tf.int32, shape=[100], name="labels")
batch_size = tf.size(labels)
logits = tf.get_collection("logits")[0]
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,
logits=logits)

tf.summary.scalar('loss', loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(0.01)

# Runs train_op.
train_op = optimizer.minimize(loss)
sess.run(train_op)
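
meta_graph.py is the Python reference for the save/restore round trip. The TF.NET side of the same flow would look roughly like this, assuming the C# binding mirrors the Python import_meta_graph/restore signatures:

using (var sess = tf.Session())
{
    var saver = tf.train.import_meta_graph("my-save-dir/my-model-10000.meta");
    saver.restore(sess, "my-save-dir/my-model-10000");
    // the graph and its variables are live again; extend with new ops as the script does
}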

+0 -78 test/TensorFlowNET.Examples/python/minst_lstm.py

@@ -1,78 +0,0 @@
import tensorflow as tf
from tensorflow.contrib import rnn

#import mnist dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets("/tmp/data/",one_hot=True)

#define constants
#unrolled through 28 time steps
time_steps=28
#hidden LSTM units
num_units=128
#rows of 28 pixels
n_input=28
#learning rate for adam
learning_rate=0.001
#mnist is meant to be classified in 10 classes(0-9).
n_classes=10
#size of batch
batch_size=128


#weights and biases of appropriate shape to accomplish above task
out_weights=tf.Variable(tf.random_normal([num_units,n_classes]))
out_bias=tf.Variable(tf.random_normal([n_classes]))

#defining placeholders
#input image placeholder
x=tf.placeholder("float",[None,time_steps,n_input])
#input label placeholder
y=tf.placeholder("float",[None,n_classes])

#processing the input tensor from [batch_size,n_steps,n_input] to "time_steps" number of [batch_size,n_input] tensors
inputs = tf.unstack(x, time_steps, 1)

#defining the network
lstm_layer = rnn.BasicLSTMCell(num_units, forget_bias=1)
outputs, _ = rnn.static_rnn(lstm_layer, inputs, dtype="float32")

#converting last output of dimension [batch_size,num_units] to [batch_size,n_classes] by out_weight multiplication
prediction=tf.matmul(outputs[-1],out_weights)+out_bias

#loss_function
loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=y))
#optimization
opt=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

#model evaluation
correct_prediction=tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

#initialize variables
init=tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
iter=1
while iter<800:
batch_x,batch_y=mnist.train.next_batch(batch_size=batch_size)

batch_x=batch_x.reshape((batch_size,time_steps,n_input))

sess.run(opt, feed_dict={x: batch_x, y: batch_y})

if iter %10==0:
acc=sess.run(accuracy,feed_dict={x:batch_x,y:batch_y})
los=sess.run(loss,feed_dict={x:batch_x,y:batch_y})
print("For iter ",iter)
print("Accuracy ",acc)
print("Loss ",los)
print("__________________")

iter=iter+1

#calculating test accuracy
test_data = mnist.test.images[:128].reshape((-1, time_steps, n_input))
test_label = mnist.test.labels[:128]
print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


+0 -48 test/TensorFlowNET.Examples/python/minst_rnn.py

@@ -1,48 +0,0 @@
import tensorflow as tf

# hyperparameters
n_neurons = 128
learning_rate = 0.001
batch_size = 128
n_epochs = 10
# parameters
n_steps = 28 # 28 rows
n_inputs = 28 # 28 cols
n_outputs = 10 # 10 classes
# build a rnn model
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)
output, state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
logits = tf.layers.dense(state, n_outputs)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
prediction = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))

# input data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
X_test = mnist.test.images # X_test shape: [num_test, 28*28]
X_test = X_test.reshape([-1, n_steps, n_inputs])
y_test = mnist.test.labels

# initialize the variables
init = tf.global_variables_initializer()
# train the model
with tf.Session() as sess:
sess.run(init)
n_batches = mnist.train.num_examples // batch_size
for epoch in range(n_epochs):
for batch in range(n_batches):
X_train, y_train = mnist.train.next_batch(batch_size)
X_train = X_train.reshape([-1, n_steps, n_inputs])
sess.run(optimizer, feed_dict={X: X_train, y: y_train})
loss_train, acc_train = sess.run(
[loss, accuracy], feed_dict={X: X_train, y: y_train})
print('Epoch: {}, Train Loss: {:.3f}, Train Acc: {:.3f}'.format(
epoch + 1, loss_train, acc_train))
loss_test, acc_test = sess.run(
[loss, accuracy], feed_dict={X: X_test, y: y_test})
print('Test Loss: {:.3f}, Test Acc: {:.3f}'.format(loss_test, acc_test))

+0 -164 test/TensorFlowNET.Examples/python/neural_network.py

@@ -1,164 +0,0 @@

# imports
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

img_h = img_w = 28 # MNIST images are 28x28
img_size_flat = img_h * img_w # 28x28=784, the total number of pixels
n_classes = 10 # Number of classes, one class per digit

def load_data(mode='train'):
"""
Function to (download and) load the MNIST data
:param mode: train or test
:return: images and the corresponding labels
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
if mode == 'train':
x_train, y_train, x_valid, y_valid = mnist.train.images, mnist.train.labels, \
mnist.validation.images, mnist.validation.labels
return x_train, y_train, x_valid, y_valid
elif mode == 'test':
x_test, y_test = mnist.test.images, mnist.test.labels
return x_test, y_test

def randomize(x, y):
""" Randomizes the order of data samples and their corresponding labels"""
permutation = np.random.permutation(y.shape[0])
shuffled_x = x[permutation, :]
shuffled_y = y[permutation]
return shuffled_x, shuffled_y

def get_next_batch(x, y, start, end):
x_batch = x[start:end]
y_batch = y[start:end]
return x_batch, y_batch

# Load MNIST data
x_train, y_train, x_valid, y_valid = load_data(mode='train')
print("Size of:")
print("- Training-set:\t\t{}".format(len(y_train)))
print("- Validation-set:\t{}".format(len(y_valid)))

print('x_train:\t{}'.format(x_train.shape))
print('y_train:\t{}'.format(y_train.shape))
print('x_valid:\t{}'.format(x_valid.shape))
print('y_valid:\t{}'.format(y_valid.shape))

print(y_valid[:5, :])

# Hyper-parameters
epochs = 10 # Total number of training epochs
batch_size = 100 # Training batch size
display_freq = 100 # Frequency of displaying the training results
learning_rate = 0.001 # The optimization initial learning rate

h1 = 200 # number of nodes in the 1st hidden layer

# weight and bias wrappers
def weight_variable(name, shape):
"""
Create a weight variable with appropriate initialization
:param name: weight name
:param shape: weight shape
:return: initialized weight variable
"""
initer = tf.truncated_normal_initializer(stddev=0.01)
return tf.get_variable('W_' + name,
dtype=tf.float32,
shape=shape,
initializer=initer)


def bias_variable(name, shape):
"""
Create a bias variable with appropriate initialization
:param name: bias variable name
:param shape: bias variable shape
:return: initialized bias variable
"""
initial = tf.constant(0., shape=shape, dtype=tf.float32)
return tf.get_variable('b_' + name,
dtype=tf.float32,
initializer=initial)

def fc_layer(x, num_units, name, use_relu=True):
"""
Create a fully-connected layer
:param x: input from previous layer
:param num_units: number of hidden units in the fully-connected layer
:param name: layer name
:param use_relu: boolean to add ReLU non-linearity (or not)
:return: The output array
"""
in_dim = x.get_shape()[1]
W = weight_variable(name, shape=[in_dim, num_units])
b = bias_variable(name, [num_units])
layer = tf.matmul(x, W)
layer += b
if use_relu:
layer = tf.nn.relu(layer)
return layer

# Create the graph for the linear model
# Placeholders for inputs (x) and outputs(y)
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='X')
y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y')

# Create a fully-connected layer with h1 nodes as hidden layer
fc1 = fc_layer(x, h1, 'FC1', use_relu=True)
# Create a fully-connected layer with n_classes nodes as output layer
output_logits = fc_layer(fc1, n_classes, 'OUT', use_relu=False)

# Define the loss function, optimizer, and accuracy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_logits)
loss = tf.reduce_mean(cross_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name='Adam-op').minimize(loss)
correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name='correct_pred')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')

# Network predictions
cls_prediction = tf.argmax(output_logits, axis=1, name='predictions')

# export graph
#tf.train.export_meta_graph(filename='neural_network.meta', graph=tf.get_default_graph(), clear_extraneous_savers= True, as_text = True)

# Create the op for initializing all variables
init = tf.global_variables_initializer()

# Create an interactive session (to keep the session in the other cells)
sess = tf.InteractiveSession()
# Initialize all variables
sess.run(init)
# Number of training iterations in each epoch
num_tr_iter = int(len(y_train) / batch_size)
for epoch in range(epochs):
print('Training epoch: {}'.format(epoch + 1))
# Randomly shuffle the training data at the beginning of each epoch
x_train, y_train = randomize(x_train, y_train)
for iteration in range(num_tr_iter):
start = iteration * batch_size
end = (iteration + 1) * batch_size
x_batch, y_batch = get_next_batch(x_train, y_train, start, end)

# Run optimization op (backprop)
feed_dict_batch = {x: x_batch, y: y_batch}
sess.run(optimizer, feed_dict=feed_dict_batch)

if iteration % display_freq == 0:
# Calculate and display the batch loss and accuracy
loss_batch, acc_batch = sess.run([loss, accuracy],
feed_dict=feed_dict_batch)

print("iter {0:3d}:\t Loss={1:.2f},\tTraining Accuracy={2:.01%}".
format(iteration, loss_batch, acc_batch))

# Run validation after every epoch
feed_dict_valid = {x: x_valid[:1000], y: y_valid[:1000]}
loss_valid, acc_valid = sess.run([loss, accuracy], feed_dict=feed_dict_valid)
print('---------------------------------------------------------')
print("Epoch: {0}, validation loss: {1:.2f}, validation accuracy: {2:.01%}".
format(epoch + 1, loss_valid, acc_valid))
print('---------------------------------------------------------')

+0 -126 test/TensorFlowNET.UnitTest/ExamplesTests/ExamplesTest.cs

@@ -1,126 +0,0 @@
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Tensorflow;
using TensorFlowNET.Examples;
using static Tensorflow.Binding;
namespace TensorFlowNET.ExamplesTests
{
[TestClass]
public class ExamplesTest
{
[TestMethod]
public void BasicOperations()
{
tf.Graph().as_default();
new BasicOperations() { Enabled = true }.Run();
}
[TestMethod]
public void HelloWorld()
{
tf.Graph().as_default();
new HelloWorld() { Enabled = true }.Run();
}
[TestMethod]
public void ImageRecognition()
{
tf.Graph().as_default();
new ImageRecognitionInception() { Enabled = true }.Run(); // exercises the Inception-based image recognition example
}
[Ignore]
[TestMethod]
public void InceptionArchGoogLeNet()
{
tf.Graph().as_default();
new InceptionArchGoogLeNet() { Enabled = true }.Run();
}
[Ignore]
[TestMethod]
public void KMeansClustering()
{
tf.Graph().as_default();
new KMeansClustering() { Enabled = true, IsImportingGraph = true, train_size = 500, validation_size = 100, test_size = 100, batch_size =100 }.Run();
}
[TestMethod]
public void LinearRegression()
{
tf.Graph().as_default();
new LinearRegression() { Enabled = true }.Run();
}
[TestMethod]
public void LogisticRegression()
{
tf.Graph().as_default();
new LogisticRegression() { Enabled = true, training_epochs=10, train_size = 500, validation_size = 100, test_size = 100 }.Run();
}
[Ignore]
[TestMethod]
public void NaiveBayesClassifier()
{
tf.Graph().as_default();
new NaiveBayesClassifier() { Enabled = false }.Run();
}
[Ignore]
[TestMethod]
public void NamedEntityRecognition()
{
tf.Graph().as_default();
new NamedEntityRecognition() { Enabled = true }.Run();
}
[TestMethod]
public void NearestNeighbor()
{
tf.Graph().as_default();
new NearestNeighbor() { Enabled = true, TrainSize = 500, ValidationSize = 100, TestSize = 100 }.Run();
}
[Ignore]
[TestMethod]
public void WordCnnTextClassification()
=> new CnnTextClassification { Enabled = true, ModelName = "word_cnn", DataLimit =100 }.Run();
[Ignore]
[TestMethod]
public void CharCnnTextClassification()
=> new CnnTextClassification { Enabled = true, ModelName = "char_cnn", DataLimit = 100 }.Run();
[Ignore]
[TestMethod]
public void TextClassificationWithMovieReviews()
{
tf.Graph().as_default();
new BinaryTextClassification() { Enabled = true }.Run();
}
[TestMethod]
public void NeuralNetXor()
{
tf.Graph().as_default();
Assert.IsTrue(new NeuralNetXor() { Enabled = true, IsImportingGraph = false }.Run());
}
[Ignore("Not working")]
[TestMethod]
public void NeuralNetXor_ImportedGraph()
{
tf.Graph().as_default();
Assert.IsTrue(new NeuralNetXor() { Enabled = true, IsImportingGraph = true }.Run());
}
[Ignore("Not working")]
[TestMethod]
public void ObjectDetection()
{
tf.Graph().as_default();
Assert.IsTrue(new ObjectDetection() { Enabled = true, IsImportingGraph = true }.Run());
}
}
}
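
Every test here resets the default graph with tf.Graph().as_default() before running its example, so state never leaks between tests. A new example would be wired up the same way (class name and flags hypothetical):

[TestMethod]
public void MyNewExample()
{
    tf.Graph().as_default();
    Assert.IsTrue(new MyNewExample() { Enabled = true }.Run());
}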

+0 -1 test/TensorFlowNET.UnitTest/TensorFlowNET.UnitTest.csproj

@@ -39,7 +39,6 @@
<ProjectReference Include="..\..\src\TensorFlowHub\TensorFlowHub.csproj" />
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
<ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" />
<ProjectReference Include="..\TensorFlowNET.Examples\TensorFlowNET.Examples.csproj" />
</ItemGroup>

<ItemGroup>

