From c632c40096254e535b07af03273e5d57f3028e96 Mon Sep 17 00:00:00 2001
From: quanap5kr
Date: Mon, 7 Dec 2020 16:39:00 +0900
Subject: [PATCH 1/6] scan unit test

---
 .../ManagedAPI/ControlFlowApiTest.cs | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
index bad1f926..c1754393 100644
--- a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
+++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
@@ -1,4 +1,5 @@
 using Microsoft.VisualStudio.TestTools.UnitTesting;
+using NumSharp;
 using System;
 using Tensorflow;
 using static Tensorflow.Binding;
@@ -45,5 +46,21 @@ namespace TensorFlowNET.UnitTest.ManagedAPI
             var r = tf.while_loop(c, b, i);
             Assert.AreEqual(10, (int)r);
         }
+
+        [TestMethod, Ignore]
+        public void ScanFunctionGraphMode()
+        {
+            tf.compat.v1.disable_eager_execution();
+            Func<Tensor, Tensor, Tensor> fn = (prev, current) => tf.add(prev, current);
+            var input = tf.placeholder(TF_DataType.TF_FLOAT, new TensorShape(6));
+            var scan = tf.scan(fn, input);
+
+            using (var sess = tf.Session())
+            {
+                sess.run(tf.global_variables_initializer());
+                var result = sess.run(scan, new FeedItem(input, np.array(1, 2, 3, 4, 5, 6)));
+                Assert.AreEqual(new float[] { 1, 3, 6, 10, 15, 21 }, result.ToArray<float>());
+            }
+        }
     }
 }

From fe82fd5c267f620c3bd39ea2de2b7eabfac2ab2b Mon Sep 17 00:00:00 2001
From: quanap5kr
Date: Mon, 7 Dec 2020 16:45:52 +0900
Subject: [PATCH 2/6] scan unit test

---
 test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
index c1754393..1cd17866 100644
--- a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
+++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
@@ -47,10 +47,12 @@ namespace TensorFlowNET.UnitTest.ManagedAPI
             Assert.AreEqual(10, (int)r);
         }
 
+        [TestMethod, Ignore]
         public void ScanFunctionGraphMode()
        {
             tf.compat.v1.disable_eager_execution();
+
             Func<Tensor, Tensor, Tensor> fn = (prev, current) => tf.add(prev, current);
             var input = tf.placeholder(TF_DataType.TF_FLOAT, new TensorShape(6));
             var scan = tf.scan(fn, input);

From 9d7d326e6b35d2dc4de32530d62d09a06b20b83f Mon Sep 17 00:00:00 2001
From: nhirschey
Date: Mon, 7 Dec 2020 22:44:42 +0000
Subject: [PATCH 3/6] Add F# example to readme

- Added an example of eager-mode linear regression to the readme
- Added the example data to the C# example
---
 README.md | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 72 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 19583211..67b732c4 100644
--- a/README.md
+++ b/README.md
@@ -33,6 +33,14 @@ In comparison to other projects, like for instance [TensorFlowSharp](https://www.
 | tf.net 0.15 | x | x | |
 | tf.net 0.14 | x | | |
 
+Read the docs & book [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html).
+
+There are many examples residing at [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).
+
+For troubleshooting of the examples or the installation, please refer [here](tensorflowlib/README.md).
+
+#### C# Example
+
 Install TF.NET and TensorFlow binary through NuGet.
 ```sh
 ### install tensorflow C#/F# binding
 PM> Install-Package TensorFlow.NET
 ### install keras for tensorflow
 PM> Install-Package TensorFlow.Keras
 ```
 
 int training_steps = 1000;
 float learning_rate = 0.01f;
 int display_step = 100;
 
+// Sample data
+train_X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
+    7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
+train_Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
+    2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
+n_samples = train_X.shape[0];
+
 // We can set a fixed init value in order to demo
 var W = tf.Variable(-0.06f, name: "weight");
 var b = tf.Variable(-0.73f, name: "bias");
@@ -142,11 +157,65 @@ model.fit(x_train[new Slice(0, 1000)], y_train[new Slice(0, 1000)],
   validation_split: 0.2f);
 ```
 
-Read the docs & book [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html).
+#### F# Example
 
-There are many examples reside at [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).
+Linear Regression in `Eager` mode:
+
+```fsharp
+#r "nuget: TensorFlow.Net"
+#r "nuget: TensorFlow.Keras"
+#r "nuget: SciSharp.TensorFlow.Redist"
+#r "nuget: NumSharp"
+
+open System
+open NumSharp
+open Tensorflow
+open Tensorflow.Keras
+
+let tf = Binding.New()
+tf.enable_eager_execution()
+
+// Parameters
+let training_steps = 1000
+let learning_rate = 0.01f
+let display_step = 100
+
+// Sample data
+let train_X =
+    np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
+             7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f)
+let train_Y =
+    np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
+             2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f)
+let n_samples = train_X.shape.[0]
+
+// We can set a fixed init value in order to demo
+let W = tf.Variable(-0.06f,name = "weight")
+let b = tf.Variable(-0.73f, name = "bias")
+let optimizer = KerasApi.keras.optimizers.SGD(learning_rate)
+
+// Run training for the given number of steps.
+for step = 1 to (training_steps + 1) do
+    // Run the optimization to update W and b values.
+    // Wrap computation inside a GradientTape for automatic differentiation.
+    use g = tf.GradientTape()
+    // Linear regressoin (Wx + b).
+    let pred = W * train_X + b
+    // Mean square error.
+    let loss = tf.reduce_sum(tf.pow(pred - train_Y,2)) / (2 * n_samples)
+    // Recording should stop at this point.
+    // Compute the gradients.
+    let gradients = g.gradient(loss,struct (W,b))
+
+    // Update W and b following gradients.
+    optimizer.apply_gradients(Binding.zip(gradients, struct (W,b)))
+
+    if (step % display_step) = 0 then
+        let pred = W * train_X + b
+        let loss = tf.reduce_sum(tf.pow(pred-train_Y,2)) / (2 * n_samples)
+        printfn $"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}"
+```
 
-Troubleshooting of running example or installation, please refer [here](tensorflowlib/README.md).
 
 ### Contribute:

From 143714da51391068aca93b6f11882e74e850e49e Mon Sep 17 00:00:00 2001
From: nhirschey
Date: Mon, 7 Dec 2020 16:19:38 -0700
Subject: [PATCH 4/6] Update declaration of train_X, train_Y, and n_samples in C#

Updated the variable declarations to match the code in the C# Jupyter
notebook:
https://github.com/SciSharp/SciSharpCube/blob/master/home/samples/LinearRegression.ipynb
---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 67b732c4..89c0f61d 100644
--- a/README.md
+++ b/README.md
@@ -72,6 +72,8 @@ float learning_rate = 0.01f;
 int display_step = 100;
 
 // Sample data
+NDArray train_X, train_Y;
+int n_samples;
 train_X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
     7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
 train_Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
     2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
 n_samples = train_X.shape[0];

From e2e9c361ad0919d082194a9f4649afb9b9357c88 Mon Sep 17 00:00:00 2001
From: Nicholas Hirschey
Date: Fri, 11 Dec 2020 16:56:13 +0000
Subject: [PATCH 5/6] use `open type` for F# readme example

This is the proper way to mimic the C# `using static` code.
---
 README.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 89c0f61d..1cf19e92 100644
--- a/README.md
+++ b/README.md
@@ -169,12 +169,12 @@ Linear Regression in `Eager` mode:
 #r "nuget: SciSharp.TensorFlow.Redist"
 #r "nuget: NumSharp"
 
-open System
 open NumSharp
 open Tensorflow
-open Tensorflow.Keras
+open type Tensorflow.Binding
+open type Tensorflow.KerasApi
 
-let tf = Binding.New()
+let tf = New()
 tf.enable_eager_execution()
 
 // Parameters
@@ -194,7 +194,7 @@ let n_samples = train_X.shape.[0]
 // We can set a fixed init value in order to demo
 let W = tf.Variable(-0.06f,name = "weight")
 let b = tf.Variable(-0.73f, name = "bias")
-let optimizer = KerasApi.keras.optimizers.SGD(learning_rate)
+let optimizer = keras.optimizers.SGD(learning_rate)
 
 // Run training for the given number of steps.
 for step = 1 to (training_steps + 1) do
@@ -210,7 +210,7 @@ for step = 1 to (training_steps + 1) do
     let gradients = g.gradient(loss,struct (W,b))
 
     // Update W and b following gradients.
-    optimizer.apply_gradients(Binding.zip(gradients, struct (W,b)))
+    optimizer.apply_gradients(zip(gradients, struct (W,b)))
 
     if (step % display_step) = 0 then
         let pred = W * train_X + b
         let loss = tf.reduce_sum(tf.pow(pred-train_Y,2)) / (2 * n_samples)

From ffda41a12ea3dbb1e374c0a4363e2919c2f87093 Mon Sep 17 00:00:00 2001
From: Nicholas Hirschey
Date: Fri, 11 Dec 2020 16:59:21 +0000
Subject: [PATCH 6/6] typo fix.

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 1cf19e92..f067f9a4 100644
--- a/README.md
+++ b/README.md
@@ -201,7 +201,7 @@ for step = 1 to (training_steps + 1) do
     // Run the optimization to update W and b values.
     // Wrap computation inside a GradientTape for automatic differentiation.
     use g = tf.GradientTape()
-    // Linear regressoin (Wx + b).
+    // Linear regression (Wx + b).
     let pred = W * train_X + b
     // Mean square error.
     let loss = tf.reduce_sum(tf.pow(pred - train_Y,2)) / (2 * n_samples)
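
For readers unfamiliar with `tf.scan`, the `ScanFunctionGraphMode` test in patches 1 and 2 depends on it folding the step function over the leading axis of the input while keeping every intermediate accumulation, so a `tf.add` step turns `[1, 2, 3, 4, 5, 6]` into the running sums `[1, 3, 6, 10, 15, 21]`. The sketch below reproduces those semantics in plain C# with no TensorFlow dependency; the `Scan` helper here is purely illustrative and is not part of the TF.NET API.

```csharp
using System;

class ScanDemo
{
    // Folds `fn` over `input`, keeping every intermediate accumulation,
    // which is the same shape of computation tf.scan performs along a
    // tensor's first axis. With no explicit initializer, the first
    // element seeds the accumulator, matching the unit test's expectation.
    static float[] Scan(Func<float, float, float> fn, float[] input)
    {
        var outputs = new float[input.Length];
        var acc = input[0];
        outputs[0] = acc;
        for (int i = 1; i < input.Length; i++)
        {
            acc = fn(acc, input[i]); // (prev, current) -> next accumulator
            outputs[i] = acc;
        }
        return outputs;
    }

    static void Main()
    {
        // Same step function and input data as the unit test.
        var result = Scan((prev, current) => prev + current,
                          new float[] { 1, 2, 3, 4, 5, 6 });
        Console.WriteLine(string.Join(", ", result)); // 1, 3, 6, 10, 15, 21
    }
}
```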