@@ -33,6 +33,14 @@ In comparison to other projects, like for instance [TensorFlowSharp](https://www
| tf.net 0.15 | x | x | | |
| tf.net 0.14 | x | | | |

Read the docs & book [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html).

There are many examples available at [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).

For troubleshooting when running the examples or during installation, please see [here](tensorflowlib/README.md).

#### C# Example

Install TF.NET and the TensorFlow binary through NuGet.
```sh
### install tensorflow C#/F# binding
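# (The rest of this block is elided by the diff. A typical install, using the same
#  packages the F# example below references, looks like this in the Package Manager Console.)
PM> Install-Package TensorFlow.NET
PM> Install-Package TensorFlow.Keras

### install tensorflow binary (CPU version)
PM> Install-Package SciSharp.TensorFlow.Redist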
@@ -63,6 +71,15 @@ int training_steps = 1000;
float learning_rate = 0.01f;
int display_step = 100;

// Sample data
NDArray train_X, train_Y;
int n_samples;
train_X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
                   7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
train_Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
                   2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
n_samples = train_X.shape[0];

// We use fixed initial values for the demo
var W = tf.Variable(-0.06f, name: "weight");
var b = tf.Variable(-0.73f, name: "bias");
@@ -142,11 +159,65 @@ model.fit(x_train[new Slice(0, 1000)], y_train[new Slice(0, 1000)],
    validation_split: 0.2f);
```
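
The hunks above elide the rest of the C# example, including the eager-mode training loop. As a rough sketch (not the exact code omitted by the diff), a loop mirroring the F# example below could look like this; it reuses the variables defined above and assumes `using static Tensorflow.Binding;` and `using static Tensorflow.KerasApi;` are in scope:

```csharp
// Optimizer for the linear model (same SGD setup as the F# example).
var optimizer = keras.optimizers.SGD(learning_rate);

// Run training for the given number of steps.
foreach (var step in range(1, training_steps + 1))
{
    // Wrap the computation inside a GradientTape for automatic differentiation.
    using var g = tf.GradientTape();
    // Linear regression (Wx + b).
    var pred = W * train_X + b;
    // Mean square error.
    var loss = tf.reduce_sum(tf.pow(pred - train_Y, 2)) / (2 * n_samples);
    // Compute the gradients of the loss with respect to W and b.
    var gradients = g.gradient(loss, (W, b));

    // Update W and b following the gradients.
    optimizer.apply_gradients(zip(gradients, (W, b)));

    if (step % display_step == 0)
    {
        pred = W * train_X + b;
        loss = tf.reduce_sum(tf.pow(pred - train_Y, 2)) / (2 * n_samples);
        print($"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}");
    }
}
```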

Read the docs & book [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html).

#### F# Example

There are many examples available at [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).

Linear Regression in `Eager` mode:

```fsharp
#r "nuget: TensorFlow.Net"
#r "nuget: TensorFlow.Keras"
#r "nuget: SciSharp.TensorFlow.Redist"
#r "nuget: NumSharp"

open NumSharp
open Tensorflow
open type Tensorflow.Binding
open type Tensorflow.KerasApi

let tf = New<tensorflow>()
tf.enable_eager_execution()

// Parameters
let training_steps = 1000
let learning_rate = 0.01f
let display_step = 100

// Sample data
let train_X =
    np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
             7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f)
let train_Y =
    np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
             2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f)
let n_samples = train_X.shape.[0]

// We use fixed initial values for the demo
let W = tf.Variable(-0.06f, name = "weight")
let b = tf.Variable(-0.73f, name = "bias")
let optimizer = keras.optimizers.SGD(learning_rate)

// Run training for the given number of steps.
for step = 1 to training_steps do
    // Run the optimization to update W and b values.
    // Wrap the computation inside a GradientTape for automatic differentiation.
    use g = tf.GradientTape()
    // Linear regression (Wx + b).
    let pred = W * train_X + b
    // Mean square error.
    let loss = tf.reduce_sum(tf.pow(pred - train_Y, 2)) / (2 * n_samples)
    // Stop recording and compute the gradients with respect to W and b.
    let gradients = g.gradient(loss, struct (W, b))

    // Update W and b following the gradients.
    optimizer.apply_gradients(zip(gradients, struct (W, b)))

    if (step % display_step) = 0 then
        let pred = W * train_X + b
        let loss = tf.reduce_sum(tf.pow(pred - train_Y, 2)) / (2 * n_samples)
        printfn $"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}"
```
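
The `#r "nuget: ..."` references above are resolved by F# Interactive, so the script can be run directly with, for example, `dotnet fsi LinearRegression.fsx` (the file name here is just an example).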

For troubleshooting when running the examples or during installation, please see [here](tensorflowlib/README.md).

### Contribute: