Browse Source

Operator overloading between Variable and Tensor should return Tensor.

tags/v0.20
Oceania2018 5 years ago
parent
commit
9e68b9483b
4 changed files with 63 additions and 91 deletions
  1. +28
    -49
      README.md
  2. +13
    -0
      src/TensorFlowNET.Core/Operations/gen_math_ops.cs
  3. +21
    -41
      src/TensorFlowNET.Core/Variables/ResourceVariable.Operators.cs
  4. +1
    -1
      test/TensorFlowNET.UnitTest/Basics/VariableTest.cs

+ 28
- 49
README.md View File

@@ -11,8 +11,6 @@

*The master branch is now based on TensorFlow 2.2; the v0.15-tensorflow1.15 branch is based on TensorFlow 1.15.*

TF.NET is a member project of [SciSharp STACK](https://github.com/SciSharp).


![tensors_flowing](docs/assets/tensors_flowing.gif)

@@ -56,59 +54,40 @@ using static Tensorflow.Binding;
Linear Regression:

```c#
// We can set a fixed init value in order to debug
// Parameters
int training_steps = 1000;
float learning_rate = 0.01f;
int display_step = 100;

// We can set a fixed init value in order to demo
var W = tf.Variable(-0.06f, name: "weight");
var b = tf.Variable(-0.73f, name: "bias");
var optimizer = tf.optimizers.SGD(learning_rate);

// Construct a linear model
var pred = tf.add(tf.multiply(X, W), b);

// Mean squared error
var cost = tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * n_samples);

// Gradient descent
// Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);

// Initialize the variables (i.e. assign their default value)
var init = tf.global_variables_initializer();

// Start training
using(tf.Session())
// Run training for the given number of steps.
foreach (var step in range(1, training_steps + 1))
{
// Run the initializer
sess.run(init);

// Fit all training data
for (int epoch = 0; epoch < training_epochs; epoch++)
// Run the optimization to update W and b values.
// Wrap computation inside a GradientTape for automatic differentiation.
using var g = tf.GradientTape();
// Linear regression (Wx + b).
var pred = W * X + b;
// Mean square error.
var loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
// should stop recording
// Compute gradients.
var gradients = g.gradient(loss, (W, b));

// Update W and b following gradients.
optimizer.apply_gradients(zip(gradients, (W, b)));

if (step % display_step == 0)
{
foreach (var (x, y) in zip<float>(train_X, train_Y))
sess.run(optimizer, (X, x), (Y, y));

// Display logs per epoch step
if ((epoch + 1) % display_step == 0)
{
var c = sess.run(cost, (X, train_X), (Y, train_Y));
Console.WriteLine($"Epoch: {epoch + 1} cost={c} " + $"W={sess.run(W)} b={sess.run(b)}");
}
pred = W * X + b;
loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
print($"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}");
}

Console.WriteLine("Optimization Finished!");
var training_cost = sess.run(cost, (X, train_X), (Y, train_Y));
Console.WriteLine($"Training cost={training_cost} W={sess.run(W)} b={sess.run(b)}");

// Testing example
var test_X = np.array(6.83f, 4.668f, 8.9f, 7.91f, 5.7f, 8.7f, 3.1f, 2.1f);
var test_Y = np.array(1.84f, 2.273f, 3.2f, 2.831f, 2.92f, 3.24f, 1.35f, 1.03f);
Console.WriteLine("Testing... (Mean square loss Comparison)");
var testing_cost = sess.run(tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * test_X.shape[0]),
(X, test_X), (Y, test_Y));
Console.WriteLine($"Testing cost={testing_cost}");
var diff = Math.Abs((float)training_cost - (float)testing_cost);
Console.WriteLine($"Absolute mean square loss difference: {diff}");

return diff < 0.01;
});
}
```

Run this example in [Jupyter Notebook](https://github.com/SciSharp/SciSharpCube).


+ 13
- 0
src/TensorFlowNET.Core/Operations/gen_math_ops.cs View File

@@ -551,6 +551,19 @@ namespace Tensorflow

public static Tensor greater<Tx, Ty>(Tx x, Ty y, string name = null)
{
if (tf.context.executing_eagerly())
{
var results = EagerTensorPass.Create();
var inputs = EagerTensorPass.From(x, y);
Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Greater", name,
inputs.Points, inputs.Length,
null, null,
results.Points, results.Length);
status.Check(true);
return results[0].Resolve();
}

var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });

return _op.outputs[0];


+ 21
- 41
src/TensorFlowNET.Core/Variables/ResourceVariable.Operators.cs View File

@@ -24,24 +24,24 @@ namespace Tensorflow
{
public static OpDefLibrary _op_def_lib = new OpDefLibrary();

public static ResourceVariable operator +(ResourceVariable x, int y) => op_helper("add", x, y);
public static ResourceVariable operator +(ResourceVariable x, float y) => op_helper("add", x, y);
public static ResourceVariable operator +(ResourceVariable x, double y) => op_helper("add", x, y);
public static ResourceVariable operator +(ResourceVariable x, ResourceVariable y) => op_helper("add", x, y);
public static ResourceVariable operator -(ResourceVariable x, int y) => op_helper("sub", x, y);
public static ResourceVariable operator -(ResourceVariable x, float y) => op_helper("sub", x, y);
public static ResourceVariable operator -(ResourceVariable x, double y) => op_helper("sub", x, y);
public static ResourceVariable operator -(ResourceVariable x, Tensor y) => op_helper("sub", x, y);
public static ResourceVariable operator -(ResourceVariable x, ResourceVariable y) => op_helper("sub", x, y);
public static Tensor operator +(ResourceVariable x, int y) => op_helper("add", x, y);
public static Tensor operator +(ResourceVariable x, float y) => op_helper("add", x, y);
public static Tensor operator +(ResourceVariable x, double y) => op_helper("add", x, y);
public static Tensor operator +(ResourceVariable x, ResourceVariable y) => op_helper("add", x, y);
public static Tensor operator -(ResourceVariable x, int y) => op_helper("sub", x, y);
public static Tensor operator -(ResourceVariable x, float y) => op_helper("sub", x, y);
public static Tensor operator -(ResourceVariable x, double y) => op_helper("sub", x, y);
public static Tensor operator -(ResourceVariable x, Tensor y) => op_helper("sub", x, y);
public static Tensor operator -(ResourceVariable x, ResourceVariable y) => op_helper("sub", x, y);

public static ResourceVariable operator *(ResourceVariable x, ResourceVariable y) => op_helper("mul", x, y);
public static ResourceVariable operator *(ResourceVariable x, NDArray y) => op_helper("mul", x, y);
public static Tensor operator *(ResourceVariable x, ResourceVariable y) => op_helper("mul", x, y);
public static Tensor operator *(ResourceVariable x, NDArray y) => op_helper("mul", x, y);

public static ResourceVariable operator <(ResourceVariable x, Tensor y) => less(x.value(), y);
public static Tensor operator <(ResourceVariable x, Tensor y) => op_helper("less", x, y);

public static ResourceVariable operator >(ResourceVariable x, Tensor y) => greater(x.value(), y);
public static Tensor operator >(ResourceVariable x, Tensor y) => op_helper("greater", x, y);

private static ResourceVariable op_helper<T>(string default_name, ResourceVariable x, T y)
private static Tensor op_helper<T>(string default_name, ResourceVariable x, T y)
=> tf_with(ops.name_scope(null, default_name, new { x, y }), scope =>
{
string name = scope;
@@ -61,39 +61,19 @@ namespace Tensorflow
case "mul":
result = gen_math_ops.mul(xVal, yTensor, name: name);
break;
case "less":
result = gen_math_ops.less(xVal, yTensor, name);
break;
case "greater":
result = gen_math_ops.greater(xVal, yTensor, name);
break;
default:
throw new NotImplementedException("");
}

// x.assign(result);
// result.ResourceVar = x;
return tf.Variable(result);
return result;
});

// Builds an element-wise "Less" (x < y) op and wraps the resulting Tensor in a
// new ResourceVariable before returning it.
// NOTE(review): this is the pre-change helper removed by this commit — the commit's
// stated purpose is that comparison operators should return Tensor, not Variable,
// so the tf.Variable(...) wrapping below is exactly what is being eliminated.
private static ResourceVariable less<Tx, Ty>(Tx x, Ty y, string name = null)
{
// Eager mode: dispatch the op directly through the native fast-path C API.
if (tf.context.executing_eagerly())
{
var results = EagerTensorPass.Create();
var inputs = EagerTensorPass.From(x, y);
Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
"Less", name,
inputs.Points, inputs.Length,
null, null,
results.Points, results.Length);
// Throws if the native call reported a non-OK status.
status.Check(true);
// Wraps the eager result tensor in a fresh Variable (the behavior this commit removes).
return tf.Variable(results[0].Resolve());
}

// Graph mode: register the "Less" op through the op-def library.
var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y });

return tf.Variable(_op.outputs[0]);
}
// Builds an element-wise "Greater" (x > y) op in graph mode and wraps the result
// in a new ResourceVariable.
// NOTE(review): pre-change helper removed by this commit; unlike `less` above it has
// no eager fast path — the replacement lives in gen_math_ops.greater, which returns
// a Tensor and handles eager execution.
private static ResourceVariable greater<Tx, Ty>(Tx x, Ty y, string name = null)
{
var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });

return tf.Variable(_op.outputs[0]);
}
}
}

+ 1
- 1
test/TensorFlowNET.UnitTest/Basics/VariableTest.cs View File

@@ -57,7 +57,7 @@ namespace TensorFlowNET.UnitTest.Basics
{
var x = tf.Variable(10, name: "x");
for (int i = 0; i < 5; i++)
x = x + 1;
x.assign(x + 1);

Assert.AreEqual(15, (int)x.numpy());
}


Loading…
Cancel
Save