@@ -47,10 +47,14 @@ namespace Tensorflow
             x = math_ops.conj(x);
             y = math_ops.conj(y);
-            var r1 = math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx);
-            var r2 = math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry);
-            return (gen_array_ops.reshape(r1, sx), gen_array_ops.reshape(r2, sy));
+            var mul1 = gen_math_ops.mul(grad, y);
+            var mul2 = gen_math_ops.mul(x, grad);
+            var reduce_sum1 = math_ops.reduce_sum(mul1, rx);
+            var reduce_sum2 = math_ops.reduce_sum(mul2, ry);
+            var reshape1 = gen_array_ops.reshape(reduce_sum1, sx);
+            var reshape2 = gen_array_ops.reshape(reduce_sum2, sy);
+            return (reshape1, reshape2);
         }
         public static (Tensor, Tensor) _SubGrad(Operation op, Tensor grad)
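For reference, the multiply gradient assembled above follows the usual broadcast-aware product rule; in the hunk's own notation (rx/ry from broadcast_gradient_args, sx/sy the input shapes), a sketch of what both the old and the refactored bodies compute is

\[
\frac{\partial L}{\partial x} = \operatorname{reshape}\!\Big(\textstyle\sum_{rx} \mathrm{grad}\odot y,\; s_x\Big),
\qquad
\frac{\partial L}{\partial y} = \operatorname{reshape}\!\Big(\textstyle\sum_{ry} x\odot \mathrm{grad},\; s_y\Big).
\]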
@@ -129,9 +133,12 @@ namespace Tensorflow
             var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy);
             x = math_ops.conj(x);
             y = math_ops.conj(y);
-            y = math_ops.conj(z);
-            var gx = gen_array_ops.reshape(math_ops.reduce_sum(grad * y * gen_math_ops.pow(x, y - 1.0), rx), sx);
-            Tensor log_x = null;
+            z = math_ops.conj(z);
+            var pow = gen_math_ops.pow(x, y - 1.0f);
+            var mul = grad * y * pow;
+            var reduce_sum = math_ops.reduce_sum(mul, rx);
+            var gx = gen_array_ops.reshape(reduce_sum, sx);
             // Avoid false singularity at x = 0
             Tensor mask = null;
             if (x.dtype.is_complex())
@@ -142,8 +149,10 @@ namespace Tensorflow
             var safe_x = array_ops.where(mask, x, ones);
             var x1 = gen_array_ops.log(safe_x);
             var y1 = array_ops.zeros_like(x);
-            log_x = array_ops.where(mask, x1, y1);
-            var gy = gen_array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy);
+            var log_x = array_ops.where(mask, x1, y1);
+            var mul1 = grad * z * log_x;
+            var reduce_sum1 = math_ops.reduce_sum(mul1, ry);
+            var gy = gen_array_ops.reshape(reduce_sum1, sy);
             return (gx, gy);
         }
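For z = pow(x, y), the gradients built in these hunks are the textbook ones, with log x taken only where the mask marks it safe (the "false singularity at x = 0" guard); a sketch in the same notation:

\[
\frac{\partial z}{\partial x} = y\,x^{y-1},
\qquad
\frac{\partial z}{\partial y} = x^{y}\log x = z\log x,
\]

each term multiplied by grad, summed over rx or ry, and reshaped back to sx or sy as in _MulGrad.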
@@ -196,11 +196,11 @@ namespace Tensorflow
             _create_op_helper(op, true);
-            Console.Write($"create_op: {op_type} '{node_def.Name}'");
+            /*Console.Write($"create_op: {op_type} '{node_def.Name}'");
             Console.Write($", inputs: {(inputs.Length == 0 ? "empty" : String.Join(", ", inputs.Select(x => x.name)))}");
             Console.Write($", control_inputs: {(control_inputs.Length == 0 ? "empty" : String.Join(", ", control_inputs.Select(x => x.name)))}");
             Console.Write($", outputs: {(op.outputs.Length == 0 ? "empty" : String.Join(", ", op.outputs.Select(x => x.name)))}");
-            Console.WriteLine();
+            Console.WriteLine();*/
             return op;
         }
@@ -1,5 +1,4 @@
-using Newtonsoft.Json;
 using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Runtime.InteropServices;
@@ -15,7 +14,7 @@ namespace Tensorflow
         private Tensor[] _outputs;
         public Tensor[] outputs => _outputs;
-        [JsonIgnore]
+        //[JsonIgnore]
         public Tensor output => _outputs.FirstOrDefault();
         public int NumControlOutputs => c_api.TF_OperationNumControlOutputs(_handle);
@@ -1,5 +1,4 @@
 using Google.Protobuf.Collections;
-using Newtonsoft.Json;
 using System;
 using System.Collections.Generic;
 using System.Linq;
@@ -13,15 +12,15 @@ namespace Tensorflow
         private readonly IntPtr _handle; // _c_op in python
         private Graph _graph;
-        [JsonIgnore]
+        //[JsonIgnore]
         public Graph graph => _graph;
-        [JsonIgnore]
+        //[JsonIgnore]
         public int _id => _id_value;
-        [JsonIgnore]
+        //[JsonIgnore]
         public int _id_value;
         public string type => OpType;
-        [JsonIgnore]
+        //[JsonIgnore]
         public Operation op => this;
         public TF_DataType dtype => TF_DataType.DtInvalid;
         private Status status = new Status();
@@ -52,10 +52,4 @@ Upgraded to TensorFlow 1.13 RC2.
     <Content CopyToOutputDirectory="PreserveNewest" Include="./runtimes/win-x64/native/tensorflow.dll" Link="tensorflow.dll" Pack="true" PackagePath="runtimes/win-x64/native/tensorflow.dll" />
   </ItemGroup>
-  <ItemGroup>
-    <Reference Include="Newtonsoft.Json">
-      <HintPath>C:\Program Files\dotnet\sdk\NuGetFallbackFolder\newtonsoft.json\9.0.1\lib\netstandard1.0\Newtonsoft.Json.dll</HintPath>
-    </Reference>
-  </ItemGroup>
 </Project>
@@ -1,5 +1,4 @@
-using Newtonsoft.Json;
 using NumSharp.Core;
 using System;
 using System.Collections.Generic;
 using System.Linq;
@@ -18,13 +17,13 @@ namespace Tensorflow
         private readonly IntPtr _handle;
         private int _id;
-        [JsonIgnore]
+        //[JsonIgnore]
         public int Id => _id;
-        [JsonIgnore]
+        //[JsonIgnore]
         public Graph graph => op?.graph;
-        [JsonIgnore]
+        //[JsonIgnore]
         public Operation op { get; }
-        [JsonIgnore]
+        //[JsonIgnore]
         public Tensor[] outputs => op.outputs;
         /// <summary>
@@ -104,7 +103,7 @@ namespace Tensorflow
         public int NDims => rank;
-        [JsonIgnore]
+        //[JsonIgnore]
         public Operation[] Consumers => consumers();
         public string Device => op.Device;
@@ -351,7 +351,7 @@ namespace Tensorflow
             return (oper, out_grads) =>
             {
-                Console.WriteLine($"get_gradient_function: {oper.type} '{oper.name}'");
+                // Console.WriteLine($"get_gradient_function: {oper.type} '{oper.name}'");
                 switch (oper.type)
                 {
@@ -1,5 +1,4 @@
-using Newtonsoft.Json;
 using NumSharp.Core;
 using System;
 using System.Collections.Generic;
 using System.Text;
@@ -13,17 +12,15 @@ namespace TensorFlowNET.Examples
     /// </summary>
     public class LinearRegression : Python, IExample
     {
-        private NumPyRandom rng = np.random;
+        NumPyRandom rng = np.random;
+        // Parameters
+        float learning_rate = 0.01f;
+        int training_epochs = 1000;
+        int display_step = 50;
         public void Run()
         {
-            var graph = tf.Graph().as_default();
-            // Parameters
-            float learning_rate = 0.01f;
-            int training_epochs = 1000;
-            int display_step = 10;
             // Training Data
             var train_X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
                 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
@@ -31,46 +28,28 @@
                 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
             var n_samples = train_X.shape[0];
+            var graph = tf.Graph().as_default();
             // tf Graph Input
             var X = tf.placeholder(tf.float32);
             var Y = tf.placeholder(tf.float32);
             // Set model weights
-            //var rnd1 = rng.randn<float>();
-            //var rnd2 = rng.randn<float>();
+            // We can set a fixed init value in order to debug
+            // var rnd1 = rng.randn<float>();
+            // var rnd2 = rng.randn<float>();
             var W = tf.Variable(-0.06f, name: "weight");
             var b = tf.Variable(-0.73f, name: "bias");
-            var mul = tf.multiply(X, W);
-            var pred = tf.add(mul, b);
+            // Construct a linear model
+            var pred = tf.add(tf.multiply(X, W), b);
             // Mean squared error
-            var sub = pred - Y;
-            var pow = tf.pow(sub, 2.0f);
-            var reduce = tf.reduce_sum(pow);
-            var cost = reduce / (2.0f * n_samples);
+            var cost = tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * n_samples);
             // Gradient descent
             // Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
-            var grad = tf.train.GradientDescentOptimizer(learning_rate);
-            var optimizer = grad.minimize(cost);
-            //tf.train.export_meta_graph(filename: "linear_regression.meta.bin");
-            // import meta
-            // var new_saver = tf.train.import_meta_graph("linear_regression.meta.bin");
-            var text = JsonConvert.SerializeObject(graph, new JsonSerializerSettings
-            {
-                Formatting = Formatting.Indented
-            });
-            /*var cost = graph.OperationByName("truediv").output;
-            var pred = graph.OperationByName("Add").output;
-            var optimizer = graph.OperationByName("GradientDescent");
-            var X = graph.OperationByName("Placeholder").output;
-            var Y = graph.OperationByName("Placeholder_1").output;
-            var W = graph.OperationByName("weight").output;
-            var b = graph.OperationByName("bias").output;*/
+            var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);
             // Initialize the variables (i.e. assign their default value)
             var init = tf.global_variables_initializer();
@@ -89,22 +68,33 @@ namespace TensorFlowNET.Examples
                         sess.run(optimizer,
                             new FeedItem(X, x),
                             new FeedItem(Y, y));
-                        var rW = sess.run(W);
                     }
                     // Display logs per epoch step
-                    /*if ((epoch + 1) % display_step == 0)
+                    if ((epoch + 1) % display_step == 0)
                     {
                         var c = sess.run(cost,
                             new FeedItem(X, train_X),
                             new FeedItem(Y, train_Y));
-                        var rW = sess.run(W);
-                        Console.WriteLine($"Epoch: {epoch + 1} cost={c} " +
-                            $"W={rW} b={sess.run(b)}");
-                    }*/
+                        Console.WriteLine($"Epoch: {epoch + 1} cost={c} " + $"W={sess.run(W)} b={sess.run(b)}");
+                    }
                 }
                 Console.WriteLine("Optimization Finished!");
+                var training_cost = sess.run(cost,
+                    new FeedItem(X, train_X),
+                    new FeedItem(Y, train_Y));
+                Console.WriteLine($"Training cost={training_cost} W={sess.run(W)} b={sess.run(b)}");
+                // Testing example
+                var test_X = np.array(6.83f, 4.668f, 8.9f, 7.91f, 5.7f, 8.7f, 3.1f, 2.1f);
+                var test_Y = np.array(1.84f, 2.273f, 3.2f, 2.831f, 2.92f, 3.24f, 1.35f, 1.03f);
+                Console.WriteLine("Testing... (Mean square loss Comparison)");
+                var testing_cost = sess.run(tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * test_X.shape[0]),
+                    new FeedItem(X, test_X),
+                    new FeedItem(Y, test_Y));
+                Console.WriteLine($"Testing cost={testing_cost}");
+                Console.WriteLine($"Absolute mean square loss difference: {Math.Abs((float)training_cost - (float)testing_cost)}");
             });
         }
     }
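The example minimizes the halved mean squared error of the linear model pred = W*X + b, and the testing block re-evaluates the same expression on test_X/test_Y; with n = train_X.shape[0], a sketch of the cost the graph computes is

\[
\text{cost} = \frac{1}{2n}\sum_{i=1}^{n}\bigl(W x_i + b - y_i\bigr)^2 .
\]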
@@ -6,7 +6,6 @@
   </PropertyGroup>
   <ItemGroup>
-    <PackageReference Include="Newtonsoft.Json" Version="12.0.1" />
     <PackageReference Include="NumSharp" Version="0.7.3" />
     <PackageReference Include="TensorFlow.NET" Version="0.3.0" />
   </ItemGroup>
@@ -23,6 +23,23 @@ namespace TensorFlowNET.UnitTest
             {
                 var new_saver = tf.train.import_meta_graph("C:/tmp/my-model.meta");
             });
+            //tf.train.export_meta_graph(filename: "linear_regression.meta.bin");
+            // import meta
+            /*tf.train.import_meta_graph("linear_regression.meta.bin");
+            var cost = graph.OperationByName("truediv").output;
+            var pred = graph.OperationByName("Add").output;
+            var optimizer = graph.OperationByName("GradientDescent");
+            var X = graph.OperationByName("Placeholder").output;
+            var Y = graph.OperationByName("Placeholder_1").output;
+            var W = graph.OperationByName("weight").output;
+            var b = graph.OperationByName("bias").output;*/
+            /*var text = JsonConvert.SerializeObject(graph, new JsonSerializerSettings
+            {
+                Formatting = Formatting.Indented
+            });*/
         }
         public void ImportSavedModel()