diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs
index ffb84d4f..0525c6ec 100644
--- a/src/TensorFlowNET.Core/APIs/tf.nn.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs
@@ -157,6 +157,9 @@ namespace Tensorflow
             });
         }
 
+        public Tensor l2_loss(Tensor t, string name = null)
+            => nn_ops.l2_loss(t, name: name);
+
         /// <summary>
         /// Local Response Normalization.
         /// </summary>
diff --git a/src/TensorFlowNET.Core/Operations/nn_ops.cs b/src/TensorFlowNET.Core/Operations/nn_ops.cs
index f6efe229..307b1f8a 100644
--- a/src/TensorFlowNET.Core/Operations/nn_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/nn_ops.cs
@@ -128,6 +128,9 @@ namespace Tensorflow
             return _softmax(logits, gen_nn_ops.softmax, axis, name);
         }
 
+        public static Tensor l2_loss(Tensor t, string name = null)
+            => tf.Context.ExecuteOp("L2Loss", name, new ExecuteOpArgs(t));
+
         public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null)
         {
             return tf_with(ops.name_scope(name, "LeakyRelu", new { features, alpha }), scope =>
diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ActivationFunctionTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ActivationFunctionTest.cs
index 6f816d8f..bf8e1cbf 100644
--- a/test/TensorFlowNET.UnitTest/ManagedAPI/ActivationFunctionTest.cs
+++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ActivationFunctionTest.cs
@@ -2,7 +2,7 @@
 using Tensorflow;
 using static Tensorflow.Binding;
 
-namespace TensorFlowNET.UnitTest.nn_test
+namespace TensorFlowNET.UnitTest.NeuralNetwork
 {
     [TestClass]
     public class ActivationFunctionTest : EagerModeTestBase
diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/NeuralNetworkTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/NeuralNetworkTest.cs
new file mode 100644
index 00000000..f1b9f08a
--- /dev/null
+++ b/test/TensorFlowNET.UnitTest/ManagedAPI/NeuralNetworkTest.cs
@@ -0,0 +1,18 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using static Tensorflow.Binding;
+using Tensorflow.NumPy;
+
+namespace TensorFlowNET.UnitTest.NeuralNetwork
+{
+    [TestClass]
+    public class NeuralNetworkTest : EagerModeTestBase
+    {
+        [TestMethod]
+        public void l2_loss()
+        {
+            var x = tf.Variable(np.array(new[,] { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } }), dtype: tf.float32);
+            var l2 = tf.nn.l2_loss(x);
+            Assert.AreEqual(l2.numpy(), 102f);
+        }
+    }
+}
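
Note (not part of the patch): the `L2Loss` kernel returns half the sum of squared tensor elements, i.e. sum(t ** 2) / 2, which is why the unit test above expects (1 + 4 + 9 + 16 + 25 + 36 + 49 + 64) / 2 = 204 / 2 = 102 for the 2x4 input. A minimal usage sketch of the new `tf.nn.l2_loss` wrapper, cross-checked against a plain C# sum of squares; the `tf.constant` overload, eager-mode defaults, and the NDArray-to-float cast are assumed from the existing TensorFlow.NET API rather than introduced by this change:

    using System;
    using System.Linq;
    using static Tensorflow.Binding;

    // l2_loss: half the sum of squares of all elements.
    var x = tf.constant(new[,] { { 1f, 2f, 3f, 4f }, { 5f, 6f, 7f, 8f } });
    var l2 = tf.nn.l2_loss(x);

    // Independent cross-check in plain C#: sum(v * v) / 2.
    var expected = new float[] { 1, 2, 3, 4, 5, 6, 7, 8 }
        .Select(v => v * v)
        .Sum() / 2f;                        // 204 / 2 = 102

    Console.WriteLine((float)l2.numpy());   // 102
    Console.WriteLine(expected);            // 102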