@@ -0,0 +1,9 @@ | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
namespace Tensorflow.Keras.ArgsDefinition {
    // Constructor arguments for the Softmax layer.
    public class SoftmaxArgs : LayerArgs {
        // Axis along which softmax normalization is applied; -1 (default) = last axis.
        public Axis axis { get; set; } = -1;
    }
}
@@ -24,12 +24,10 @@ namespace Tensorflow.Keras.Layers { | |||||
} | } | ||||
protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
    // ELU: x for x > 0, alpha * (exp(x) - 1) otherwise (applied element-wise).
    Tensor x = inputs;
    Tensor negativeBranch = tf.multiply(alpha, tf.sub(tf.exp(x), 1f));
    return tf.where(x > 0f, x, negativeBranch);
}
// Element-wise activation: output shape is identical to the input shape.
public override Shape ComputeOutputShape ( Shape input_shape )
    => input_shape;
@@ -0,0 +1,24 @@ | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
using Tensorflow.Keras.ArgsDefinition; | |||||
using Tensorflow.Keras.Engine; | |||||
using static Tensorflow.Binding; | |||||
namespace Tensorflow.Keras.Layers { | |||||
// Exponential activation layer: element-wise exp(x).
public class Exponential : Layer {
    public Exponential ( LayerArgs args ) : base(args) {
        // Exponential takes no configuration.
    }

    protected override void build ( Tensors inputs ) {
        // No weights to create; mark the layer as built.
        built = true;
    }

    protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
        Tensor x = inputs;
        return tf.exp(x);
    }

    // Element-wise activation: shape passes through unchanged.
    public override Shape ComputeOutputShape ( Shape input_shape )
        => input_shape;
}
} |
@@ -0,0 +1,22 @@ | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
using Tensorflow.Keras.ArgsDefinition; | |||||
using Tensorflow.Keras.Engine; | |||||
using static Tensorflow.Binding; | |||||
namespace Tensorflow.Keras.Layers { | |||||
// Hard sigmoid activation layer: a cheap piecewise-linear approximation of sigmoid.
public class HardSigmoid : Layer {
    public HardSigmoid ( LayerArgs args ) : base(args) {
        // Hard sigmoid takes no configuration.
    }

    protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
        Tensor x = inputs;
        // hard_sigmoid(x) = clip(0.2 * x + 0.5, 0, 1)
        Tensor linear = tf.add(tf.multiply(x, 0.2f), 0.5f);
        return tf.clip_by_value(linear, 0f, 1f);
    }

    // Element-wise activation: shape passes through unchanged.
    public override Shape ComputeOutputShape ( Shape input_shape )
        => input_shape;
}
} |
@@ -23,7 +23,9 @@ namespace Tensorflow.Keras.Layers { | |||||
} | } | ||||
protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
    // SELU: scale * x for x > 0, scale * alpha * (exp(x) - 1) otherwise.
    Tensor x = inputs;
    Tensor positiveBranch = tf.multiply(scale, x);
    Tensor negativeBranch = tf.multiply(scale, tf.multiply(alpha, tf.sub(tf.exp(x), 1f)));
    return tf.where(x > 0f, positiveBranch, negativeBranch);
}
public override Shape ComputeOutputShape ( Shape input_shape ) { | public override Shape ComputeOutputShape ( Shape input_shape ) { | ||||
return input_shape; | return input_shape; | ||||
@@ -0,0 +1,24 @@ | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
using Tensorflow.Keras.ArgsDefinition; | |||||
using Tensorflow.Keras.Engine; | |||||
using static Tensorflow.Binding; | |||||
namespace Tensorflow.Keras.Layers { | |||||
// Softmax activation layer: normalizes inputs into a probability distribution
// along the configured axis.
public class Softmax : Layer {
    // Axis along which the normalization is applied (from SoftmaxArgs).
    Axis axis;

    public Softmax ( SoftmaxArgs args ) : base(args) {
        axis = args.axis;
    }

    protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
        Tensor x = inputs;
        // Subtract the per-axis maximum before exponentiating for numerical
        // stability; softmax is invariant to this shift.
        Tensor shifted = tf.sub(x, tf.reduce_max(x, axis: this.axis, keepdims: true));
        Tensor numerator = tf.exp(shifted);
        Tensor denominator = tf.reduce_sum(numerator, axis: this.axis, keepdims: true);
        return tf.div(numerator, denominator);
    }

    // Normalization only rescales values: shape passes through unchanged.
    public override Shape ComputeOutputShape ( Shape input_shape )
        => input_shape;
}
} |
@@ -0,0 +1,22 @@ | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
using Tensorflow.Keras.ArgsDefinition; | |||||
using Tensorflow.Keras.Engine; | |||||
using static Tensorflow.Binding; | |||||
namespace Tensorflow.Keras.Layers { | |||||
// Softplus activation layer: a smooth approximation of ReLU.
public class Softplus : Layer {
    public Softplus ( LayerArgs args ) : base(args) {
        // Softplus takes no configuration.
    }

    protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
        Tensor x = inputs;
        // softplus(x) = log(1 + exp(x)), computed in the numerically stable form
        //   max(x, 0) + log(1 + exp(-|x|)).
        // The naive log(exp(x) + 1) overflows float32 exp() for x >~ 89 and
        // returns +inf instead of x; the stable form agrees with it everywhere
        // in the representable range.
        return tf.add(
            tf.maximum(x, 0f),
            tf.log(tf.add(tf.exp(tf.negative(tf.abs(x))), 1f)));
    }

    // Element-wise activation: shape passes through unchanged.
    public override Shape ComputeOutputShape ( Shape input_shape ) {
        return input_shape;
    }
}
} |
@@ -0,0 +1,22 @@ | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
using Tensorflow.Keras.ArgsDefinition; | |||||
using Tensorflow.Keras.Engine; | |||||
using static Tensorflow.Binding; | |||||
namespace Tensorflow.Keras.Layers { | |||||
// Softsign activation layer: x / (1 + |x|), a gentler-saturating tanh alternative.
public class Softsign : Layer {
    public Softsign ( LayerArgs args ) : base(args) {
        // Softsign takes no configuration.
    }

    protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
        Tensor x = inputs;
        // softsign(x) = x / (1 + |x|)
        Tensor denominator = tf.add(1f, tf.abs(x));
        return tf.div(x, denominator);
    }

    // Element-wise activation: shape passes through unchanged.
    public override Shape ComputeOutputShape ( Shape input_shape )
        => input_shape;
}
} |
@@ -0,0 +1,23 @@ | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
using Tensorflow.Keras.ArgsDefinition; | |||||
using Tensorflow.Keras.Engine; | |||||
using static Tensorflow.Binding; | |||||
namespace Tensorflow.Keras.Layers { | |||||
// Swish activation layer: x * sigmoid(x), written as x / (1 + exp(-x)).
public class Swish : Layer {
    public Swish ( LayerArgs args ) : base(args) {
        // Swish takes no configuration.
    }

    protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
        Tensor x = inputs;
        // swish(x) = x / (1 + exp(-x))
        Tensor denominator = tf.add(1f, tf.exp(tf.negative(x)));
        return tf.div(x, denominator);
    }

    // Element-wise activation: shape passes through unchanged.
    public override Shape ComputeOutputShape ( Shape input_shape )
        => input_shape;
}
} |
@@ -0,0 +1,22 @@ | |||||
using System; | |||||
using System.Collections.Generic; | |||||
using System.Text; | |||||
using Tensorflow.Keras.ArgsDefinition; | |||||
using Tensorflow.Keras.Engine; | |||||
using static Tensorflow.Binding; | |||||
namespace Tensorflow.Keras.Layers { | |||||
// Tanh activation layer: element-wise hyperbolic tangent.
public class Tanh : Layer {
    public Tanh ( LayerArgs args ) : base(args) {
        // Tanh takes no configuration.
    }

    protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
        Tensor input = inputs;
        return tf.tanh(input);
    }

    // Element-wise activation: shape passes through unchanged.
    public override Shape ComputeOutputShape ( Shape input_shape )
        => input_shape;
}
} |
@@ -0,0 +1,22 @@ | |||||
using Tensorflow.NumPy; | |||||
using System.Collections.Generic; | |||||
using Tensorflow.Keras.ArgsDefinition; | |||||
using Tensorflow.Keras.Engine; | |||||
using static Tensorflow.Binding; | |||||
using static Tensorflow.KerasApi; | |||||
namespace Tensorflow.Keras.Layers { | |||||
// Factory methods exposing the activation layers through keras.layers.
public partial class LayersApi {
    // NOTE(review): Keras documents ELU's default alpha as 1.0; this default of
    // 0.1 differs — confirm it is intentional. Left unchanged here because
    // altering the default would silently change existing callers' behavior.
    public ELU ELU ( float alpha = 0.1f )
        => new ELU(new ELUArgs { Alpha = alpha });

    public Exponential Exponential ()
        => new Exponential(new LayerArgs { });

    public HardSigmoid HardSigmoid ()
        => new HardSigmoid(new LayerArgs { });

    public SELU SELU ()
        => new SELU(new LayerArgs { });

    public Softmax Softmax ( Axis axis )
        => new Softmax(new SoftmaxArgs { axis = axis });

    public Softplus Softplus ()
        => new Softplus(new LayerArgs { });

    public Softsign Softsign ()
        => new Softsign(new LayerArgs { });

    public Swish Swish ()
        => new Swish(new LayerArgs { });

    public Tanh Tanh ()
        => new Tanh(new LayerArgs { });
}
} |
@@ -1,22 +1,88 @@ | |||||
using Microsoft.VisualStudio.TestTools.UnitTesting; | using Microsoft.VisualStudio.TestTools.UnitTesting; | ||||
using System; | using System; | ||||
using System.Collections.Generic; | using System.Collections.Generic; | ||||
using System.Text; | |||||
using static Tensorflow.Binding; | |||||
using Tensorflow.NumPy; | using Tensorflow.NumPy; | ||||
using static Tensorflow.KerasApi; | using static Tensorflow.KerasApi; | ||||
using Tensorflow; | using Tensorflow; | ||||
namespace TensorFlowNET.Keras.UnitTest {
    /// <summary>
    /// Tests each activation layer against values precomputed with the
    /// reference Keras implementations. All assertions use the uniform pattern
    /// Assert.AreEqual(expected, output.numpy()) — the original mixed
    /// expected.numpy() and bare expected, which made the comparisons
    /// inconsistent across tests.
    /// </summary>
    [TestClass]
    public class ActivationTest : EagerModeTestBase {
        [TestMethod]
        public void LeakyReLU () {
            var layer = keras.layers.LeakyReLU();
            Tensor output = layer.Apply(np.array(-3.0f, -1.0f, 0.0f, 2.0f));
            // Uses the base-class Equal helper for element-wise float comparison.
            Equal(new[] { -0.9f, -0.3f, 0.0f, 2.0f }, output.ToArray<float>());
        }

        [TestMethod]
        public void ELU () {
            Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
            Tensor output = keras.layers.ELU().Apply(input);
            // Default alpha is 0.1f, so negative inputs map to 0.1 * (exp(x) - 1).
            NDArray expected = new NDArray(new float[] { -0.0950213f, -0.08646648f, -0.06321206f, 0f, 1f, 2f });
            Assert.AreEqual(expected, output.numpy());
        }

        [TestMethod]
        public void SELU () {
            Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
            Tensor output = keras.layers.SELU().Apply(input);
            NDArray expected = new NDArray(new float[] { -1.6705688f, -1.5201665f, -1.1113307f, 0f, 1.050701f, 2.101402f });
            Assert.AreEqual(expected, output.numpy());
        }

        [TestMethod]
        public void Softmax () {
            Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
            Tensor output = keras.layers.Softmax(new Axis(-1)).Apply(input);
            NDArray expected = new NDArray(new float[] { 0.0042697787f, 0.011606461f, 0.031549633f, 0.085760795f, 0.23312202f, 0.6336913f });
            Assert.AreEqual(expected, output.numpy());
        }

        [TestMethod]
        public void Softplus () {
            Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
            Tensor output = keras.layers.Softplus().Apply(input);
            NDArray expected = new NDArray(new float[] { 0.04858733f, 0.12692805f, 0.31326166f, 0.6931472f, 1.3132616f, 2.126928f });
            Assert.AreEqual(expected, output.numpy());
        }

        [TestMethod]
        public void Softsign () {
            Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
            Tensor output = keras.layers.Softsign().Apply(input);
            NDArray expected = new NDArray(new float[] { -0.75f, -0.66666667f, -0.5f, 0f, 0.5f, 0.66666667f });
            Assert.AreEqual(expected, output.numpy());
        }

        [TestMethod]
        public void Exponential () {
            Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
            Tensor output = keras.layers.Exponential().Apply(input);
            NDArray expected = new NDArray(new float[] { 0.049787067f, 0.13533528f, 0.36787945f, 1f, 2.7182817f, 7.389056f });
            Assert.AreEqual(expected, output.numpy());
        }

        [TestMethod]
        public void HardSigmoid () {
            Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
            Tensor output = keras.layers.HardSigmoid().Apply(input);
            // Mathematically [0, 0.1, 0.3, 0.5, 0.7, 0.9], but -2 * 0.2 + 0.5
            // rounds to 0.099999994 in float32, so the expected value mirrors
            // the observed float32 result.
            NDArray expected = new NDArray(new float[] { 0f, 0.099999994f, 0.3f, 0.5f, 0.7f, 0.9f });
            Assert.AreEqual(expected, output.numpy());
        }

        [TestMethod]
        public void Swish () {
            Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
            Tensor output = keras.layers.Swish().Apply(input);
            NDArray expected = new NDArray(new float[] { -0.14227762f, -0.23840584f, -0.26894143f, 0f, 0.7310586f, 1.761594f });
            Assert.AreEqual(expected, output.numpy());
        }
    }
}