
Added all keras activations

tags/v0.12
Eli Belash committed 6 years ago · commit 074d06dc7f
4 changed files with 229 additions and 10 deletions
  1. src/TensorFlowNET.Core/APIs/tf.layers.cs (+16 -1)
  2. src/TensorFlowNET.Core/APIs/tf.math.cs (+32 -0)
  3. src/TensorFlowNET.Core/APIs/tf.tensor.cs (+2 -2)
  4. src/TensorFlowNET.Core/Operations/Activation/gen_nn_ops.activations.cs (+179 -7)

src/TensorFlowNET.Core/APIs/tf.layers.cs (+16 -1)

@@ -144,6 +144,20 @@ namespace Tensorflow
return layer.apply(inputs);
}

/// <summary>
/// Densely-connected layer class, aka fully-connected.<br/>
/// `outputs = activation(inputs * kernel + bias)`
/// </summary>
/// <param name="inputs">Tensor input.</param>
/// <param name="units">Python integer, dimensionality of the output space.</param>
/// <param name="activation">Activation function to apply; null keeps the layer linear.</param>
/// <param name="use_bias">Boolean, whether the layer uses a bias.</param>
/// <param name="kernel_initializer">Initializer function for the weight matrix.</param>
/// <param name="bias_initializer">Initializer function for the bias.</param>
/// <param name="trainable">Boolean, whether the layer's variables should be trainable.</param>
/// <param name="name">String, the name of the layer.</param>
/// <param name="reuse">Boolean, whether to reuse the weights of a previous layer by the same name.</param>
/// <returns>Output tensor with the same shape as `inputs`, except the last dimension is of size `units`.</returns>
public Tensor dense(Tensor inputs,
    int units,
    IActivation activation = null,
@@ -160,7 +174,8 @@ namespace Tensorflow
    var layer = new Dense(units, activation,
        use_bias: use_bias,
        bias_initializer: bias_initializer,
-       kernel_initializer: kernel_initializer);
        kernel_initializer: kernel_initializer,
        trainable: trainable);

    return layer.apply(inputs);
}
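
For context, a minimal usage sketch of the new trainable pass-through; the placeholder input, its shape, and the tf.layers accessor are illustrative assumptions, not part of this commit:

using Tensorflow;
using Tensorflow.Operations.Activation;
using static Tensorflow.Binding;

// hypothetical 784-feature input; dtype and shape are assumptions
var x = tf.placeholder(TF_DataType.TF_FLOAT, new TensorShape(-1, 784));

// trainable: false now reaches the Dense layer, keeping its kernel and
// bias out of the trainable-variables collection
var frozen = tf.layers.dense(x, 10, activation: new tanh(), trainable: false);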


src/TensorFlowNET.Core/APIs/tf.math.cs (+32 -0)

@@ -14,6 +14,8 @@
limitations under the License.
******************************************************************************/

using Tensorflow.Operations;

namespace Tensorflow
{
public partial class tensorflow
@@ -211,6 +213,36 @@ namespace Tensorflow
/// <returns></returns>
public Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
=> gen_math_ops._clip_by_value(t, clip_value_min, clip_value_max);
/// <summary>
/// Clips tensor values to a specified min and max.
/// </summary>
/// <param name="t">
/// A <c>Tensor</c>.
/// </param>
/// <param name="clip_value_min">
/// A 0-D (scalar) <c>Tensor</c>, or a <c>Tensor</c> with the same shape
/// as <c>t</c>. The minimum value to clip by.
/// </param>
/// <param name="clip_value_max">
/// A 0-D (scalar) <c>Tensor</c>, or a <c>Tensor</c> with the same shape
/// as <c>t</c>. The maximum value to clip by.
/// </param>
/// <param name="name">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ClipByValue'.
/// </param>
/// <returns>
/// A clipped <c>Tensor</c> with the same shape as input 't'.
/// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
/// </returns>
/// <remarks>
/// Given a tensor <c>t</c>, this operation returns a tensor of the same type and
/// shape as <c>t</c> with its values clipped to <c>clip_value_min</c> and <c>clip_value_max</c>.
/// Any values less than <c>clip_value_min</c> are set to <c>clip_value_min</c>. Any values
/// greater than <c>clip_value_max</c> are set to <c>clip_value_max</c>.
/// </remarks>
public Tensor clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = "ClipByValue")
    => gen_ops.clip_by_value(t, clip_value_min, clip_value_max, name);

public Tensor sub(Tensor a, Tensor b)
=> gen_math_ops.sub(a, b);
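
A small sketch of the clipping semantics documented above (the tf.constant calls are assumed from this repo's API surface; values are illustrative):

var t = tf.constant(new float[] { -2f, 0f, 3f, 10f });
var clipped = tf.clip_by_value(t,
    tf.constant(0f),   // clip_value_min
    tf.constant(6f));  // clip_value_max
// evaluating clipped yields [0, 0, 3, 6]: -2 is raised to the minimum,
// 10 is lowered to the maximum, in-range values pass through unchanged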


src/TensorFlowNET.Core/APIs/tf.tensor.cs (+2 -2)

@@ -18,8 +18,8 @@ namespace Tensorflow
{
public partial class tensorflow
{
-public Tensor convert_to_tensor(object value,
-    string name = null) => ops.convert_to_tensor(value, name: name);
public Tensor convert_to_tensor(object value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid)
    => ops.convert_to_tensor(value, dtype, name, preferred_dtype);

public Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides = null,
    int begin_mask = 0,
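
And a short sketch of calling the widened convert_to_tensor overload above; because the new parameters default to DtInvalid, existing call sites compile unchanged (values illustrative):

var a = tf.convert_to_tensor(3.0);                               // dtype inferred from the value
var b = tf.convert_to_tensor(3.0, dtype: TF_DataType.TF_FLOAT);  // dtype pinned to float32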


src/TensorFlowNET.Core/Operations/Activation/gen_nn_ops.activations.cs (+179 -7)

@@ -14,20 +14,192 @@
limitations under the License.
******************************************************************************/

using System;
using static Tensorflow.Binding;

namespace Tensorflow.Operations.Activation
{
    public class sigmoid : IActivation
    {
        public Tensor Activate(Tensor x, string name = null)
        {
            return tf.sigmoid(x);
        }
    }

    public class tanh : IActivation
    {
        public Tensor Activate(Tensor x, string name = null)
        {
            return tf.tanh(x);
        }
    }

    public class leakyrelu : IActivation
    {
        private readonly float _alpha;

        public leakyrelu(float alpha = 0.3f)
        {
            _alpha = alpha;
        }

        public Tensor Activate(Tensor x, string name = null)
        {
            return nn_ops.leaky_relu(x, _alpha);
        }
    }

    public class elu : IActivation
    {
        private readonly float _alpha;

        public elu(float alpha = 0.1f)
        {
            _alpha = alpha;
        }

        public Tensor Activate(Tensor x, string name = null)
        {
            var res = gen_ops.elu(x);
            // gen_ops.elu computes ELU with alpha = 1, so the result can only be
            // returned as-is in that case; otherwise scale the negative part.
            if (Math.Abs(_alpha - 1f) < 0.00001f)
            {
                return res;
            }

            return array_ops.@where(x > 0, res, _alpha * res);
        }
    }

    public class softmax : IActivation
    {
        private readonly int _axis;

        /// <summary>Initializes a new instance of the <see cref="softmax"/> class.</summary>
        public softmax(int axis = -1)
        {
            _axis = axis;
        }

        public Tensor Activate(Tensor x, string name = null)
        {
            return nn_ops.softmax(x, _axis);
        }
    }

    public class softplus : IActivation
    {
        public Tensor Activate(Tensor x, string name = null)
        {
            return gen_ops.softplus(x);
        }
    }

    public class softsign : IActivation
    {
        public Tensor Activate(Tensor x, string name = null)
        {
            return gen_ops.softsign(x);
        }
    }

    public class linear : IActivation
    {
        public Tensor Activate(Tensor x, string name = null)
        {
            return x;
        }
    }

    public class exponential : IActivation
    {
        public Tensor Activate(Tensor x, string name = null)
        {
            return tf.exp(x, name: name);
        }
    }

    public class relu : IActivation
    {
-       public Tensor Activate(Tensor features, string name = null)
-       {
-           OpDefLibrary _op_def_lib = new OpDefLibrary();
-           var _op = _op_def_lib._apply_op_helper("Relu", name: name, args: new
-           {
-               features
-           });
-           return _op.outputs[0];
-       }
        private readonly float _threshold;
        private readonly float _alpha;
        private readonly float? _maxValue;

        public relu(float threshold = 0f, float alpha = 0.2f, float? max_value = null)
        {
            _threshold = threshold;
            _alpha = alpha;
            _maxValue = max_value;
        }

        public Tensor Activate(Tensor x, string name = null)
        {
            // based on keras/backend.py
            if (Math.Abs(_alpha) > 0.000001f)
            {
                if (!_maxValue.HasValue && Math.Abs(_threshold) < 0.0001)
                {
                    return nn_ops.leaky_relu(x, _alpha);
                }
            }

            Tensor negative_part;
            if (Math.Abs(_threshold) > 0.000001f)
            {
                negative_part = gen_ops.relu(-x + _threshold);
            }
            else
            {
                negative_part = gen_ops.relu(-x);
            }

            bool clip_max = _maxValue.HasValue;
            if (Math.Abs(_threshold) > 0.000001f)
            {
                // drop values below the threshold: x * (x > threshold)
                x = x * math_ops.cast(tf.greater(x, _threshold), TF_DataType.TF_FLOAT);
            }
            else if (clip_max && Math.Abs(_maxValue.Value - 6f) < 0.0001f)
            {
                x = gen_ops.relu6(x);
                clip_max = false; // relu6 already clips to [0, 6]
            }
            else
            {
                x = gen_ops.relu(x);
            }

            if (clip_max)
            {
                var maxval = constant_op.constant(_maxValue.Value, x.dtype.as_base_dtype());
                var zero = constant_op.constant(0.0f, x.dtype.as_base_dtype());
                x = gen_ops.clip_by_value(x, zero, maxval);
            }

            if (Math.Abs(_alpha) > 0.00001)
            {
                var a = constant_op.constant(_alpha, x.dtype.as_base_dtype());
                x -= a * negative_part;
            }

            return x;
        }
    }

    public class selu : IActivation
    {
        public Tensor Activate(Tensor x, string name = null)
        {
            // fixed-point constants from the SELU paper (Klambauer et al., 2017)
            const float alpha = 1.6732632423543772848170429916717f;
            const float scale = 1.0507009873554804934193349852946f;
            return scale * new elu(alpha).Activate(x, name);
        }
    }

    public class hard_sigmoid : IActivation
    {
        public Tensor Activate(Tensor x, string name = null)
        {
            // piecewise-linear approximation of sigmoid: clip(0.2 * x + 0.5, 0, 1)
            x = (0.2f * x) + 0.5f;
            var zero = tf.convert_to_tensor(0.0f, x.dtype.as_base_dtype());
            var one = tf.convert_to_tensor(1.0f, x.dtype.as_base_dtype());
            return tf.clip_by_value(x, zero, one);
        }
    }
}
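
To tie the new classes together, a hedged sketch of how they are meant to compose with the dense layer from the first file; the placeholder input and layer sizes are illustrative assumptions:

using Tensorflow;
using Tensorflow.Operations.Activation;
using static Tensorflow.Binding;

var x = tf.placeholder(TF_DataType.TF_FLOAT, new TensorShape(-1, 128));

// relu with alpha: 0 and max_value: 6 routes through gen_ops.relu6 above
var h = tf.layers.dense(x, 64, activation: new relu(alpha: 0f, max_value: 6f));

// selu wraps scale * elu(alpha) using the fixed SELU constants
var y = tf.layers.dense(h, 10, activation: new selu());

// activations can also be invoked directly, outside any layer
var p = new hard_sigmoid().Activate(y);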
