using System;
using System.Collections.Generic;
using System.Linq;
using Tensorflow.Framework.Models;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.ArgsDefinition.Core;
using Tensorflow.Keras.ArgsDefinition.Rnn;
using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Layers.Rnn;
using Tensorflow.NumPy;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;
namespace Tensorflow.Keras.Layers
{
public partial class LayersApi : ILayersApi
{
public IPreprocessing preprocessing { get; } = new Preprocessing();
/// <summary>
/// Layer that normalizes its inputs.
/// Batch normalization applies a transformation that maintains the mean output close to 0 and the output standard deviation close to 1.
/// Importantly, batch normalization works differently during training and during inference.
/// http://arxiv.org/abs/1502.03167
/// </summary>
/// <param name="axis">The axis that should be normalized (typically the features axis).
/// For instance, after a Conv2D layer with data_format="channels_first", set axis=1 in BatchNormalization.</param>
/// <param name="momentum">Momentum for the moving average.</param>
/// <param name="epsilon">Small float added to variance to avoid dividing by zero.</param>
/// <param name="center">If true, add offset of beta to normalized tensor. If false, beta is ignored.</param>
/// <param name="scale">If true, multiply by gamma. If false, gamma is not used. When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling will be done by the next layer.</param>
/// <param name="beta_initializer">Initializer for the beta weight.</param>
/// <param name="gamma_initializer">Initializer for the gamma weight.</param>
/// <param name="moving_mean_initializer">Initializer for the moving mean.</param>
/// <param name="moving_variance_initializer">Initializer for the moving variance.</param>
/// <param name="trainable">Boolean, if true the variables will be marked as trainable.</param>
/// <param name="name">Layer name.</param>
/// <param name="renorm">Whether to use Batch Renormalization. This adds extra variables during training. Inference is the same for either value of this parameter.</param>
/// <param name="renorm_momentum">Momentum used to update the moving means and standard deviations with renorm.
/// Unlike momentum, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates).
/// Note that momentum is still applied to get the means and variances for inference.</param>
/// <returns>Tensor of the same shape as input.</returns>
public ILayer BatchNormalization(int axis = -1,
float momentum = 0.99f,
float epsilon = 0.001f,
bool center = true,
bool scale = true,
IInitializer beta_initializer = null,
IInitializer gamma_initializer = null,
IInitializer moving_mean_initializer = null,
IInitializer moving_variance_initializer = null,
bool trainable = true,
string name = null,
bool renorm = false,
float renorm_momentum = 0.99f)
=> new BatchNormalization(new BatchNormalizationArgs
{
Axis = axis,
Momentum = momentum,
Epsilon = epsilon,
Center = center,
Scale = scale,
BetaInitializer = beta_initializer ?? tf.zeros_initializer,
GammaInitializer = gamma_initializer ?? tf.ones_initializer,
MovingMeanInitializer = moving_mean_initializer ?? tf.zeros_initializer,
MovingVarianceInitializer = moving_variance_initializer ?? tf.ones_initializer,
Renorm = renorm,
RenormMomentum = renorm_momentum,
Trainable = trainable,
Name = name
});
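// Usage sketch (illustrative only, not part of this API surface): the common
// conv -> batch-norm -> activation pattern, built with the `keras.layers` and
// `keras.Model` entry points. Shapes and layer choices below are assumptions.
//
//     var inputs = keras.layers.Input(shape: (28, 28, 1));
//     var x = keras.layers.Conv2D(32, (3, 3), activation: "linear").Apply(inputs);
//     x = keras.layers.BatchNormalization().Apply(x);
//     x = keras.layers.LeakyReLU().Apply(x);
//     var model = keras.Model(inputs, x);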
/// <summary>
/// 1D convolution layer (e.g. temporal convolution).
/// This layer creates a convolution kernel that is convolved with the layer input over a single spatial (or temporal) dimension to produce a tensor of outputs. If use_bias is true, a bias vector is created and added to the outputs. Finally, if activation is not null, it is applied to the outputs as well.
/// </summary>
/// <param name="filters">Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).</param>
/// <param name="kernel_size">An integer specifying the width of the 1D convolution window.</param>
/// <param name="strides">An integer specifying the stride of the convolution window. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.</param>
/// <param name="padding">One of "valid" or "same" (case-insensitive). "valid" means no padding. "same" results in padding evenly to the left/right or up/down of the input such that the output has the same height/width dimension as the input.</param>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, steps, features) while channels_first corresponds to inputs with shape (batch_size, features, steps).</param>
/// <param name="dilation_rate">An integer specifying the dilation rate to use for dilated convolution. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1.</param>
/// <param name="groups">A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with filters / groups filters. The output is the concatenation of all the groups' results along the channel axis. Input channels and filters must both be divisible by groups.</param>
/// <param name="activation">Activation function to use. If you don't specify anything, no activation is applied (see keras.activations).</param>
/// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
/// <param name="kernel_initializer">Initializer for the kernel weights matrix (see keras.initializers).</param>
/// <param name="bias_initializer">Initializer for the bias vector (see keras.initializers).</param>
/// <returns>A tensor of rank 3 representing activation(conv1d(inputs, kernel) + bias).</returns>
public ILayer Conv1D(int filters,
Shape kernel_size,
int strides = 1,
string padding = "valid",
string data_format = "channels_last",
int dilation_rate = 1,
int groups = 1,
string activation = null,
bool use_bias = true,
string kernel_initializer = "glorot_uniform",
string bias_initializer = "zeros")
=> new Conv1D(new Conv1DArgs
{
Rank = 1,
Filters = filters,
KernelSize = kernel_size ?? new Shape(1, 5),
Strides = strides,
Padding = padding,
DataFormat = data_format,
DilationRate = dilation_rate,
Groups = groups,
UseBias = use_bias,
Activation = keras.activations.GetActivationFromName(activation),
KernelInitializer = GetInitializerByName(kernel_initializer),
BiasInitializer = GetInitializerByName(bias_initializer)
});
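// Usage sketch (illustrative only): a 1D convolution over a univariate time
// series of length 100; `kernel_size: 3` relies on the implicit int-to-Shape
// conversion. Input shape is an assumption for the example.
//
//     var inputs = keras.layers.Input(shape: (100, 1));
//     var x = keras.layers.Conv1D(16, kernel_size: 3, activation: "relu").Apply(inputs);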
/// <summary>
/// 2D convolution layer (e.g. spatial convolution over images).
/// This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs.
/// If use_bias is true, a bias vector is created and added to the outputs. Finally, if activation is not null, it is applied to the outputs as well.
/// </summary>
/// <param name="filters">Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).</param>
/// <param name="kernel_size">An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions.</param>
/// <param name="strides">An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.</param>
/// <param name="padding">One of "valid" or "same" (case-insensitive). "valid" means no padding. "same" results in padding evenly to the left/right or up/down of the input such that the output has the same height/width dimension as the input.</param>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, then it will be channels_last.</param>
/// <param name="dilation_rate">An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1.</param>
/// <param name="groups">A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with filters / groups filters. The output is the concatenation of all the groups' results along the channel axis. Input channels and filters must both be divisible by groups.</param>
/// <param name="activation">Activation function to use. If you don't specify anything, no activation is applied (see keras.activations).</param>
/// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
/// <param name="kernel_initializer">Initializer for the kernel weights matrix (see keras.initializers).</param>
/// <param name="bias_initializer">Initializer for the bias vector (see keras.initializers).</param>
/// <param name="kernel_regularizer">Regularizer function applied to the kernel weights matrix (see keras.regularizers).</param>
/// <param name="bias_regularizer">Regularizer function applied to the bias vector (see keras.regularizers).</param>
/// <param name="activity_regularizer">Regularizer function applied to the output of the layer (its "activation") (see keras.regularizers).</param>
/// <returns>A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias).</returns>
public ILayer Conv2D(int filters,
Shape kernel_size = null,
Shape strides = null,
string padding = "valid",
string data_format = null,
Shape dilation_rate = null,
int groups = 1,
Activation activation = null,
bool use_bias = true,
IInitializer kernel_initializer = null,
IInitializer bias_initializer = null,
IRegularizer kernel_regularizer = null,
IRegularizer bias_regularizer = null,
IRegularizer activity_regularizer = null)
=> new Conv2D(new Conv2DArgs
{
Rank = 2,
Filters = filters,
KernelSize = kernel_size ?? (5, 5),
Strides = strides ?? (1, 1),
Padding = padding,
DataFormat = data_format,
DilationRate = dilation_rate ?? (1, 1),
Groups = groups,
UseBias = use_bias,
KernelRegularizer = kernel_regularizer,
KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
BiasInitializer = bias_initializer ?? tf.zeros_initializer,
BiasRegularizer = bias_regularizer,
ActivityRegularizer = activity_regularizer,
Activation = activation ?? keras.activations.Linear,
});
/// <summary>
/// 2D convolution layer (e.g. spatial convolution over images).
/// This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs.
/// If use_bias is true, a bias vector is created and added to the outputs. Finally, if activation is not null, it is applied to the outputs as well.
/// </summary>
/// <param name="filters">Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).</param>
/// <param name="kernel_size">An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions.</param>
/// <param name="strides">An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.</param>
/// <param name="padding">One of "valid" or "same" (case-insensitive). "valid" means no padding. "same" results in padding evenly to the left/right or up/down of the input such that the output has the same height/width dimension as the input.</param>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, then it will be channels_last.</param>
/// <param name="dilation_rate">An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1.</param>
/// <param name="groups">A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with filters / groups filters. The output is the concatenation of all the groups' results along the channel axis. Input channels and filters must both be divisible by groups.</param>
/// <param name="activation">The name of the activation function to use. If you don't specify anything, no activation is applied (see keras.activations).</param>
/// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
/// <param name="kernel_initializer">The name of the initializer for the kernel weights matrix (see keras.initializers).</param>
/// <param name="bias_initializer">The name of the initializer for the bias vector (see keras.initializers).</param>
/// <returns>A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias).</returns>
public ILayer Conv2D(int filters,
Shape kernel_size = null,
Shape strides = null,
string padding = "valid",
string data_format = null,
Shape dilation_rate = null,
int groups = 1,
string activation = null,
bool use_bias = true,
string kernel_initializer = "glorot_uniform",
string bias_initializer = "zeros")
=> new Conv2D(new Conv2DArgs
{
Rank = 2,
Filters = filters,
KernelSize = kernel_size ?? (5, 5),
Strides = strides ?? (1, 1),
Padding = padding,
DataFormat = data_format,
DilationRate = dilation_rate ?? (1, 1),
Groups = groups,
UseBias = use_bias,
KernelInitializer = GetInitializerByName(kernel_initializer),
BiasInitializer = GetInitializerByName(bias_initializer),
Activation = keras.activations.GetActivationFromName(activation)
});
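// Usage sketch (illustrative only): "valid" vs "same" padding on a 32x32 RGB
// input, at stride 1. With "valid" the spatial size shrinks by kernel_size - 1;
// with "same" it is preserved. Output shapes in the comments are assumptions
// following the usual Keras convention.
//
//     var inputs = keras.layers.Input(shape: (32, 32, 3));
//     var v = keras.layers.Conv2D(8, (3, 3), padding: "valid", activation: "relu").Apply(inputs); // -> (None, 30, 30, 8)
//     var s = keras.layers.Conv2D(8, (3, 3), padding: "same", activation: "relu").Apply(inputs);  // -> (None, 32, 32, 8)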
/// <summary>
/// Transposed convolution layer (sometimes called Deconvolution).
/// </summary>
/// <param name="filters">Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).</param>
/// <param name="kernel_size">An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions.</param>
/// <param name="strides">An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.</param>
/// <param name="output_padding">One of "valid" or "same" (case-insensitive). "valid" means no padding. "same" results in padding evenly to the left/right or up/down of the input such that the output has the same height/width dimension as the input.</param>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, then it will be channels_last.</param>
/// <param name="dilation_rate">An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1.</param>
/// <param name="activation">The name of the activation function to use. If you don't specify anything, no activation is applied (see keras.activations).</param>
/// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
/// <param name="kernel_initializer">The name of the initializer for the kernel weights matrix (see keras.initializers).</param>
/// <param name="bias_initializer">The name of the initializer for the bias vector (see keras.initializers).</param>
/// <param name="kernel_regularizer">The name of the regularizer function applied to the kernel weights matrix (see keras.regularizers).</param>
/// <param name="bias_regularizer">The name of the regularizer function applied to the bias vector (see keras.regularizers).</param>
/// <param name="activity_regularizer">The name of the regularizer function applied to the output of the layer (its "activation") (see keras.regularizers).</param>
/// <returns>A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias).</returns>
public ILayer Conv2DTranspose(int filters,
Shape kernel_size = null,
Shape strides = null,
string output_padding = "valid",
string data_format = null,
Shape dilation_rate = null,
string activation = null,
bool use_bias = true,
string kernel_initializer = null,
string bias_initializer = null,
string kernel_regularizer = null,
string bias_regularizer = null,
string activity_regularizer = null)
=> new Conv2DTranspose(new Conv2DArgs
{
Rank = 2,
Filters = filters,
KernelSize = kernel_size ?? (5, 5),
Strides = strides ?? (1, 1),
Padding = output_padding,
DataFormat = data_format,
DilationRate = dilation_rate ?? (1, 1),
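// NOTE: kernel_regularizer, bias_regularizer and activity_regularizer are
// accepted for API parity but are not yet forwarded into Conv2DArgs.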
UseBias = use_bias,
KernelInitializer = GetInitializerByName(kernel_initializer),
BiasInitializer = GetInitializerByName(bias_initializer),
Activation = keras.activations.GetActivationFromName(activation)
});
/// <summary>
/// Just your regular densely-connected NN layer.
///
/// Dense implements the operation: output = activation(dot(input, kernel) + bias) where activation is the
/// element-wise activation function passed as the activation argument, kernel is a weights matrix created by the layer,
/// and bias is a bias vector created by the layer (only applicable if use_bias is true).
/// </summary>
/// <param name="units">Positive integer, dimensionality of the output space.</param>
/// <param name="activation">Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x).</param>
/// <param name="kernel_initializer">Initializer for the kernel weights matrix.</param>
/// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
/// <param name="bias_initializer">Initializer for the bias vector.</param>
/// <param name="input_shape">N-D tensor with shape: (batch_size, ..., input_dim). The most common situation would be a 2D input with shape (batch_size, input_dim).</param>
/// <returns>N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units).</returns>
public ILayer Dense(int units,
Activation activation = null,
IInitializer kernel_initializer = null,
bool use_bias = true,
IInitializer bias_initializer = null,
Shape input_shape = null)
=> new Dense(new DenseArgs
{
Units = units,
Activation = activation ?? keras.activations.Linear,
KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
UseBias = use_bias,
BiasInitializer = bias_initializer ?? (use_bias ? tf.zeros_initializer : null),
InputShape = input_shape
});
/// <summary>
/// Just your regular densely-connected NN layer.
///
/// Dense implements the operation: output = activation(dot(input, kernel) + bias) where activation is the
/// element-wise activation function passed as the activation argument, kernel is a weights matrix created by the layer,
/// and bias is a bias vector created by the layer (only applicable if use_bias is true).
/// </summary>
/// <param name="units">Positive integer, dimensionality of the output space.</param>
/// <returns>N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units).</returns>
public ILayer Dense(int units)
=> new Dense(new DenseArgs
{
Units = units,
Activation = keras.activations.GetActivationFromName("linear")
});
/// <summary>
/// Just your regular densely-connected NN layer.
///
/// Dense implements the operation: output = activation(dot(input, kernel) + bias) where activation is the
/// element-wise activation function passed as the activation argument, kernel is a weights matrix created by the layer,
/// and bias is a bias vector created by the layer (only applicable if use_bias is true).
/// </summary>
/// <param name="units">Positive integer, dimensionality of the output space.</param>
/// <param name="activation">Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x).</param>
/// <param name="input_shape">N-D tensor with shape: (batch_size, ..., input_dim). The most common situation would be a 2D input with shape (batch_size, input_dim).</param>
/// <returns>N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units).</returns>
public ILayer Dense(int units,
string activation = null,
Shape input_shape = null)
=> new Dense(new DenseArgs
{
Units = units,
Activation = keras.activations.GetActivationFromName(activation),
InputShape = input_shape
});
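// Usage sketch (illustrative only): the three Dense overloads cover the common
// call styles. `keras.activations.Relu` is assumed to be exposed by the
// activations API; activation-by-name goes through GetActivationFromName.
//
//     var d1 = keras.layers.Dense(10);                           // linear activation
//     var d2 = keras.layers.Dense(10, activation: "relu");       // activation selected by name
//     var d3 = keras.layers.Dense(10, keras.activations.Relu);   // activation passed as an object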
/// <summary>
/// Densely-connected layer class. aka fully-connected
/// `outputs = activation(inputs * kernel + bias)`
/// </summary>
/// <param name="inputs"></param>
/// <param name="units">Positive integer, dimensionality of the output space.</param>
/// <param name="activation"></param>
/// <param name="use_bias">Boolean, whether the layer uses a bias.</param>
/// <param name="kernel_initializer"></param>
/// <param name="bias_initializer"></param>
/// <param name="trainable"></param>
/// <param name="name"></param>
/// <param name="reuse"></param>
/// <returns></returns>
public Tensor dense(Tensor inputs,
int units,
Activation activation = null,
bool use_bias = true,
IInitializer kernel_initializer = null,
IInitializer bias_initializer = null,
bool trainable = true,
string name = null,
bool? reuse = null)
{
if (bias_initializer == null)
bias_initializer = tf.zeros_initializer;
var layer = new Dense(new DenseArgs
{
Units = units,
Activation = activation,
UseBias = use_bias,
BiasInitializer = bias_initializer,
KernelInitializer = kernel_initializer,
Trainable = trainable,
Name = name
});
return layer.Apply(inputs);
}
public ILayer EinsumDense(string equation,
Shape output_shape,
string bias_axes,
Activation activation = null,
IInitializer kernel_initializer = null,
IInitializer bias_initializer = null,
IRegularizer kernel_regularizer = null,
IRegularizer bias_regularizer = null,
IRegularizer activity_regularizer = null,
Action kernel_constraint = null,
Action bias_constraint = null) =>
new EinsumDense(new EinsumDenseArgs()
{
Equation = equation,
OutputShape = output_shape,
BiasAxes = bias_axes,
Activation = activation,
KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
BiasInitializer = bias_initializer ?? tf.zeros_initializer,
KernelRegularizer = kernel_regularizer,
BiasRegularizer = bias_regularizer,
ActivityRegularizer = activity_regularizer,
KernelConstraint = kernel_constraint,
BiasConstraint = bias_constraint
});
/// <summary>
/// Applies Dropout to the input.
/// The Dropout layer randomly sets input units to 0 with a frequency of rate at each step during training time,
/// which helps prevent overfitting. Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over all inputs is unchanged.
/// </summary>
/// <param name="rate">Float between 0 and 1. Fraction of the input units to drop.</param>
/// <param name="noise_shape">1D integer tensor representing the shape of the binary dropout mask that will be multiplied with the input. For instance,
/// if your inputs have shape (batch_size, timesteps, features) and you want the dropout mask to be the same for all timesteps,
/// you can use noise_shape=(batch_size, 1, features).</param>
/// <param name="seed">An integer to use as random seed.</param>
/// <returns></returns>
public ILayer Dropout(float rate, Shape noise_shape = null, int? seed = null)
=> new Dropout(new DropoutArgs
{
Rate = rate,
NoiseShape = noise_shape,
Seed = seed
});
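// Usage sketch (illustrative only): dropout is active only during training.
// The `training` parameter of Apply is an assumption about the ILayer
// signature; at inference the layer is an identity function.
//
//     var drop = keras.layers.Dropout(rate: 0.2f);
//     var y = drop.Apply(x, training: true);  // mask applied, survivors scaled by 1/(1 - 0.2)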
/// <summary>
/// Turns positive integers (indexes) into dense vectors of fixed size.
/// This layer can only be used as the first layer in a model.
/// e.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
/// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding
/// </summary>
/// <param name="input_dim">Size of the vocabulary, i.e. maximum integer index + 1.</param>
/// <param name="output_dim">Dimension of the dense embedding.</param>
/// <param name="embeddings_initializer">Initializer for the embeddings matrix (see keras.initializers).</param>
/// <param name="mask_zero"></param>
/// <param name="input_shape"></param>
/// <param name="input_length"></param>
/// <returns></returns>
public ILayer Embedding(int input_dim,
int output_dim,
IInitializer embeddings_initializer = null,
bool mask_zero = false,
Shape input_shape = null,
int input_length = -1)
=> new Embedding(new EmbeddingArgs
{
InputDim = input_dim,
OutputDim = output_dim,
MaskZero = mask_zero,
InputShape = input_shape ?? input_length,
InputLength = input_length,
EmbeddingsInitializer = embeddings_initializer
});
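// Usage sketch (illustrative only, mirroring the [[4], [20]] example above):
// embedding a vocabulary of 1000 tokens into 64 dimensions. The Sequential
// `add` call is assumed from the Sequential model API.
//
//     var model = keras.Sequential();
//     model.add(keras.layers.Embedding(1000, 64, input_length: 10));
//     // input: (batch, 10) integer indices; output: (batch, 10, 64) float vectors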
/// <summary>
/// Flattens the input. Does not affect the batch size.
/// </summary>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
/// channels_last corresponds to inputs with shape (batch, ..., channels) while channels_first corresponds to inputs with shape (batch, channels, ...).
/// It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json.
/// If you never set it, then it will be "channels_last".
/// </param>
/// <returns></returns>
public ILayer Flatten(string data_format = null)
=> new Flatten(new FlattenArgs
{
DataFormat = data_format
});
/// <summary>
/// `Input()` is used to instantiate a Keras tensor.
/// A Keras tensor is a TensorFlow symbolic tensor object, which we augment with certain attributes that allow us
/// to build a Keras model just by knowing the inputs and outputs of the model.
/// </summary>
/// <param name="shape">A shape tuple not including the batch size.</param>
/// <param name="batch_size"></param>
/// <param name="name">An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided.</param>
/// <param name="dtype"></param>
/// <param name="sparse">A boolean specifying whether the placeholder to be created is sparse. Only one of 'ragged' and 'sparse' can be true.
/// Note that, if sparse is false, sparse tensors can still be passed into the input - they will be densified with a default value of 0.
/// </param>
/// <param name="tensor"></param>
/// <param name="ragged">A boolean specifying whether the placeholder to be created is ragged. Only one of 'ragged' and 'sparse' can be true.
/// In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see this guide.
/// </param>
/// <param name="type_spec"></param>
/// <param name="batch_input_shape"></param>
/// <param name="batch_shape"></param>
/// <returns>A tensor.</returns>
public Tensors Input(Shape shape = null,
int batch_size = -1,
string name = null,
TF_DataType dtype = TF_DataType.DtInvalid,
bool sparse = false,
Tensor tensor = null,
bool ragged = false,
TypeSpec type_spec = null,
Shape batch_input_shape = null,
Shape batch_shape = null)
{
if(sparse && ragged)
{
throw new ValueError("Cannot set both `sparse` and `ragged` to `true` in a Keras `Input`.");
}
InputLayerArgs input_layer_config = new()
{
Name = name,
DType = dtype,
Sparse = sparse,
Ragged = ragged,
InputTensor = tensor,
// skip the `type_spec`
};
if(shape is not null && batch_input_shape is not null)
{
throw new ValueError("Only provide the `shape` OR `batch_input_shape` argument "
+ "to Input, not both at the same time.");
}
if(batch_input_shape is null && shape is null && tensor is null && type_spec is null)
{
throw new ValueError("Please provide to Input a `shape` or a `tensor` or a `type_spec` argument. Note that " +
"`shape` does not include the batch dimension.");
}
if(batch_input_shape is not null)
{
shape = batch_input_shape["1:"];
input_layer_config.BatchInputShape = batch_input_shape;
}
else
{
input_layer_config.BatchSize = batch_size;
input_layer_config.InputShape = shape;
}
var input_layer = new InputLayer(input_layer_config);
return input_layer.InboundNodes[0].Outputs;
}
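// Usage sketch (illustrative only): the functional style that Input() enables.
// Because a Keras tensor is returned, downstream layers can be applied to it
// directly and the model inferred from the endpoints.
//
//     var inputs = keras.layers.Input(shape: 784);
//     var outputs = keras.layers.Dense(10, activation: "softmax").Apply(inputs);
//     var model = keras.Model(inputs, outputs);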
public ILayer InputLayer(Shape input_shape,
string name = null,
bool sparse = false,
bool ragged = false)
=> new InputLayer(new InputLayerArgs
{
InputShape = input_shape,
Name = name,
Sparse = sparse,
Ragged = ragged
});
/// <summary>
/// Average pooling operation for spatial data.
/// </summary>
/// <param name="pool_size"></param>
/// <param name="strides"></param>
/// <param name="padding"></param>
/// <param name="data_format"></param>
/// <returns></returns>
public ILayer AveragePooling2D(Shape pool_size = null,
Shape strides = null,
string padding = "valid",
string data_format = null)
=> new AveragePooling2D(new AveragePooling2DArgs
{
PoolSize = pool_size ?? (2, 2),
Strides = strides,
Padding = padding,
DataFormat = data_format
});
/// <summary>
/// Max pooling operation for 1D temporal data.
/// </summary>
/// <param name="pool_size">Integer, size of the max pooling window.</param>
/// <param name="strides">Integer, or null. Specifies how much the pooling window moves for each pooling step. If null, it will default to pool_size.</param>
/// <param name="padding">One of "valid" or "same" (case-insensitive). "valid" means no padding.
/// "same" results in padding evenly to the left/right or up/down of the input such that the output has the same height/width dimension as the input.
/// </param>
/// <param name="data_format">
/// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
/// channels_last corresponds to inputs with shape (batch, steps, features) while channels_first corresponds to inputs with shape (batch, features, steps).
/// </param>
/// <returns></returns>
public ILayer MaxPooling1D(int? pool_size = null,
int? strides = null,
string padding = "valid",
string data_format = null)
=> new MaxPooling1D(new Pooling1DArgs
{
PoolSize = pool_size ?? 2,
Strides = strides ?? (pool_size ?? 2),
Padding = padding,
DataFormat = data_format
});
/// <summary>
/// Max pooling operation for 2D spatial data.
/// Downsamples the input representation by taking the maximum value over the window defined by pool_size for each dimension along the features axis.
/// The window is shifted by strides in each dimension. The resulting output, when using the "valid" padding option, has a shape (number of rows or columns)
/// of: output_shape = (input_shape - pool_size + 1) / strides
/// The resulting output shape when using the "same" padding option is: output_shape = input_shape / strides
/// </summary>
/// <param name="pool_size">
/// Integer or tuple of 2 integers, window size over which to take the maximum.
/// (2, 2) will take the max value over a 2x2 pooling window. If only one integer is specified, the same window length will be used for both dimensions.
/// </param>
/// <param name="strides">
/// Integer, tuple of 2 integers, or null. Strides values. Specifies how far the pooling window moves for each pooling step.
/// If null, it will default to pool_size.
/// </param>
/// <param name="padding">One of "valid" or "same" (case-insensitive). "valid" means no padding.
/// "same" results in padding evenly to the left/right or up/down of the input such that the output has the same height/width dimension as the input.
/// </param>
/// <param name="data_format">
/// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
/// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to
/// inputs with shape (batch, channels, height, width).
/// It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json.
/// If you never set it, then it will be "channels_last".
/// </param>
/// <returns></returns>
public ILayer MaxPooling2D(Shape pool_size = null,
Shape strides = null,
string padding = "valid",
string data_format = null)
=> new MaxPooling2D(new MaxPooling2DArgs
{
PoolSize = pool_size ?? (2, 2),
Strides = strides,
Padding = padding,
DataFormat = data_format
});
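// Usage sketch of the shape rule above (illustrative only): a 2x2 window with
// the default stride (= pool_size) halves each spatial dimension under "valid"
// padding. The output shape in the comment follows from the formula.
//
//     var inputs = keras.layers.Input(shape: (28, 28, 1));
//     var x = keras.layers.MaxPooling2D(pool_size: (2, 2)).Apply(inputs); // -> (None, 14, 14, 1)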
/// <summary>
/// Max pooling layer for 2D inputs (e.g. images).
/// </summary>
/// <param name="inputs">The tensor over which to pool. Must have rank 4.</param>
/// <param name="pool_size">
/// Integer or tuple of 2 integers, window size over which to take the maximum.
/// (2, 2) will take the max value over a 2x2 pooling window. If only one integer is specified, the same window length will be used for both dimensions.
/// </param>
/// <param name="strides">
/// Integer, tuple of 2 integers, or null. Strides values. Specifies how far the pooling window moves for each pooling step.
/// If null, it will default to pool_size.
/// </param>
/// <param name="padding">One of "valid" or "same" (case-insensitive). "valid" means no padding.
/// "same" results in padding evenly to the left/right or up/down of the input such that the output has the same height/width dimension as the input.
/// </param>
/// <param name="data_format">
/// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
/// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to
/// inputs with shape (batch, channels, height, width).
/// It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json.
/// If you never set it, then it will be "channels_last".
/// </param>
/// <param name="name">A name for the layer.</param>
/// <returns></returns>
public Tensor max_pooling2d(Tensor inputs,
int[] pool_size,
int[] strides,
string padding = "valid",
string data_format = "channels_last",
string name = null)
{
var layer = new MaxPooling2D(new MaxPooling2DArgs
{
PoolSize = pool_size,
Strides = strides,
Padding = padding,
DataFormat = data_format,
Name = name
});
return layer.Apply(inputs);
}
public ILayer LayerNormalization(Axis? axis,
float epsilon = 1e-3f,
bool center = true,
bool scale = true,
IInitializer beta_initializer = null,
IInitializer gamma_initializer = null)
=> new LayerNormalization(new LayerNormalizationArgs
{
Axis = axis ?? -1,
Epsilon = epsilon,
Center = center,
Scale = scale,
BetaInitializer = beta_initializer ?? tf.zeros_initializer,
GammaInitializer = gamma_initializer ?? tf.ones_initializer
});
/// <summary>
/// Leaky version of a Rectified Linear Unit.
/// </summary>
/// <param name="alpha">Negative slope coefficient.</param>
/// <returns></returns>
public ILayer LeakyReLU(float alpha = 0.3f)
=> new LeakyReLu(new LeakyReLuArgs
{
Alpha = alpha
});
public IRnnCell SimpleRNNCell(
int units,
string activation = "tanh",
bool use_bias = true,
string kernel_initializer = "glorot_uniform",
string recurrent_initializer = "orthogonal",
string bias_initializer = "zeros",
float dropout = 0f,
float recurrent_dropout = 0f)
=> new SimpleRNNCell(new SimpleRNNCellArgs
{
Units = units,
Activation = keras.activations.GetActivationFromName(activation),
UseBias = use_bias,
KernelInitializer = GetInitializerByName(kernel_initializer),
RecurrentInitializer = GetInitializerByName(recurrent_initializer),
BiasInitializer = GetInitializerByName(bias_initializer),
Dropout = dropout,
RecurrentDropout = recurrent_dropout
});
public IRnnCell StackedRNNCells(
IEnumerable<IRnnCell> cells)
=> new StackedRNNCells(new StackedRNNCellsArgs
{
Cells = cells.ToList()
});
/// <summary>
/// Fully-connected RNN where the output is to be fed back as the new input.
/// </summary>
/// <param name="units">Positive integer, dimensionality of the output space.</param>
/// <param name="activation">The name of the activation function to use. Default: hyperbolic tangent (tanh).</param>
/// <param name="kernel_initializer"></param>
/// <param name="recurrent_initializer"></param>
/// <param name="bias_initializer"></param>
/// <param name="return_sequences"></param>
/// <param name="return_state"></param>
/// <returns></returns>
public ILayer SimpleRNN(int units,
string activation = "tanh",
string kernel_initializer = "glorot_uniform",
string recurrent_initializer = "orthogonal",
string bias_initializer = "zeros",
bool return_sequences = false,
bool return_state = false)
=> new SimpleRNN(new SimpleRNNArgs
{
Units = units,
Activation = keras.activations.GetActivationFromName(activation),
KernelInitializer = GetInitializerByName(kernel_initializer),
RecurrentInitializer = GetInitializerByName(recurrent_initializer),
BiasInitializer = GetInitializerByName(bias_initializer),
ReturnSequences = return_sequences,
ReturnState = return_state
});
/// <summary>
/// Base class for recurrent layers; wraps an RNN cell and iterates it over the time dimension.
/// </summary>
/// <param name="cell">An instance of an RNN cell.</param>
/// <param name="return_sequences">Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: false.</param>
/// <param name="return_state">Whether to return the last state in addition to the output. Default: false.</param>
/// <param name="go_backwards">Boolean (default false). If true, process the input sequence backwards and return the reversed sequence.</param>
/// <param name="stateful">Boolean (default false). If true, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch.</param>
/// <param name="unroll">Boolean (default false). If true, the network will be unrolled, else a symbolic loop will be used.</param>
/// <param name="time_major">The shape format of the inputs and outputs tensors. If true, the inputs and outputs will be in shape [timesteps, batch, feature], whereas in the false case, it will be [batch, timesteps, feature].</param>
/// <returns></returns>
public ILayer RNN(
IRnnCell cell,
bool return_sequences = false,
bool return_state = false,
bool go_backwards = false,
bool stateful = false,
bool unroll = false,
bool time_major = false)
=> new RNN(new RNNArgs
{
Cell = cell,
ReturnSequences = return_sequences,
ReturnState = return_state,
GoBackwards = go_backwards,
Stateful = stateful,
Unroll = unroll,
TimeMajor = time_major
});
public ILayer RNN(
IEnumerable<IRnnCell> cell,
bool return_sequences = false,
bool return_state = false,
bool go_backwards = false,
bool stateful = false,
bool unroll = false,
bool time_major = false)
=> new RNN(new RNNArgs
{
Cells = cell.ToList(),
ReturnSequences = return_sequences,
ReturnState = return_state,
GoBackwards = go_backwards,
Stateful = stateful,
Unroll = unroll,
TimeMajor = time_major
});
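// Usage sketch (illustrative only): wrapping an explicit cell in the generic
// RNN layer; the cell-based form makes stacking and customization direct.
// The input shape in the comment is an assumption.
//
//     var cell = keras.layers.SimpleRNNCell(4);
//     var rnn = keras.layers.RNN(cell, return_sequences: true);
//     var outputs = rnn.Apply(inputs);  // inputs: (batch, timesteps, features) -> (batch, timesteps, 4)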
/// <summary>
/// Long Short-Term Memory layer - Hochreiter 1997.
/// </summary>
/// <param name="units">Positive integer, dimensionality of the output space.</param>
/// <param name="activation">Activation function to use. If you pass null, no activation is applied (ie. "linear" activation: a(x) = x).</param>
/// <param name="recurrent_activation">Activation function to use for the recurrent step. If you pass null, no activation is applied (ie. "linear" activation: a(x) = x).</param>
/// <param name="use_bias">Boolean (default true), whether the layer uses a bias vector.</param>
/// <param name="kernel_initializer">Initializer for the kernel weights matrix, used for the linear transformation of the inputs. Default: glorot_uniform.</param>
/// <param name="recurrent_initializer">Initializer for the recurrent_kernel weights matrix, used for the linear transformation of the recurrent state. Default: orthogonal.</param>
/// <param name="bias_initializer">Initializer for the bias vector. Default: zeros.</param>
/// <param name="unit_forget_bias">Boolean (default true). If true, add 1 to the bias of the forget gate at initialization. Setting it to true will also force bias_initializer="zeros". This is recommended in Jozefowicz et al.</param>
/// <param name="dropout">Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0.</param>
/// <param name="recurrent_dropout">Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0.</param>
/// <param name="implementation"></param>
/// <param name="return_sequences">Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: false.</param>
/// <param name="return_state">Whether to return the last state in addition to the output. Default: false.</param>
/// <param name="go_backwards">Boolean (default false). If true, process the input sequence backwards and return the reversed sequence.</param>
/// <param name="stateful">Boolean (default false). If true, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch.</param>
/// <param name="time_major">
/// The shape format of the inputs and outputs tensors. If true, the inputs and outputs will be in shape [timesteps, batch, feature],
/// whereas in the false case, it will be [batch, timesteps, feature]. Using time_major = true is a bit more efficient because it avoids transposes at the
/// beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form.
/// </param>
/// <param name="unroll">
/// Boolean (default false). If true, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up an RNN,
/// although it tends to be more memory-intensive. Unrolling is only suitable for short sequences.
/// </param>
/// <returns></returns>
public ILayer LSTM(int units,
Activation activation = null,
Activation recurrent_activation = null,
bool use_bias = true,
IInitializer kernel_initializer = null,
IInitializer recurrent_initializer = null,
IInitializer bias_initializer = null,
bool unit_forget_bias = true,
float dropout = 0f,
float recurrent_dropout = 0f,
int implementation = 2,
bool return_sequences = false,
bool return_state = false,
bool go_backwards = false,
bool stateful = false,
bool time_major = false,
bool unroll = false)
=> new LSTM(new LSTMArgs
{
Units = units,
Activation = activation ?? keras.activations.Tanh,
RecurrentActivation = recurrent_activation ?? keras.activations.Sigmoid,
KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
RecurrentInitializer = recurrent_initializer ?? tf.orthogonal_initializer,
BiasInitializer = bias_initializer ?? tf.zeros_initializer,
Dropout = dropout,
RecurrentDropout = recurrent_dropout,
Implementation = implementation,
ReturnSequences = return_sequences,
ReturnState = return_state,
GoBackwards = go_backwards,
Stateful = stateful,
TimeMajor = time_major,
Unroll = unroll
});
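// Usage sketch (illustrative only): return_sequences and return_state control
// what the LSTM emits, per the parameter docs above.
//
//     var lstm = keras.layers.LSTM(4, return_sequences: true, return_state: true);
//     // outputs: the whole sequence (batch, timesteps, 4), plus the final hidden and cell states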
/// <summary>
/// A preprocessing layer which rescales input values to a new range: output = input * scale + offset.
/// </summary>
/// <param name="scale"></param>
/// <param name="offset"></param>
/// <param name="input_shape"></param>
/// <returns></returns>
public ILayer Rescaling(float scale,
float offset = 0,
Shape input_shape = null)
=> new Rescaling(new RescalingArgs
{
Scale = scale,
Offset = offset,
InputShape = input_shape
});
/// <summary>
/// Layer that adds a list of inputs element-wise.
/// </summary>
/// <returns></returns>
public ILayer Add()
=> new Add(new MergeArgs { });
/// <summary>
/// Layer that subtracts two inputs element-wise.
/// </summary>
/// <returns></returns>
public ILayer Subtract()
=> new Subtract(new MergeArgs { });
/// <summary>
/// Global average pooling operation for spatial data.
/// </summary>
/// <returns></returns>
public ILayer GlobalAveragePooling2D()
=> new GlobalAveragePooling2D(new Pooling2DArgs { });
/// <summary>
/// Global average pooling operation for temporal data.
/// </summary>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
/// channels_last corresponds to inputs with shape (batch, steps, features) while channels_first corresponds to inputs with shape (batch, features, steps).
/// </param>
/// <returns></returns>
public ILayer GlobalAveragePooling1D(string data_format = "channels_last")
=> new GlobalAveragePooling1D(new Pooling1DArgs { DataFormat = data_format });
/// <summary>
/// Global average pooling operation for spatial data.
/// </summary>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
/// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width).
/// </param>
public ILayer GlobalAveragePooling2D(string data_format = "channels_last")
=> new GlobalAveragePooling2D(new Pooling2DArgs { DataFormat = data_format });
/// <summary>
/// Global max pooling operation for 1D temporal data.
/// Downsamples the input representation by taking the maximum value over the time dimension.
/// </summary>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
/// channels_last corresponds to inputs with shape (batch, steps, features) while channels_first corresponds to inputs with shape (batch, features, steps).
/// </param>
/// <returns></returns>
public ILayer GlobalMaxPooling1D(string data_format = "channels_last")
=> new GlobalMaxPooling1D(new Pooling1DArgs { DataFormat = data_format });
/// <summary>
/// Global max pooling operation for spatial data.
/// </summary>
/// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
/// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width).
/// </param>
public ILayer GlobalMaxPooling2D(string data_format = "channels_last")
=> new GlobalMaxPooling2D(new Pooling2DArgs { DataFormat = data_format });
/// <summary>
/// Get a weights initializer from its name.
/// </summary>
/// <param name="name">The name of the initializer. One of glorot_uniform, zeros, ones, or orthogonal.</param>
/// <returns></returns>
IInitializer GetInitializerByName(string name)
=> name switch
{
"glorot_uniform" => tf.glorot_uniform_initializer,
"zeros" => tf.zeros_initializer,
"ones" => tf.ones_initializer,
"orthogonal" => tf.orthogonal_initializer,
_ => tf.glorot_uniform_initializer
};
public ILayer CategoryEncoding(int num_tokens, string output_mode = "one_hot", bool sparse = false, NDArray count_weights = null)
=> new CategoryEncoding(new CategoryEncodingArgs
{
NumTokens = num_tokens,
OutputMode = output_mode,
Sparse = sparse,
CountWeights = count_weights
});
public ILayer Normalization(Shape? input_shape = null, int? axis = -1, float? mean = null, float? variance = null, bool invert = false)
=> new Normalization(new NormalizationArgs
{
InputShape = input_shape,
Axis = axis,
Mean = mean,
Variance = variance,
Invert = invert
});
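// Usage sketch (illustrative only): feature-wise standardization with
// precomputed statistics, bypassing any adapt step. The mean/variance values
// are illustrative numbers, not defaults.
//
//     var norm = keras.layers.Normalization(mean: 5f, variance: 4f);
//     var y = norm.Apply(x);  // computes (x - 5) / sqrt(4)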
}
}