diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 9b889635..458e87f5 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -12,25 +12,31 @@ namespace Tensorflow.Keras.Layers public Preprocessing preprocessing { get; } = new Preprocessing(); /// - /// Functional interface for the batch normalization layer. + /// Layer that normalizes its inputs. + /// Batch normalization applies a transformation that maintains the mean output close to 0 and the output standard deviation close to 1. + /// Importantly, batch normalization works differently during training and during inference. + /// /// http://arxiv.org/abs/1502.03167 /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// + /// The axis that should be normalized (typically the features axis). + /// For instance, after a Conv2D layer with data_format="channels_first", set axis=1 in BatchNormalization. + /// + /// Momentum for the moving average. + /// Small float added to variance to avoid dividing by zero. + /// If True, add offset of beta to normalized tensor. If False, beta is ignored. + /// If True, multiply by gamma. If False, gamma is not used. When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling will be done by the next layer. + /// Initializer for the beta weight. + /// Initializer for the gamma weight. + /// Initializer for the moving mean. + /// Initializer for the moving variance. + /// Boolean, if True the variables will be marked as trainable. + /// Layer name. + /// Whether to use Batch Renormalization. This adds extra variables during training. The inference is the same for either value of this parameter. + /// Momentum used to update the moving means and standard deviations with renorm. + /// Unlike momentum, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). + /// Note that momentum is still applied to get the means and variances for inference. + /// + /// Tensor of the same shape as input. public BatchNormalization BatchNormalization(int axis = -1, float momentum = 0.99f, float epsilon = 0.001f, @@ -62,23 +68,25 @@ namespace Tensorflow.Keras.Layers }); /// - /// + /// 2D convolution layer (e.g. spatial convolution over images). + /// This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. + /// If use_bias is True, a bias vector is created and added to the outputs. Finally, if activation is not None, it is applied to the outputs as well. /// - /// - /// - /// - /// - /// - /// - /// - /// tf.keras.activations - /// - /// - /// - /// - /// - /// - /// + /// Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution) + /// An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. + /// An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. + /// one of "valid" or "same" (case-insensitive). "valid" means no padding.
"same" results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, then it will be channels_last. + /// an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. + /// A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with filters / groups filters. The output is the concatenation of all the groups results along the channel axis. Input channels and filters must both be divisible by groups. + /// Activation function to use. If you don't specify anything, no activation is applied (see keras.activations). + /// Boolean, whether the layer uses a bias vector. + /// Initializer for the kernel weights matrix (see keras.initializers). + /// Initializer for the bias vector (see keras.initializers). + /// Regularizer function applied to the kernel weights matrix (see keras.regularizers). + /// Regularizer function applied to the bias vector (see keras.regularizers). + /// Regularizer function applied to the output of the layer (its "activation") (see keras.regularizers). + /// A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias). public Conv2D Conv2D(int filters, TensorShape kernel_size = null, TensorShape strides = null, @@ -112,6 +120,26 @@ namespace Tensorflow.Keras.Layers Activation = activation ?? keras.activations.Linear }); + /// + /// 2D convolution layer (e.g. spatial convolution over images). + /// This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. + /// If use_bias is True, a bias vector is created and added to the outputs.Finally, if activation is not None, it is applied to the outputs as well. + /// + /// Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution) + /// An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. + /// An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. + /// one of "valid" or "same" (case-insensitive). "valid" means no padding. "same" results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). 
It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, then it will be channels_last. + /// an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. + /// A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with filters / groups filters. The output is the concatenation of all the groups results along the channel axis. Input channels and filters must both be divisible by groups. + /// Activation function to use. If you don't specify anything, no activation is applied (see keras.activations). + /// Boolean, whether the layer uses a bias vector. + /// The name of the initializer for the kernel weights matrix (see keras.initializers). + /// The name of the initializer for the bias vector (see keras.initializers). + /// The name of the regularizer function applied to the kernel weights matrix (see keras.regularizers). + /// The name of the regularizer function applied to the bias vector (see keras.regularizers). + /// The name of the regularizer function applied to the output of the layer (its "activation") (see keras.regularizers). + /// A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias). public Conv2D Conv2D(int filters, TensorShape kernel_size = null, TensorShape strides = null, @@ -145,24 +173,24 @@ namespace Tensorflow.Keras.Layers /// /// Transposed convolution layer (sometimes called Deconvolution). /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// + /// Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution) + /// An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. + /// An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. + /// one of "valid" or "same" (case-insensitive). "valid" means no padding. "same" results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, then it will be channels_last. + /// an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. + /// Activation function to use. If you don't specify anything, no activation is applied (see keras.activations). + /// Boolean, whether the layer uses a bias vector. 
+ /// The name of the initializer for the kernel weights matrix (see keras.initializers). + /// The name of the initializer for the bias vector (see keras.initializers). + /// The name of the regularizer function applied to the kernel weights matrix (see keras.regularizers). + /// The name of the regularizer function applied to the bias vector (see keras.regularizers). + /// The name of the regularizer function applied to the output of the layer (its "activation") (see keras.regularizers). + /// A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias). public Conv2DTranspose Conv2DTranspose(int filters, TensorShape kernel_size = null, TensorShape strides = null, - string padding = "valid", + string output_padding = "valid", string data_format = null, TensorShape dilation_rate = null, string activation = null, @@ -178,7 +206,7 @@ namespace Tensorflow.Keras.Layers Filters = filters, KernelSize = kernel_size, Strides = strides == null ? (1, 1) : strides, - Padding = padding, + Padding = output_padding, DataFormat = data_format, DilationRate = dilation_rate == null ? (1, 1) : dilation_rate, UseBias = use_bias, @@ -187,6 +215,20 @@ namespace Tensorflow.Keras.Layers Activation = GetActivationByName(activation) }); + /// + /// Just your regular densely-connected NN layer. + /// + /// Dense implements the operation: output = activation(dot(input, kernel) + bias) where activation is the + /// element-wise activation function passed as the activation argument, kernel is a weights matrix created by the layer, + /// and bias is a bias vector created by the layer (only applicable if use_bias is True). + /// + /// Positive integer, dimensionality of the output space. + /// Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x). + /// Initializer for the kernel weights matrix. + /// Boolean, whether the layer uses a bias vector. + /// Initializer for the bias vector. + /// N-D tensor with shape: (batch_size, ..., input_dim). The most common situation would be a 2D input with shape (batch_size, input_dim). + /// N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units). public Dense Dense(int units, Activation activation = null, IInitializer kernel_initializer = null, @@ -202,6 +244,15 @@ namespace Tensorflow.Keras.Layers InputShape = input_shape }); + /// + /// Just your regular densely-connected NN layer. + /// + /// Dense implements the operation: output = activation(dot(input, kernel) + bias) where activation is the + /// element-wise activation function passed as the activation argument, kernel is a weights matrix created by the layer, + /// and bias is a bias vector created by the layer (only applicable if use_bias is True). + /// + /// Positive integer, dimensionality of the output space. + /// N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units). public Dense Dense(int units) => new Dense(new DenseArgs { @@ -209,6 +260,17 @@ namespace Tensorflow.Keras.Layers Activation = GetActivationByName("linear") }); + /// + /// Just your regular densely-connected NN layer. 
+ /// + /// Dense implements the operation: output = activation(dot(input, kernel) + bias) where activation is the + /// element-wise activation function passed as the activation argument, kernel is a weights matrix created by the layer, + /// and bias is a bias vector created by the layer (only applicable if use_bias is True). + /// + /// Positive integer, dimensionality of the output space. + /// Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x). + /// N-D tensor with shape: (batch_size, ..., input_dim). The most common situation would be a 2D input with shape (batch_size, input_dim). + /// N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units). public Dense Dense(int units, string activation = null, TensorShape input_shape = null) @@ -260,6 +322,18 @@ namespace Tensorflow.Keras.Layers return layer.Apply(inputs); } + /// + /// Applies Dropout to the input. + /// The Dropout layer randomly sets input units to 0 with a frequency of rate at each step during training time, + /// which helps prevent overfitting. Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over all inputs is unchanged. + /// + /// Float between 0 and 1. Fraction of the input units to drop. + /// 1D integer tensor representing the shape of the binary dropout mask that will be multiplied with the input. For instance, + /// if your inputs have shape (batch_size, timesteps, features) and you want the dropout mask to be the same for all timesteps, + /// you can use noise_shape=(batch_size, 1, features). + /// + /// An integer to use as random seed. + /// public Dropout Dropout(float rate, TensorShape noise_shape = null, int? seed = null) => new Dropout(new DropoutArgs { @@ -295,6 +369,15 @@ namespace Tensorflow.Keras.Layers EmbeddingsInitializer = embeddings_initializer }); + /// + /// Flattens the input. Does not affect the batch size. + /// + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. + /// channels_last corresponds to inputs with shape (batch, ..., channels) while channels_first corresponds to inputs with shape (batch, channels, ...). + /// It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. + /// If you never set it, then it will be "channels_last". + /// + /// public Flatten Flatten(string data_format = null) => new Flatten(new FlattenArgs { @@ -303,12 +386,18 @@ namespace Tensorflow.Keras.Layers /// /// `Input()` is used to instantiate a Keras tensor. + /// A Keras tensor is a TensorFlow symbolic tensor object, which we augment with certain attributes that allow us + /// to build a Keras model just by knowing the inputs and outputs of the model. /// /// A shape tuple not including the batch size. - /// - /// - /// - /// + /// An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided. + /// A boolean specifying whether the placeholder to be created is sparse. Only one of 'ragged' and 'sparse' can be True. + /// Note that, if sparse is False, sparse tensors can still be passed into the input - they will be densified with a default value of 0. + /// + /// A boolean specifying whether the placeholder to be created is ragged. Only one of 'ragged' and 'sparse' can be True.
+ /// In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see the TensorFlow guide on ragged tensors. + /// + /// A tensor. public Tensors Input(TensorShape shape, string name = null, bool sparse = false, @@ -325,35 +414,90 @@ namespace Tensorflow.Keras.Layers return input_layer.InboundNodes[0].Outputs; } + /// + /// Max pooling operation for 1D temporal data. + /// + /// Integer, size of the max pooling window. + /// Integer, or null. Specifies how much the pooling window moves for each pooling step. If null, it will default to pool_size. + /// One of "valid" or "same" (case-insensitive). "valid" means no padding. + /// "same" results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. + /// + /// + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. + /// channels_last corresponds to inputs with shape (batch, steps, features) while channels_first corresponds to inputs with shape (batch, features, steps). + /// + /// public MaxPooling1D MaxPooling1D(int? pool_size = null, int? strides = null, - string padding = "valid") + string padding = "valid", + string data_format = null) => new MaxPooling1D(new Pooling1DArgs { PoolSize = pool_size ?? 2, Strides = strides ?? (pool_size ?? 2), - Padding = padding + Padding = padding, + DataFormat = data_format }); + /// + /// Max pooling operation for 2D spatial data. + /// Downsamples the input representation by taking the maximum value over the window defined by pool_size for each dimension along the features axis. + /// The window is shifted by strides in each dimension. The resulting output when using the "valid" padding option has a shape (number of rows or columns) + /// of: output_shape = (input_shape - pool_size + 1) / strides + /// The resulting output shape when using the "same" padding option is: output_shape = input_shape / strides + /// + /// + /// Integer or tuple of 2 integers, window size over which to take the maximum. + /// (2, 2) will take the max value over a 2x2 pooling window. If only one integer is specified, the same window length will be used for both dimensions. + /// + /// + /// Integer, tuple of 2 integers, or null. Strides values. Specifies how far the pooling window moves for each pooling step. + /// If null, it will default to pool_size. + /// + /// One of "valid" or "same" (case-insensitive). "valid" means no padding. + /// "same" results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. + /// + /// + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. + /// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to + /// inputs with shape (batch, channels, height, width). + /// It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. + /// If you never set it, then it will be "channels_last" + /// public MaxPooling2D MaxPooling2D(TensorShape pool_size = null, TensorShape strides = null, - string padding = "valid") + string padding = "valid", + string data_format = null) => new MaxPooling2D(new MaxPooling2DArgs { PoolSize = pool_size ?? (2, 2), Strides = strides, - Padding = padding + Padding = padding, + DataFormat = data_format }); /// /// Max pooling layer for 2D inputs (e.g. images).
/// /// The tensor over which to pool. Must have rank 4. - /// - /// - /// - /// - /// + /// + /// Integer or tuple of 2 integers, window size over which to take the maximum. + /// (2, 2) will take the max value over a 2x2 pooling window. If only one integer is specified, the same window length will be used for both dimensions. + /// + /// + /// Integer, tuple of 2 integers, or null. Strides values. Specifies how far the pooling window moves for each pooling step. + /// If null, it will default to pool_size. + /// + /// One of "valid" or "same" (case-insensitive). "valid" means no padding. + /// "same" results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. + /// + /// + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. + /// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to + /// inputs with shape (batch, channels, height, width). + /// It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. + /// If you never set it, then it will be "channels_last" + /// A name for the layer /// public Tensor max_pooling2d(Tensor inputs, int[] pool_size, @@ -385,8 +529,19 @@ namespace Tensorflow.Keras.Layers Alpha = alpha }); + /// + /// Fully-connected RNN where the output is to be fed back to input. + /// + /// Positive integer, dimensionality of the output space. + /// public Layer SimpleRNN(int units) => SimpleRNN(units, "tanh"); + /// + /// Fully-connected RNN where the output is to be fed back to input. + /// + /// Positive integer, dimensionality of the output space. + /// Activation function to use. If you pass null, no activation is applied (ie. "linear" activation: a(x) = x). + /// public Layer SimpleRNN(int units, Activation activation = null) => new SimpleRNN(new SimpleRNNArgs { @@ -395,6 +550,12 @@ namespace Tensorflow.Keras.Layers Activation = activation }); + /// + /// Fully-connected RNN where the output is to be fed back to input. + /// + /// Positive integer, dimensionality of the output space. + /// The name of the activation function to use. Default: hyperbolic tangent (tanh). + /// public Layer SimpleRNN(int units, string activation = "tanh") => new SimpleRNN(new SimpleRNNArgs { @@ -403,6 +564,27 @@ namespace Tensorflow.Keras.Layers Activation = GetActivationByName(activation) }); + /// + /// Long Short-Term Memory layer - Hochreiter 1997. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// public Layer LSTM(int units, Activation activation = null, Activation recurrent_activation = null, @@ -439,6 +621,13 @@ namespace Tensorflow.Keras.Layers Unroll = unroll }); + /// + /// Multiplies inputs by scale and adds offset. + /// + /// + /// + /// + /// public Rescaling Rescaling(float scale, float offset = 0, TensorShape input_shape = null) @@ -449,28 +638,72 @@ namespace Tensorflow.Keras.Layers InputShape = input_shape }); + /// + /// Layer that adds a list of inputs element-wise. + /// + /// public Add Add() => new Add(new MergeArgs { }); + /// + /// Layer that subtracts two inputs. + /// + /// public Subtract Subtract() => new Subtract(new MergeArgs { }); + /// + /// Global average pooling operation for spatial data. + /// + /// public GlobalAveragePooling2D GlobalAveragePooling2D() => new GlobalAveragePooling2D(new Pooling2DArgs { }); + /// + /// Global average pooling operation for temporal data. + /// + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs.
+ /// channels_last corresponds to inputs with shape (batch, steps, features) while channels_first corresponds to inputs with shape (batch, features, steps). + /// + /// public GlobalAveragePooling1D GlobalAveragePooling1D(string data_format = "channels_last") => new GlobalAveragePooling1D(new Pooling1DArgs { DataFormat = data_format }); + /// + /// Global average pooling operation for spatial data. + /// + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. + /// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width). + /// public GlobalAveragePooling2D GlobalAveragePooling2D(string data_format = "channels_last") => new GlobalAveragePooling2D(new Pooling2DArgs { DataFormat = data_format }); + /// + /// Global max pooling operation for 1D temporal data. + /// Downsamples the input representation by taking the maximum value over the time dimension. + /// + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. + /// channels_last corresponds to inputs with shape (batch, steps, features) while channels_first corresponds to inputs with shape (batch, features, steps). + /// + /// public GlobalMaxPooling1D GlobalMaxPooling1D(string data_format = "channels_last") => new GlobalMaxPooling1D(new Pooling1DArgs { DataFormat = data_format }); + /// + /// Global max pooling operation for spatial data. + /// + /// A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. + /// channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width). + /// public GlobalMaxPooling2D GlobalMaxPooling2D(string data_format = "channels_last") => new GlobalMaxPooling2D(new Pooling2DArgs { DataFormat = data_format }); + /// + /// Get an activation function layer from its name. + /// + /// The name of the activation function. One of linear, relu, sigmoid, and tanh. + /// Activation GetActivationByName(string name) => name switch @@ -482,6 +715,11 @@ namespace Tensorflow.Keras.Layers _ => keras.activations.Linear }; + /// + /// Get a weights initializer from its name. + /// + /// The name of the initializer. One of zeros, ones, and glorot_uniform. + /// IInitializer GetInitializerByName(string name) => name switch {
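
Usage note (not part of the patch): a minimal sketch of how the overloads documented above compose into a small functional-style model. It assumes the usual TensorFlow.NET binding (using static Tensorflow.KerasApi;), the implicit tuple-to-TensorShape conversions used elsewhere in this API, and a keras.Model(inputs, outputs) overload as in the TensorFlow.NET samples; the 28x28x1 input shape is only an illustrative placeholder.

using static Tensorflow.KerasApi;

// Hypothetical 28x28x1 image input; shape tuples convert implicitly to TensorShape.
var inputs = keras.layers.Input(shape: (28, 28, 1));

// String-activation Conv2D overload; "relu" is resolved through GetActivationByName.
var x = keras.layers.Conv2D(filters: 32, kernel_size: (3, 3), activation: "relu").Apply(inputs);
x = keras.layers.MaxPooling2D(pool_size: (2, 2)).Apply(x);
x = keras.layers.BatchNormalization().Apply(x);
x = keras.layers.Flatten().Apply(x);
x = keras.layers.Dropout(rate: 0.2f).Apply(x);

// Dense(int units) applies the default linear activation.
var outputs = keras.layers.Dense(10).Apply(x);

// Assumption: functional-style model construction as used in the TensorFlow.NET examples.
var model = keras.Model(inputs, outputs);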