diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
index a19508d4..0e339304 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
@@ -105,6 +105,15 @@ namespace Tensorflow.Keras.Layers
bool use_bias = true,
IInitializer bias_initializer = null,
Shape input_shape = null);
+
+ public ILayer Dense(int units,
+ string activation = null,
+ IInitializer kernel_initializer = null,
+ bool use_bias = true,
+ IInitializer bias_initializer = null,
+ IRegularizer kernel_regularizer = null,
+ IRegularizer bias_regularizer = null,
+ Shape input_shape = null);
public ILayer Dropout(float rate, Shape noise_shape = null, int? seed = null);
diff --git a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs
index db5d626e..77603c45 100644
--- a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs
+++ b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs
@@ -57,6 +57,7 @@ namespace Tensorflow.Keras.Layers
"kernel",
shape: new Shape(last_dim, args.Units),
initializer: args.KernelInitializer,
+ regularizer: args.KernelRegularizer,
dtype: DType,
trainable: true);
if (args.UseBias)
@@ -64,6 +65,7 @@ namespace Tensorflow.Keras.Layers
"bias",
shape: new Shape(args.Units),
initializer: args.BiasInitializer,
+ regularizer: args.BiasRegularizer,
dtype: DType,
trainable: true);
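
Passing the regularizer through to add_weight is what wires the penalty into training: when the variable is created, the layer evaluates the regularizer on it and records the resulting scalar among its losses, which the training loop adds to the objective. A minimal sketch of a custom regularizer that could be passed here, assuming IRegularizer exposes a single Apply(RegularizerArgs) method and that RegularizerArgs carries the weight tensor as X (names modeled on the built-in L1/L2 regularizers, so treat them as assumptions):

using Tensorflow;
using Tensorflow.Keras;
using static Tensorflow.Binding;

// Hypothetical scaled-L2 penalty. IRegularizer.Apply(RegularizerArgs) and
// RegularizerArgs.X are assumed here, modeled on the built-in regularizers.
public class ScaledL2 : IRegularizer
{
    private readonly float factor;
    public ScaledL2(float factor = 0.01f) => this.factor = factor;

    // Returns factor * sum(w^2); the layer adds this scalar to its losses.
    public Tensor Apply(RegularizerArgs args)
        => factor * tf.reduce_sum(tf.square(args.X));
}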
diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs
index 0bdcbc84..761a336d 100644
--- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs
+++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs
@@ -303,6 +303,41 @@ namespace Tensorflow.Keras.Layers
Units = units,
Activation = keras.activations.GetActivationFromName("linear")
});
+ /// <summary>
+ /// Just your regular densely-connected NN layer.
+ ///
+ /// Dense implements the operation: output = activation(dot(input, kernel) + bias) where activation is the
+ /// element-wise activation function passed as the activation argument, kernel is a weights matrix created by the layer,
+ /// and bias is a bias vector created by the layer (only applicable if use_bias is true).
+ /// </summary>
+ /// <param name="units">Positive integer, dimensionality of the output space.</param>
+ /// <param name="activation">Activation function to use. If you don't specify anything, no activation is applied (i.e. "linear" activation: a(x) = x).</param>
+ /// <param name="kernel_initializer">Initializer for the kernel weights matrix.</param>
+ /// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
+ /// <param name="bias_initializer">Initializer for the bias vector.</param>
+ /// <param name="kernel_regularizer">Regularizer function applied to the kernel weights matrix, e.g. an L1 or L2 penalty.</param>
+ /// <param name="bias_regularizer">Regularizer function applied to the bias vector, e.g. an L1 or L2 penalty.</param>
+ /// <param name="input_shape">N-D tensor with shape: (batch_size, ..., input_dim). The most common situation would be a 2D input with shape (batch_size, input_dim).</param>
+ /// <returns>N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units).</returns>
+ public ILayer Dense(int units,
+ string activation = null,
+ IInitializer kernel_initializer = null,
+ bool use_bias = true,
+ IInitializer bias_initializer = null,
+ IRegularizer kernel_regularizer = null,
+ IRegularizer bias_regularizer = null,
+ Shape input_shape = null)
+ => new Dense(new DenseArgs
+ {
+ Units = units,
+ Activation = keras.activations.GetActivationFromName(activation ?? "linear"),
+ KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
+ UseBias = use_bias,
+ BiasInitializer = bias_initializer ?? (use_bias ? tf.zeros_initializer : null),
+ InputShape = input_shape,
+ KernelRegularizer = kernel_regularizer,
+ BiasRegularizer = bias_regularizer
+ });
///
/// Just your regular densely-connected NN layer.
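
With the overload in place, regularizers attach the same way as in Python Keras. A usage sketch, assuming this build exposes the keras.regularizers.l2 factory (any IRegularizer implementation would do in its place):

using Tensorflow;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;

var model = keras.Sequential();
// L2-penalize the kernel of the hidden layer; the bias stays unregularized.
model.add(keras.layers.Dense(64,
    activation: "relu",
    kernel_regularizer: keras.regularizers.l2(0.01f),
    input_shape: new Shape(784)));
model.add(keras.layers.Dense(10, activation: "softmax"));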