diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index fdf00590..1f011157 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -9,4 +9,4 @@ community_bridge: # Replace with a single Community Bridge project-name e.g., cl
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
-custom: ['https://bit.ly/2op1mu5']# Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
+custom: []# Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/src/TensorFlowNET.Core/Gradients/Tape.ComputeGradient.cs b/src/TensorFlowNET.Core/Gradients/Tape.ComputeGradient.cs
index 3a5f4efa..0d0ecbe2 100644
--- a/src/TensorFlowNET.Core/Gradients/Tape.ComputeGradient.cs
+++ b/src/TensorFlowNET.Core/Gradients/Tape.ComputeGradient.cs
@@ -26,7 +26,6 @@ namespace Tensorflow.Gradients
tensor_tape_,
state.op_tape);
- int gcCollectFlag = 0;
while (!op_stack.empty())
{
var op = op_stack.Dequeue();
@@ -155,9 +154,6 @@ namespace Tensorflow.Gradients
op_stack.Enqueue(op_id);
}
}
-
- if (gcCollectFlag++ % 10 == 0)
- GC.Collect();
}
if (state.op_tape.Count > 0)
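
Dropping the periodic GC.Collect() keeps the backprop loop from stalling behind full blocking collections; the popped tape entries are reclaimed by the runtime on its own schedule. A minimal stand-alone sketch of the cost being removed (allocation sizes and counts are illustrative, not taken from the tape code):

    using System;
    using System.Diagnostics;

    class ForcedGcCost
    {
        static void Main()
        {
            var sw = Stopwatch.StartNew();
            for (int op = 0; op < 1_000; op++)
            {
                var grads = new byte[64 * 1024];   // stand-in for per-op gradient buffers
                grads[grads.Length - 1] = 1;       // touch the buffer so it is actually used
                if (op % 10 == 0)
                    GC.Collect();                  // the pattern this diff removes
            }
            Console.WriteLine($"forced GC every 10 ops: {sw.ElapsedMilliseconds} ms");
        }
    }
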
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs
index dbf53988..958d79f4 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs
@@ -90,11 +90,12 @@ namespace Tensorflow.Operations
strides.Insert(0, 1);
dilations.Insert(0, 1);
- var expanded = tf.expand_dims(input, spatial_start_dim);
+ input = array_ops.expand_dims(input, spatial_start_dim);
+ filters = array_ops.expand_dims(filters, 0);
result = gen_nn_ops.conv2d(new Conv2dParams
{
- Input = expanded,
+ Input = input,
Filter = filters,
Strides = strides.ToArray(),
Padding = padding,
@@ -102,7 +103,7 @@ namespace Tensorflow.Operations
Dilations = dilations.ToArray(),
Name = name
});
- result = tf.squeeze(result, squeeze_dims: spatial_start_dim);
+ result = array_ops.squeeze(result, new[] { spatial_start_dim });
}
});
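
For reference, the shape bookkeeping behind this hunk written out as a stand-alone sketch. It assumes channels-last input, so spatial_start_dim is 1; the helper name is illustrative and only the ops visible in the hunk are used:

    // input:   (batch, width, in_channels)    -> expand to (batch, 1, width, in_channels)
    // filters: (kernel_width, in_ch, out_ch)  -> expand to (1, kernel_width, in_ch, out_ch)
    Tensor Conv1DViaConv2D(Tensor input, Tensor filters, int stride, string padding)
    {
        var x = array_ops.expand_dims(input, 1);
        var w = array_ops.expand_dims(filters, 0);
        var y = gen_nn_ops.conv2d(new Conv2dParams
        {
            Input = x,
            Filter = w,
            Strides = new[] { 1, 1, stride, 1 },   // stride 1 on the inserted H axis
            Padding = padding
        });
        return array_ops.squeeze(y, new[] { 1 }); // drop the length-1 H axis again
    }
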
diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs
index 638559e6..049d874e 100644
--- a/src/TensorFlowNET.Core/Operations/array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/array_ops.cs
@@ -730,7 +730,7 @@ namespace Tensorflow
/// A `Tensor`. Has the same type as `input`.
/// Contains the same data as `input`, but has one or more dimensions of
/// size 1 removed.
- public static Tensor squeeze(Tensor input, int[] axis = null, string name = null, int[] squeeze_dims = null)
+ public static Tensor squeeze(Tensor input, int[] axis = null, string name = null)
=> gen_array_ops.squeeze(input, axis, name);
public static Tensor identity(Tensor input, string name = null)
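
The removed squeeze_dims parameter was never forwarded to gen_array_ops.squeeze, so call sites now pass the axes through axis directly. An illustrative before/after:

    // before (squeeze_dims was accepted but not forwarded):
    //   array_ops.squeeze(result, squeeze_dims: new[] { 1 });
    // after:
    var squeezed = array_ops.squeeze(result, new[] { 1 });   // drop the length-1 axis
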
diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs b/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
index 218d1369..5ac2dd00 100644
--- a/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
+++ b/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
@@ -48,11 +48,11 @@ namespace Tensorflow.Keras.Layers
public Convolutional(ConvolutionalArgs args) : base(args)
{
this.args = args;
- args.KernelSize = conv_utils.normalize_tuple(args.KernelSize.dims.Select(x => (int)x).ToArray(), args.Rank, "kernel_size");
- args.Strides = conv_utils.normalize_tuple(args.Strides.dims.Select(x => (int)x).ToArray(), args.Rank, "strides");
+ args.KernelSize = conv_utils.normalize_tuple(args.KernelSize.as_int_list(), args.Rank, "kernel_size");
+ args.Strides = conv_utils.normalize_tuple(args.Strides.as_int_list(), args.Rank, "strides");
args.Padding = conv_utils.normalize_padding(args.Padding);
args.DataFormat = conv_utils.normalize_data_format(args.DataFormat);
- args.DilationRate = conv_utils.normalize_tuple(args.DilationRate.dims.Select(x => (int)x).ToArray(), args.Rank, "dilation_rate");
+ args.DilationRate = conv_utils.normalize_tuple(args.DilationRate.as_int_list(), args.Rank, "dilation_rate");
inputSpec = new InputSpec(ndim: rank + 2);
_tf_data_format = conv_utils.convert_data_format(data_format, rank + 2);
}
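
as_int_list() packages the same dims-to-int[] projection the old code spelled out with LINQ; a small illustrative equivalence check (values chosen arbitrarily):

    var ks = new Shape(3, 3);
    int[] viaLinq   = ks.dims.Select(x => (int)x).ToArray();  // old form
    int[] viaHelper = ks.as_int_list();                       // new form
    // both yield { 3, 3 }, which is what conv_utils.normalize_tuple expects
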
diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs
index d99d8bae..c6929a4b 100644
--- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs
+++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs
@@ -68,64 +68,6 @@ namespace Tensorflow.Keras.Layers
Name = name
});
- /// <summary>
- /// 1D convolution layer (e.g. temporal convolution).
- /// This layer creates a convolution kernel that is convolved with the layer input over a single spatial(or temporal) dimension to produce a tensor of outputs.If use_bias is True, a bias vector is created and added to the outputs.Finally, if activation is not None, it is applied to the outputs as well.
- /// </summary>
- /// <param name="filters">Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution)</param>
- /// <param name="kernel_size">An integer specifying the width of the 1D convolution window.</param>
- /// <param name="strides">An integer specifying the stride of the convolution window . Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.</param>
- /// <param name="padding">one of "valid" or "same" (case-insensitive). "valid" means no padding. "same" results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input.</param>
- /// <param name="data_format">A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, then it will be channels_last.</param>
- /// <param name="dilation_rate">An integer specifying the dilation rate to use for dilated convolution.Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1.</param>
- /// <param name="groups">A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with filters / groups filters. The output is the concatenation of all the groups results along the channel axis. Input channels and filters must both be divisible by groups.</param>
- /// <param name="activation">Activation function to use. If you don't specify anything, no activation is applied (see keras.activations).</param>
- /// <param name="use_bias">Boolean, whether the layer uses a bias vector.</param>
- /// <param name="kernel_initializer">Initializer for the kernel weights matrix (see keras.initializers).</param>
- /// <param name="bias_initializer">Initializer for the bias vector (see keras.initializers).</param>
- /// <param name="kernel_regularizer">Regularizer function applied to the kernel weights matrix (see keras.regularizers).</param>
- /// <param name="bias_regularizer">Regularizer function applied to the bias vector (see keras.regularizers).</param>
- /// <param name="activity_regularizer">Regularizer function applied to the output of the layer (its "activation") (see keras.regularizers).</param>
- /// <returns>A tensor of rank 3 representing activation(conv1d(inputs, kernel) + bias).</returns>
- public Conv1D Conv1D(int filters,
- int? kernel_size = null,
- int? strides = null,
- string padding = "valid",
- string data_format = null,
- int? dilation_rate = null,
- int groups = 1,
- Activation activation = null,
- bool use_bias = true,
- IInitializer kernel_initializer = null,
- IInitializer bias_initializer = null,
- IRegularizer kernel_regularizer = null,
- IRegularizer bias_regularizer = null,
- IRegularizer activity_regularizer = null)
- {
- // Special case: Conv1D will be implemented as Conv2D with H=1, so we need to add a 1-sized dimension to the kernel.
- // Lower-level logic handles the stride and dilation_rate, but the kernel_size needs to be set properly here.
-
- var kernel = (kernel_size == null) ? (1, 5) : (1, kernel_size.Value);
- return new Conv1D(new Conv1DArgs
- {
- Rank = 1,
- Filters = filters,
- KernelSize = kernel,
- Strides = strides == null ? 1 : strides,
- Padding = padding,
- DataFormat = data_format,
- DilationRate = dilation_rate == null ? 1 : dilation_rate,
- Groups = groups,
- UseBias = use_bias,
- KernelInitializer = kernel_initializer == null ? tf.glorot_uniform_initializer : kernel_initializer,
- BiasInitializer = bias_initializer == null ? tf.zeros_initializer : bias_initializer,
- KernelRegularizer = kernel_regularizer,
- BiasRegularizer = bias_regularizer,
- ActivityRegularizer = activity_regularizer,
- Activation = activation ?? keras.activations.Linear
- });
- }
-
/// <summary>
/// 1D convolution layer (e.g. temporal convolution).
/// This layer creates a convolution kernel that is convolved with the layer input over a single spatial(or temporal) dimension to produce a tensor of outputs.If use_bias is True, a bias vector is created and added to the outputs.Finally, if activation is not None, it is applied to the outputs as well.
@@ -143,7 +85,7 @@ namespace Tensorflow.Keras.Layers
/// <param name="bias_initializer">Initializer for the bias vector (see keras.initializers).</param>
/// <returns>A tensor of rank 3 representing activation(conv1d(inputs, kernel) + bias).</returns>
public Conv1D Conv1D(int filters,
- int? kernel_size = null,
+ Shape? kernel_size = null,
int? strides = null,
string padding = "valid",
string data_format = null,
@@ -157,12 +99,11 @@ namespace Tensorflow.Keras.Layers
// Special case: Conv1D will be implemented as Conv2D with H=1, so we need to add a 1-sized dimension to the kernel.
// Lower-level logic handles the stride and dilation_rate, but the kernel_size needs to be set properly here.
- var kernel = (kernel_size == null) ? (1, 5) : (1, kernel_size.Value);
return new Conv1D(new Conv1DArgs
{
Rank = 1,
Filters = filters,
- KernelSize = kernel,
+ KernelSize = kernel_size ?? new Shape(1, 5),
Strides = strides == null ? 1 : strides,
Padding = padding,
DataFormat = data_format,
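
With the duplicate overload gone, kernel_size is taken as a Shape and the (1, 5) default moves into the null-coalescing assignment. Illustrative call sites (assuming the Shape conversions the removed tuple assignment already relied on):

    var c1 = keras.layers.Conv1D(filters: 32, kernel_size: new Shape(1, 5), strides: 1);
    var c2 = keras.layers.Conv1D(filters: 32);   // kernel_size == null -> falls back to new Shape(1, 5)
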
diff --git a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs
index b6f2fef1..0c340477 100644
--- a/src/TensorFlowNET.Keras/Saving/hdf5_format.cs
+++ b/src/TensorFlowNET.Keras/Saving/hdf5_format.cs
@@ -210,19 +210,19 @@ namespace Tensorflow.Keras.Saving
}
}
- private static void save_attributes_to_hdf5_group(long f,string name ,Array data)
+ private static void save_attributes_to_hdf5_group(long f, string name, Array data)
{
int num_chunks = 1;
-
+
var chunked_data = Split(data, num_chunks);
- int getSize= 0;
-
- string getType = data.Length>0?data.GetValue(0).GetType().Name.ToLower():"string";
+ int getSize = 0;
+
+ string getType = data.Length > 0 ? data.GetValue(0).GetType().Name.ToLower() : "string";
switch (getType)
{
case "single":
- getSize=sizeof(float);
+ getSize = sizeof(float);
break;
case "double":
getSize = sizeof(double);
@@ -237,30 +237,25 @@ namespace Tensorflow.Keras.Saving
getSize = sizeof(long);
break;
default:
- getSize=-1;
+ getSize = -1;
break;
}
int getCount = chunked_data.Count;
-
- if (getSize != -1) {
- num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / (double)HDF5_OBJECT_HEADER_LIMIT);
+
+ if (getSize != -1)
+ {
+ num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / HDF5_OBJECT_HEADER_LIMIT);
if (num_chunks > 1) chunked_data = Split(data, num_chunks);
}
-
+
if (num_chunks > 1)
{
foreach (var (chunk_id, chunk_data) in enumerate(chunked_data))
- {
-
WriteAttrs(f, getType, $"{name}{chunk_id}", chunk_data.ToArray());
-
- }
-
}
- else {
-
- WriteAttrs(f, getType,name, data);
-
+ else
+ {
+ WriteAttrs(f, getType, name, data);
}
}
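
The reindented chunking logic estimates how many attribute chunks are needed from the element size and HDF5_OBJECT_HEADER_LIMIT. A stand-alone sketch of that arithmetic with assumed values (the real limit is the class constant; 64512 mirrors Keras' usual header limit and is only an illustration):

    const int HDF5_OBJECT_HEADER_LIMIT = 64512;  // assumed value for illustration
    int getCount = 20_000;                       // number of entries being written
    int getSize  = sizeof(float);                // "single" entries -> 4 bytes each
    int num_chunks = (int)Math.Ceiling((double)(getCount * getSize) / HDF5_OBJECT_HEADER_LIMIT);
    // 20_000 * 4 = 80_000 bytes -> ceil(80_000 / 64_512) = 2 chunks,
    // written as attributes "{name}0" and "{name}1" by the loop above.
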