diff --git a/src/TensorFlowNET.Core/NumPy/ShapeHelper.cs b/src/TensorFlowNET.Core/NumPy/ShapeHelper.cs
index 4f4db76d..aea4e678 100644
--- a/src/TensorFlowNET.Core/NumPy/ShapeHelper.cs
+++ b/src/TensorFlowNET.Core/NumPy/ShapeHelper.cs
@@ -100,6 +100,10 @@ namespace Tensorflow.NumPy
                     if (shape.ndim != shape2.Length)
                         return false;
                     return Enumerable.SequenceEqual(shape.dims, shape2);
+                case int[] shape3:
+                    if (shape.ndim != shape3.Length)
+                        return false;
+                    return Enumerable.SequenceEqual(shape.as_int_list(), shape3);
                 default:
                     return false;
             }
diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs
index 9b822971..d0102b70 100644
--- a/src/TensorFlowNET.Keras/BackendImpl.cs
+++ b/src/TensorFlowNET.Keras/BackendImpl.cs
@@ -347,19 +347,21 @@ namespace Tensorflow.Keras
             string data_format = null,
             Shape dilation_rate = null)
         {
+            /*
             var force_transpose = false;
             if (data_format == "channels_first" && !dilation_rate.Equals(new[] { 1, 1 }))
                 force_transpose = true;
-            // x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
+            x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
+            */
             var tf_data_format = "NHWC";
             padding = padding.ToUpper();
             strides = new Shape(1, strides[0], strides[1], 1);
-            if (dilation_rate.Equals(new long[] { 1, 1 }))
+            if (dilation_rate.Equals(new[] { 1, 1 }))
                 x = nn_impl.conv2d_transpose(x, kernel, output_shape, strides,
                     padding: padding,
                     data_format: tf_data_format);
             else
-                throw new NotImplementedException("");
+                throw new NotImplementedException("dilation_rate other than [1,1] is not yet supported");
 
             return x;
         }
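
A minimal sketch (not part of the patch) of how the new int[] branch in ShapeHelper.Equals is expected to behave, assuming Shape.Equals(object) delegates to ShapeHelper.Equals as it already does for the long[] case; this is what the rewritten dilation_rate check in BackendImpl relies on:

    using Tensorflow;

    var dilation_rate = new Shape(1, 1);

    // Before this patch only a long[] argument matched; the new case makes
    // an int[] literal compare equal as well (via shape.as_int_list()).
    var intsMatch  = dilation_rate.Equals(new[] { 1, 1 });       // true  (new int[] branch)
    var longsMatch = dilation_rate.Equals(new long[] { 1, 1 });  // true  (existing long[] branch)
    var mismatch   = dilation_rate.Equals(new[] { 2, 2 });       // false (values differ)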