From 22effd6553732142143ed95a16337be038954e7a Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Mon, 12 Jul 2021 20:06:55 -0500 Subject: [PATCH] Using Shape instead of TensorShape. --- src/TensorFlowNET.Console/MemoryBasicTest.cs | 2 +- .../MemoryFuncGraphTest.cs | 2 +- src/TensorFlowNET.Console/MemoryKerasTest.cs | 4 +- src/TensorFlowNET.Core/APIs/tf.array.cs | 8 +- src/TensorFlowNET.Core/APIs/tf.compat.v1.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.image.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.init.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.linalg.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.math.cs | 8 +- src/TensorFlowNET.Core/APIs/tf.queue.cs | 18 +- src/TensorFlowNET.Core/APIs/tf.random.cs | 10 +- src/TensorFlowNET.Core/APIs/tf.reshape.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.sparse.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.tile.cs | 2 +- src/TensorFlowNET.Core/Binding.Util.cs | 3 +- src/TensorFlowNET.Core/Binding.cs | 4 +- src/TensorFlowNET.Core/Data/DatasetManager.cs | 2 +- src/TensorFlowNET.Core/Data/DatasetV2.cs | 2 +- src/TensorFlowNET.Core/Data/IDatasetV2.cs | 2 +- src/TensorFlowNET.Core/Data/OwnedIterator.cs | 2 +- .../Eager/EagerRunner.TFE_FastPathExecute.cs | 2 +- .../Eager/EagerTensor.ToString.cs | 2 +- src/TensorFlowNET.Core/Eager/EagerTensor.cs | 6 - .../Framework/Models/DenseSpec.cs | 6 +- .../Framework/Models/TensorSpec.cs | 2 +- .../Framework/common_shapes.py.cs | 6 +- .../Framework/tensor_shape.cs | 15 +- .../Functions/ConcreteFunction.cs | 2 +- .../Gradients/TapeTensor.cs | 4 +- .../Gradients/gradients_util.cs | 2 +- .../Gradients/image_grad.cs | 4 +- src/TensorFlowNET.Core/Gradients/math_grad.cs | 8 +- src/TensorFlowNET.Core/Gradients/nn_grad.cs | 3 +- .../ops.gradient_function_mapping.cs | 2 +- .../Graphs/AutoGraphAttribute.cs | 4 +- src/TensorFlowNET.Core/Graphs/FuncGraph.cs | 6 +- src/TensorFlowNET.Core/Graphs/Graph.cs | 6 +- .../Convolution/ConvolutionalArgs.cs | 6 +- .../Keras/ArgsDefinition/LayerArgs.cs | 4 +- .../Normalization/BatchNormalizationArgs.cs | 2 +- .../ArgsDefinition/Pooling/Pooling2DArgs.cs | 4 +- .../Regularization/DropoutArgs.cs | 2 +- .../ArgsDefinition/Reshaping/ReshapeArgs.cs | 2 +- .../Reshaping/UpSampling2DArgs.cs | 2 +- .../Keras/Engine/InputSpec.cs | 4 +- src/TensorFlowNET.Core/Keras/Layers/ILayer.cs | 4 +- .../Implementation/NumPyImpl.Creation.cs | 10 +- src/TensorFlowNET.Core/Numpy/Shape.cs | 173 ++++++++-- .../ControlFlows/ControlFlowState.cs | 8 +- .../Operations/ControlFlows/GradLoopState.cs | 2 +- .../Operations/ControlFlows/WhileContext.cs | 12 +- .../Initializers/InitializerArgs.cs | 4 +- .../Operations/Initializers/Zeros.cs | 4 +- .../Operations/Losses/losses_impl.py.cs | 4 +- .../Operations/NnOps/BasicLSTMCell.cs | 2 +- .../Operations/NnOps/BasicRNNCell.cs | 2 +- .../Operations/NnOps/ConvolutionInternal.cs | 4 +- .../Operations/NnOps/LayerRNNCell.cs | 2 +- .../Operations/NnOps/RNNCell.cs | 6 +- .../Operations/NnOps/rnn.cs | 38 +-- .../Operations/NnOps/rnn_cell_impl.cs | 18 +- .../Operations/OpDefLibrary.cs | 6 +- .../Operations/Queues/FIFOQueue.cs | 2 +- .../Operations/Queues/PaddingFIFOQueue.cs | 2 +- .../Operations/Queues/PriorityQueue.cs | 4 +- .../Operations/Queues/QueueBase.cs | 4 +- .../Operations/Queues/RandomShuffleQueue.cs | 2 +- .../Operations/_GraphTensorArray.cs | 16 +- .../Operations/array_ops.cs | 42 +-- src/TensorFlowNET.Core/Operations/clip_ops.cs | 4 +- .../Operations/confusion_matrix.py.cs | 4 +- .../Operations/control_flow_ops.cs | 8 +- .../Operations/dataset_ops.cs | 44 +-- .../Operations/functional_ops.cs | 6 +- 
.../Operations/gen_array_ops.cs | 6 +- .../Operations/gen_data_flow_ops.cs | 12 +- .../Operations/gen_math_ops.cs | 8 +- src/TensorFlowNET.Core/Operations/gen_ops.cs | 158 ++++----- .../Operations/gen_resource_variable_ops.cs | 2 +- .../Operations/image_ops_impl.cs | 130 ++++---- .../Operations/linalg_ops.cs | 4 +- src/TensorFlowNET.Core/Operations/map_fn.cs | 6 +- src/TensorFlowNET.Core/Operations/math_ops.cs | 64 ++-- .../Operations/nn_impl.py.cs | 8 +- src/TensorFlowNET.Core/Operations/nn_ops.cs | 10 +- .../Operations/random_ops.cs | 2 +- .../Operations/resource_variable_ops.cs | 6 +- .../Operations/string_ops.cs | 6 +- .../Operations/weights_broadcast_ops.cs | 8 +- .../Tensors/Ragged/RaggedTensor.cs | 4 +- .../Tensors/Ragged/SparseTensor.cs | 10 +- .../Tensors/Tensor.Creation.cs | 4 +- .../Tensors/Tensor.Explicit.cs | 4 +- .../Tensors/Tensor.String.cs | 6 +- src/TensorFlowNET.Core/Tensors/Tensor.cs | 30 +- src/TensorFlowNET.Core/Tensors/TensorArray.cs | 2 +- .../Tensors/TensorShape.Convert.cs | 47 --- .../Tensors/TensorShape.Equals.cs | 37 --- src/TensorFlowNET.Core/Tensors/TensorShape.cs | 306 ------------------ src/TensorFlowNET.Core/Tensors/Tensors.cs | 2 +- .../Tensors/c_api.tensor.cs | 4 +- src/TensorFlowNET.Core/Tensors/constant_op.cs | 20 +- src/TensorFlowNET.Core/Tensors/tensor_util.cs | 55 ++-- src/TensorFlowNET.Core/Tensors/tf.constant.cs | 6 +- .../Training/Saving/BaseSaverBuilder.cs | 2 +- .../Saving/ResourceVariableSaveable.cs | 2 +- .../Training/Saving/SaveableObject.cs | 4 +- .../Training/SlotCreator.cs | 10 +- .../Variables/BaseResourceVariable.cs | 8 +- .../Variables/IVariableV1.cs | 2 +- .../Variables/RefVariable.cs | 6 +- .../Variables/ResourceVariable.cs | 8 +- .../Variables/VariableArgs.cs | 2 +- .../Variables/VariableScope.cs | 2 +- .../Variables/_UnreadVariable.cs | 2 +- .../Variables/_VariableStore.cs | 6 +- .../Variables/variable_scope.py.cs | 2 +- src/TensorFlowNET.Core/ops.cs | 1 - src/TensorFlowNET.Core/tensorflow.cs | 4 +- src/TensorFlowNET.Keras/BackendImpl.cs | 12 +- .../Engine/DataAdapters/DataAdapter.cs | 4 +- src/TensorFlowNET.Keras/Engine/Functional.cs | 4 +- .../Engine/Layer.AddWeights.cs | 2 +- .../Engine/Layer.Layers.cs | 2 +- src/TensorFlowNET.Keras/Engine/Layer.cs | 4 +- .../Engine/LossesContainer.cs | 2 +- src/TensorFlowNET.Keras/Engine/Node.cs | 4 +- src/TensorFlowNET.Keras/Engine/Sequential.cs | 4 +- src/TensorFlowNET.Keras/KerasInterface.cs | 4 +- .../Layers/Convolution/Conv2DTranspose.cs | 10 +- .../Layers/Convolution/Convolutional.cs | 10 +- src/TensorFlowNET.Keras/Layers/Core/Dense.cs | 6 +- .../Layers/Core/InputLayer.cs | 2 +- .../Layers/LayersApi.Reshaping.cs | 4 +- src/TensorFlowNET.Keras/Layers/LayersApi.cs | 36 +-- .../Layers/Merging/Concatenate.cs | 4 +- .../Normalization/BatchNormalization.cs | 12 +- .../Layers/Preprocessing/Resizing.cs | 4 +- .../Layers/Preprocessing/TextVectorization.cs | 2 +- src/TensorFlowNET.Keras/Layers/RNN.cs | 8 +- .../Layers/Rescaling/Rescaling.cs | 2 +- .../Layers/Reshaping/Flatten.cs | 2 +- .../Layers/Reshaping/Reshape.cs | 6 +- .../Losses/SparseCategoricalCrossentropy.cs | 4 +- src/TensorFlowNET.Keras/Metrics/Metric.cs | 4 +- src/TensorFlowNET.Keras/Metrics/MetricsApi.cs | 4 +- .../Optimizers/OptimizerV2.cs | 2 +- ...processing.image_dataset_from_directory.cs | 2 +- ...eprocessing.paths_and_labels_to_dataset.cs | 4 +- .../Saving/KerasObjectLoader.cs | 4 +- .../Saving/TensorShapeConfig.cs | 4 +- src/TensorFlowNET.Keras/Utils/losses_utils.cs | 2 +- .../Basics/QueueTest.cs | 2 +- 
.../ControlFlowTest/ShapeTestCase.cs | 6 +- .../FunctionalOpsTest/ScanTestCase.cs | 4 +- .../OperationsTest.cs | 96 +++--- .../Layers/LayersTest.cs | 6 +- .../Basics/SessionTest.cs | 8 +- .../Basics/TrainSaverTest.cs | 4 +- .../ManagedAPI/ControlFlowApiTest.cs | 2 +- .../ManagedAPI/LinalgTest.cs | 2 +- .../ManagedAPI/MathApiTest.cs | 2 +- .../ManagedAPI/TensorOperate.cs | 2 +- .../Utilities/FluentExtension.cs | 4 +- 164 files changed, 836 insertions(+), 1134 deletions(-) delete mode 100644 src/TensorFlowNET.Core/Tensors/TensorShape.Convert.cs delete mode 100644 src/TensorFlowNET.Core/Tensors/TensorShape.Equals.cs delete mode 100644 src/TensorFlowNET.Core/Tensors/TensorShape.cs diff --git a/src/TensorFlowNET.Console/MemoryBasicTest.cs b/src/TensorFlowNET.Console/MemoryBasicTest.cs index ce5251d8..3b0deeab 100644 --- a/src/TensorFlowNET.Console/MemoryBasicTest.cs +++ b/src/TensorFlowNET.Console/MemoryBasicTest.cs @@ -151,7 +151,7 @@ namespace Tensorflow public Action Dataset => (epoch, iterate) => { - TensorShape shape = (16, 32, 32, 3); + Shape shape = (16, 32, 32, 3); var images = np.arange(shape.size).astype(np.float32).reshape(shape.dims); var data_handler = new DataHandler(new DataHandlerArgs { diff --git a/src/TensorFlowNET.Console/MemoryFuncGraphTest.cs b/src/TensorFlowNET.Console/MemoryFuncGraphTest.cs index c43d0a2b..8c7ccaaf 100644 --- a/src/TensorFlowNET.Console/MemoryFuncGraphTest.cs +++ b/src/TensorFlowNET.Console/MemoryFuncGraphTest.cs @@ -22,7 +22,7 @@ namespace Tensorflow Tensor permutation(Tensor tensor) { - TensorShape shape = (8, 64, 64, 3); + Shape shape = (8, 64, 64, 3); var images = np.arange(shape.size).astype(np.float32).reshape(shape.dims); return tf.constant(images); } diff --git a/src/TensorFlowNET.Console/MemoryKerasTest.cs b/src/TensorFlowNET.Console/MemoryKerasTest.cs index 98d20356..5cd452ff 100644 --- a/src/TensorFlowNET.Console/MemoryKerasTest.cs +++ b/src/TensorFlowNET.Console/MemoryKerasTest.cs @@ -19,7 +19,7 @@ namespace Tensorflow public Action InputLayer => (epoch, iterate) => { - TensorShape shape = (32, 256, 256, 3); // 48M + Shape shape = (32, 256, 256, 3); // 48M var images = np.arange(shape.size).astype(np.float32).reshape(shape.dims); var inputs = keras.Input((shape.dims[1], shape.dims[2], 3)); @@ -31,7 +31,7 @@ namespace Tensorflow public Action Prediction => (epoch, iterate) => { - TensorShape shape = (32, 256, 256, 3); // 48M + Shape shape = (32, 256, 256, 3); // 48M var images = np.arange(shape.size).astype(np.float32).reshape(shape.dims); var inputs = keras.Input((shape.dims[1], shape.dims[2], 3)); diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs index 2bc856c8..8574b838 100644 --- a/src/TensorFlowNET.Core/APIs/tf.array.cs +++ b/src/TensorFlowNET.Core/APIs/tf.array.cs @@ -65,7 +65,7 @@ namespace Tensorflow /// /// /// - public Tensor broadcast_to(Tensor input, TensorShape shape, string name = null) + public Tensor broadcast_to(Tensor input, Shape shape, string name = null) => gen_array_ops.broadcast_to(input, shape, name: name); public Tensor check_numerics(Tensor tensor, string message, string name = null) @@ -85,7 +85,7 @@ namespace Tensorflow return tf_with(ops.name_scope(name), scope => { var tensor = ops.convert_to_tensor(axis, name: "concat_dim", dtype: dtypes.int32); - Debug.Assert(tensor.TensorShape.ndim == 0); + Debug.Assert(tensor.shape.ndim == 0); return identity(values.First(), name: scope); }); } @@ -152,7 +152,7 @@ namespace Tensorflow /// /// /// - public Tensor transpose(T1 a, 
TensorShape perm = null, string name = "transpose", bool conjugate = false) + public Tensor transpose(T1 a, Shape perm = null, string name = "transpose", bool conjugate = false) => array_ops.transpose(a, perm, name, conjugate); /// @@ -246,7 +246,7 @@ namespace Tensorflow /// /// A `Tensor`. The default value to produce when output is not fed. /// - /// A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of + /// A `tf.Shape` or list of `int`s. The (possibly partial) shape of /// the tensor. /// /// A name for the operation (optional). diff --git a/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs b/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs index e5dc6a40..bef37843 100644 --- a/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs +++ b/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs @@ -25,7 +25,7 @@ namespace Tensorflow => tf.Context.graph_mode(); public IVariableV1 get_variable(string name, - TensorShape shape = null, + Shape shape = null, TF_DataType dtype = TF_DataType.DtInvalid, object initializer = null, // IInitializer or Tensor bool? trainable = null, diff --git a/src/TensorFlowNET.Core/APIs/tf.image.cs b/src/TensorFlowNET.Core/APIs/tf.image.cs index b0c71f71..9230b50d 100644 --- a/src/TensorFlowNET.Core/APIs/tf.image.cs +++ b/src/TensorFlowNET.Core/APIs/tf.image.cs @@ -58,7 +58,7 @@ namespace Tensorflow string name = null) => image_ops_impl.resize_images(images, size, method, preserve_aspect_ratio, antialias, name); - public Tensor resize_images_v2(Tensor images, TensorShape size, string method = ResizeMethod.BILINEAR, bool preserve_aspect_ratio = false, bool antialias = false, + public Tensor resize_images_v2(Tensor images, Shape size, string method = ResizeMethod.BILINEAR, bool preserve_aspect_ratio = false, bool antialias = false, string name = null) => image_ops_impl.resize_images_v2(images, size, method, preserve_aspect_ratio, antialias, name); diff --git a/src/TensorFlowNET.Core/APIs/tf.init.cs b/src/TensorFlowNET.Core/APIs/tf.init.cs index b5ebc469..0681258e 100644 --- a/src/TensorFlowNET.Core/APIs/tf.init.cs +++ b/src/TensorFlowNET.Core/APIs/tf.init.cs @@ -96,7 +96,7 @@ namespace Tensorflow seed: seed, dtype: dtype); - public IInitializer zeros_initializer(TensorShape shape = null, + public IInitializer zeros_initializer(Shape shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT) => new Zeros(shape: shape, dtype: dtype); } diff --git a/src/TensorFlowNET.Core/APIs/tf.linalg.cs b/src/TensorFlowNET.Core/APIs/tf.linalg.cs index 6687839c..7d4e418a 100644 --- a/src/TensorFlowNET.Core/APIs/tf.linalg.cs +++ b/src/TensorFlowNET.Core/APIs/tf.linalg.cs @@ -27,7 +27,7 @@ namespace Tensorflow public Tensor eye(int num_rows, int num_columns = -1, - TensorShape batch_shape = null, + Shape batch_shape = null, TF_DataType dtype = TF_DataType.TF_DOUBLE, string name = null) => ops.eye(num_rows, num_columns: num_columns, batch_shape: batch_shape, dtype: dtype, name: name); diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index 83f63c86..3d337286 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -50,7 +50,7 @@ namespace Tensorflow Tensor maxlength = null, TF_DataType dtype = TF_DataType.TF_INT32, string name = null, - TensorShape axis = null, + Shape axis = null, bool binary_output = false) => math_ops.bincount(arr, weights: weights, minlength: minlength, maxlength: maxlength, dtype: dtype, name: name, axis: axis, binary_output: binary_output); @@ -327,7 +327,7 @@ namespace Tensorflow => 
gen_math_ops.log(x, name); public Tensor equal(Tensor x, Tensor y, string name = null) - => gen_math_ops.equal(x, y, name); + => gen_math_ops.equal(x, y, name: name); /// /// Computes arctangent of `y/x` element-wise, respecting signs of the arguments. @@ -453,7 +453,7 @@ namespace Tensorflow public static Tensor truediv(Tensor x, Tensor y, string name = null) => math_ops.truediv(x, y, name: name); - public Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range") + public Tensor range(object start, object limit = null, object delta = null, TF_DataType? dtype = null, string name = "range") => math_ops.range(start, limit: limit, delta: delta, dtype: dtype, name: name); public Tensor real(Tensor input, string name = null) @@ -522,7 +522,7 @@ namespace Tensorflow return math_ops.reduce_sum(input, keepdims: keepdims, name: name); } - public Tensor reduce_sum(Tensor input, TensorShape axis, int? reduction_indices = null, + public Tensor reduce_sum(Tensor input, Shape axis, int? reduction_indices = null, bool keepdims = false, string name = null) => math_ops.reduce_sum(input, axis, keepdims: keepdims, name: name); diff --git a/src/TensorFlowNET.Core/APIs/tf.queue.cs b/src/TensorFlowNET.Core/APIs/tf.queue.cs index e3258813..a4757890 100644 --- a/src/TensorFlowNET.Core/APIs/tf.queue.cs +++ b/src/TensorFlowNET.Core/APIs/tf.queue.cs @@ -32,7 +32,7 @@ namespace Tensorflow /// public PaddingFIFOQueue PaddingFIFOQueue(int capacity, TF_DataType[] dtypes, - TensorShape[] shapes, + Shape[] shapes, string[] names = null, string shared_name = null, string name = "padding_fifo_queue") @@ -45,7 +45,7 @@ namespace Tensorflow public PaddingFIFOQueue PaddingFIFOQueue(int capacity, TF_DataType dtype, - TensorShape shape, + Shape shape, string shared_name = null, string name = "padding_fifo_queue") => new PaddingFIFOQueue(capacity, @@ -66,7 +66,7 @@ namespace Tensorflow /// public FIFOQueue FIFOQueue(int capacity, TF_DataType[] dtypes, - TensorShape[] shapes = null, + Shape[] shapes = null, string[] names = null, string shared_name = null, string name = "fifo_queue") @@ -79,12 +79,12 @@ namespace Tensorflow public FIFOQueue FIFOQueue(int capacity, TF_DataType dtype, - TensorShape shape = null, + Shape shape = null, string shared_name = null, string name = "fifo_queue") => new FIFOQueue(capacity, new[] { dtype }, - new[] { shape ?? new TensorShape() }, + new[] { shape ?? Shape.Null }, shared_name: shared_name, name: name); @@ -99,26 +99,26 @@ namespace Tensorflow /// public PriorityQueue PriorityQueue(int capacity, TF_DataType dtype, - TensorShape shape = null, + Shape shape = null, string shared_name = null, string name = "priority_queue") => new PriorityQueue(capacity, new[] { dtype }, - new[] { shape ?? new TensorShape() }, + new[] { shape ?? Shape.Null }, shared_name: shared_name, name: name); public RandomShuffleQueue RandomShuffleQueue(int capacity, int min_after_dequeue, TF_DataType dtype, - TensorShape shape = null, + Shape shape = null, int? seed = null, string shared_name = null, string name = "random_shuffle_queue") => new RandomShuffleQueue(capacity, min_after_dequeue: min_after_dequeue, new[] { dtype }, - new[] { shape ?? new TensorShape() }, + new[] { shape ?? 
Shape.Null }, seed: seed, shared_name: shared_name, name: name); diff --git a/src/TensorFlowNET.Core/APIs/tf.random.cs b/src/TensorFlowNET.Core/APIs/tf.random.cs index 877c52ac..bfc203a6 100644 --- a/src/TensorFlowNET.Core/APIs/tf.random.cs +++ b/src/TensorFlowNET.Core/APIs/tf.random.cs @@ -32,7 +32,7 @@ namespace Tensorflow /// /// /// - public Tensor normal(TensorShape shape, + public Tensor normal(Shape shape, float mean = 0.0f, float stddev = 1.0f, TF_DataType dtype = TF_DataType.TF_FLOAT, @@ -49,7 +49,7 @@ namespace Tensorflow /// /// /// - public Tensor truncated_normal(TensorShape shape, + public Tensor truncated_normal(Shape shape, float mean = 0.0f, float stddev = 1.0f, TF_DataType dtype = TF_DataType.TF_FLOAT, @@ -63,7 +63,7 @@ namespace Tensorflow string name = null, TF_DataType output_dtype = TF_DataType.DtInvalid) => random_ops.multinomial(logits, num_samples, seed: seed, name: name, output_dtype: output_dtype); - public Tensor uniform(TensorShape shape, + public Tensor uniform(Shape shape, float minval = 0, float maxval = 1, TF_DataType dtype = TF_DataType.TF_FLOAT, @@ -77,7 +77,7 @@ namespace Tensorflow } } - public Tensor random_uniform(TensorShape shape, + public Tensor random_uniform(Shape shape, float minval = 0, float maxval = 1, TF_DataType dtype = TF_DataType.TF_FLOAT, @@ -85,7 +85,7 @@ namespace Tensorflow string name = null) => random.uniform(shape, minval: minval, maxval: maxval, dtype: dtype, seed: seed, name: name); - public Tensor truncated_normal(TensorShape shape, + public Tensor truncated_normal(Shape shape, float mean = 0.0f, float stddev = 1.0f, TF_DataType dtype = TF_DataType.TF_FLOAT, diff --git a/src/TensorFlowNET.Core/APIs/tf.reshape.cs b/src/TensorFlowNET.Core/APIs/tf.reshape.cs index 9702e1dd..cdd5194a 100644 --- a/src/TensorFlowNET.Core/APIs/tf.reshape.cs +++ b/src/TensorFlowNET.Core/APIs/tf.reshape.cs @@ -19,7 +19,7 @@ namespace Tensorflow public partial class tensorflow { public Tensor reshape(Tensor tensor, - TensorShape shape, + Shape shape, string name = null) => gen_array_ops.reshape(tensor, shape, name); diff --git a/src/TensorFlowNET.Core/APIs/tf.sparse.cs b/src/TensorFlowNET.Core/APIs/tf.sparse.cs index 7de02f33..f124f610 100644 --- a/src/TensorFlowNET.Core/APIs/tf.sparse.cs +++ b/src/TensorFlowNET.Core/APIs/tf.sparse.cs @@ -47,7 +47,7 @@ namespace Tensorflow /// /// Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`. public Tensor sparse_to_dense(Tensor sparse_indices, - TensorShape output_shape, + Shape output_shape, T sparse_values, T default_value = default, bool validate_indices = true, diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs index 7066ff82..be03e453 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tile.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs @@ -25,7 +25,7 @@ namespace Tensorflow public Tensor tile(Tensor input, object[] multiples, string name = null) => gen_array_ops.tile(input, multiples, name); - public Tensor tile(Tensor input, TensorShape multiples, string name = null) + public Tensor tile(Tensor input, Shape multiples, string name = null) { var multiples_tensor = constant_op.constant(multiples); return gen_array_ops.tile(input, multiples_tensor, name); diff --git a/src/TensorFlowNET.Core/Binding.Util.cs b/src/TensorFlowNET.Core/Binding.Util.cs index 91888e4b..846efe01 100644 --- a/src/TensorFlowNET.Core/Binding.Util.cs +++ b/src/TensorFlowNET.Core/Binding.Util.cs @@ -168,7 +168,7 @@ namespace Tensorflow return ndArray.ndim == 0 ? 
1 : (int)ndArray.dims[0]; case IEnumerable enumerable: return enumerable.OfType().Count(); - case TensorShape arr: + case Shape arr: return arr.ndim; } throw new NotImplementedException("len() not implemented for type: " + a.GetType()); @@ -532,7 +532,6 @@ namespace Tensorflow var type = data.GetType(); switch (data) { - case TensorShape: case Shape: return TF_DataType.TF_INT64; case Axis: diff --git a/src/TensorFlowNET.Core/Binding.cs b/src/TensorFlowNET.Core/Binding.cs index b0ee8de2..a257dd6c 100644 --- a/src/TensorFlowNET.Core/Binding.cs +++ b/src/TensorFlowNET.Core/Binding.cs @@ -8,12 +8,12 @@ namespace Tensorflow /// /// Alias to null, similar to python's None. - /// For TensorShape, please use Unknown + /// For Shape, please use Unknown /// public static readonly object None = null; /// - /// Used for TensorShape None + /// Used for Shape None /// /// public static readonly int Unknown = -1; diff --git a/src/TensorFlowNET.Core/Data/DatasetManager.cs b/src/TensorFlowNET.Core/Data/DatasetManager.cs index a038a533..b5518505 100644 --- a/src/TensorFlowNET.Core/Data/DatasetManager.cs +++ b/src/TensorFlowNET.Core/Data/DatasetManager.cs @@ -6,7 +6,7 @@ namespace Tensorflow { public class DatasetManager { - public IDatasetV2 from_generator(IEnumerable generator, TF_DataType[] output_types, TensorShape[] output_shapes) + public IDatasetV2 from_generator(IEnumerable generator, TF_DataType[] output_types, Shape[] output_shapes) => new GeneratorDataset(); /// diff --git a/src/TensorFlowNET.Core/Data/DatasetV2.cs b/src/TensorFlowNET.Core/Data/DatasetV2.cs index b460501b..103d7cff 100644 --- a/src/TensorFlowNET.Core/Data/DatasetV2.cs +++ b/src/TensorFlowNET.Core/Data/DatasetV2.cs @@ -19,7 +19,7 @@ namespace Tensorflow public TensorSpec[] structure { get; set; } - public TensorShape[] output_shapes => structure.Select(x => x.shape).ToArray(); + public Shape[] output_shapes => structure.Select(x => x.shape).ToArray(); public TF_DataType[] output_types => structure.Select(x => x.dtype).ToArray(); diff --git a/src/TensorFlowNET.Core/Data/IDatasetV2.cs b/src/TensorFlowNET.Core/Data/IDatasetV2.cs index 88d8bcb6..5cfeb27c 100644 --- a/src/TensorFlowNET.Core/Data/IDatasetV2.cs +++ b/src/TensorFlowNET.Core/Data/IDatasetV2.cs @@ -10,7 +10,7 @@ namespace Tensorflow Tensor variant_tensor { get; set; } - TensorShape[] output_shapes { get; } + Shape[] output_shapes { get; } TF_DataType[] output_types { get; } diff --git a/src/TensorFlowNET.Core/Data/OwnedIterator.cs b/src/TensorFlowNET.Core/Data/OwnedIterator.cs index 0a955929..eb91272c 100644 --- a/src/TensorFlowNET.Core/Data/OwnedIterator.cs +++ b/src/TensorFlowNET.Core/Data/OwnedIterator.cs @@ -37,7 +37,7 @@ namespace Tensorflow { var results = ops.iterator_get_next(_iterator_resource, _dataset.output_types, _dataset.output_shapes); foreach(var (i, tensor) in enumerate(results)) - tensor.set_shape(_element_spec[i].shape); + tensor.shape = _element_spec[i].shape; return results; } catch (OutOfRangeError ex) diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs index ddf815e3..5a491dd7 100644 --- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs +++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs @@ -298,7 +298,7 @@ namespace Tensorflow.Eager c_api.TFE_OpSetAttrStringList(op, key, values3, values3.Select(x => Convert.ToUInt64(x.Length)).ToArray(), values3.Length); attr_list_sizes[key] = values3.Length; } - else if (type == 
TF_AttrType.TF_ATTR_SHAPE && values is TensorShape[] values1) + else if (type == TF_AttrType.TF_ATTR_SHAPE && values is Shape[] values1) { // Make one pass through the input counting the total number of // dims across all the input lists. diff --git a/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs b/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs index fbb4b6a2..e503c3ca 100644 --- a/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs +++ b/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs @@ -3,6 +3,6 @@ public partial class EagerTensor { public override string ToString() - => $"tf.Tensor: shape={TensorShape}, dtype={dtype.as_numpy_name()}, numpy={tensor_util.to_numpy_string(this)}"; + => $"tf.Tensor: shape={shape}, dtype={dtype.as_numpy_name()}, numpy={tensor_util.to_numpy_string(this)}"; } } diff --git a/src/TensorFlowNET.Core/Eager/EagerTensor.cs b/src/TensorFlowNET.Core/Eager/EagerTensor.cs index f1ce52a5..addb93de 100644 --- a/src/TensorFlowNET.Core/Eager/EagerTensor.cs +++ b/src/TensorFlowNET.Core/Eager/EagerTensor.cs @@ -17,12 +17,6 @@ namespace Tensorflow.Eager public override int rank => c_api.TFE_TensorHandleNumDims(EagerTensorHandle, tf.Status.Handle); - public override void set_shape(TensorShape shape) - { - if (!shape.is_compatible_with(this.shape)) - throw new ValueError($"Tensor's shape is not compatible."); - } - public static int GetRank(IntPtr handle) { var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); diff --git a/src/TensorFlowNET.Core/Framework/Models/DenseSpec.cs b/src/TensorFlowNET.Core/Framework/Models/DenseSpec.cs index 37747cca..1af29e22 100644 --- a/src/TensorFlowNET.Core/Framework/Models/DenseSpec.cs +++ b/src/TensorFlowNET.Core/Framework/Models/DenseSpec.cs @@ -5,8 +5,8 @@ /// public class DenseSpec : TypeSpec { - protected TensorShape _shape; - public TensorShape shape => _shape; + protected Shape _shape; + public Shape shape => _shape; protected TF_DataType _dtype; public TF_DataType dtype => _dtype; @@ -14,7 +14,7 @@ protected string _name; public string name => _name; - public DenseSpec(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + public DenseSpec(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { _shape = shape; _dtype = dtype; diff --git a/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs b/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs index 5f333547..b6a279db 100644 --- a/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs +++ b/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs @@ -4,7 +4,7 @@ namespace Tensorflow.Framework.Models { public class TensorSpec : DenseSpec { - public TensorSpec(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) : + public TensorSpec(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) : base(shape, dtype, name) { diff --git a/src/TensorFlowNET.Core/Framework/common_shapes.py.cs b/src/TensorFlowNET.Core/Framework/common_shapes.py.cs index b067cf95..9bb793da 100644 --- a/src/TensorFlowNET.Core/Framework/common_shapes.py.cs +++ b/src/TensorFlowNET.Core/Framework/common_shapes.py.cs @@ -34,8 +34,8 @@ namespace Tensorflow.Framework /// /// Helper functions for is_broadcast_compatible and broadcast_shape. /// - /// A `TensorShape` - /// A `TensorShape` + /// A `Shape` + /// A `Shape` /// Returns None if the shapes are not broadcast compatible, /// a list of the broadcast dimensions otherwise. 
/// @@ -51,7 +51,7 @@ namespace Tensorflow.Framework public static bool has_fully_defined_shape(Tensor tensor) { - return tensor.TensorShape.is_fully_defined(); + return tensor.shape.IsFullyDefined; } } } diff --git a/src/TensorFlowNET.Core/Framework/tensor_shape.cs b/src/TensorFlowNET.Core/Framework/tensor_shape.cs index c88fb876..2ad1a11b 100644 --- a/src/TensorFlowNET.Core/Framework/tensor_shape.cs +++ b/src/TensorFlowNET.Core/Framework/tensor_shape.cs @@ -28,7 +28,7 @@ namespace Tensorflow.Framework { bool _shape_is_compatible_0dim(Shape _this, Shape _other) { - var __other = tensor_shape.as_shape(_other); + var __other = _other; if (_this.dims == null || __other.dims == null) return true; @@ -54,9 +54,9 @@ namespace Tensorflow.Framework !self.IsSparseTensor; } - public static Dimension dimension_at_index(TensorShape shape, int index) + public static Dimension dimension_at_index(Shape shape, int index) { - return shape.rank < 0 ? + return shape.ndim < 0 ? new Dimension(-1) : new Dimension(shape.dims[index]); } @@ -64,19 +64,16 @@ namespace Tensorflow.Framework public static int dimension_value(Dimension dimension) => (int)dimension.value; - public static TensorShape as_shape(this Shape shape) - => new TensorShape(shape.dims); - - public static TensorShape most_specific_compatible_shape(this TensorShape self, TensorShape other) + public static Shape most_specific_compatible_shape(this Shape self, Shape other) { - var dims = range(self.rank).Select(x => -1L).ToArray(); + var dims = range(self.ndim).Select(x => -1L).ToArray(); foreach(var (i, (d1, d2)) in enumerate(zip(self.dims, other.dims))) { if (d1 == d2) dims[i] = d1; } - return new TensorShape(dims); + return new Shape(dims); } } } diff --git a/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs b/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs index c1f9788c..affc0b61 100644 --- a/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs +++ b/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs @@ -72,7 +72,7 @@ namespace Tensorflow.Functions } /*public ConcreteFunction(Func func, - TF_DataType[] dtypes, TensorShape[] shapes) + TF_DataType[] dtypes, Shape[] shapes) { string func_name = $"{func.Method.Name}_{ops.uid_function()}"; diff --git a/src/TensorFlowNET.Core/Gradients/TapeTensor.cs b/src/TensorFlowNET.Core/Gradients/TapeTensor.cs index be030321..fe24e1d1 100644 --- a/src/TensorFlowNET.Core/Gradients/TapeTensor.cs +++ b/src/TensorFlowNET.Core/Gradients/TapeTensor.cs @@ -6,9 +6,9 @@ namespace Tensorflow.Gradients { long id; TF_DataType dtype; - TensorShape shape; + Shape shape; - public TapeTensor(long id, TF_DataType dtype, TensorShape shape) + public TapeTensor(long id, TF_DataType dtype, Shape shape) { this.id = id; this.dtype = dtype; diff --git a/src/TensorFlowNET.Core/Gradients/gradients_util.cs b/src/TensorFlowNET.Core/Gradients/gradients_util.cs index 1f401a7f..f4c714ee 100644 --- a/src/TensorFlowNET.Core/Gradients/gradients_util.cs +++ b/src/TensorFlowNET.Core/Gradients/gradients_util.cs @@ -234,7 +234,7 @@ namespace Tensorflow in_grad.Tag == null && // maybe a IndexedSlice t_in.dtype != TF_DataType.TF_RESOURCE) { - in_grad.set_shape(t_in.TensorShape); + in_grad.shape = t_in.shape; } _SetGrad(grads, t_in, in_grad); diff --git a/src/TensorFlowNET.Core/Gradients/image_grad.cs b/src/TensorFlowNET.Core/Gradients/image_grad.cs index fd7f098f..3c26f389 100644 --- a/src/TensorFlowNET.Core/Gradients/image_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/image_grad.cs @@ -27,9 +27,9 @@ namespace Tensorflow.Gradients { var grad 
= grads[0]; var image = op.inputs[0]; - var shape = new TensorShape(image.shape.dims.Skip(1).Take(2).ToArray()); + var shape = new Shape(image.shape.dims.Skip(1).Take(2).ToArray()); Tensor image_shape = null; - if (shape.is_fully_defined()) + if (shape.IsFullyDefined) image_shape = constant_op.constant(image.shape.dims.Skip(1).Take(2).ToArray()); else image_shape = array_ops.shape(image)["1:3"]; diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index 4eb1087e..d927b85e 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -810,8 +810,8 @@ namespace Tensorflow.Gradients private static (Tensor, Tensor, bool)[] SmartBroadcastGradientArgs(Tensor x, Tensor y, Tensor grad) { Tensor sx, sy; - if (x.TensorShape.is_fully_defined() && - y.TensorShape.is_fully_defined()) + if (x.shape.IsFullyDefined && + y.shape.IsFullyDefined) { sx = array_ops.shape(x); sy = array_ops.shape(y); @@ -825,8 +825,8 @@ namespace Tensorflow.Gradients var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); return new[] { - (sx, rx, !x.TensorShape.Equals(grad.TensorShape)), - (sy, ry, !y.TensorShape.Equals(grad.TensorShape)) + (sx, rx, !x.shape.Equals(grad.shape)), + (sy, ry, !y.shape.Equals(grad.shape)) }; } } diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs index 4c316ad3..a6113944 100644 --- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs @@ -17,6 +17,7 @@ using System; using System.Linq; using Tensorflow.Operations; +using static Tensorflow.Binding; namespace Tensorflow.Gradients { @@ -323,7 +324,7 @@ namespace Tensorflow.Gradients // Compute linear indices(flattened to 1D). 
var cast1 = math_ops.cast(outerdim, TF_DataType.TF_INT64); - var range2 = math_ops.range(0L, cast1 * in_lastdim, in_lastdim); + var range2 = math_ops.range(tf.constant(0L), cast1 * in_lastdim, in_lastdim); var dim2 = array_ops.expand_dims(range2, -1); var cast2 = math_ops.cast(dim2, TF_DataType.TF_INT32); var ind = array_ops.reshape(ind_2d + cast2, new int[] { -1 }); diff --git a/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs b/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs index 6de42037..a64713ae 100644 --- a/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs +++ b/src/TensorFlowNET.Core/Gradients/ops.gradient_function_mapping.cs @@ -56,7 +56,7 @@ namespace Tensorflow null, args: new object[] { oper, out_grads }) as Tensor[]; foreach (var result in results.Where(x => x != null)) - tf.Logger.Debug($"Gradient: {result.name} {result.TensorShape}"); + tf.Logger.Debug($"Gradient: {result.name} {result.shape}"); return results; } ); diff --git a/src/TensorFlowNET.Core/Graphs/AutoGraphAttribute.cs b/src/TensorFlowNET.Core/Graphs/AutoGraphAttribute.cs index 133c29a4..31cc9c0b 100644 --- a/src/TensorFlowNET.Core/Graphs/AutoGraphAttribute.cs +++ b/src/TensorFlowNET.Core/Graphs/AutoGraphAttribute.cs @@ -44,7 +44,7 @@ namespace Tensorflow.Graphs if (args.Arguments[0] is Tensors inputs) { originalInputs = inputs; - var new_inputs = inputs.Select(x => tf.placeholder(x.dtype, shape: x.TensorShape, name: "inputs")).ToArray(); + var new_inputs = inputs.Select(x => tf.placeholder(x.dtype, shape: x.shape, name: "inputs")).ToArray(); args.Arguments[0] = new Tensors(new_inputs); } else @@ -56,7 +56,7 @@ namespace Tensorflow.Graphs if (args.Arguments[i] is EagerTensor tensor) { originalInputs.Add(tensor); - args.Arguments[i] = tf.placeholder(tensor.dtype, shape: tensor.TensorShape, name: "inputs"); + args.Arguments[i] = tf.placeholder(tensor.dtype, shape: tensor.shape, name: "inputs"); } } } diff --git a/src/TensorFlowNET.Core/Graphs/FuncGraph.cs b/src/TensorFlowNET.Core/Graphs/FuncGraph.cs index e2db2ca2..33207892 100644 --- a/src/TensorFlowNET.Core/Graphs/FuncGraph.cs +++ b/src/TensorFlowNET.Core/Graphs/FuncGraph.cs @@ -103,7 +103,7 @@ namespace Tensorflow.Graphs } const int _EAGER_CONST_THRESHOLD = 128; - public Tensor capture(Tensor tensor, string name = null, TensorShape shape = null) + public Tensor capture(Tensor tensor, string name = null, Shape shape = null) { if(tensor is EagerTensor) { @@ -167,7 +167,7 @@ namespace Tensorflow.Graphs return graph_const; } - Tensor _capture_helper(Tensor tensor, string name, TensorShape shape = null) + Tensor _capture_helper(Tensor tensor, string name, Shape shape = null) { Tensor placeholder = null; if (!_captures.ContainsKey(tensor.Id)) @@ -206,7 +206,7 @@ namespace Tensorflow.Graphs Tensor _create_substitute_placeholder(Tensor value, string name = null, TF_DataType dtype = TF_DataType.DtInvalid, - TensorShape shape = null) + Shape shape = null) { if (shape is null) shape = value.shape; diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs index 85e8fd94..ff05aad9 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.cs @@ -515,20 +515,20 @@ namespace Tensorflow return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false); } - public TensorShape GetTensorShape(TF_Output output) + public Shape GetTensorShape(TF_Output output) { var status = tf.Status; var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, 
status.Handle); status.Check(); if (ndim == -1) - return new TensorShape(); + return Shape.Null; var dims = new long[ndim]; c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status.Handle); status.Check(); - return new TensorShape(dims.Select(x => (int)x).ToArray()); + return new Shape(dims.Select(x => (int)x).ToArray()); } public virtual void Exit() diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/ConvolutionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/ConvolutionalArgs.cs index d70b7d67..4f050228 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/ConvolutionalArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/ConvolutionalArgs.cs @@ -8,16 +8,16 @@ namespace Tensorflow.Keras.ArgsDefinition public int Rank { get; set; } = 2; public int Filters { get; set; } public int NumSpatialDims { get; set; } = Unknown; - public TensorShape KernelSize { get; set; } = 5; + public Shape KernelSize { get; set; } = 5; /// /// specifying the stride length of the convolution. /// - public TensorShape Strides { get; set; } = (1, 1); + public Shape Strides { get; set; } = (1, 1); public string Padding { get; set; } = "valid"; public string DataFormat { get; set; } - public TensorShape DilationRate { get; set; } = (1, 1); + public Shape DilationRate { get; set; } = (1, 1); public int Groups { get; set; } = 1; public Activation Activation { get; set; } public bool UseBias { get; set; } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/LayerArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/LayerArgs.cs index f86eca12..4df4fb2b 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/LayerArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/LayerArgs.cs @@ -25,12 +25,12 @@ /// /// Only applicable to input layers. /// - public TensorShape InputShape { get; set; } + public Shape InputShape { get; set; } /// /// Only applicable to input layers. /// - public TensorShape BatchInputShape { get; set; } + public Shape BatchInputShape { get; set; } public int BatchSize { get; set; } = -1; diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Normalization/BatchNormalizationArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Normalization/BatchNormalizationArgs.cs index 56a69830..954ede57 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Normalization/BatchNormalizationArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Normalization/BatchNormalizationArgs.cs @@ -4,7 +4,7 @@ namespace Tensorflow.Keras.ArgsDefinition { public class BatchNormalizationArgs : LayerArgs { - public TensorShape Axis { get; set; } = -1; + public Shape Axis { get; set; } = -1; public float Momentum { get; set; } = 0.99f; public float Epsilon { get; set; } = 1e-3f; public bool Center { get; set; } = true; diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/Pooling2DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/Pooling2DArgs.cs index 9fafb658..1260af4c 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/Pooling2DArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/Pooling2DArgs.cs @@ -10,12 +10,12 @@ /// /// specifying the size of the pooling window. /// - public TensorShape PoolSize { get; set; } + public Shape PoolSize { get; set; } /// /// specifying the strides of the pooling operation. /// - public TensorShape Strides { get; set; } + public Shape Strides { get; set; } /// /// The padding method, either 'valid' or 'same'. 
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Regularization/DropoutArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Regularization/DropoutArgs.cs index 186281ea..c41c6fe8 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Regularization/DropoutArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Regularization/DropoutArgs.cs @@ -11,7 +11,7 @@ /// 1D integer tensor representing the shape of the /// binary dropout mask that will be multiplied with the input. /// - public TensorShape NoiseShape { get; set; } + public Shape NoiseShape { get; set; } /// /// random seed. diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/ReshapeArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/ReshapeArgs.cs index 0a260b74..77bca8ad 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/ReshapeArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/ReshapeArgs.cs @@ -2,7 +2,7 @@ { public class ReshapeArgs : LayerArgs { - public TensorShape TargetShape { get; set; } + public Shape TargetShape { get; set; } public object[] TargetShapeObjects { get; set; } } } diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/UpSampling2DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/UpSampling2DArgs.cs index 049010ad..7fdda32d 100644 --- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/UpSampling2DArgs.cs +++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Reshaping/UpSampling2DArgs.cs @@ -2,7 +2,7 @@ { public class UpSampling2DArgs : LayerArgs { - public TensorShape Size { get; set; } + public Shape Size { get; set; } public string DataFormat { get; set; } /// /// 'nearest', 'bilinear' diff --git a/src/TensorFlowNET.Core/Keras/Engine/InputSpec.cs b/src/TensorFlowNET.Core/Keras/Engine/InputSpec.cs index 198e8162..7280594b 100644 --- a/src/TensorFlowNET.Core/Keras/Engine/InputSpec.cs +++ b/src/TensorFlowNET.Core/Keras/Engine/InputSpec.cs @@ -27,14 +27,14 @@ namespace Tensorflow.Keras.Engine public int? ndim; public int? min_ndim; Dictionary axes; - TensorShape shape; + Shape shape; public int[] AllAxisDim; public InputSpec(TF_DataType dtype = TF_DataType.DtInvalid, int? ndim = null, int? 
min_ndim = null, Dictionary axes = null, - TensorShape shape = null) + Shape shape = null) { this.ndim = ndim; if (axes == null) diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs index 4e23b11a..271fece0 100644 --- a/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs +++ b/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs @@ -16,8 +16,8 @@ namespace Tensorflow.Keras List trainable_variables { get; } List trainable_weights { get; } List non_trainable_weights { get; } - TensorShape output_shape { get; } - TensorShape BatchInputShape { get; } + Shape output_shape { get; } + Shape BatchInputShape { get; } TF_DataType DType { get; } int count_params(); LayerArgs get_config(); diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs index 6ad41ff5..32007ea5 100644 --- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs +++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs @@ -28,7 +28,7 @@ namespace Tensorflow.NumPy diag_len = N + k; } - var diagonal_ = array_ops.ones(new TensorShape(diag_len), dtype: dtype); + var diagonal_ = array_ops.ones(new Shape(diag_len), dtype: dtype); var tensor = array_ops.matrix_diag(diagonal: diagonal_, num_rows: N, num_cols: M.Value, k: k); return new NDArray(tensor); } @@ -38,13 +38,13 @@ namespace Tensorflow.NumPy { var start_tensor = array_ops.constant(start, dtype: dtype); var stop_tensor = array_ops.constant(stop, dtype: dtype); - var num_tensor = array_ops.constant(num); + // var step_tensor = array_ops.constant(np.nan); Tensor result = null; if (endpoint) { - result = math_ops.linspace(start_tensor, stop_tensor, num_tensor, axis: axis); + result = math_ops.linspace(start_tensor, stop_tensor, num, axis: axis); } else { @@ -53,10 +53,10 @@ namespace Tensorflow.NumPy var step = (stop_tensor - start_tensor) / num; var new_stop = math_ops.cast(stop_tensor, step.dtype) - step; start_tensor = math_ops.cast(start_tensor, new_stop.dtype); - result = math_ops.linspace(start_tensor, new_stop, num_tensor, axis: axis); + result = math_ops.linspace(start_tensor, new_stop, num, axis: axis); } else - result = math_ops.linspace(start_tensor, stop_tensor, num_tensor, axis: axis); + result = math_ops.linspace(start_tensor, stop_tensor, num, axis: axis); } return new NDArray(result); diff --git a/src/TensorFlowNET.Core/Numpy/Shape.cs b/src/TensorFlowNET.Core/Numpy/Shape.cs index c0b6048d..936d06fe 100644 --- a/src/TensorFlowNET.Core/Numpy/Shape.cs +++ b/src/TensorFlowNET.Core/Numpy/Shape.cs @@ -7,16 +7,27 @@ namespace Tensorflow { public class Shape { - public int ndim => _dims.Length; + public int ndim => _dims == null ? -1 : _dims.Length; long[] _dims; public long[] dims => _dims; - public Shape() + private Shape() { } + public Shape(TensorShapeProto proto) + { + _dims = proto.Dim.Select(x => x.Size).ToArray(); + } + + public void Deconstruct(out long h, out long w) + { + h = dims[0]; + w = dims[1]; + } + public Shape(params int[] dims) - => _dims = dims.Select(x => Convert.ToInt64(x)).ToArray(); + => _dims = dims?.Select(x => Convert.ToInt64(x))?.ToArray(); public Shape(params long[] dims) => _dims = dims; @@ -25,10 +36,10 @@ namespace Tensorflow => new Shape(dims); public static implicit operator Shape(long[] dims) - => new Shape(dims); + => dims == null ? null : new Shape(dims); public static implicit operator Shape(int[] dims) - => new Shape(dims); + => dims == null ? 
null : new Shape(dims); public static implicit operator Shape((int, int) dims) => new Shape(dims.Item1, dims.Item2); @@ -57,16 +68,39 @@ namespace Tensorflow public bool IsEmpty => size == 0; public bool IsScalar => ndim == 0; + public bool IsNull => _dims == null; + + public bool IsFullyDefined => ndim > -1 && dims.Count(x => x < 1) == 0; + + public static Shape Scalar => new Shape(new long[0]); + public static Shape Null => new Shape(); + + public long this[int n] + { + get => dims[n]; + set => dims[n] = value; + } + + public Shape this[Slice slice] + { + get + { + if (!slice.Stop.HasValue) + slice.Stop = dims.Length - slice.Start + 1; - public static Shape Scalar - => new Shape(new long[0]); + if (slice.Start.HasValue == false || slice.Length.HasValue == false) + throw new ArgumentException("Slice must has Start and Length."); - public long this[int n] => dims[n]; + return new Shape(dims.Skip(slice.Start.Value) + .Take(slice.Length.Value) + .ToArray()); + } + } /// /// Returns the size this shape represents. /// - public ulong size + public long size { get { @@ -85,44 +119,133 @@ namespace Tensorflow computed *= val; } - return (ulong)computed; + return computed; } } - public bool is_fully_defined() - { - return ndim > -1 && dims != null && dims.Count(x => x < 1) == 0; - } - - public bool is_compatible_with(TensorShape shape2) + public bool is_compatible_with(Shape shape2) { if (dims != null && shape2.dims != null) { if (dims.Contains(-1) || shape2.dims.Contains(-1)) return true; - if (size != (ulong)shape2.size) + if (size != shape2.size) return false; } return true; } + public Shape with_rank_at_least(int rank) + { + if (ndim < rank) + throw new ValueError($"Shape {this} must have rank at least {rank}"); + else + return this; + } + + public Shape with_rank(int rank) + { + return merge_with(unknown_shape(rank: rank)); + } + + /// + /// Returns an unknown Shape, optionally with a known rank. + /// + /// + /// + public Shape unknown_shape(int rank = -1) + { + if (rank == -1) + return Shape.Null; + else + return new Shape(Enumerable.Repeat(-1L, rank).ToArray()); + } + + public Shape concatenate(long[] other) + { + return concatenate(new Shape(other)); + } + + /// + /// Returns the concatenation of the dimension in `self` and `other`. + /// + /// + /// + public Shape concatenate(Shape other) + { + var otherShape = other; + + if (ndim < 0 || otherShape.ndim < 0) + return Shape.Null; + else + { + var concatenate_dims = new long[ndim + otherShape.ndim]; + for (int i = 0; i < ndim; i++) + concatenate_dims[i] = dims[i]; + + for (int i = 0; i < otherShape.ndim; i++) + concatenate_dims[ndim + i] = otherShape.dims[i]; + + return new Shape(concatenate_dims); + } + } + + /// + /// Returns a `Shape` combining the information in `self` and `other`. 
+ /// + /// + /// + public Shape merge_with(Shape other) + { + if (dims == null) + return other; + + var new_dims = new List(); + + foreach (var i in Enumerable.Range(0, ndim)) + { + var dim = new Dimension(dims[i]); + var merged = dim.merge_with(new Dimension(other.dims[i])); + new_dims.Add(merged.value); + } + + return new Shape(new_dims.ToArray()); + } + + public void assert_has_rank(int rank) + { + if (rank != ndim) + throw new ValueError(String.Format("Shape {0} must have rank {1}", ndim, rank)); + } + public override bool Equals(object obj) { - if(obj is Shape shape) + switch (obj) { - if (shape.ndim != ndim) + case Shape shape1: + if (ndim == -1 && shape1.ndim == -1) + return false; + else if (ndim != shape1.ndim) + return false; + return Enumerable.SequenceEqual(shape1.dims, dims); + case long[] shape2: + if (ndim != shape2.Length) + return false; + return Enumerable.SequenceEqual(dims, shape2); + default: return false; - if (Enumerable.SequenceEqual(dims, shape.dims)) - return true; } - return base.Equals(obj); } public override string ToString() - { - return "(" + string.Join(", ", _dims) + ")"; - } + => ndim switch + { + -1 => "", + 0 => "()", + 1 => $"({dims[0]},)", + _ => $"{string.Join(", ", _dims).Replace("-1", "None")}" + }; } } diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs index da621999..a6390c79 100644 --- a/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs +++ b/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs @@ -261,7 +261,7 @@ namespace Tensorflow.Operations.ControlFlows public Tensor ZerosLikeForExit(Tensor val) { Tensor result = null; - var val_shape = val.TensorShape; + var val_shape = val.shape; var forward_ctxt = val.op._get_control_flow_context(); var outer_forward_ctxt = forward_ctxt.outer_context; if (outer_forward_ctxt != null) @@ -278,7 +278,7 @@ namespace Tensorflow.Operations.ControlFlows { // If the shape is known statically, just create a zero tensor // with the right shape. - if (val_shape.is_fully_defined()) + if (val_shape.IsFullyDefined) result = array_ops.zeros(val_shape.dims, val.dtype); else result = array_ops.zeros_like(val, optimize: false); @@ -299,8 +299,8 @@ namespace Tensorflow.Operations.ControlFlows // depend on its value at iteration i. So use zeros as the // gradients for all iterations > 0. var dtype = b_merge.op.inputs[0].dtype; - var shape = b_merge.op.inputs[0].TensorShape; - if (shape.is_fully_defined()) + var shape = b_merge.op.inputs[0].shape; + if (shape.IsFullyDefined) { grad_state.grad_context.Enter(); // Create a zeros and use it for iterations > 0. 
diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs index 5b7bb919..a807bdb5 100644 --- a/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs +++ b/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs @@ -258,7 +258,7 @@ namespace Tensorflow.Operations.ControlFlows throw new NotImplementedException("AddBackpropAccumulatedValue"); } pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.as_base_dtype()); - pop.set_shape(value.TensorShape); + pop.shape = value.shape; grad_context.Exit(); }); var parallel_iterations = grad_context.parallel_iterations; diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs index 8b584963..8bd430a8 100644 --- a/src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs +++ b/src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs @@ -117,7 +117,7 @@ namespace Tensorflow.Operations internal LoopVar BuildLoop(Func, Tensor> pred, Func, LoopVar> body, LoopVar loop_vars, - TensorShape[] shape_invariants, + Shape[] shape_invariants, bool return_same_structure) where TItem : IFromMergeVars, new() { // Keep original_loop_vars to identify which are TensorArrays @@ -159,9 +159,9 @@ namespace Tensorflow.Operations throw new NotImplementedException("_convert_tensorarray_to_flow"); } - private TensorShape _get_shape_invariant(Tensor var, int[] shape = null) + private Shape _get_shape_invariant(Tensor var, int[] shape = null) { - return var.TensorShape; + return var.shape; } /// @@ -178,7 +178,7 @@ namespace Tensorflow.Operations Func, LoopVar> body, LoopVar original_loop_vars, Tensor[] loop_vars, - TensorShape[] shape_invariants) where TItem : IFromMergeVars, new() + Shape[] shape_invariants) where TItem : IFromMergeVars, new() { var flat_loop_vars = nest.flatten2(original_loop_vars) .Select(x => (ITensorOrTensorArray)x) @@ -459,8 +459,8 @@ namespace Tensorflow.Operations // dynamically from the forward inference. Getting the shape right // for the zeros is only needed for the base case when the loop exits // without running any iterations. 
- var shape = grad.TensorShape; - if (shape.is_fully_defined()) + var shape = grad.shape; + if (shape.IsFullyDefined) { if (outer_context != null) outer_context.Enter(); diff --git a/src/TensorFlowNET.Core/Operations/Initializers/InitializerArgs.cs b/src/TensorFlowNET.Core/Operations/Initializers/InitializerArgs.cs index 756f33a7..9df8b5bd 100644 --- a/src/TensorFlowNET.Core/Operations/Initializers/InitializerArgs.cs +++ b/src/TensorFlowNET.Core/Operations/Initializers/InitializerArgs.cs @@ -3,11 +3,11 @@ public class InitializerArgs { public string Name { get; set; } - public TensorShape Shape { get; set; } + public Shape Shape { get; set; } public TF_DataType DType { get; set; } public bool VerifyShape { get; set; } - public InitializerArgs(TensorShape shape, + public InitializerArgs(Shape shape, TF_DataType dtype = TF_DataType.DtInvalid, bool verify_shape = false, string name = null) diff --git a/src/TensorFlowNET.Core/Operations/Initializers/Zeros.cs b/src/TensorFlowNET.Core/Operations/Initializers/Zeros.cs index a4de9508..5d045292 100644 --- a/src/TensorFlowNET.Core/Operations/Initializers/Zeros.cs +++ b/src/TensorFlowNET.Core/Operations/Initializers/Zeros.cs @@ -18,10 +18,10 @@ namespace Tensorflow.Operations.Initializers { public class Zeros : IInitializer { - TensorShape shape; + Shape shape; TF_DataType dtype; - public Zeros(TensorShape shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT) + public Zeros(Shape shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT) { this.shape = shape; this.dtype = dtype; diff --git a/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs b/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs index facb0aac..a412f07e 100644 --- a/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs +++ b/src/TensorFlowNET.Core/Operations/Losses/losses_impl.py.cs @@ -135,8 +135,8 @@ namespace Tensorflow if (weights > 0) { var weights_tensor = ops.convert_to_tensor(weights); - var labels_rank = labels.TensorShape.ndim; - var weights_shape = weights_tensor.TensorShape; + var labels_rank = labels.shape.ndim; + var weights_shape = weights_tensor.shape; var weights_rank = weights_shape.ndim; if (labels_rank > -1 && weights_rank > -1) diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs index 4641bd87..d3592514 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs @@ -46,7 +46,7 @@ namespace Tensorflow _activation = tf.nn.tanh(); } - protected override void build(TensorShape input_shape) + protected override void build(Shape input_shape) { var input_depth = input_shape.dims.Last(); var h_depth = _num_units; diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs index 1dc1db30..17d51363 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs @@ -50,7 +50,7 @@ namespace Tensorflow _activation = activation; } - protected override void build(TensorShape inputs_shape) + protected override void build(Shape inputs_shape) { var input_depth = inputs_shape.dims[inputs_shape.ndim - 1]; diff --git a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs index 67701430..0e041836 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs +++ 
b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs @@ -38,8 +38,8 @@ namespace Tensorflow.Operations public Tensor Apply(Tensors input, IVariableV1 filters) { - var filters_rank = filters.shape.rank; - var inputs_rank = input.shape.rank; + var filters_rank = filters.shape.ndim; + var inputs_rank = input.shape.ndim; var num_spatial_dims = args.NumSpatialDims; if (args.Rank == 1) { diff --git a/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs index 49bef229..7394cb7f 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs @@ -49,7 +49,7 @@ namespace Tensorflow _keras_style = false; } - protected virtual void build(TensorShape inputs_shape) + protected virtual void build(Shape inputs_shape) { } diff --git a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs index 42afc262..7c5b21b6 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs @@ -42,7 +42,7 @@ namespace Tensorflow /// This operation results in an output matrix with `self.output_size` columns. /// If `self.state_size` is an integer, this operation also results in a new /// state matrix with `self.state_size` columns. If `self.state_size` is a - /// (possibly nested tuple of) TensorShape object(s), then it should return a + /// (possibly nested tuple of) Shape object(s), then it should return a /// matching structure of Tensors having shape `[batch_size].concatenate(s)` /// for each `s` in `self.batch_size`. /// @@ -70,9 +70,9 @@ namespace Tensorflow public List trainable_weights => throw new NotImplementedException(); public List non_trainable_weights => throw new NotImplementedException(); - public TensorShape output_shape => throw new NotImplementedException(); + public Shape output_shape => throw new NotImplementedException(); - public TensorShape BatchInputShape => throw new NotImplementedException(); + public Shape BatchInputShape => throw new NotImplementedException(); public TF_DataType DType => throw new NotImplementedException(); protected bool built = false; diff --git a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs index 5130e763..164facca 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/rnn.cs @@ -118,14 +118,14 @@ namespace Tensorflow.Operations VariableScope varscope = scope1; // Obtain the first sequence of the input var first_input = inputs[0]; - if (first_input.TensorShape.rank != 1) + if (first_input.shape.ndim != 1) { - var input_shape = first_input.TensorShape.with_rank_at_least(2); + var input_shape = first_input.shape.with_rank_at_least(2); fixed_batch_size = input_shape.dims[0]; var flat_inputs = nest.flatten2(inputs); foreach (var flat_input in flat_inputs) { - input_shape = flat_input.TensorShape.with_rank_at_least(2); + input_shape = flat_input.shape.with_rank_at_least(2); batch_size = tensor_shape.dimension_at_index(input_shape, 0); var input_size = input_shape[new Slice(1)]; fixed_batch_size.merge_with(batch_size); @@ -138,7 +138,7 @@ namespace Tensorflow.Operations } } else - fixed_batch_size = first_input.TensorShape.with_rank_at_least(1).dims[0]; + fixed_batch_size = first_input.shape.with_rank_at_least(1).dims[0]; if (tensor_shape.dimension_value(fixed_batch_size) >= 0) batch_size = tensor_shape.dimension_value(fixed_batch_size); 
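The rnn.cs hunks above show the call-site pattern repeated throughout this patch: `Tensor.TensorShape` becomes the `Tensor.shape` property, `rank` becomes `ndim`, `is_fully_defined()` becomes the `IsFullyDefined` property, and `set_shape(...)` becomes a plain assignment to `shape`. A minimal before/after sketch, not part of the patch itself (tensor names are illustrative; the `Shape` overload of `tf.zeros` and the `long[]`-based `Shape` constructor used elsewhere in the diff are assumed):

    using Tensorflow;
    using static Tensorflow.Binding;

    var x = tf.zeros(new Shape(new long[] { 3, 4 }));

    var rank  = x.shape.ndim;             // was: x.TensorShape.rank
    var dims  = x.shape.dims;             // dims are long-valued on the new Shape
    var known = x.shape.IsFullyDefined;   // was: x.TensorShape.is_fully_defined()

    // set_shape(...) becomes a property assignment, e.g. in GradLoopState.cs above:
    //   old: pop.set_shape(value.TensorShape);
    //   new: pop.shape = value.shape;
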
@@ -243,7 +243,7 @@ namespace Tensorflow.Operations var input_shape = array_ops.shape(flat_input[0]); var time_steps = input_shape.slice(0); var batch_size = _best_effort_input_batch_size(flat_input); - var inputs_got_shape = flat_input.Select(input_ => input_.TensorShape.with_rank_at_least(3)).ToArray(); + var inputs_got_shape = flat_input.Select(input_ => input_.shape.with_rank_at_least(3)).ToArray(); var dims = inputs_got_shape[0].dims.Take(2).ToArray(); var (const_time_steps, const_batch_size) = (dims[0], dims[1]); @@ -292,7 +292,7 @@ namespace Tensorflow.Operations string base_name = null; tf_with(ops.name_scope("dynamic_rnn"), scope => base_name = scope); - Func _create_ta = (name, element_shape, dtype_) => + Func _create_ta = (name, element_shape, dtype_) => { var ta = new TensorArray(dtype: dtype_, size: time_steps, @@ -309,7 +309,7 @@ namespace Tensorflow.Operations foreach (var (i, out_size) in enumerate(flat_output_size)) { output_ta.Add(_create_ta($"output_{i}", - new TensorShape(const_batch_size).concatenate( + new Shape(const_batch_size).concatenate( _maybe_tensor_shape_from_tensor(out_size)), _infer_state_dtype(dtype, state))); } @@ -317,7 +317,7 @@ namespace Tensorflow.Operations foreach (var (i, flat_input_i) in enumerate(flat_input)) { input_ta.Add(_create_ta($"input_{i}", - new TensorShape(flat_input_i.dims.Skip(1).ToArray()), + new Shape(flat_input_i.dims.Skip(1).ToArray()), flat_input_i.dtype)); } @@ -350,7 +350,7 @@ namespace Tensorflow.Operations input_t = input_ta.Select(ta => ta.read(time1)).ToArray(); // Restore some shape information foreach (var (input_, shape) in zip(input_t, inputs_got_shape)) - input_.set_shape(shape[new Slice(1)]); + input_.shape = shape[new Slice(1)]; } else { @@ -397,17 +397,17 @@ namespace Tensorflow.Operations foreach (var (output, output_size) in zip(final_outputs, flat_output_size)) { var shape = rnn_cell_impl._concat(new int[] { (int)const_time_steps, (int)const_batch_size }, output_size, @static: true); - output.set_shape(shape); + output.shape = shape; } return (final_outputs[0], final_state); } - private static TensorShape _maybe_tensor_shape_from_tensor(Tensor shape) - => shape.TensorShape; + private static Shape _maybe_tensor_shape_from_tensor(Tensor shape) + => shape.shape; - private static TensorShape _maybe_tensor_shape_from_tensor(int shape) - => new TensorShape(shape); + private static Shape _maybe_tensor_shape_from_tensor(int shape) + => new Shape(shape); private static TF_DataType _infer_state_dtype(TF_DataType explicit_dtype, Tensor state) { @@ -424,7 +424,7 @@ namespace Tensorflow.Operations /// public static Tensor _transpose_batch_time(Tensor x) { - var x_static_shape = x.TensorShape; + var x_static_shape = x.shape; if (x_static_shape.ndim == 1) return x; @@ -436,12 +436,12 @@ namespace Tensorflow.Operations }; var x_t = array_ops.transpose(x, array_ops.concat(con1, 0)); - var dims = new [] { x_static_shape.dims[1], x_static_shape.dims[0] } + var dims = new long[] { x_static_shape.dims[1], x_static_shape.dims[0] } .ToList(); dims.AddRange(x_static_shape.dims.Skip(2)); - var shape = new TensorShape(dims.ToArray()); + var shape = new Shape(dims.ToArray()); - x_t.set_shape(shape); + x_t.shape = shape; return x_t; } @@ -455,7 +455,7 @@ namespace Tensorflow.Operations { foreach (var input_ in flat_input) { - var shape = input_.TensorShape; + var shape = input_.shape; if (shape.ndim < 0) continue; if (shape.ndim < 2) diff --git a/src/TensorFlowNET.Core/Operations/NnOps/rnn_cell_impl.cs 
b/src/TensorFlowNET.Core/Operations/NnOps/rnn_cell_impl.cs index c76d768d..49fe843b 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/rnn_cell_impl.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/rnn_cell_impl.cs @@ -32,18 +32,18 @@ namespace Tensorflow.Operations else if (p.ndim != 1) throw new ValueError($"prefix tensor must be either a scalar or vector, but saw tensor: {p}"); - var s_tensor_shape = new TensorShape(suffix); + var s_tensor_shape = new Shape(suffix); var s_static = s_tensor_shape.ndim > -1 ? s_tensor_shape.dims : null; - var s = s_tensor_shape.is_fully_defined() ? + var s = s_tensor_shape.IsFullyDefined ? constant_op.constant(s_tensor_shape.dims, dtype: dtypes.int32) : null; if (@static) { if (p_static is null) return null; - var shape = new TensorShape(p_static).concatenate(s_static); + var shape = new Shape(p_static).concatenate(s_static); throw new NotImplementedException("RNNCell _concat"); } else @@ -54,24 +54,24 @@ namespace Tensorflow.Operations } } - public static TensorShape _concat(int[] prefix, int suffix, bool @static = false) + public static Shape _concat(int[] prefix, int suffix, bool @static = false) { - var p = new TensorShape(prefix); + var p = new Shape(prefix); var p_static = prefix; - var p_tensor = p.is_fully_defined() ? constant_op.constant(p.as_list(), dtype: dtypes.int32) : null; + var p_tensor = p.IsFullyDefined ? constant_op.constant(p, dtype: dtypes.int32) : null; - var s_tensor_shape = new TensorShape(suffix); + var s_tensor_shape = new Shape(suffix); var s_static = s_tensor_shape.ndim > -1 ? s_tensor_shape.dims : null; - var s_tensor = s_tensor_shape.is_fully_defined() ? + var s_tensor = s_tensor_shape.IsFullyDefined ? constant_op.constant(s_tensor_shape.dims, dtype: dtypes.int32) : null; if (@static) { if (p_static is null) return null; - var shape = new TensorShape(p_static).concatenate(s_static); + var shape = new Shape(p_static).concatenate(s_static); return shape; } else diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index 560b681e..9dc876b5 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -345,7 +345,7 @@ namespace Tensorflow return ByteString.CopyFromUtf8(value ?? 
string.Empty); } - public TensorShapeProto _MakeShape(TensorShape shape, AttrDef attr_def) + public TensorShapeProto _MakeShape(Shape shape, AttrDef attr_def) { return shape.as_proto(); } @@ -400,7 +400,7 @@ namespace Tensorflow if (value == null && attr_def.DefaultValue != null) attr_value.Shape = attr_def.DefaultValue.Shape; - if (value is TensorShape val1) + if (value is Shape val1) attr_value.Shape = val1.as_proto(); else if (value is long[] val2) attr_value.Shape = tensor_util.as_shape(val2); @@ -409,7 +409,7 @@ namespace Tensorflow break; case "list(shape)": - attr_value.List.Shape.AddRange((value as TensorShape[]).Select(x => _MakeShape(x, attr_def))); + attr_value.List.Shape.AddRange((value as Shape[]).Select(x => _MakeShape(x, attr_def))); break; default: throw new TypeError($"SetAttrValue: can't not convert attr_def.Type '{attr_def.Type}' to protos."); diff --git a/src/TensorFlowNET.Core/Operations/Queues/FIFOQueue.cs b/src/TensorFlowNET.Core/Operations/Queues/FIFOQueue.cs index 4d594fc0..0f824b9b 100644 --- a/src/TensorFlowNET.Core/Operations/Queues/FIFOQueue.cs +++ b/src/TensorFlowNET.Core/Operations/Queues/FIFOQueue.cs @@ -22,7 +22,7 @@ namespace Tensorflow.Queues { public FIFOQueue(int capacity, TF_DataType[] dtypes, - TensorShape[] shapes, + Shape[] shapes, string[] names = null, string shared_name = null, string name = "fifo_queue") diff --git a/src/TensorFlowNET.Core/Operations/Queues/PaddingFIFOQueue.cs b/src/TensorFlowNET.Core/Operations/Queues/PaddingFIFOQueue.cs index a6c92a41..d18f9022 100644 --- a/src/TensorFlowNET.Core/Operations/Queues/PaddingFIFOQueue.cs +++ b/src/TensorFlowNET.Core/Operations/Queues/PaddingFIFOQueue.cs @@ -25,7 +25,7 @@ namespace Tensorflow.Queues { public PaddingFIFOQueue(int capacity, TF_DataType[] dtypes, - TensorShape[] shapes, + Shape[] shapes, string[] names = null, string shared_name = null, string name = "padding_fifo_queue") diff --git a/src/TensorFlowNET.Core/Operations/Queues/PriorityQueue.cs b/src/TensorFlowNET.Core/Operations/Queues/PriorityQueue.cs index 421b90b5..e54427bc 100644 --- a/src/TensorFlowNET.Core/Operations/Queues/PriorityQueue.cs +++ b/src/TensorFlowNET.Core/Operations/Queues/PriorityQueue.cs @@ -24,7 +24,7 @@ namespace Tensorflow.Queues { public PriorityQueue(int capacity, TF_DataType[] dtypes, - TensorShape[] shapes, + Shape[] shapes, string[] names = null, string shared_name = null, string name = "priority_queue") @@ -44,7 +44,7 @@ namespace Tensorflow.Queues _dtypes = dtypes1.ToArray(); var shapes1 = shapes.ToList(); - shapes1.Insert(0, new TensorShape()); + shapes1.Insert(0, Shape.Null); _shapes = shapes1.ToArray(); } diff --git a/src/TensorFlowNET.Core/Operations/Queues/QueueBase.cs b/src/TensorFlowNET.Core/Operations/Queues/QueueBase.cs index 727cbf95..992646ee 100644 --- a/src/TensorFlowNET.Core/Operations/Queues/QueueBase.cs +++ b/src/TensorFlowNET.Core/Operations/Queues/QueueBase.cs @@ -23,12 +23,12 @@ namespace Tensorflow.Queues public class QueueBase { protected TF_DataType[] _dtypes; - protected TensorShape[] _shapes; + protected Shape[] _shapes; protected string[] _names; protected Tensor _queue_ref; protected string _name; - public QueueBase(TF_DataType[] dtypes, TensorShape[] shapes, string[] names) + public QueueBase(TF_DataType[] dtypes, Shape[] shapes, string[] names) { _dtypes = dtypes; _shapes = shapes; diff --git a/src/TensorFlowNET.Core/Operations/Queues/RandomShuffleQueue.cs b/src/TensorFlowNET.Core/Operations/Queues/RandomShuffleQueue.cs index fcbf2c88..3f15c593 100644 --- 
a/src/TensorFlowNET.Core/Operations/Queues/RandomShuffleQueue.cs +++ b/src/TensorFlowNET.Core/Operations/Queues/RandomShuffleQueue.cs @@ -26,7 +26,7 @@ namespace Tensorflow.Queues public RandomShuffleQueue(int capacity, int min_after_dequeue, TF_DataType[] dtypes, - TensorShape[] shapes, + Shape[] shapes, string[] names = null, int? seed = null, string shared_name = null, diff --git a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs index 19443db9..2c6527d6 100644 --- a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs +++ b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs @@ -37,7 +37,7 @@ namespace Tensorflow.Operations bool _infer_shape; public bool infer_shape => _infer_shape; public bool _dynamic_size; - public List _element_shape; + public List _element_shape; public List _colocate_with; @@ -47,7 +47,7 @@ namespace Tensorflow.Operations public _GraphTensorArray(TF_DataType dtype, Tensor size, bool? dynamic_size = null, bool? clear_after_read = null, string tensor_array_name = null, Tensor handle = null, Tensor flow = null, - bool infer_shape = true, TensorShape element_shape = null, + bool infer_shape = true, Shape element_shape = null, bool colocate_with_first_write_call = true, string name = null) { clear_after_read = clear_after_read ?? true; @@ -66,12 +66,12 @@ namespace Tensorflow.Operations if (element_shape == null) { _infer_shape = infer_shape; - _element_shape = new List { }; + _element_shape = new List { }; } else { _infer_shape = true; - _element_shape = new List { element_shape }; + _element_shape = new List { element_shape }; } tf_with(ops.name_scope(name, "TensorArray", new { handle, size, flow }), scope => @@ -124,7 +124,7 @@ namespace Tensorflow.Operations value = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value"); if (_infer_shape) { - var shape = new TensorShape(value.TensorShape.dims.Skip(1).ToArray()); + var shape = new Shape(value.shape.dims.Skip(1).ToArray()); _merge_element_shape(shape); } @@ -149,7 +149,7 @@ namespace Tensorflow.Operations }); } - public void _merge_element_shape(TensorShape shape) + public void _merge_element_shape(Shape shape) { _element_shape.Add(shape); } @@ -169,7 +169,7 @@ namespace Tensorflow.Operations name: name); if (_element_shape != null) - value.set_shape(_element_shape[0].dims); + value.shape = _element_shape[0].dims; return value; } @@ -207,7 +207,7 @@ namespace Tensorflow.Operations public Tensor gather(Tensor indices, string name = null) { - var element_shape = new TensorShape(); + var element_shape = Shape.Null; if (_element_shape.Count > 0) element_shape = _element_shape[0]; diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 13db8194..a38761f5 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -71,7 +71,7 @@ namespace Tensorflow verify_shape: verify_shape, allow_broadcast: false); - public static Tensor zeros(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) { dtype = dtype.as_base_dtype(); @@ -130,9 +130,9 @@ namespace Tensorflow var tensor_tensor = ops.convert_to_tensor(tensor, name: "tensor"); var mask_tensor = ops.convert_to_tensor(mask, name: "mask"); - var shape_mask = mask_tensor.TensorShape; + var shape_mask = mask_tensor.shape; var ndims_mask = shape_mask.ndim; - var 
shape_tensor = tensor_tensor.TensorShape; + var shape_tensor = tensor_tensor.shape; if (ndims_mask < 1) throw new ValueError("mask cannot be scalar."); @@ -146,9 +146,9 @@ namespace Tensorflow }, 0); tensor_tensor = reshape(tensor_tensor, shape1); var first_dim = shape_tensor.dims.Skip(axis).Take(ndims_mask).First(); - var s1 = tensor_shape.as_shape(shape_tensor.dims.Take(axis).ToArray()); + var s1 = new Shape(shape_tensor.dims.Take(axis).ToArray()); var s2 = s1.concatenate(new[] { first_dim }).concatenate(shape_tensor.dims.Skip(axis + ndims_mask).ToArray()); - tensor_tensor.set_shape(s2); + tensor_tensor.shape = s2; mask_tensor = reshape(mask_tensor, new[] { -1 }); return _apply_mask_1d(tensor_tensor, mask_tensor, axis); @@ -186,10 +186,10 @@ namespace Tensorflow private static Tensor _constant_if_small(int value, Tensor shape) { - return shape < 1000UL; + return shape < 1000L; } - private static Tensor _constant_if_small(T value, TensorShape shape, TF_DataType dtype, string name) + private static Tensor _constant_if_small(T value, Shape shape, TF_DataType dtype, string name) { if (shape.size < 1000) { @@ -364,7 +364,7 @@ namespace Tensorflow tensor = ops.convert_to_tensor(tensor, name: "tensor"); // is_fully_defined return unexpected value. - if (optimize && tensor.shape.is_fully_defined() && dtype != TF_DataType.TF_VARIANT) + if (optimize && tensor.shape.IsFullyDefined && dtype != TF_DataType.TF_VARIANT) { } @@ -384,7 +384,7 @@ namespace Tensorflow public static Tensor reshape(Tensor tensor, Tensor shape, string name = null) => gen_array_ops.reshape(tensor, shape, name: name); - public static Tensor reshape(Tensor tensor, TensorShape shape, string name = null) + public static Tensor reshape(Tensor tensor, Shape shape, string name = null) => gen_array_ops.reshape(tensor, shape, name: name); public static Tensor reshape(Tensor tensor, object[] shape, string name = null) @@ -427,7 +427,7 @@ namespace Tensorflow }); } - public static Tensor ones(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + public static Tensor ones(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) => tf_with(ops.name_scope(name, "ones", shape), scope => { dtype = dtype.as_base_dtype(); @@ -502,7 +502,7 @@ namespace Tensorflow return ops.convert_to_tensor(values, name: name); } - var value_shape = ops.convert_to_tensor(values[0], name: name).TensorShape; + var value_shape = ops.convert_to_tensor(values[0], name: name).shape; return gen_array_ops.pack(values, axis: axis, name: name); } @@ -512,7 +512,7 @@ namespace Tensorflow if (num == null) { value = ops.convert_to_tensor(value); - var value_shape = value.TensorShape; + var value_shape = value.shape; num = (int)value_shape.dims[axis]; } @@ -588,8 +588,8 @@ namespace Tensorflow if (!tf.Context.executing_eagerly()) { - var input_shape = input.TensorShape; - if (optimize && input.ndim > -1 && input_shape.is_fully_defined()) + var input_shape = input.shape; + if (optimize && input.ndim > -1 && input_shape.IsFullyDefined) { var nd = np.array(input.shape.dims).astype(out_type.as_system_dtype()); return constant_op.constant(nd, name: name); @@ -610,7 +610,7 @@ namespace Tensorflow var input_shape = input_tensor.shape; if (optimize) { - if (input_shape.is_fully_defined()) + if (input_shape.IsFullyDefined) { return constant_op.constant(input_shape.size, dtype: out_type, name: name); } @@ -633,7 +633,7 @@ namespace Tensorflow tensor = ops.convert_to_tensor(tensor, name: "tensor"); // is_fully_defined return unexpected 
value. - if (optimize && tensor.shape.is_fully_defined() && dtype != TF_DataType.TF_VARIANT) + if (optimize && tensor.shape.IsFullyDefined && dtype != TF_DataType.TF_VARIANT) { } @@ -906,7 +906,7 @@ namespace Tensorflow return gen_array_ops.gather_v2(@params, indices, axis, name: name); } - public static Tensor transpose(T1 a, TensorShape perm, string name = "transpose", bool conjugate = false) + public static Tensor transpose(T1 a, Shape perm, string name = "transpose", bool conjugate = false) { return tf_with(ops.name_scope(name, "transpose", new { a }), scope => { @@ -1005,9 +1005,9 @@ namespace Tensorflow if (!tf.Context.executing_eagerly()) { var paddings_constant = tensor_util.constant_value(paddings); - var input_shape = result.op.inputs[0].TensorShape; + var input_shape = result.op.inputs[0].shape; if (input_shape.ndim > -1 && - !result.TensorShape.is_fully_defined() && + !result.shape.IsFullyDefined && !(paddings_constant is null)) { var new_shape = new List(); @@ -1018,14 +1018,14 @@ namespace Tensorflow else new_shape.Add(np.sum(padding) + dim); } - result.set_shape(new_shape.ToArray()); + result.shape = new_shape.ToArray(); } } return result; } - public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null) + public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string name = null) { if (tf.Context.executing_eagerly()) throw new RuntimeError("tf.placeholder() is not compatible with eager execution."); diff --git a/src/TensorFlowNET.Core/Operations/clip_ops.cs b/src/TensorFlowNET.Core/Operations/clip_ops.cs index 62eb8410..b782c469 100644 --- a/src/TensorFlowNET.Core/Operations/clip_ops.cs +++ b/src/TensorFlowNET.Core/Operations/clip_ops.cs @@ -29,9 +29,9 @@ namespace Tensorflow var t_min = math_ops.minimum(values, clip_value_max); // Assert that the shape is compatible with the initial shape, // to prevent unintentional broadcasting. 
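The clip_ops lines that follow lean on `Shape.merge_with` for exactly the broadcast guard described in the comment above. As an illustrative aside (not patch content), a minimal sketch of the `Shape` combination helpers that replace their `TensorShape` counterparts in this patch, assuming the `long[]`-based constructor used elsewhere in the diff:

    using Tensorflow;

    var lhs    = new Shape(new long[] { 32, 10 });
    var rhs    = new Shape(new long[] { 32, 10 });
    var merged = lhs.merge_with(rhs);                 // raises if the shapes are incompatible
    var wider  = lhs.concatenate(new long[] { 3 });   // 32 x 10 x 3
    var scalar = Shape.Null;                          // replaces "new TensorShape()"
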
- _ = values.TensorShape.merge_with(t_min.shape); + _ = values.shape.merge_with(t_min.shape); var t_max = math_ops.maximum(t_min, clip_value_min, name: name); - _ = values.TensorShape.merge_with(t_max.shape); + _ = values.shape.merge_with(t_max.shape); return t_max; }); diff --git a/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs b/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs index 8d457147..8b7989e6 100644 --- a/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs +++ b/src/TensorFlowNET.Core/Operations/confusion_matrix.py.cs @@ -38,9 +38,9 @@ namespace Tensorflow { predictions = ops.convert_to_tensor(predictions); labels = ops.convert_to_tensor(labels); - var predictions_shape = predictions.TensorShape; + var predictions_shape = predictions.shape; var predictions_rank = predictions_shape.ndim; - var labels_shape = labels.TensorShape; + var labels_shape = labels.shape; var labels_rank = labels_shape.ndim; if (labels_rank > -1 && predictions_rank > -1) { diff --git a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs index 06b4d4aa..862b636f 100644 --- a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs +++ b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs @@ -319,7 +319,7 @@ namespace Tensorflow return gen_array_ops.identity(data, name: name); } - public static void _SetShapeInvariants(Tensor[] input_vars, Tensor[] enter_vars, TensorShape[] shapes = null) + public static void _SetShapeInvariants(Tensor[] input_vars, Tensor[] enter_vars, Shape[] shapes = null) { if (shapes == null) return; @@ -327,7 +327,7 @@ namespace Tensorflow var flat_shapes = nest.flatten2(shapes); foreach (var (inp, var, shape) in zip(input_vars, enter_vars, flat_shapes)) { - var.set_shape(shape); + var.shape = shape; } } @@ -706,7 +706,7 @@ namespace Tensorflow /// /// public static TItem while_loop(Func cond, Func body, TItem loop_vars, - TensorShape[] shape_invariants = null, + Shape[] shape_invariants = null, int parallel_iterations = 10, bool back_prop = true, bool swap_memory = false, @@ -803,7 +803,7 @@ namespace Tensorflow data, frame_name, is_constant, parallel_iterations, name: name); if (use_input_shape) - result.set_shape(data.TensorShape); + result.shape = data.shape; return result; } diff --git a/src/TensorFlowNET.Core/Operations/dataset_ops.cs b/src/TensorFlowNET.Core/Operations/dataset_ops.cs index 9fda99f7..9407fd5a 100644 --- a/src/TensorFlowNET.Core/Operations/dataset_ops.cs +++ b/src/TensorFlowNET.Core/Operations/dataset_ops.cs @@ -7,7 +7,7 @@ namespace Tensorflow { public class dataset_ops { - public Tensor tensor_dataset(Tensor[] components, TensorShape[] output_shapes, string name = null) + public Tensor tensor_dataset(Tensor[] components, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("TensorDataset", name, new ExecuteOpArgs() { OpInputArgs = new object[] { components } @@ -20,29 +20,29 @@ namespace Tensorflow /// /// /// - public Tensor tensor_slice_dataset(Tensor[] components, TensorShape[] output_shapes, string name = null) + public Tensor tensor_slice_dataset(Tensor[] components, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("TensorSliceDataset", name, new ExecuteOpArgs() { OpInputArgs = new object[] { components } }.SetAttributes(new { output_shapes })); - public Tensor range_dataset(Tensor start, Tensor stop, Tensor step, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) + public Tensor range_dataset(Tensor start, Tensor stop, 
Tensor step, TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("RangeDataset", name, new ExecuteOpArgs(start, stop, step) .SetAttributes(new { output_types, output_shapes })); - public Tensor repeat_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) + public Tensor repeat_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("RepeatDataset", name, new ExecuteOpArgs(input_dataset, count) .SetAttributes(new { output_types, output_shapes })); public Tensor shard_dataset(Tensor input_dataset, Tensor num_shards, Tensor index, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, bool require_non_empty = false, string name = null) => tf.Context.ExecuteOp("ShardDataset", name, new ExecuteOpArgs(input_dataset, num_shards, index) .SetAttributes(new { require_non_empty, output_types, output_shapes })); public Tensor zip_dataset(Tensor[] input_datasets, TF_DataType[] output_types, - TensorShape[] output_shapes, + Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("ZipDataset", name, new ExecuteOpArgs() { @@ -51,14 +51,14 @@ namespace Tensorflow public Tensor shuffle_dataset_v3(Tensor input_dataset, Tensor buffer_size, Tensor seed, Tensor seed2, Tensor seed_generator, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, bool reshuffle_each_iteration = true, string name = null) => tf.Context.ExecuteOp("ShuffleDatasetV3", name, new ExecuteOpArgs(input_dataset, buffer_size, seed, seed2, seed_generator) .SetAttributes(new { reshuffle_each_iteration, output_types, output_shapes })); public Tensor skip_dataset(Tensor input_dataset, Tensor count, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("SkipDataset", name, new ExecuteOpArgs(input_dataset, count) .SetAttributes(new { output_types, output_shapes })); @@ -67,13 +67,13 @@ namespace Tensorflow => tf.Context.ExecuteOp("DummySeedGenerator", name, new ExecuteOpArgs()); public Tensor concatenate_dataset(Tensor input_dataset, Tensor another_dataset, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("ConcatenateDataset", name, new ExecuteOpArgs(input_dataset, another_dataset) .SetAttributes(new { output_types, output_shapes })); public Tensor cache_dataset_v2(Tensor input_dataset, Tensor filename, Tensor cache, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("CacheDatasetV2", name, new ExecuteOpArgs(input_dataset, filename, cache) .SetAttributes(new { output_types, output_shapes })); @@ -91,7 +91,7 @@ namespace Tensorflow /// public Tensor batch_dataset_v2(Tensor input_dataset, Tensor buffer_size, Tensor drop_remainder, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, bool parallel_copy = false, string name = null) => tf.Context.ExecuteOp("BatchDatasetV2", name, @@ -118,7 +118,7 @@ namespace Tensorflow /// /// public Tensor prefetch_dataset(Tensor input_dataset, Tensor buffer_size, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] 
output_types, Shape[] output_shapes, int? slack_period = 0, bool legacy_autotune = true, string name = null) @@ -141,7 +141,7 @@ namespace Tensorflow /// /// public Tensor take_dataset(Tensor input_dataset, Tensor count, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("TakeDataset", name, new ExecuteOpArgs(input_dataset, count) .SetAttributes(new { output_types, output_shapes })); @@ -157,7 +157,7 @@ namespace Tensorflow /// /// public Tensor optimize_dataset(Tensor input_dataset, Tensor optimizations, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, string[] optimization_configs = null, string name = null) => tf.Context.ExecuteOp("OptimizeDataset", name, new ExecuteOpArgs(input_dataset, optimizations) @@ -170,7 +170,7 @@ namespace Tensorflow public Tensor optimize_dataset_v2(Tensor input_dataset, Tensor optimizations_enabled, Tensor optimizations_disabled, Tensor optimizations_default, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, string[] optimization_configs = null, string name = null) => tf.Context.ExecuteOp("OptimizeDatasetV2", name, new ExecuteOpArgs(input_dataset, @@ -193,7 +193,7 @@ namespace Tensorflow /// /// public Tensor model_dataset(Tensor input_dataset, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, AutotuneAlgorithm algorithm, long cpu_budget, long ram_budget, string name = null) => tf.Context.ExecuteOp("ModelDataset", name, new ExecuteOpArgs(input_dataset) @@ -213,7 +213,7 @@ namespace Tensorflow /// /// /// A tuple of `Tensor` objects (handle, deleter). 
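Throughout dataset_ops.cs the element metadata parameter becomes `Shape[] output_shapes`, forwarded to the kernel as attributes via `ExecuteOpArgs.SetAttributes`. A hedged caller-side sketch using the `anonymous_iterator_v2` wrapper that follows (element types and shapes are invented for illustration; eager execution, TF.NET's default, is assumed):

    using Tensorflow;

    var output_types  = new[] { TF_DataType.TF_FLOAT, TF_DataType.TF_INT64 };
    var output_shapes = new[] { new Shape(new long[] { 28, 28 }), Shape.Null };

    // Returns the iterator resource handle plus its deleter tensor.
    var (handle, deleter) = new dataset_ops().anonymous_iterator_v2(output_types, output_shapes);
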
- public (Tensor, Tensor) anonymous_iterator_v2(TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) + public (Tensor, Tensor) anonymous_iterator_v2(TF_DataType[] output_types, Shape[] output_shapes, string name = null) { var results = tf.Context.ExecuteOp("AnonymousIteratorV2", name, new ExecuteOpArgs().SetAttributes(new { output_types, output_shapes })); @@ -237,7 +237,7 @@ namespace Tensorflow /// /// /// - public Tensor map_dataset(Tensor dataset, ConcreteFunction f, TF_DataType[] output_types, TensorShape[] output_shapes, + public Tensor map_dataset(Tensor dataset, ConcreteFunction f, TF_DataType[] output_types, Shape[] output_shapes, bool use_inter_op_parallelism = true, bool preserve_cardinality = false, string name = null) => tf.Context.ExecuteOp("MapDataset", name, new ExecuteOpArgs(dataset, new Tensor[0]) .SetAttributes(new @@ -258,7 +258,7 @@ namespace Tensorflow /// /// /// - public Tensor filter_dataset(Tensor dataset, ConcreteFunction predicate, TF_DataType[] output_types, TensorShape[] output_shapes, + public Tensor filter_dataset(Tensor dataset, ConcreteFunction predicate, TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("FilterDataset", name, new ExecuteOpArgs(dataset, new Tensor[0]) .SetAttributes(new @@ -277,7 +277,7 @@ namespace Tensorflow /// /// /// - public Tensor flat_map_dataset(Tensor dataset, ConcreteFunction f, TF_DataType[] output_types, TensorShape[] output_shapes, + public Tensor flat_map_dataset(Tensor dataset, ConcreteFunction f, TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("FlatMapDataset", name, new ExecuteOpArgs(dataset, new Tensor[0]) .SetAttributes(new { f, output_types, output_shapes })); @@ -294,7 +294,7 @@ namespace Tensorflow /// /// public Tensor parallel_map_dataset_v2(Tensor dataset, Tensor num_parallel_calls, ConcreteFunction f, - TF_DataType[] output_types, TensorShape[] output_shapes, + TF_DataType[] output_types, Shape[] output_shapes, bool use_inter_op_parallelism = true, string deterministic = "default", bool preserve_cardinality = false, @@ -329,7 +329,7 @@ namespace Tensorflow /// /// /// - public Tensor[] iterator_get_next(Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = null) + public Tensor[] iterator_get_next(Tensor iterator, TF_DataType[] output_types, Shape[] output_shapes, string name = null) => tf.Context.ExecuteOp("IteratorGetNext", name, new ExecuteOpArgs(iterator) .SetAttributes(new { output_types, output_shapes })); } diff --git a/src/TensorFlowNET.Core/Operations/functional_ops.cs b/src/TensorFlowNET.Core/Operations/functional_ops.cs index 003b93da..89b2ce40 100644 --- a/src/TensorFlowNET.Core/Operations/functional_ops.cs +++ b/src/TensorFlowNET.Core/Operations/functional_ops.cs @@ -169,16 +169,16 @@ namespace Tensorflow var results_flat = bodyItem.Accs_ta.Select(r => r.stack()).ToArray(); - var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].TensorShape.with_rank_at_least(1).dims[0])); + var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].shape.with_rank_at_least(1).dims[0])); foreach (var elem in elems_flat.Skip(1)) { - n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.TensorShape.with_rank_at_least(1).dims[0]))); + n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.shape.with_rank_at_least(1).dims[0]))); } foreach (Tensor r in results_flat) { - r.set_shape(new 
TensorShape(n_static).concatenate(r.dims.Skip(1).ToArray())); + r.shape = new Shape(n_static).concatenate(r.dims.Skip(1).ToArray()); } // todo get working when the above caching_device is fixed diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs index 80c524ef..65599a4c 100644 --- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs @@ -482,11 +482,7 @@ namespace Tensorflow /// A name for the operation (optional). /// `Tensor`. Has the same type as `s0`. public static Tensor broadcast_args(Tensor s0, Tensor s1, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("BroadcastArgs", name, args: new { s0, s1, name }); - - return _op.outputs[0]; - } + => tf.Context.ExecuteOp("BroadcastArgs", name, new ExecuteOpArgs(s0, s1)); /// /// Broadcast an array for a compatible shape. diff --git a/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs b/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs index 233d1344..4a637728 100644 --- a/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_data_flow_ops.cs @@ -41,7 +41,7 @@ namespace Tensorflow } public static (Tensor, Tensor) tensor_array_v3(T size, TF_DataType dtype = TF_DataType.DtInvalid, - TensorShape element_shape = null, bool dynamic_size = false, bool clear_after_read = true, + Shape element_shape = null, bool dynamic_size = false, bool clear_after_read = true, bool identical_element_shapes = false, string tensor_array_name = "", string name = null) { var _op = tf.OpDefLib._apply_op_helper("TensorArrayV3", name, new @@ -72,7 +72,7 @@ namespace Tensorflow return _op.output; } - public static Tensor padding_fifo_queue_v2(TF_DataType[] component_types, TensorShape[] shapes, + public static Tensor padding_fifo_queue_v2(TF_DataType[] component_types, Shape[] shapes, int capacity = -1, string container = "", string shared_name = "", string name = null) { @@ -88,7 +88,7 @@ namespace Tensorflow return _op.output; } - public static Tensor fifo_queue_v2(TF_DataType[] component_types, TensorShape[] shapes, + public static Tensor fifo_queue_v2(TF_DataType[] component_types, Shape[] shapes, int capacity = -1, string container = "", string shared_name = "", string name = null) { @@ -104,7 +104,7 @@ namespace Tensorflow return _op.output; } - public static Tensor priority_queue_v2(TF_DataType[] component_types, TensorShape[] shapes, + public static Tensor priority_queue_v2(TF_DataType[] component_types, Shape[] shapes, int capacity = -1, string container = "", string shared_name = "", string name = null) { @@ -120,7 +120,7 @@ namespace Tensorflow return _op.output; } - public static Tensor random_shuffle_queue_v2(TF_DataType[] component_types, TensorShape[] shapes, + public static Tensor random_shuffle_queue_v2(TF_DataType[] component_types, Shape[] shapes, int capacity = -1, int min_after_dequeue = 0, int seed = 0, int seed2 = 0, string container = "", string shared_name = "", string name = null) { @@ -259,7 +259,7 @@ namespace Tensorflow } public static Tensor tensor_array_gather_v3(Tensor handle, Tensor indices, Tensor flow_in, - TF_DataType dtype, TensorShape element_shape = null, string name = null) + TF_DataType dtype, Shape element_shape = null, string name = null) { var _op = tf.OpDefLib._apply_op_helper("TensorArrayGatherV3", name, new { diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index 
c91a4e5d..d0571315 100644 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -397,8 +397,12 @@ namespace Tensorflow /// /// /// - public static Tensor equal(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Equal", name, new ExecuteOpArgs(x, y)); + public static Tensor equal(Tx x, Ty y, bool incompatible_shape_error = true, string name = null) + => tf.Context.ExecuteOp("Equal", name, new ExecuteOpArgs(x, y) + .SetAttributes(new + { + incompatible_shape_error + })); /// /// Returns the truth value of (x != y) element-wise. diff --git a/src/TensorFlowNET.Core/Operations/gen_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ops.cs index a1e26d7f..11cb6de8 100644 --- a/src/TensorFlowNET.Core/Operations/gen_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_ops.cs @@ -88,7 +88,7 @@ namespace Tensorflow.Operations /// /// Returns a Tensor of same shape and type as the elements of inputs. /// - public static Tensor accumulate_n_v2(Tensor[] inputs, TensorShape shape, string name = "AccumulateNV2") + public static Tensor accumulate_n_v2(Tensor[] inputs, Shape shape, string name = "AccumulateNV2") { var dict = new Dictionary(); dict["inputs"] = inputs; @@ -754,7 +754,7 @@ namespace Tensorflow.Operations /// container. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor anonymous_iterator(TF_DataType[] output_types, TensorShape[] output_shapes, string name = "AnonymousIterator") + public static Tensor anonymous_iterator(TF_DataType[] output_types, Shape[] output_shapes, string name = "AnonymousIterator") { var dict = new Dictionary(); dict["output_types"] = output_types; @@ -2559,7 +2559,7 @@ namespace Tensorflow.Operations /// incomplete element has some undefined components in its value tuple, /// and may be updated using BarrierInsertMany. /// - public static Tensor barrier(TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "Barrier") + public static Tensor barrier(TF_DataType[] component_types, Shape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "Barrier") { var dict = new Dictionary(); dict["component_types"] = component_types; @@ -2883,7 +2883,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor batch_dataset(Tensor input_dataset, Tensor batch_size, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "BatchDataset") + public static Tensor batch_dataset(Tensor input_dataset, Tensor batch_size, TF_DataType[] output_types, Shape[] output_shapes, string name = "BatchDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -2918,7 +2918,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
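The `gen_math_ops.equal` change above surfaces the op's `incompatible_shape_error` attribute with a default of `true`, so existing callers keep the previous behaviour. A hedged usage sketch (operand values are illustrative; eager execution, TF.NET's default, is assumed):

    using Tensorflow;
    using static Tensorflow.Binding;

    var a = tf.constant(new[] { 1, 2, 3 });
    var b = tf.constant(new[] { 1, 0, 3 });

    // Element-wise equality; the new flag controls whether the kernel raises
    // when the operand shapes cannot be matched.
    var eq = gen_math_ops.equal(a, b, incompatible_shape_error: true);
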
/// - public static Tensor batch_dataset_v2(Tensor input_dataset, Tensor batch_size, Tensor drop_remainder, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "BatchDatasetV2") + public static Tensor batch_dataset_v2(Tensor input_dataset, Tensor batch_size, Tensor drop_remainder, TF_DataType[] output_types, Shape[] output_shapes, string name = "BatchDatasetV2") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -4367,7 +4367,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor bytes_produced_stats_dataset(Tensor input_dataset, Tensor tag, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "BytesProducedStatsDataset") + public static Tensor bytes_produced_stats_dataset(Tensor input_dataset, Tensor tag, TF_DataType[] output_types, Shape[] output_shapes, string name = "BytesProducedStatsDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -4587,7 +4587,7 @@ namespace Tensorflow.Operations /// (e.g. cannot be opened, contains tensors of the wrong shape / size), an error /// will the returned when used. /// - public static Tensor cache_dataset(Tensor input_dataset, Tensor filename, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "CacheDataset") + public static Tensor cache_dataset(Tensor input_dataset, Tensor filename, TF_DataType[] output_types, Shape[] output_shapes, string name = "CacheDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -4802,7 +4802,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor collective_bcast_recv(TF_DataType T, int group_size, int group_key, int instance_key, TensorShape shape, string name = "CollectiveBcastRecv") + public static Tensor collective_bcast_recv(TF_DataType T, int group_size, int group_key, int instance_key, Shape shape, string name = "CollectiveBcastRecv") { var dict = new Dictionary(); dict["T"] = T; @@ -4837,7 +4837,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor collective_bcast_send(Tensor input, int group_size, int group_key, int instance_key, TensorShape shape, string name = "CollectiveBcastSend") + public static Tensor collective_bcast_send(Tensor input, int group_size, int group_key, int instance_key, Shape shape, string name = "CollectiveBcastSend") { var dict = new Dictionary(); dict["input"] = input; @@ -5187,7 +5187,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor concatenate_dataset(Tensor input_dataset, Tensor another_dataset, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "ConcatenateDataset") + public static Tensor concatenate_dataset(Tensor input_dataset, Tensor another_dataset, TF_DataType[] output_types, Shape[] output_shapes, string name = "ConcatenateDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -5232,7 +5232,7 @@ namespace Tensorflow.Operations /// resets the aggregate to 0, and increments the global_step recorded by /// the accumulator. 
/// - public static Tensor conditional_accumulator(TF_DataType dtype, TensorShape shape, string container = null, string shared_name = null, string name = "ConditionalAccumulator") + public static Tensor conditional_accumulator(TF_DataType dtype, Shape shape, string container = null, string shared_name = null, string name = "ConditionalAccumulator") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -7271,7 +7271,7 @@ namespace Tensorflow.Operations /// The components of the single element of input. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor[] dataset_to_single_element(Tensor dataset, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "DatasetToSingleElement") + public static Tensor[] dataset_to_single_element(Tensor dataset, TF_DataType[] output_types, Shape[] output_shapes, string name = "DatasetToSingleElement") { var dict = new Dictionary(); dict["dataset"] = dataset; @@ -8294,7 +8294,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor dense_to_sparse_batch_dataset(Tensor input_dataset, Tensor batch_size, Tensor row_shape, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "DenseToSparseBatchDataset") + public static Tensor dense_to_sparse_batch_dataset(Tensor input_dataset, Tensor batch_size, Tensor row_shape, TF_DataType[] output_types, Shape[] output_shapes, string name = "DenseToSparseBatchDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -10059,7 +10059,7 @@ namespace Tensorflow.Operations /// Raises an error if the input tensor's shape does not match the specified shape. /// Returns the input tensor otherwise. /// - public static Tensor ensure_shape(Tensor input, TensorShape shape, string name = "EnsureShape") + public static Tensor ensure_shape(Tensor input, Shape shape, string name = "EnsureShape") { var dict = new Dictionary(); dict["input"] = input; @@ -10584,7 +10584,7 @@ namespace Tensorflow.Operations /// The handle to the queue. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor f_i_f_o_queue(TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "FIFOQueue") + public static Tensor f_i_f_o_queue(TF_DataType[] component_types, Shape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "FIFOQueue") { var dict = new Dictionary(); dict["component_types"] = component_types; @@ -10632,7 +10632,7 @@ namespace Tensorflow.Operations /// The handle to the queue. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor f_i_f_o_queue_v2(TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "FIFOQueueV2") + public static Tensor f_i_f_o_queue_v2(TF_DataType[] component_types, Shape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "FIFOQueueV2") { var dict = new Dictionary(); dict["component_types"] = component_types; @@ -10670,7 +10670,7 @@ namespace Tensorflow.Operations /// \"Fake\" output value. 
This should not be consumed by another op. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor fake_param(TF_DataType dtype, TensorShape shape, string name = "FakeParam") + public static Tensor fake_param(TF_DataType dtype, Shape shape, string name = "FakeParam") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -11048,7 +11048,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor filter_by_last_component_dataset(Tensor input_dataset, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "FilterByLastComponentDataset") + public static Tensor filter_by_last_component_dataset(Tensor input_dataset, TF_DataType[] output_types, Shape[] output_shapes, string name = "FilterByLastComponentDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -13428,7 +13428,7 @@ namespace Tensorflow.Operations /// /// The current implementation memmaps the tensor from a file. /// - public static Tensor immutable_const(TF_DataType dtype, TensorShape shape, string memory_region_name, string name = "ImmutableConst") + public static Tensor immutable_const(TF_DataType dtype, Shape shape, string memory_region_name, string name = "ImmutableConst") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -13547,7 +13547,7 @@ namespace Tensorflow.Operations /// A tensor that will be provided using the infeed mechanism. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor infeed_dequeue(TF_DataType dtype, TensorShape shape, string name = "InfeedDequeue") + public static Tensor infeed_dequeue(TF_DataType dtype, Shape shape, string name = "InfeedDequeue") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -13577,7 +13577,7 @@ namespace Tensorflow.Operations /// /// simultaneously as an XLA tuple. /// - public static Tensor[] infeed_dequeue_tuple(TF_DataType[] dtypes, TensorShape[] shapes, string name = "InfeedDequeueTuple") + public static Tensor[] infeed_dequeue_tuple(TF_DataType[] dtypes, Shape[] shapes, string name = "InfeedDequeueTuple") { var dict = new Dictionary(); dict["dtypes"] = dtypes; @@ -13608,7 +13608,7 @@ namespace Tensorflow.Operations /// /// Returns the description of the operation /// - public static Operation infeed_enqueue(Tensor input, TensorShape shape = null, int? device_ordinal = null, string name = "InfeedEnqueue") + public static Operation infeed_enqueue(Tensor input, Shape shape = null, int? device_ordinal = null, string name = "InfeedEnqueue") { var dict = new Dictionary(); dict["input"] = input; @@ -13641,7 +13641,7 @@ namespace Tensorflow.Operations /// /// Returns the description of the operation /// - public static Operation infeed_enqueue_tuple(Tensor[] inputs, TensorShape[] shapes, int? device_ordinal = null, string name = "InfeedEnqueueTuple") + public static Operation infeed_enqueue_tuple(Tensor[] inputs, Shape[] shapes, int? device_ordinal = null, string name = "InfeedEnqueueTuple") { var dict = new Dictionary(); dict["inputs"] = inputs; @@ -14163,7 +14163,7 @@ namespace Tensorflow.Operations /// or "IteratorGetNext" op. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
/// - public static Tensor iterator(string shared_name, string container, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "Iterator") + public static Tensor iterator(string shared_name, string container, TF_DataType[] output_types, Shape[] output_shapes, string name = "Iterator") { var dict = new Dictionary(); dict["shared_name"] = shared_name; @@ -14195,7 +14195,7 @@ namespace Tensorflow.Operations /// A handle to an iterator resource. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor iterator_from_string_handle(Tensor string_handle, TF_DataType[] output_types = null, TensorShape[] output_shapes = null, string name = "IteratorFromStringHandle") + public static Tensor iterator_from_string_handle(Tensor string_handle, TF_DataType[] output_types = null, Shape[] output_shapes = null, string name = "IteratorFromStringHandle") { var dict = new Dictionary(); dict["string_handle"] = string_handle; @@ -14224,7 +14224,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor[] iterator_get_next(Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "IteratorGetNext") + public static Tensor[] iterator_get_next(Tensor iterator, TF_DataType[] output_types, Shape[] output_shapes, string name = "IteratorGetNext") { var dict = new Dictionary(); dict["iterator"] = iterator; @@ -14253,7 +14253,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor iterator_get_next_as_optional(Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "IteratorGetNextAsOptional") + public static Tensor iterator_get_next_as_optional(Tensor iterator, TF_DataType[] output_types, Shape[] output_shapes, string name = "IteratorGetNextAsOptional") { var dict = new Dictionary(); dict["iterator"] = iterator; @@ -14286,7 +14286,7 @@ namespace Tensorflow.Operations /// the calling thread is not a member of the thread pool used to execute parallel /// operations (e.g. in eager mode). /// - public static Tensor[] iterator_get_next_sync(Tensor iterator, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "IteratorGetNextSync") + public static Tensor[] iterator_get_next_sync(Tensor iterator, TF_DataType[] output_types, Shape[] output_shapes, string name = "IteratorGetNextSync") { var dict = new Dictionary(); dict["iterator"] = iterator; @@ -14495,7 +14495,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor latency_stats_dataset(Tensor input_dataset, Tensor tag, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "LatencyStatsDataset") + public static Tensor latency_stats_dataset(Tensor input_dataset, Tensor tag, TF_DataType[] output_types, Shape[] output_shapes, string name = "LatencyStatsDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -17568,7 +17568,7 @@ namespace Tensorflow.Operations /// values. Each value must be a scalar. Data can be inserted into the table using /// the insert operations. It does not support the initialization operation. 
/// - public static Tensor mutable_dense_hash_table(Tensor empty_key, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, TensorShape value_shape = null, int? initial_num_buckets = null, float? max_load_factor = null, string name = "MutableDenseHashTable") + public static Tensor mutable_dense_hash_table(Tensor empty_key, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, Shape value_shape = null, int? initial_num_buckets = null, float? max_load_factor = null, string name = "MutableDenseHashTable") { var dict = new Dictionary(); dict["empty_key"] = empty_key; @@ -17636,7 +17636,7 @@ namespace Tensorflow.Operations /// values. Each value must be a scalar. Data can be inserted into the table using /// the insert operations. It does not support the initialization operation. /// - public static Tensor mutable_dense_hash_table_v2(Tensor empty_key, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, TensorShape value_shape = null, int? initial_num_buckets = null, float? max_load_factor = null, string name = "MutableDenseHashTableV2") + public static Tensor mutable_dense_hash_table_v2(Tensor empty_key, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, Shape value_shape = null, int? initial_num_buckets = null, float? max_load_factor = null, string name = "MutableDenseHashTableV2") { var dict = new Dictionary(); dict["empty_key"] = empty_key; @@ -17742,7 +17742,7 @@ namespace Tensorflow.Operations /// values. Each value must be a vector. Data can be inserted into the table using /// the insert operations. It does not support the initialization operation. /// - public static Tensor mutable_hash_table_of_tensors(TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, TensorShape value_shape = null, string name = "MutableHashTableOfTensors") + public static Tensor mutable_hash_table_of_tensors(TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, Shape value_shape = null, string name = "MutableHashTableOfTensors") { var dict = new Dictionary(); dict["key_dtype"] = key_dtype; @@ -17794,7 +17794,7 @@ namespace Tensorflow.Operations /// values. Each value must be a vector. Data can be inserted into the table using /// the insert operations. It does not support the initialization operation. /// - public static Tensor mutable_hash_table_of_tensors_v2(TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, TensorShape value_shape = null, string name = "MutableHashTableOfTensorsV2") + public static Tensor mutable_hash_table_of_tensors_v2(TF_DataType key_dtype, TF_DataType value_dtype, string container = null, string shared_name = null, bool? use_node_name_sharing = null, Shape value_shape = null, string name = "MutableHashTableOfTensorsV2") { var dict = new Dictionary(); dict["key_dtype"] = key_dtype; @@ -18607,7 +18607,7 @@ namespace Tensorflow.Operations /// /// Creates a dataset by applying optimizations to input_dataset. 
/// - public static Tensor optimize_dataset(Tensor input_dataset, Tensor optimizations, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "OptimizeDataset") + public static Tensor optimize_dataset(Tensor input_dataset, Tensor optimizations, TF_DataType[] output_types, Shape[] output_shapes, string name = "OptimizeDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -18654,7 +18654,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor[] optional_get_value(Tensor optional, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "OptionalGetValue") + public static Tensor[] optional_get_value(Tensor optional, TF_DataType[] output_types, Shape[] output_shapes, string name = "OptionalGetValue") { var dict = new Dictionary(); dict["optional"] = optional; @@ -19037,7 +19037,7 @@ namespace Tensorflow.Operations /// /// block indefinitely until data is available. /// - public static Tensor outfeed_dequeue(TF_DataType dtype, TensorShape shape, int? device_ordinal = null, string name = "OutfeedDequeue") + public static Tensor outfeed_dequeue(TF_DataType dtype, Shape shape, int? device_ordinal = null, string name = "OutfeedDequeue") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -19075,7 +19075,7 @@ namespace Tensorflow.Operations /// tuple. This operations will block indefinitely until data is available. /// Output i corresponds to XLA tuple element i. /// - public static Tensor[] outfeed_dequeue_tuple(TF_DataType[] dtypes, TensorShape[] shapes, int? device_ordinal = null, string name = "OutfeedDequeueTuple") + public static Tensor[] outfeed_dequeue_tuple(TF_DataType[] dtypes, Shape[] shapes, int? device_ordinal = null, string name = "OutfeedDequeueTuple") { var dict = new Dictionary(); dict["dtypes"] = dtypes; @@ -19303,7 +19303,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor padded_batch_dataset(Tensor input_dataset, Tensor batch_size, Tensor[] padded_shapes, Tensor[] padding_values, TensorShape[] output_shapes, string name = "PaddedBatchDataset") + public static Tensor padded_batch_dataset(Tensor input_dataset, Tensor batch_size, Tensor[] padded_shapes, Tensor[] padding_values, Shape[] output_shapes, string name = "PaddedBatchDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -19347,7 +19347,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor padded_batch_dataset_v2(Tensor input_dataset, Tensor batch_size, Tensor[] padded_shapes, Tensor[] padding_values, Tensor drop_remainder, TensorShape[] output_shapes, string name = "PaddedBatchDatasetV2") + public static Tensor padded_batch_dataset_v2(Tensor input_dataset, Tensor batch_size, Tensor[] padded_shapes, Tensor[] padding_values, Tensor drop_remainder, Shape[] output_shapes, string name = "PaddedBatchDatasetV2") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -19401,7 +19401,7 @@ namespace Tensorflow.Operations /// to 0 in the shape attr. In this case DequeueMany will pad up to the maximum /// size of any given element in the minibatch. See below for details. 
/// - public static Tensor padding_f_i_f_o_queue(TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "PaddingFIFOQueue") + public static Tensor padding_f_i_f_o_queue(TF_DataType[] component_types, Shape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "PaddingFIFOQueue") { var dict = new Dictionary(); dict["component_types"] = component_types; @@ -19458,7 +19458,7 @@ namespace Tensorflow.Operations /// to 0 in the shape attr. In this case DequeueMany will pad up to the maximum /// size of any given element in the minibatch. See below for details. /// - public static Tensor padding_f_i_f_o_queue_v2(TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "PaddingFIFOQueueV2") + public static Tensor padding_f_i_f_o_queue_v2(TF_DataType[] component_types, Shape[] shapes = null, int? capacity = null, string container = null, string shared_name = null, string name = "PaddingFIFOQueueV2") { var dict = new Dictionary(); dict["component_types"] = component_types; @@ -19511,7 +19511,7 @@ namespace Tensorflow.Operations /// will copy pieces of the input into the output as they become available, in /// some situations this can provide a performance benefit. /// - public static Tensor parallel_concat(Tensor[] values, TensorShape shape, string name = "ParallelConcat") + public static Tensor parallel_concat(Tensor[] values, Shape shape, string name = "ParallelConcat") { var dict = new Dictionary(); dict["values"] = values; @@ -19729,7 +19729,7 @@ namespace Tensorflow.Operations /// dense_values : /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. /// - public static (Tensor[] sparse_indices, Tensor[] sparse_values, Tensor[] sparse_shapes, Tensor[] dense_values) parse_example(Tensor serialized, Tensor names, Tensor[] sparse_keys, Tensor[] dense_keys, Tensor[] dense_defaults, TF_DataType[] sparse_types, TensorShape[] dense_shapes, string name = "ParseExample") + public static (Tensor[] sparse_indices, Tensor[] sparse_values, Tensor[] sparse_shapes, Tensor[] dense_values) parse_example(Tensor serialized, Tensor names, Tensor[] sparse_keys, Tensor[] dense_keys, Tensor[] dense_defaults, TF_DataType[] sparse_types, Shape[] dense_shapes, string name = "ParseExample") { var dict = new Dictionary(); dict["serialized"] = serialized; @@ -19801,7 +19801,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
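// --- Illustrative sketch, not part of the generated code: how a caller might supply
// Shape[] to the padding FIFO queue wrapper above. Assumes graph mode, since these
// generated wrappers build graph ops; -1 leaves a dimension unknown so DequeueMany
// can pad up to the largest element, per the doc comment. Values are hypothetical.
using Tensorflow;
using Tensorflow.Operations;

public static class PaddingQueueSketch
{
    public static Tensor Build()
    {
        var componentTypes = new[] { TF_DataType.TF_FLOAT, TF_DataType.TF_INT32 };
        var shapes = new Shape[]
        {
            new Shape(-1, 80),   // variable-length sequence of 80-dim frames
            new Shape(-1)        // matching variable-length label vector
        };
        return gen_ops.padding_f_i_f_o_queue(componentTypes, shapes, capacity: 32);
    }
}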
/// - public static Tensor parse_example_dataset(Tensor input_dataset, Tensor num_parallel_calls, Tensor[] dense_defaults, string[] sparse_keys, string[] dense_keys, TF_DataType[] sparse_types, TensorShape[] dense_shapes, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "ParseExampleDataset") + public static Tensor parse_example_dataset(Tensor input_dataset, Tensor num_parallel_calls, Tensor[] dense_defaults, string[] sparse_keys, string[] dense_keys, TF_DataType[] sparse_types, Shape[] dense_shapes, TF_DataType[] output_types, Shape[] output_shapes, string name = "ParseExampleDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -19923,7 +19923,7 @@ namespace Tensorflow.Operations /// feature_list_dense_lengths : /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. /// - public static (Tensor[] context_sparse_indices, Tensor[] context_sparse_values, Tensor[] context_sparse_shapes, Tensor[] context_dense_values, Tensor[] feature_list_sparse_indices, Tensor[] feature_list_sparse_values, Tensor[] feature_list_sparse_shapes, Tensor[] feature_list_dense_values, Tensor[] feature_list_dense_lengths) parse_sequence_example(Tensor serialized, Tensor debug_name, Tensor[] context_dense_defaults, string[] feature_list_dense_missing_assumed_empty, string[] context_sparse_keys, string[] context_dense_keys, string[] feature_list_sparse_keys, string[] feature_list_dense_keys, int? Ncontext_sparse = null, int? Ncontext_dense = null, int? Nfeature_list_sparse = null, int? Nfeature_list_dense = null, TF_DataType[] context_sparse_types = null, TF_DataType[] feature_list_dense_types = null, TensorShape[] context_dense_shapes = null, TF_DataType[] feature_list_sparse_types = null, TensorShape[] feature_list_dense_shapes = null, string name = "ParseSequenceExample") + public static (Tensor[] context_sparse_indices, Tensor[] context_sparse_values, Tensor[] context_sparse_shapes, Tensor[] context_dense_values, Tensor[] feature_list_sparse_indices, Tensor[] feature_list_sparse_values, Tensor[] feature_list_sparse_shapes, Tensor[] feature_list_dense_values, Tensor[] feature_list_dense_lengths) parse_sequence_example(Tensor serialized, Tensor debug_name, Tensor[] context_dense_defaults, string[] feature_list_dense_missing_assumed_empty, string[] context_sparse_keys, string[] context_dense_keys, string[] feature_list_sparse_keys, string[] feature_list_dense_keys, int? Ncontext_sparse = null, int? Ncontext_dense = null, int? Nfeature_list_sparse = null, int? Nfeature_list_dense = null, TF_DataType[] context_sparse_types = null, TF_DataType[] feature_list_dense_types = null, Shape[] context_dense_shapes = null, TF_DataType[] feature_list_sparse_types = null, Shape[] feature_list_dense_shapes = null, string name = "ParseSequenceExample") { var dict = new Dictionary(); dict["serialized"] = serialized; @@ -20029,7 +20029,7 @@ namespace Tensorflow.Operations /// dense_values : /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. 
/// - public static (Tensor[] sparse_indices, Tensor[] sparse_values, Tensor[] sparse_shapes, Tensor[] dense_values) parse_single_example(Tensor serialized, Tensor[] dense_defaults, int num_sparse, string[] sparse_keys, string[] dense_keys, TF_DataType[] sparse_types, TensorShape[] dense_shapes, string name = "ParseSingleExample") + public static (Tensor[] sparse_indices, Tensor[] sparse_values, Tensor[] sparse_shapes, Tensor[] dense_values) parse_single_example(Tensor serialized, Tensor[] dense_defaults, int num_sparse, string[] sparse_keys, string[] dense_keys, TF_DataType[] sparse_types, Shape[] dense_shapes, string name = "ParseSingleExample") { var dict = new Dictionary(); dict["serialized"] = serialized; @@ -20140,7 +20140,7 @@ namespace Tensorflow.Operations /// feature_list_dense_values : /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. /// - public static (Tensor[] context_sparse_indices, Tensor[] context_sparse_values, Tensor[] context_sparse_shapes, Tensor[] context_dense_values, Tensor[] feature_list_sparse_indices, Tensor[] feature_list_sparse_values, Tensor[] feature_list_sparse_shapes, Tensor[] feature_list_dense_values) parse_single_sequence_example(Tensor serialized, Tensor feature_list_dense_missing_assumed_empty, Tensor[] context_sparse_keys, Tensor[] context_dense_keys, Tensor[] feature_list_sparse_keys, Tensor[] feature_list_dense_keys, Tensor[] context_dense_defaults, Tensor debug_name, TF_DataType[] context_sparse_types = null, TF_DataType[] feature_list_dense_types = null, TensorShape[] context_dense_shapes = null, TF_DataType[] feature_list_sparse_types = null, TensorShape[] feature_list_dense_shapes = null, string name = "ParseSingleSequenceExample") + public static (Tensor[] context_sparse_indices, Tensor[] context_sparse_values, Tensor[] context_sparse_shapes, Tensor[] context_dense_values, Tensor[] feature_list_sparse_indices, Tensor[] feature_list_sparse_values, Tensor[] feature_list_sparse_shapes, Tensor[] feature_list_dense_values) parse_single_sequence_example(Tensor serialized, Tensor feature_list_dense_missing_assumed_empty, Tensor[] context_sparse_keys, Tensor[] context_dense_keys, Tensor[] feature_list_sparse_keys, Tensor[] feature_list_dense_keys, Tensor[] context_dense_defaults, Tensor debug_name, TF_DataType[] context_sparse_types = null, TF_DataType[] feature_list_dense_types = null, Shape[] context_dense_shapes = null, TF_DataType[] feature_list_sparse_types = null, Shape[] feature_list_dense_shapes = null, string name = "ParseSingleSequenceExample") { var dict = new Dictionary(); dict["serialized"] = serialized; @@ -20224,7 +20224,7 @@ namespace Tensorflow.Operations /// intended as a way to represent a value that will always be fed, and to /// provide attrs that enable the fed value to be checked at runtime. /// - public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = "Placeholder") + public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string name = "Placeholder") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -20258,7 +20258,7 @@ namespace Tensorflow.Operations /// intended as a way to represent a value that will always be fed, and to /// provide attrs that enable the fed value to be checked at runtime. 
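// --- Illustrative sketch, not part of the generated code: feeding the new Shape type
// to the placeholder wrapper above. Assumes graph mode; -1 leaves the batch dimension
// unknown, matching the convention used elsewhere in this patch. Names are hypothetical.
using Tensorflow;
using Tensorflow.Operations;

public static class PlaceholderSketch
{
    public static Tensor Build()
    {
        // Batch size unknown, 784 features per example.
        return gen_ops.placeholder(TF_DataType.TF_FLOAT, new Shape(-1, 784), name: "x");
    }
}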
/// - public static Tensor placeholder_v2(TF_DataType dtype, TensorShape shape, string name = "PlaceholderV2") + public static Tensor placeholder_v2(TF_DataType dtype, Shape shape, string name = "PlaceholderV2") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -20284,7 +20284,7 @@ namespace Tensorflow.Operations /// A placeholder tensor that defaults to input if it is not fed. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor placeholder_with_default(Tensor input, TensorShape shape, string name = "PlaceholderWithDefault") + public static Tensor placeholder_with_default(Tensor input, Shape shape, string name = "PlaceholderWithDefault") { var dict = new Dictionary(); dict["input"] = input; @@ -20403,7 +20403,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor prefetch_dataset(Tensor input_dataset, Tensor buffer_size, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "PrefetchDataset") + public static Tensor prefetch_dataset(Tensor input_dataset, Tensor buffer_size, TF_DataType[] output_types, Shape[] output_shapes, string name = "PrefetchDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -20532,7 +20532,7 @@ namespace Tensorflow.Operations /// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra /// entry in their input (resp. output) lists. /// - public static Tensor priority_queue(TensorShape[] shapes, TF_DataType[] component_types = null, int? capacity = null, string container = null, string shared_name = null, string name = "PriorityQueue") + public static Tensor priority_queue(Shape[] shapes, TF_DataType[] component_types = null, int? capacity = null, string container = null, string shared_name = null, string name = "PriorityQueue") { var dict = new Dictionary(); dict["shapes"] = shapes; @@ -20587,7 +20587,7 @@ namespace Tensorflow.Operations /// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra /// entry in their input (resp. output) lists. /// - public static Tensor priority_queue_v2(TensorShape[] shapes, TF_DataType[] component_types = null, int? capacity = null, string container = null, string shared_name = null, string name = "PriorityQueueV2") + public static Tensor priority_queue_v2(Shape[] shapes, TF_DataType[] component_types = null, int? capacity = null, string container = null, string shared_name = null, string name = "PriorityQueueV2") { var dict = new Dictionary(); dict["shapes"] = shapes; @@ -22781,7 +22781,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor random_dataset(Tensor seed, Tensor seed2, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "RandomDataset") + public static Tensor random_dataset(Tensor seed, Tensor seed2, TF_DataType[] output_types, Shape[] output_shapes, string name = "RandomDataset") { var dict = new Dictionary(); dict["seed"] = seed; @@ -23035,7 +23035,7 @@ namespace Tensorflow.Operations /// The handle to the queue. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor random_shuffle_queue(TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, int? 
min_after_dequeue = null, int? seed = null, int? seed2 = null, string container = null, string shared_name = null, string name = "RandomShuffleQueue") + public static Tensor random_shuffle_queue(TF_DataType[] component_types, Shape[] shapes = null, int? capacity = null, int? min_after_dequeue = null, int? seed = null, int? seed2 = null, string container = null, string shared_name = null, string name = "RandomShuffleQueue") { var dict = new Dictionary(); dict["component_types"] = component_types; @@ -23101,7 +23101,7 @@ namespace Tensorflow.Operations /// The handle to the queue. /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor random_shuffle_queue_v2(TF_DataType[] component_types, TensorShape[] shapes = null, int? capacity = null, int? min_after_dequeue = null, int? seed = null, int? seed2 = null, string container = null, string shared_name = null, string name = "RandomShuffleQueueV2") + public static Tensor random_shuffle_queue_v2(TF_DataType[] component_types, Shape[] shapes = null, int? capacity = null, int? min_after_dequeue = null, int? seed = null, int? seed2 = null, string container = null, string shared_name = null, string name = "RandomShuffleQueueV2") { var dict = new Dictionary(); dict["component_types"] = component_types; @@ -23322,7 +23322,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor range_dataset(Tensor start, Tensor stop, Tensor step, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "RangeDataset") + public static Tensor range_dataset(Tensor start, Tensor stop, Tensor step, TF_DataType[] output_types, Shape[] output_shapes, string name = "RangeDataset") { var dict = new Dictionary(); dict["start"] = start; @@ -24458,7 +24458,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor repeat_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "RepeatDataset") + public static Tensor repeat_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, Shape[] output_shapes, string name = "RepeatDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -29811,7 +29811,7 @@ namespace Tensorflow.Operations /// /// pseudorandomly. /// - public static Tensor shuffle_and_repeat_dataset(Tensor input_dataset, Tensor buffer_size, Tensor seed, Tensor seed2, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "ShuffleAndRepeatDataset") + public static Tensor shuffle_and_repeat_dataset(Tensor input_dataset, Tensor buffer_size, Tensor seed, Tensor seed2, Tensor count, TF_DataType[] output_types, Shape[] output_shapes, string name = "ShuffleAndRepeatDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -29862,7 +29862,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor shuffle_dataset(Tensor input_dataset, Tensor buffer_size, Tensor seed, Tensor seed2, TF_DataType[] output_types, TensorShape[] output_shapes, bool? 
reshuffle_each_iteration = null, string name = "ShuffleDataset") + public static Tensor shuffle_dataset(Tensor input_dataset, Tensor buffer_size, Tensor seed, Tensor seed2, TF_DataType[] output_types, Shape[] output_shapes, bool? reshuffle_each_iteration = null, string name = "ShuffleDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -30084,7 +30084,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor skip_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "SkipDataset") + public static Tensor skip_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, Shape[] output_shapes, string name = "SkipDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -30222,7 +30222,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor slide_dataset(Tensor input_dataset, Tensor window_size, Tensor window_shift, Tensor window_stride, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "SlideDataset") + public static Tensor slide_dataset(Tensor input_dataset, Tensor window_size, Tensor window_shift, Tensor window_stride, TF_DataType[] output_types, Shape[] output_shapes, string name = "SlideDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -31745,7 +31745,7 @@ namespace Tensorflow.Operations /// resets the aggregate to 0, and increments the global_step recorded by /// the accumulator. /// - public static Tensor sparse_conditional_accumulator(TF_DataType dtype, TensorShape shape, string container = null, string shared_name = null, string name = "SparseConditionalAccumulator") + public static Tensor sparse_conditional_accumulator(TF_DataType dtype, Shape shape, string container = null, string shared_name = null, string name = "SparseConditionalAccumulator") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -33565,7 +33565,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor sql_dataset(Tensor driver_name, Tensor data_source_name, Tensor query, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "SqlDataset") + public static Tensor sql_dataset(Tensor driver_name, Tensor data_source_name, Tensor query, TF_DataType[] output_types, Shape[] output_shapes, string name = "SqlDataset") { var dict = new Dictionary(); dict["driver_name"] = driver_name; @@ -35743,7 +35743,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
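// --- Illustrative sketch, not part of the generated code: dataset wrappers such as
// repeat_dataset, shuffle_dataset and take_dataset take one dtype and one Shape per
// output component, and the two arrays must line up index-for-index. The component
// values below are hypothetical.
using Tensorflow;

public static class DatasetComponentSketch
{
    // One dtype and one Shape per dataset component, aligned by index.
    public static readonly TF_DataType[] OutputTypes =
        { TF_DataType.TF_FLOAT, TF_DataType.TF_INT64 };

    public static readonly Shape[] OutputShapes =
    {
        new Shape(28, 28, 1),   // image component
        Shape.Scalar            // scalar label component
    };
}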
/// - public static Tensor take_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "TakeDataset") + public static Tensor take_dataset(Tensor input_dataset, Tensor count, TF_DataType[] output_types, Shape[] output_shapes, string name = "TakeDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -35953,7 +35953,7 @@ namespace Tensorflow.Operations /// var = state_ops.assign_add(var, [[6.0, 7.0]]) /// final = state_ops._destroy_temporary_variable(var, var_name=var_name) /// - public static Tensor temporary_variable(TensorShape shape, TF_DataType dtype, string var_name = null, string name = "TemporaryVariable") + public static Tensor temporary_variable(Shape shape, TF_DataType dtype, string var_name = null, string name = "TemporaryVariable") { var dict = new Dictionary(); dict["shape"] = shape; @@ -36028,7 +36028,7 @@ namespace Tensorflow.Operations /// lengths : /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. /// - public static (Tensor value, Tensor lengths) tensor_array_concat_v2(Tensor handle, Tensor flow_in, TF_DataType dtype, TensorShape element_shape_except0 = null, string name = "TensorArrayConcatV2") + public static (Tensor value, Tensor lengths) tensor_array_concat_v2(Tensor handle, Tensor flow_in, TF_DataType dtype, Shape element_shape_except0 = null, string name = "TensorArrayConcatV2") { var dict = new Dictionary(); dict["handle"] = handle; @@ -36089,7 +36089,7 @@ namespace Tensorflow.Operations /// /// All elements must have the same shape (excepting the first dimension). /// - public static (Tensor value, Tensor lengths) tensor_array_concat_v3(Tensor handle, Tensor flow_in, TF_DataType dtype, TensorShape element_shape_except0 = null, string name = "TensorArrayConcatV3") + public static (Tensor value, Tensor lengths) tensor_array_concat_v3(Tensor handle, Tensor flow_in, TF_DataType dtype, Shape element_shape_except0 = null, string name = "TensorArrayConcatV3") { var dict = new Dictionary(); dict["handle"] = handle; @@ -36124,7 +36124,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor tensor_array_gather_v2(Tensor handle, Tensor indices, Tensor flow_in, TF_DataType dtype, TensorShape element_shape = null, string name = "TensorArrayGatherV2") + public static Tensor tensor_array_gather_v2(Tensor handle, Tensor indices, Tensor flow_in, TF_DataType dtype, Shape element_shape = null, string name = "TensorArrayGatherV2") { var dict = new Dictionary(); dict["handle"] = handle; @@ -36169,7 +36169,7 @@ namespace Tensorflow.Operations /// /// All elements selected by indices must have the same shape. /// - public static Tensor tensor_array_gather_v3(Tensor handle, Tensor indices, Tensor flow_in, TF_DataType dtype, TensorShape element_shape = null, string name = "TensorArrayGatherV3") + public static Tensor tensor_array_gather_v3(Tensor handle, Tensor indices, Tensor flow_in, TF_DataType dtype, Shape element_shape = null, string name = "TensorArrayGatherV3") { var dict = new Dictionary(); dict["handle"] = handle; @@ -36613,7 +36613,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor tensor_array_v2(Tensor size, TF_DataType dtype, TensorShape element_shape = null, bool? 
dynamic_size = null, bool? clear_after_read = null, string tensor_array_name = null, string name = "TensorArrayV2") + public static Tensor tensor_array_v2(Tensor size, TF_DataType dtype, Shape element_shape = null, bool? dynamic_size = null, bool? clear_after_read = null, string tensor_array_name = null, string name = "TensorArrayV2") { var dict = new Dictionary(); dict["size"] = size; @@ -36679,7 +36679,7 @@ namespace Tensorflow.Operations /// /// Write data via Write and read via Read or Pack. /// - public static (Tensor handle, Tensor flow) tensor_array_v3(Tensor size, TF_DataType dtype, TensorShape element_shape = null, bool? dynamic_size = null, bool? clear_after_read = null, bool? identical_element_shapes = null, string tensor_array_name = null, string name = "TensorArrayV3") + public static (Tensor handle, Tensor flow) tensor_array_v3(Tensor size, TF_DataType dtype, Shape element_shape = null, bool? dynamic_size = null, bool? clear_after_read = null, bool? identical_element_shapes = null, string tensor_array_name = null, string name = "TensorArrayV3") { var dict = new Dictionary(); dict["size"] = size; @@ -36776,7 +36776,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor tensor_dataset(Tensor[] components, TensorShape[] output_shapes, string name = "TensorDataset") + public static Tensor tensor_dataset(Tensor[] components, Shape[] output_shapes, string name = "TensorDataset") { var dict = new Dictionary(); dict["components"] = components; @@ -37144,7 +37144,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor tensor_slice_dataset(Tensor[] components, TensorShape[] output_shapes, string name = "TensorSliceDataset") + public static Tensor tensor_slice_dataset(Tensor[] components, Shape[] output_shapes, string name = "TensorSliceDataset") { var dict = new Dictionary(); dict["components"] = components; @@ -37899,7 +37899,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor unbatch_dataset(Tensor input_dataset, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "UnbatchDataset") + public static Tensor unbatch_dataset(Tensor input_dataset, TF_DataType[] output_types, Shape[] output_shapes, string name = "UnbatchDataset") { var dict = new Dictionary(); dict["input_dataset"] = input_dataset; @@ -38656,7 +38656,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor var_handle_op(TF_DataType dtype, TensorShape shape, string container = null, string shared_name = null, string name = "VarHandleOp") + public static Tensor var_handle_op(TF_DataType dtype, Shape shape, string container = null, string shared_name = null, string name = "VarHandleOp") { var dict = new Dictionary(); dict["dtype"] = dtype; @@ -38710,7 +38710,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
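// --- Illustrative sketch, not part of the generated code: var_handle_op (and the
// variable wrappers below) now take the variable's static Shape directly. Assumes
// graph mode; the tensor names used here are hypothetical.
using Tensorflow;
using Tensorflow.Operations;

public static class VarHandleSketch
{
    public static Tensor Build()
    {
        return gen_ops.var_handle_op(TF_DataType.TF_FLOAT, new Shape(10, 10),
                                     shared_name: "w", name: "w_handle");
    }
}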
/// - public static Tensor variable(TensorShape shape, TF_DataType dtype, string container = null, string shared_name = null, string name = "Variable") + public static Tensor variable(Shape shape, TF_DataType dtype, string container = null, string shared_name = null, string name = "Variable") { var dict = new Dictionary(); dict["shape"] = shape; @@ -38787,7 +38787,7 @@ namespace Tensorflow.Operations /// TODO(zhifengc/mrry): Adds a pointer to a more detail document /// about sharing states in tensorflow. /// - public static Tensor variable_v2(TensorShape shape, TF_DataType dtype, string container = null, string shared_name = null, string name = "VariableV2") + public static Tensor variable_v2(Shape shape, TF_DataType dtype, string container = null, string shared_name = null, string name = "VariableV2") { var dict = new Dictionary(); dict["shape"] = shape; @@ -39064,7 +39064,7 @@ namespace Tensorflow.Operations /// /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. /// - public static Tensor zip_dataset(Tensor[] input_datasets, TF_DataType[] output_types, TensorShape[] output_shapes, string name = "ZipDataset") + public static Tensor zip_dataset(Tensor[] input_datasets, TF_DataType[] output_types, Shape[] output_shapes, string name = "ZipDataset") { var dict = new Dictionary(); dict["input_datasets"] = input_datasets; diff --git a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs index e9c4a1f2..33090325 100644 --- a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs @@ -94,7 +94,7 @@ namespace Tensorflow /// /// /// - public static Tensor var_handle_op(TF_DataType dtype, TensorShape shape, + public static Tensor var_handle_op(TF_DataType dtype, Shape shape, string container = "", string shared_name = "", string name = null) { if (tf.Context.executing_eagerly()) diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs index 917dbd6b..f7302c22 100644 --- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs +++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs @@ -53,11 +53,11 @@ namespace Tensorflow internal static long[] _ImageDimensions(Tensor image, int rank) { - if (image.TensorShape.is_fully_defined()) - return image.TensorShape.as_list(); + if (image.shape.IsFullyDefined) + return image.shape.dims; else { - var static_shape = image.TensorShape.with_rank(rank).as_list(); + var static_shape = image.shape.with_rank(rank).dims; var dynamic_shape = array_ops.unstack(array_ops.shape(image), rank); long[] ss_storage = null; @@ -82,23 +82,23 @@ namespace Tensorflow internal static Operation[] _CheckAtLeast3DImage(Tensor image, bool require_static) { - TensorShape image_shape; + Shape image_shape; try { - if (image.TensorShape.ndim == Unknown) + if (image.shape.ndim == Unknown) { - image_shape = image.TensorShape.with_rank(3); + image_shape = image.shape.with_rank(3); } else { - image_shape = image.TensorShape.with_rank_at_least(3); + image_shape = image.shape.with_rank_at_least(3); } } catch (ValueError) { throw new ValueError("'image' must be at least three-dimensional."); } - if (require_static & !image_shape.is_fully_defined()) + if (require_static & !image_shape.IsFullyDefined) { throw new ValueError("\'image\' must be fully defined."); } @@ -110,11 +110,11 @@ namespace Tensorflow } } - var 
image_shape_last_three_elements = new TensorShape(new[] { + var image_shape_last_three_elements = new Shape(new[] { image_shape.dims[image_shape.dims.Length - 1], image_shape.dims[image_shape.dims.Length - 2], image_shape.dims[image_shape.dims.Length - 3]}); - if (!image_shape_last_three_elements.is_fully_defined()) + if (!image_shape_last_three_elements.IsFullyDefined) { Tensor image_shape_ = array_ops.shape(image); var image_shape_return = tf.constant(new[] { @@ -142,15 +142,15 @@ namespace Tensorflow internal static Tensor fix_image_flip_shape(Tensor image, Tensor result) { - TensorShape image_shape = image.shape; + Shape image_shape = image.shape; if (image_shape == image_shape.unknown_shape()) { // c# defaults null types to 0 anyhow, so this should be a pretty equivalent port - result.set_shape(new TensorShape(new long[] { 0, 0, 0 })); + result.shape = new long[] { 0, 0, 0 }; } else { - result.set_shape(image_shape); + result.shape = image_shape; } return result; } @@ -173,7 +173,7 @@ namespace Tensorflow { image = ops.convert_to_tensor(image, name: "image"); image = _AssertAtLeast3DImage(image); - TensorShape shape = image.shape; + Shape shape = image.shape; if (shape.ndim == 3 || shape.ndim == Unknown) { Tensor uniform_random = random_ops.random_uniform(new int[] { }, 0f, 1.0f, seed: seed); @@ -219,7 +219,7 @@ namespace Tensorflow { image = ops.convert_to_tensor(image, name: "image"); image = _AssertAtLeast3DImage(image); - TensorShape shape = image.shape; + Shape shape = image.shape; if (shape.ndim == 3 || shape.ndim == Unknown) { return fix_image_flip_shape(image, gen_array_ops.reverse(image, new { flip_index })); @@ -245,10 +245,10 @@ namespace Tensorflow // can't get k to convert to tensor without throwing error about it being an int--- // might rework later. for now, k2 == k as Tensor Tensor k2 = ops.convert_to_tensor(k, dtype: dtypes.int32, name: "k"); - k2.TensorShape.assert_has_rank(0); + k2.shape.assert_has_rank(0); k2 = gen_ops.mod(k2, tf.constant(4)); - TensorShape shape = image.shape; + Shape shape = image.shape; if (shape.ndim == 3 || shape.ndim == Unknown) { return _rot90_3D(image, k, scope); @@ -284,7 +284,7 @@ namespace Tensorflow math_ops.equal(k, 3), _rot270()}; var result = control_flow_ops.case_v2(cases, callable_default: () => new Tensor[] { image }, exclusive: true, name: name_scope); - result.set_shape(new long[] { -1, -1, image.TensorShape.dims[2] }); + result.shape = new long[] { -1, -1, image.shape.dims[2] }; return result; } @@ -295,7 +295,7 @@ namespace Tensorflow { image = ops.convert_to_tensor(image, name: "image"); image = _AssertAtLeast3DImage(image); - TensorShape shape = image.shape; + Shape shape = image.shape; if (shape.ndim == 3 || shape.ndim == Unknown) { return array_ops.transpose(image, new[] { 1, 0, 2 }, name: name); @@ -322,14 +322,14 @@ namespace Tensorflow return image; _AssertAtLeast3DImage(image); - var rank = image.TensorShape.ndim; + var rank = image.shape.ndim; if (rank != 3 && rank != 4) throw new ValueError(String.Format(@"`image` should either be a Tensor with rank = 3 or rank = 4. Had rank = {0}", rank)); object[] _get_dim(Tensor tensor, int idx) { - var static_shape = tensor.TensorShape.dims[idx]; + var static_shape = tensor.shape.dims[idx]; if (static_shape != (int)None) return new object[2] { static_shape, false }; return new object[2] { array_ops.shape(tensor)[idx], true }; @@ -445,7 +445,7 @@ or rank = 4. 
Had rank = {0}", rank)); image = ops.convert_to_tensor(image, name: "image"); bool is_batch = true; - TensorShape image_shape = image.shape; + Shape image_shape = image.shape; if (image_shape.ndim == 3) { is_batch = false; @@ -455,7 +455,7 @@ or rank = 4. Had rank = {0}", rank)); { is_batch = false; image = array_ops.expand_dims(image, 0); - image.set_shape(new TensorShape(0, 0, 0, 0)); + image.shape = new Shape(0, 0, 0, 0); } else if (image_shape.ndim != 4) { @@ -494,7 +494,7 @@ or rank = 4. Had rank = {0}", rank)); ); var padded = array_ops.pad(image, paddings); - TensorShape padded_shape_result() + Shape padded_shape_result() { long[] i_remnants = { }; foreach (var i in new[] { bhwd[0], target_height, target_width, bhwd[3] }) @@ -502,10 +502,10 @@ or rank = 4. Had rank = {0}", rank)); return null; else i_remnants[i_remnants.Length] = i; - return new TensorShape(i_remnants); + return new Shape(i_remnants); }; - TensorShape padded_shape = padded_shape_result(); - padded.set_shape(padded_shape); + Shape padded_shape = padded_shape_result(); + padded.shape = padded_shape; if (!is_batch) { @@ -524,7 +524,7 @@ or rank = 4. Had rank = {0}", rank)); image = ops.convert_to_tensor(image, name: "image"); bool is_batch = true; - TensorShape image_shape = image.shape; + Shape image_shape = image.shape; if (image_shape.ndim == 3) { is_batch = false; @@ -534,7 +534,7 @@ or rank = 4. Had rank = {0}", rank)); { is_batch = false; image = array_ops.expand_dims(image, 0); - image.set_shape(new TensorShape(new long[] { 0, 0, 0, 0 })); + image.shape = new long[] { 0, 0, 0, 0 }; } else if (image_shape.ndim != 4) { @@ -573,7 +573,7 @@ or rank = 4. Had rank = {0}", rank)); image, array_ops.stack(new[] { 0, offset_height, offset_width, 0 }), array_ops.stack(new[] { -1, target_height, target_width, -1 })); - TensorShape cropped_shape_result() + Shape cropped_shape_result() { long[] i_remnants = { }; foreach (var i in new[] { bhwd[0], target_height, target_width, bhwd[3] }) @@ -581,10 +581,10 @@ or rank = 4. Had rank = {0}", rank)); return null; else i_remnants[i_remnants.Length] = i; - return new TensorShape(i_remnants); + return new Shape(i_remnants); }; var cropped_shape = cropped_shape_result(); - cropped.set_shape(cropped_shape); + cropped.shape = cropped_shape; if (!is_batch) { @@ -601,7 +601,7 @@ or rank = 4. Had rank = {0}", rank)); return tf_with(ops.name_scope(null, "resize_image_with_crop_or_pad", new[] { image }), delegate { image = ops.convert_to_tensor(image, name: "image"); - TensorShape image_shape = image.shape; + Shape image_shape = image.shape; bool is_batch = true; if (image_shape.ndim == 3) { @@ -612,7 +612,7 @@ or rank = 4. Had rank = {0}", rank)); { is_batch = false; image = array_ops.expand_dims(image, 0); - image.set_shape(new TensorShape(new int[] { 0, 0, 0, 0 })); + image.shape = new long[] { 0, 0, 0, 0 }; } else if (image_shape.ndim != 4) { @@ -684,7 +684,7 @@ or rank = 4. Had rank = {0}", rank)); Tensor resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width, (int)target_height, (int)target_width); - if (resized.TensorShape.ndim == Unknown) + if (resized.shape.ndim == Unknown) throw new ValueError("resized contains no shape."); var _rhrw_ = _ImageDimensions(resized, rank: 4); @@ -713,20 +713,20 @@ or rank = 4. 
Had rank = {0}", rank)); { return tf_with(ops.name_scope(name, "resize", new[] { images, size }), delegate { - if (images.TensorShape.ndim == Unknown) + if (images.shape.ndim == Unknown) throw new ValueError("\'images\' contains no shape."); bool is_batch = true; - if (images.TensorShape.ndim == 3) + if (images.shape.ndim == 3) { is_batch = false; images = array_ops.expand_dims(images, 0); } - else if (images.TensorShape.ndim != 4) + else if (images.shape.ndim != 4) throw new ValueError("\'images\' must have either 3 or 4 dimensions."); var (height, width) = (images.dims[1], images.dims[2]); - if (!size.TensorShape.is_compatible_with(new[] { 2 })) + if (!size.shape.is_compatible_with(new[] { 2 })) throw new ValueError(@"\'size\' must be a 1-D Tensor of 2 elements: new_height, new_width"); @@ -782,7 +782,7 @@ new_height, new_width"); images = resizer_fn(images, size); - images.set_shape(new TensorShape(new long[] { Unknown, new_height_const, new_width_const, Unknown })); + images.shape = new Shape(Unknown, new_height_const, new_width_const, Unknown); if (!is_batch) images = array_ops.squeeze(images, axis: new int[] { 0 }); @@ -862,7 +862,7 @@ new_height, new_width"); return tf_with(ops.name_scope(null, "resize_image_with_pad", new[] { image }), delegate { image = ops.convert_to_tensor(image, name: "tensor"); - var image_shape = image.TensorShape; + var image_shape = image.shape; bool is_batch = true; if (image_shape.ndim == 3) { @@ -873,7 +873,7 @@ new_height, new_width"); { is_batch = false; image = array_ops.expand_dims(image, 0); - image.set_shape(new TensorShape(new[] { Unknown, Unknown, Unknown, Unknown })); + image.shape = new Shape(Unknown, Unknown, Unknown, Unknown); } else if (image_shape.ndim != 4) { @@ -928,7 +928,7 @@ new_height, new_width"); var padded = pad_to_bounding_box(resized, p_height, p_width, target_height, target_width); - if (padded.TensorShape.ndim == Unknown) + if (padded.shape.ndim == Unknown) throw new ValueError("padded contains no shape."); _ImageDimensions(padded, rank: 4); @@ -1175,7 +1175,7 @@ new_height, new_width"); return tf_with(ops.name_scope(name, "adjust_jpeg_quality", new[] { image }), delegate { image = ops.convert_to_tensor(image, name: "image"); - var channels = image.TensorShape.as_list()[image.TensorShape.dims.Length - 1]; + var channels = image.shape[image.shape.dims.Length - 1]; var orig_dtype = image.dtype; // python code checks to ensure jpeq_quality is a tensor; unnecessary here since // it is passed as a tensor @@ -1327,7 +1327,7 @@ new_height, new_width"); {0.587f, -0.27455667f, -0.52273617f}, {0.114f, -0.32134392f, 0.31119955f}}; Tensor kernel = ops.convert_to_tensor(_rgb_to_yiq_kernel, dtype: images.dtype, name: "kernel"); - var ndims = images.TensorShape.ndim; + var ndims = images.shape.ndim; return math_ops.tensordot(images, kernel, axes: new int[] { ndims - 1, 0 }); } @@ -1338,7 +1338,7 @@ new_height, new_width"); {0.95598634f, -0.27201283f, -1.10674021f}, {0.6208248f, -0.64720424f, 1.70423049f}}; Tensor kernel = ops.convert_to_tensor(_yiq_to_rgb_kernel, dtype: images.dtype, name: "kernel"); - var ndims = images.TensorShape.ndim; + var ndims = images.shape.ndim; return math_ops.tensordot(images, kernel, axes: new int[] { ndims - 1, 0 }); } @@ -1349,7 +1349,7 @@ new_height, new_width"); {0.587f, -0.28886916f, -0.51496512f}, {0.114f, 0.43601035f, -0.10001026f}}; Tensor kernel = ops.convert_to_tensor(_rgb_to_yuv_kernel, dtype: images.dtype, name: "kernel"); - var ndims = images.TensorShape.ndim; + var ndims = images.shape.ndim; return 
math_ops.tensordot(images, kernel, axes: new int[] { ndims - 1, 0 }); } @@ -1360,15 +1360,15 @@ new_height, new_width"); {0f, -0.394642334f, 2.03206185f}, {1.13988303f, -0.58062185f, 0f}}; Tensor kernel = ops.convert_to_tensor(_yuv_to_rgb_kernel, dtype: images.dtype, name: "kernel"); - var ndims = images.TensorShape.ndim; + var ndims = images.shape.ndim; return math_ops.tensordot(images, kernel, axes: new int[] { ndims - 1, 0 }); } internal static (Tensor, Tensor, Operation[]) _verify_compatible_image_shapes(Tensor img1, Tensor img2) { - TensorShape shape1 = img1.TensorShape.with_rank_at_least(3); - TensorShape shape2 = img2.TensorShape.with_rank_at_least(3); - shape1 = new TensorShape(shape1.dims.Skip(shape1.dims.Length - 3).Take(shape1.dims.Length - (shape1.dims.Length - 3)).ToArray()); + Shape shape1 = img1.shape.with_rank_at_least(3); + Shape shape2 = img2.shape.with_rank_at_least(3); + shape1 = new Shape(shape1.dims.Skip(shape1.dims.Length - 3).Take(shape1.dims.Length - (shape1.dims.Length - 3)).ToArray()); tensor_shape.assert_is_compatible_with(self: new Tensor(shape1.dims), other: new Tensor(shape2.dims.Skip(shape2.dims.Length - 3).Take(shape2.dims.Length - (shape2.dims.Length - 3)).ToArray())); if (shape1.ndim != -1 && shape2.ndim != -1) @@ -1653,7 +1653,7 @@ new_height, new_width"); public static (Tensor, Tensor) image_gradients(Tensor image) { - if (image.TensorShape.ndim != 4) + if (image.shape.ndim != 4) throw new ValueError(String.Format(@"image_gradients expects a 4D tensor [batch_size, h, w, d], not {0}.", image.shape)); var image_shape = array_ops.shape(image); @@ -1674,7 +1674,7 @@ new_height, new_width"); public static Tensor sobel_edges(Tensor image) { - var static_image_shape = image.TensorShape; + var static_image_shape = image.shape; var image_shape = array_ops.shape(image); var kernels = new Tensor(new int[,] {{-1, -2, -1}, {0, 0, 0}, {1, 2, 1}, {-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}}); @@ -1695,7 +1695,7 @@ new_height, new_width"); var shape = array_ops.concat(new Tensor[] { image_shape, ops.convert_to_tensor(num_kernels) }, 0); output = array_ops.reshape(output, shape: shape); - output.set_shape(static_image_shape.concatenate(new int[] { num_kernels })); + output.shape = static_image_shape.concatenate(new int[] { num_kernels }); return output; } @@ -1930,7 +1930,7 @@ new_height, new_width"); return tf_with(ops.name_scope(name, "non_max_suppression_padded"), delegate { if (!pad_to_max_output_size) - if (boxes.TensorShape.rank != -1 && boxes.TensorShape.rank > 2) + if (boxes.shape.ndim != -1 && boxes.shape.ndim > 2) throw new ValueError(String.Format( "'pad_to_max_output_size' (value {0}) must be true for 'batched input'", pad_to_max_output_size)); if (name == null) @@ -1943,11 +1943,11 @@ new_height, new_width"); // 0, slice(None, num_valid, None) // which is what I tried to replicate below, but i don't think that Unknown is the exact // equivalent to None, and don't know about the slice function bit. 
- idx = idx[0, slice(Unknown, num_valid.TensorShape.ndim, Unknown).ToArray()[0]]; + idx = idx[0, slice(Unknown, num_valid.shape.ndim, Unknown).ToArray()[0]]; else { var batch_dims = array_ops.concat(new Tensor[] { - new Tensor(array_ops.shape(boxes).dims.Take(boxes.TensorShape.dims.Length - 2).ToArray()), + new Tensor(array_ops.shape(boxes).dims.Take(boxes.shape.dims.Length - 2).ToArray()), array_ops.expand_dims(max_output_size, 0) }, 0); idx = array_ops.reshape(idx, batch_dims); @@ -1984,8 +1984,8 @@ new_height, new_width"); return (sorted_scores, sorted_boxes, sorted_scores_indices); } - var batch_dims = array_ops.shape(boxes).dims.Take(boxes.TensorShape.dims.Length - 2).ToArray(); - var num_boxes = array_ops.shape(boxes).dims[boxes.TensorShape.dims.Length - 2]; + var batch_dims = array_ops.shape(boxes).dims.Take(boxes.shape.dims.Length - 2).ToArray(); + var num_boxes = array_ops.shape(boxes).dims[boxes.shape.dims.Length - 2]; boxes = array_ops.reshape(boxes, new[] { -1, num_boxes, 4 }); scores = array_ops.reshape(scores, new[] { -1, num_boxes }); var batch_size = array_ops.shape(boxes).dims[0]; @@ -2059,14 +2059,14 @@ new_height, new_width"); body: (Tensor[] args) => suppression_loop_body(args), loop_vars: new object[] { boxes, iou_threshold, - array_ops.zeros(new TensorShape(batch_size), dtypes.int32), + array_ops.zeros(new Shape(batch_size), dtypes.int32), constant_op.constant(0) }, - shape_invariants: new TensorShape[] { - new TensorShape(new int[] {Unknown, Unknown, 4}), - new TensorShape(new int[] {}), - new TensorShape(new int[] {Unknown}), - new TensorShape(new int[] {}) + shape_invariants: new Shape[] { + new Shape(new int[] {Unknown, Unknown, 4}), + new Shape(new int[] {}), + new Shape(new int[] {Unknown}), + new Shape(new int[] {}) } ); */ diff --git a/src/TensorFlowNET.Core/Operations/linalg_ops.cs b/src/TensorFlowNET.Core/Operations/linalg_ops.cs index d383830c..33fbe953 100644 --- a/src/TensorFlowNET.Core/Operations/linalg_ops.cs +++ b/src/TensorFlowNET.Core/Operations/linalg_ops.cs @@ -7,7 +7,7 @@ namespace Tensorflow { public Tensor eye(int num_rows, int num_columns = -1, - TensorShape batch_shape = null, + Shape batch_shape = null, TF_DataType dtype = TF_DataType.TF_DOUBLE, string name = null) { @@ -19,7 +19,7 @@ namespace Tensorflow bool is_square = num_columns == num_rows; var diag_size = Math.Min(num_rows, num_columns); if (batch_shape == null) - batch_shape = new TensorShape(new int[0]); + batch_shape = new Shape(new int[0]); var diag_shape = batch_shape.dims.concat(new long[] { diag_size }); long[] shape = null; diff --git a/src/TensorFlowNET.Core/Operations/map_fn.cs b/src/TensorFlowNET.Core/Operations/map_fn.cs index 3cf671fb..1803ac55 100644 --- a/src/TensorFlowNET.Core/Operations/map_fn.cs +++ b/src/TensorFlowNET.Core/Operations/map_fn.cs @@ -123,16 +123,16 @@ namespace Tensorflow maximum_iterations: tf.constant(n)); var results_flat = r_a.Accs_ta.Select(r => r.stack()).ToArray(); - var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].TensorShape.with_rank_at_least(1).dims[0])); + var n_static = new Dimension(tensor_shape.dimension_value(elems_flat[0].shape.with_rank_at_least(1).dims[0])); foreach (var elem in elems_flat.Skip(1)) { - n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.TensorShape.with_rank_at_least(1).dims[0]))); + n_static.merge_with(new Dimension(tensor_shape.dimension_value(elem.shape.with_rank_at_least(1).dims[0]))); } foreach (Tensor r in results_flat) { - r.set_shape(new 
TensorShape(n_static).concatenate(r.dims.Skip(1).ToArray())); + r.shape = new Shape(n_static).concatenate(r.dims.Skip(1).ToArray()); } // todo get working when the above caching_device is fixed diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index 7db11573..c4aac693 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -422,25 +422,33 @@ namespace Tensorflow public static Tensor lgamma(Tensor x, string name = null) => gen_math_ops.lgamma(x, name: name); - public static Tensor linspace(Tensor start, Tensor stop, Tensor num, string name = null, int axis = 0) + public static Tensor linspace(Tensor start, Tensor stop, int num = 50, string name = null, int axis = 0) { return tf_with(ops.name_scope(name, "linspace", new { start, stop }), scope => { + var num_int_tensor = array_ops.constant(num); + var num_tensor = array_ops.constant(num, dtype: start.dtype); + + var broadcast_shape = array_ops.broadcast_dynamic_shape(array_ops.shape(start), array_ops.shape(stop)); + start = gen_array_ops.broadcast_to(start, broadcast_shape); + stop = gen_array_ops.broadcast_to(stop, broadcast_shape); + var expanded_start = array_ops.expand_dims(start, axis: axis); var expanded_stop = array_ops.expand_dims(stop, axis: axis); + var shape = array_ops.shape(expanded_start); var ndims = array_ops.shape(shape)[0]; var axis_tensor = array_ops.where_v2(constant_op.constant(axis >= 0), x: axis, y: ndims + axis); // The purpose is to avoid having negative values when repeating. - var num_fill = gen_math_ops.maximum(num - 2, 0); - var n_steps = gen_math_ops.maximum(num - 1, 1); + var num_fill = gen_math_ops.maximum(num_int_tensor - 2, 0); + var n_steps = gen_math_ops.maximum(num_int_tensor - 1, 1); var delta = (expanded_stop - expanded_start) / cast(n_steps, expanded_stop.dtype); - var range_end = array_ops.where_v2(num >= 0, n_steps, -1); + var range_end = array_ops.where_v2(num_int_tensor >= 0, n_steps, -1); var desired_range = cast(range(1, range_end, dtype: dtypes.int64), delta.dtype); - var mask = gen_math_ops.equal(axis, range(ndims)); + var mask = gen_math_ops.equal(axis_tensor, range(ndims)); var desired_range_shape = array_ops.where_v2(mask, num_fill, 1); desired_range = array_ops.reshape(desired_range, desired_range_shape); var res = expanded_start + delta * desired_range; @@ -450,7 +458,7 @@ namespace Tensorflow var all_tensors = new[] { expanded_start, res, expanded_stop }; var concatenated = array_ops.concat(all_tensors, axis: axis); var begin = array_ops.zeros_like(shape); - var size = array_ops.where_v2(mask, num, shape); + var size = array_ops.where_v2(mask, num_int_tensor, shape); return array_ops.slice(concatenated, begin, size); }); @@ -745,7 +753,7 @@ namespace Tensorflow return tf.Context.ExecuteOp("Pow", name, new ExecuteOpArgs(x_tensor, y_tensor)); }); - public static Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range") + public static Tensor range(object start, object limit = null, object delta = null, TF_DataType? 
dtype = null, string name = "range") { if (limit == null) { @@ -753,36 +761,14 @@ namespace Tensorflow start = 0; } - if (dtype == TF_DataType.DtInvalid) - { - if (limit is Tensor tensor) - dtype = tensor.dtype; - else - dtype = limit.GetType().as_tf_dtype(); - } - - - if (delta == null) - { - if (dtype == TF_DataType.TF_INT32) - delta = 1; - else if (dtype == TF_DataType.TF_INT64) - delta = 1L; - else if (dtype == TF_DataType.TF_FLOAT) - delta = 1.0f; - else if (dtype == TF_DataType.TF_DOUBLE) - delta = 1.0d; - else - delta = 1; - } + var dtype1 = dtype ?? limit.GetDataType(); return tf_with(ops.name_scope(name, "Range", new { start, limit, delta }), scope => { name = scope; - var start1 = ops.convert_to_tensor(start, name: "start", dtype: dtype); - var limit1 = ops.convert_to_tensor(limit, name: "limit", dtype: dtype); - var delta1 = ops.convert_to_tensor(delta, name: "delta", dtype: dtype); - + var start1 = ops.convert_to_tensor(start, name: "start", dtype: dtype1); + var limit1 = ops.convert_to_tensor(limit, name: "limit", dtype: dtype1); + var delta1 = ops.convert_to_tensor(delta ?? 1, name: "delta", dtype: dtype1); return gen_math_ops.range(start1, limit1, delta1, name); }); } @@ -860,7 +846,7 @@ namespace Tensorflow Tensor maxlength = null, TF_DataType dtype = TF_DataType.TF_INT32, string name = null, - TensorShape axis = null, + Shape axis = null, bool binary_output = false) => tf_with(ops.name_scope(name, "bincount"), scope => { @@ -906,9 +892,9 @@ namespace Tensorflow { Tensor _tensordot_reshape(Tensor a, int[] axes, bool flipped = false) { - if (a.TensorShape.is_fully_defined() && isinstance(axes, (typeof(List), typeof(Tuple)))) + if (a.shape.IsFullyDefined && isinstance(axes, (typeof(List), typeof(Tuple)))) { - var shape_a = a.TensorShape.as_list(); + var shape_a = a.shape.dims; // axes int iter = 0; @@ -950,11 +936,11 @@ namespace Tensorflow + ops.convert_to_tensor(list(axes)); // new_shape - TensorShape new_shape; + Shape new_shape; if (flipped) - new_shape = new TensorShape(new int[] { prod_axes, prod_free }); + new_shape = new Shape(new int[] { prod_axes, prod_free }); else - new_shape = new TensorShape(new int[] { prod_free, prod_axes }); + new_shape = new Shape(new int[] { prod_free, prod_axes }); } throw new NotImplementedException("_tensordot_reshape"); diff --git a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs index 5704d881..e7779063 100644 --- a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs +++ b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs @@ -25,11 +25,11 @@ namespace Tensorflow public static Tensor conv2d_transpose(Tensor value = null, IVariableV1 filter = null, Tensor output_shape = null, - TensorShape strides = null, + Shape strides = null, string padding = "SAME", string data_format = "NHWC", string name = null, - TensorShape dilations = null) + Shape dilations = null) { if (dilations == null) dilations = (1, 1, 1, 1); @@ -186,7 +186,7 @@ namespace Tensorflow { return tf_with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope => { - var zero = array_ops.zeros(new Shape(), dtype: input_tensor.dtype); + var zero = array_ops.zeros(Shape.Null, dtype: input_tensor.dtype); var nonzero_count = math_ops.reduce_sum( math_ops.cast(gen_math_ops.not_equal(input_tensor, zero), dtype: dtype), name: "nonzero_count"); return nonzero_count; @@ -200,7 +200,7 @@ namespace Tensorflow name = scope; logits = ops.convert_to_tensor(logits, name: "logits"); labels = ops.convert_to_tensor(labels, name: 
"labels"); - labels.TensorShape.merge_with(logits.TensorShape); + labels.shape.merge_with(logits.shape); var zeros = array_ops.zeros_like(logits, dtype: logits.dtype); var cond = (logits >= zeros); diff --git a/src/TensorFlowNET.Core/Operations/nn_ops.cs b/src/TensorFlowNET.Core/Operations/nn_ops.cs index 6d69a55f..f6efe229 100644 --- a/src/TensorFlowNET.Core/Operations/nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/nn_ops.cs @@ -96,7 +96,7 @@ namespace Tensorflow var keep_mask = random_tensor >= rate; ret = x * scale * math_ops.cast(keep_mask, x.dtype); if (!tf.executing_eagerly()) - ret.set_shape(x.TensorShape); + ret.shape = x.shape; return ret; }); } @@ -203,14 +203,14 @@ namespace Tensorflow var precise_logits = logits.dtype == TF_DataType.TF_HALF ? math_ops.cast(logits, dtypes.float32) : logits; // Store label shape for result later. - var labels_static_shape = labels.TensorShape; + var labels_static_shape = labels.shape; var labels_shape = array_ops.shape(labels); /*bool static_shapes_fully_defined = ( labels_static_shape.is_fully_defined() && logits.get_shape()[:-1].is_fully_defined());*/ // Check if no reshapes are required. - if (logits.TensorShape.ndim == 2) + if (logits.shape.ndim == 2) { var (cost, _) = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( precise_logits, labels, name: name); @@ -236,7 +236,7 @@ namespace Tensorflow name = scope; var precise_logits = logits; var input_rank = array_ops.rank(precise_logits); - var shape = logits.TensorShape; + var shape = logits.shape; if (axis != -1) throw new NotImplementedException("softmax_cross_entropy_with_logits_v2_helper axis != -1"); @@ -282,7 +282,7 @@ namespace Tensorflow // Set output shape if known. if (!tf.Context.executing_eagerly()) { - var shape = logits.TensorShape; + var shape = logits.shape; if (shape != null && shape.ndim > 0) { var product = 1L; diff --git a/src/TensorFlowNET.Core/Operations/random_ops.cs b/src/TensorFlowNET.Core/Operations/random_ops.cs index d973ea7f..9f823c96 100644 --- a/src/TensorFlowNET.Core/Operations/random_ops.cs +++ b/src/TensorFlowNET.Core/Operations/random_ops.cs @@ -30,7 +30,7 @@ namespace Tensorflow /// /// /// - public static Tensor random_normal(TensorShape shape, + public static Tensor random_normal(Shape shape, float mean = 0.0f, float stddev = 1.0f, TF_DataType dtype = TF_DataType.TF_FLOAT, diff --git a/src/TensorFlowNET.Core/Operations/resource_variable_ops.cs b/src/TensorFlowNET.Core/Operations/resource_variable_ops.cs index 4340f65f..bc4e28b4 100644 --- a/src/TensorFlowNET.Core/Operations/resource_variable_ops.cs +++ b/src/TensorFlowNET.Core/Operations/resource_variable_ops.cs @@ -48,7 +48,7 @@ namespace Tensorflow /// /// /// - public static Tensor eager_safe_variable_handle(Tensor initial_value, TensorShape shape, + public static Tensor eager_safe_variable_handle(Tensor initial_value, Shape shape, string shared_name, string name, bool graph_mode) { var dtype = initial_value.dtype.as_base_dtype(); @@ -66,7 +66,7 @@ namespace Tensorflow /// /// /// - public static Tensor variable_handle_from_shape_and_dtype(TensorShape shape, TF_DataType dtype, + public static Tensor variable_handle_from_shape_and_dtype(Shape shape, TF_DataType dtype, string shared_name, string name, bool graph_mode, Tensor initial_value = null) { var container = ops.get_default_graph().Container; @@ -161,7 +161,7 @@ namespace Tensorflow var data = new HandleData(); data.ShapeAndType.Add(new HandleShapeAndType { - Shape = handle.TensorShape.as_proto(), + Shape = handle.shape.as_shape_proto(), Dtype = 
handle.dtype.as_datatype_enum() }); return data; diff --git a/src/TensorFlowNET.Core/Operations/string_ops.cs b/src/TensorFlowNET.Core/Operations/string_ops.cs index 7c73eb37..1e50c4ad 100644 --- a/src/TensorFlowNET.Core/Operations/string_ops.cs +++ b/src/TensorFlowNET.Core/Operations/string_ops.cs @@ -96,9 +96,9 @@ namespace Tensorflow } }.SetAttributes(new { maxsplit })); var (indices, values, shape) = (result[0], result[1], result[2]); - indices.set_shape(new TensorShape(-1, 2)); - values.set_shape(new TensorShape(-1)); - shape.set_shape(new TensorShape(2)); + indices.shape = new Shape(-1, 2); + values.shape = new Shape(-1); + shape.shape = new Shape(2); var sparse_result = new SparseTensor(indices, values, shape); return RaggedTensor.from_value_rowids(sparse_result.values, diff --git a/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs b/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs index 8895c147..8453fa25 100644 --- a/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs +++ b/src/TensorFlowNET.Core/Operations/weights_broadcast_ops.cs @@ -29,10 +29,10 @@ namespace Tensorflow weights, dtype: values.dtype.as_base_dtype(), name: "weights"); // Try static check for exact match. - var weights_shape = weights.TensorShape; - var values_shape = values.TensorShape; - if (weights_shape.is_fully_defined() && - values_shape.is_fully_defined()) + var weights_shape = weights.shape; + var values_shape = values.shape; + if (weights_shape.IsFullyDefined && + values_shape.IsFullyDefined) return weights; return math_ops.multiply( diff --git a/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs b/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs index 60c754c3..4f85e108 100644 --- a/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs @@ -34,13 +34,13 @@ namespace Tensorflow Tensor _row_splits => _row_partition.row_splits; public TF_DataType dtype => _values.dtype; - public TensorShape shape + public Shape shape { get { var nrows = _row_partition.static_nrows; var ncols = _row_partition.static_uniform_row_length; - return new TensorShape(nrows, ncols); + return new Shape(nrows, ncols); } } diff --git a/src/TensorFlowNET.Core/Tensors/Ragged/SparseTensor.cs b/src/TensorFlowNET.Core/Tensors/Ragged/SparseTensor.cs index 987d8d1d..54ba2a5f 100644 --- a/src/TensorFlowNET.Core/Tensors/Ragged/SparseTensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Ragged/SparseTensor.cs @@ -55,12 +55,12 @@ namespace Tensorflow void _init() { - var indices_shape = indices.TensorShape.with_rank(2); - var values_shape = values.TensorShape.with_rank(1); - var dense_shape_shape = dense_shape.TensorShape.with_rank(1); + var indices_shape = indices.shape.with_rank(2); + var values_shape = values.shape.with_rank(1); + var dense_shape_shape = dense_shape.shape.with_rank(1); - indices_shape["0"].merge_with(values_shape[0]); - indices_shape["1"].merge_with(dense_shape_shape[0]); + indices_shape["0"].merge_with(new Shape(values_shape[0])); + indices_shape["1"].merge_with(new Shape(dense_shape_shape[0])); } public static implicit operator Tensor(SparseTensor indexedSlices) diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs index d4419073..ba0d6009 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs @@ -76,7 +76,7 @@ namespace Tensorflow public Tensor(ulong value) => InitTensor(new[] { value }, Shape.Scalar); public 
Tensor(float value) => InitTensor(new[] { value }, Shape.Scalar); public Tensor(double value) => InitTensor(new[] { value }, Shape.Scalar); - public Tensor(string value) => InitTensor(new[] { value }, TensorShape.Scalar); + public Tensor(string value) => InitTensor(new[] { value }, Shape.Scalar); #endregion #region 1d array @@ -116,7 +116,7 @@ namespace Tensorflow protected unsafe void InitTensor(byte[] bytes, TF_DataType dtype) { if (dtype == TF_DataType.TF_STRING) - _handle = StringTensor(new byte[][] { bytes }, TensorShape.Scalar); + _handle = StringTensor(new byte[][] { bytes }, Shape.Scalar); else throw new NotImplementedException(""); isCreatedInGraphMode = !tf.executing_eagerly(); diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Explicit.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Explicit.cs index 79c4a593..8e0fb77f 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Explicit.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Explicit.cs @@ -138,10 +138,10 @@ namespace Tensorflow if (tensor == null) throw new ArgumentNullException(nameof(tensor)); - if (tensor.TensorShape.ndim != 0) + if (tensor.shape.ndim != 0) throw new ArgumentException("Tensor must have 0 dimensions in order to convert to scalar"); - if (tensor.TensorShape.size != 1) + if (tensor.shape.size != 1) throw new ArgumentException("Tensor must have size 1 in order to convert to scalar"); } diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.String.cs b/src/TensorFlowNET.Core/Tensors/Tensor.String.cs index 2c5a5038..642e3571 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.String.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.String.cs @@ -10,7 +10,7 @@ namespace Tensorflow { const int TF_TSRING_SIZE = 24; - public IntPtr StringTensor(string[] strings, TensorShape shape) + public IntPtr StringTensor(string[] strings, Shape shape) { // convert string array to byte[][] var buffer = new byte[strings.Length][]; @@ -20,7 +20,7 @@ namespace Tensorflow return StringTensor(buffer, shape); } - public IntPtr StringTensor(byte[][] buffer, TensorShape shape) + public IntPtr StringTensor(byte[][] buffer, Shape shape) { var handle = c_api.TF_AllocateTensor(TF_DataType.TF_STRING, shape.ndim == 0 ? null : shape.dims, @@ -63,7 +63,7 @@ namespace Tensorflow // [offset1, offset2,...,offsetn, s1size, s1bytes, s2size, s2bytes,...,snsize,snbytes] // long size = 1; - foreach (var s in TensorShape.dims) + foreach (var s in shape.dims) size *= s; var buffer = new byte[size][]; diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs index bf8089de..3c185cb4 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs @@ -111,7 +111,10 @@ namespace Tensorflow { get { - var dims = new long[rank < 0 ? 0 : rank]; + if (rank < 0) + return Shape.Null; + + var dims = new Shape(new long[rank]); if (_handle == IntPtr.Zero) { @@ -128,6 +131,13 @@ namespace Tensorflow set { + if (this is EagerTensor) + { + if(!shape.is_compatible_with(value)) + throw new ValueError($"Tensor's shape is not compatible."); + return; + } + if (value == null) c_api.TF_GraphSetTensorShape(graph, _as_tf_output(), null, -1, tf.Status.Handle); else @@ -142,22 +152,12 @@ namespace Tensorflow return rank < 0 ? null : shape.dims.Select(x => (int)x).ToArray(); } - public TensorShape TensorShape => rank < 0 ? 
new TensorShape() : shape; - /// /// Keras History: (Layer, (node_index, tensor_index)) /// public KerasHistory KerasHistory { get; set; } public Tensor KerasMask { get; set; } - /// - /// Updates the shape of this tensor. - /// - public virtual void set_shape(TensorShape shape) - { - this.shape = shape.rank >= 0 ? shape : null; - } - /// /// Updates the shape of this tensor. /// @@ -250,11 +250,11 @@ namespace Tensorflow switch (rank) { case -1: - return $"tf.Tensor '{name}' shape={TensorShape} dtype={dtype.as_numpy_name()}"; + return $"tf.Tensor '{name}' shape={shape} dtype={dtype.as_numpy_name()}"; case 0: - return $"tf.Tensor '{name}' shape={TensorShape} dtype={dtype.as_numpy_name()}"; + return $"tf.Tensor '{name}' shape={shape} dtype={dtype.as_numpy_name()}"; default: - return $"tf.Tensor '{name}' shape={TensorShape} dtype={dtype.as_numpy_name()}"; + return $"tf.Tensor '{name}' shape={shape} dtype={dtype.as_numpy_name()}"; } } @@ -263,7 +263,7 @@ namespace Tensorflow if (dtype == TF_DataType.TF_STRING) { long size = 1; - foreach (var s in TensorShape.dims) + foreach (var s in shape.dims) size *= s; var tstr = TensorDataPointer; diff --git a/src/TensorFlowNET.Core/Tensors/TensorArray.cs b/src/TensorFlowNET.Core/Tensors/TensorArray.cs index ccb2a1c7..52b364b7 100644 --- a/src/TensorFlowNET.Core/Tensors/TensorArray.cs +++ b/src/TensorFlowNET.Core/Tensors/TensorArray.cs @@ -37,7 +37,7 @@ namespace Tensorflow public TensorArray(TF_DataType dtype, Tensor size = default, bool? clear_after_read = null, bool? dynamic_size = null, string tensor_array_name = null, Tensor handle = null, Tensor flow = null, - bool infer_shape = true, TensorShape element_shape = null, + bool infer_shape = true, Shape element_shape = null, bool colocate_with_first_write_call = true, string name = null) { _implementation = new _GraphTensorArray(dtype, diff --git a/src/TensorFlowNET.Core/Tensors/TensorShape.Convert.cs b/src/TensorFlowNET.Core/Tensors/TensorShape.Convert.cs deleted file mode 100644 index fee26f00..00000000 --- a/src/TensorFlowNET.Core/Tensors/TensorShape.Convert.cs +++ /dev/null @@ -1,47 +0,0 @@ -using System.Linq; -using Tensorflow.NumPy; - -namespace Tensorflow -{ - public partial class TensorShape - { - public void Deconstruct(out long h, out long w) - { - h = dims[0]; - w = dims[1]; - } - - public static implicit operator TensorShape(Shape shape) => new TensorShape((long[])shape.dims.Clone()); - public static implicit operator Shape(TensorShape shape) => shape == null ? null : new Shape((long[])shape.dims.Clone()); - - public static implicit operator int[](TensorShape shape) => shape == null ? null : shape.dims.Select(x => (int)x).ToArray(); //we clone to avoid any changes - public static implicit operator TensorShape(int[] dims) => dims == null ? null : new TensorShape(dims); - - public static implicit operator long[](TensorShape shape) => shape == null ? null : (long[])shape.dims.Clone(); //we clone to avoid any changes - public static implicit operator TensorShape(long[] dims) => dims == null ? null : new TensorShape(dims); - - public static explicit operator long(TensorShape shape) => shape.size; - public static implicit operator TensorShape(long dim) => new TensorShape(dim); - - public static explicit operator (long, long)(TensorShape shape) => shape.dims.Length == 2 ? 
(shape.dims[0], shape.dims[1]) : (0, 0); - public static implicit operator TensorShape((long, long) dims) => new TensorShape(dims.Item1, dims.Item2); - - public static explicit operator (long, long, long)(TensorShape shape) => shape.dims.Length == 3 ? (shape.dims[0], shape.dims[1], shape.dims[2]) : (0, 0, 0); - public static implicit operator TensorShape((long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3); - - public static explicit operator (long, long, long, long)(TensorShape shape) => shape.dims.Length == 4 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3]) : (0, 0, 0, 0); - public static implicit operator TensorShape((long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4); - - public static explicit operator (long, long, long, long, long)(TensorShape shape) => shape.dims.Length == 5 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4]) : (0, 0, 0, 0, 0); - public static implicit operator TensorShape((long, long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5); - - public static explicit operator (long, long, long, long, long, long)(TensorShape shape) => shape.dims.Length == 6 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5]) : (0, 0, 0, 0, 0, 0); - public static implicit operator TensorShape((long, long, long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6); - - public static explicit operator (long, long, long, long, long, long, long)(TensorShape shape) => shape.dims.Length == 7 ? (shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5], shape.dims[6]) : (0, 0, 0, 0, 0, 0, 0); - public static implicit operator TensorShape((long, long, long, long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6, dims.Item7); - - public static explicit operator (long, long, long, long, long, long, long, long)(TensorShape shape) => shape.dims.Length == 8 ? 
(shape.dims[0], shape.dims[1], shape.dims[2], shape.dims[3], shape.dims[4], shape.dims[5], shape.dims[6], shape.dims[7]) : (0, 0, 0, 0, 0, 0, 0, 0); - public static implicit operator TensorShape((long, long, long, long, long, long, long, long) dims) => new TensorShape(dims.Item1, dims.Item2, dims.Item3, dims.Item4, dims.Item5, dims.Item6, dims.Item7, dims.Item8); - } -} diff --git a/src/TensorFlowNET.Core/Tensors/TensorShape.Equals.cs b/src/TensorFlowNET.Core/Tensors/TensorShape.Equals.cs deleted file mode 100644 index 3a6f9cdb..00000000 --- a/src/TensorFlowNET.Core/Tensors/TensorShape.Equals.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System; -using System.Linq; - -namespace Tensorflow -{ - public partial class TensorShape - { - public override bool Equals(Object obj) - { - switch (obj) - { - case TensorShape shape1: - if (rank == -1 && shape1.rank == -1) - return false; - else if (rank != shape1.rank) - return false; - return Enumerable.SequenceEqual(shape1.dims, dims); - case long[] shape2: - if (rank != shape2.Length) - return false; - return Enumerable.SequenceEqual(dims, shape2); - default: - return false; - } - } - - /*public static bool operator ==(TensorShape shape1, TensorShape shape2) - { - return false; - } - - public static bool operator !=(TensorShape shape1, TensorShape shape2) - { - return false; - }*/ - } -} diff --git a/src/TensorFlowNET.Core/Tensors/TensorShape.cs b/src/TensorFlowNET.Core/Tensors/TensorShape.cs deleted file mode 100644 index 4fe422c3..00000000 --- a/src/TensorFlowNET.Core/Tensors/TensorShape.cs +++ /dev/null @@ -1,306 +0,0 @@ -using Tensorflow.NumPy; -using System; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Runtime.CompilerServices; -using static Tensorflow.Binding; - -namespace Tensorflow -{ - /// - /// Represents the shape of a `Tensor`. - /// - /// https://www.tensorflow.org/api_docs/python/tf/TensorShape - public partial class TensorShape - { - private readonly Shape shape; - - /// - /// Returns a list of Dimensions, or None if the shape is unspecified. - /// - public long[] dims => shape.dims; - - /// - /// Returns the rank of this shape. - /// - public int ndim => rank; - - private int _rank; - /// - /// Returns the rank of this shape. - /// - public int rank => _rank > -1 ? shape.ndim : -1; - - /// - /// Returns the size this shape represents. 
- /// - public long size - { - get - { - var dims = shape.dims; - var computed = 1L; - for (int i = 0; i < dims.Length; i++) - { - var val = dims[i]; - if (val <= 0) - continue; - computed *= val; - } - - return computed; - } - } - - public TensorShape() - { - _rank = -1; - shape = new Shape(); - } - - public static TensorShape Scalar - => new TensorShape(new long[0]); - - public TensorShape(TensorShapeProto proto) - { - if (proto.UnknownRank) return; - switch (proto.Dim.Count) - { - case 0: shape = new Shape(new long[0]); - break; - default: - var protodims = proto.Dim; - var len = protodims.Count; - var dims = new long[len]; - for (int i = 0; i < len; i++) - dims[i] = protodims[i].Size; - shape = new Shape(dims); - break; - } - } - - public TensorShape(params int[] dims) - { - switch (dims.Length) - { - case 0: - shape = new Shape(new long[0]); - break; - default: - shape = new Shape(dims.Select(x => Convert.ToInt64(x)).ToArray()); - break; - } - } - - public TensorShape(params long[] dims) - { - switch (dims.Length) - { - case 0: shape = new Shape(new long[0]); - break; - default: shape = new Shape(dims); - break; - } - } - - public TensorShape(long[][] dims) - { - if (dims.Length == 1) - { - switch (dims[0].Length) - { - case 0: shape = new Shape(new long[0]); - break; - default: shape = new Shape(dims[0]); - break; - } - } - else - { - throw new NotImplementedException("TensorShape int[][] dims"); - } - } - - /// - /// - /// - /// - /// - /// When is not an Index. - [SuppressMessage("ReSharper", "PossibleInvalidOperationException")] - public TensorShape this[Slice slice] - { - get - { - if (!slice.Stop.HasValue) - slice.Stop = dims.Length - slice.Start + 1; - - if (slice.Start.HasValue == false || slice.Length.HasValue == false) - throw new ArgumentException("Slice must has Start and Length."); - - return new TensorShape(dims.Skip(slice.Start.Value) - .Take(slice.Length.Value) - .ToArray()); - } - } - - public long this[int index] => index < 0 ? dims[ndim + index] : dims[index]; - - /// - /// Returns True iff `self` is fully defined in every dimension. - /// - /// - public bool is_fully_defined() - { - return rank > -1 && dims != null && dims.Count(x => x < 1) == 0; - } - - public bool is_compatible_with(TensorShape shape2) - { - if (dims != null && shape2.dims != null) - { - if (dims.Contains(-1) || shape2.dims.Contains(-1)) - return true; - - if (shape.size != (ulong)shape2.size) - return false; - } - - return true; - } - - public void assert_has_rank(int rank) - { - if (rank != ndim) - throw new ValueError(String.Format("Shape {0} must have rank {1}", ndim, rank)); - } - - [SuppressMessage("ReSharper", "ParameterHidesMember")] - public TensorShape with_rank_at_least(int rank) - { - if (ndim < rank) - throw new ValueError($"Shape {this} must have rank at least {rank}"); - else - return this; - } - - public TensorShape with_rank(int rank) - { - return merge_with(unknown_shape(rank: rank)); - } - - /// - /// Returns an unknown TensorShape, optionally with a known rank. - /// - /// - /// - public TensorShape unknown_shape(int rank = -1) - { - if (rank == -1) - return new TensorShape(-1); - else - return new TensorShape(Enumerable.Repeat(-1L, rank).ToArray()); - } - - /// - /// Returns the concatenation of the dimension in `self` and `other`. 
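
Editorial note: the members deleted here have direct counterparts on Shape, as the call-site hunks in this patch show (is_fully_defined() becomes the IsFullyDefined property, num_elements() is replaced by size in the constant_op.cs hunk, merge_with and with_rank keep their names). A minimal caller-side sketch; the locals t, other and total are purely illustrative and not part of this patch:

    // Before, against TensorShape:
    //   if (t.TensorShape.is_fully_defined())
    //       total = t.TensorShape.num_elements();
    // After, against Shape as used throughout this patch:
    var total = -1L;
    if (t.shape.IsFullyDefined)
        total = t.shape.size;          // size stands in for num_elements()
    t.shape.merge_with(other.shape);   // merge_with keeps its name, as in losses_impl.py.cs
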
- /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public TensorShape concatenate(long[] other) - { - return concatenate(new TensorShape(other)); - } - - /// - /// Returns the concatenation of the dimension in `self` and `other`. - /// - /// - /// - public TensorShape concatenate(TensorShape other) - { - var otherShape = other; - - if (ndim < 0 || otherShape.ndim < 0) - return new TensorShape(); - else - { - var concatenate_dims = new long[ndim + otherShape.ndim]; - for (int i = 0; i < ndim; i++) - concatenate_dims[i] = dims[i]; - - for (int i = 0; i < otherShape.ndim; i++) - concatenate_dims[ndim + i] = otherShape.dims[i]; - - return new TensorShape(concatenate_dims); - } - } - - /// - /// Returns a `TensorShape` combining the information in `self` and `other`. - /// - /// - /// - public TensorShape merge_with(TensorShape other) - { - if (dims == null) - return other; - - var new_dims = new List(); - - foreach (var i in range(ndim)) - { - var dim = new Dimension(dims[i]); - var merged = dim.merge_with(new Dimension(other.dims[i])); - new_dims.Add(merged.value); - } - - return new TensorShape(new_dims.ToArray()); - } - - /// - /// Returns a cloned array from . - /// - public long[] as_list() - { - if (shape.IsEmpty) - throw new ValueError("as_list() is not defined on an unknown TensorShape."); - return (long[])dims.Clone(); - } - - public long[] as_list_long() - { - if (shape.IsEmpty) - throw new ValueError("as_list() is not defined on an unknown TensorShape."); - return dims.Select(x => Convert.ToInt64(x)).ToArray(); - } - - public long num_elements() - { - if (is_fully_defined()) - { - var size = 1L; - foreach (var dim in dims) - size *= dim; - return size; - } - - return -1; - } - - public override string ToString() - { - switch (rank) - { - case -1: - return $""; - case 0: - return $"()"; - default: - return $"{string.Join(",", shape).Replace("-1", "None")}"; - } - } - } -} diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs index a9bfe159..c1d59e39 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensors.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs @@ -18,7 +18,7 @@ namespace Tensorflow List items = new List(); public TF_DataType dtype => items.First().dtype; - public TensorShape shape => items.First().TensorShape; + public Shape shape => items.First().shape; public int rank => items.First().rank; public Graph graph => items.First().graph; public bool IsCreatedInGraphMode => items.First().IsCreatedInGraphMode; diff --git a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs index 5917439e..c4e59aa7 100644 --- a/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs +++ b/src/TensorFlowNET.Core/Tensors/c_api.tensor.cs @@ -106,8 +106,8 @@ namespace Tensorflow public static unsafe IntPtr TF_NewTensor(Shape shape, TF_DataType dtype, void* data) { - var length = shape.size * (ulong)dtype.get_datatype_size(); - var handle = TF_AllocateTensor(dtype, shape.dims, shape.ndim, length); + var length = shape.size * dtype.get_datatype_size(); + var handle = TF_AllocateTensor(dtype, shape.dims, shape.ndim, (ulong)length); var tensor = TF_TensorData(handle); if (tensor != IntPtr.Zero) System.Buffer.MemoryCopy(data, tensor.ToPointer(), length, length); diff --git a/src/TensorFlowNET.Core/Tensors/constant_op.cs b/src/TensorFlowNET.Core/Tensors/constant_op.cs index b69c4477..8547dd38 100644 --- a/src/TensorFlowNET.Core/Tensors/constant_op.cs +++ b/src/TensorFlowNET.Core/Tensors/constant_op.cs @@ 
-38,7 +38,7 @@ namespace Tensorflow /// Optional name for the tensor. /// public static Tensor constant(object value, TF_DataType dtype = TF_DataType.DtInvalid, - int[] shape = null, bool verify_shape = false, + Shape shape = null, bool verify_shape = false, bool allow_broadcast = true, string name = "Const") { if(tf.executing_eagerly()) @@ -110,8 +110,6 @@ namespace Tensorflow return val; case Shape val: return new EagerTensor(val.dims, new Shape(val.ndim)); - case TensorShape val: - return new EagerTensor(val.dims, ctx.DeviceName); case string val: return new EagerTensor(new[] { val }, Shape.Scalar); case string[] val: @@ -139,23 +137,23 @@ namespace Tensorflow static Tensor convert_to_eager_tensor(object value, TF_DataType dtype, - TensorShape shape, + Shape shape, string name, bool verify_shape, bool allow_broadcast) { var t = convert_to_eager_tensor(value, tf.Context, dtype: dtype); - if (shape == null) + if (shape is null || shape.IsNull) return t; - if (t.shape.dims.SequenceEqual(shape.dims)) + if (t.shape.Equals(shape)) return t; if (verify_shape) throw new TypeError($"Expected Tensor's shape: {shape}, got {t.shape}."); - var num_t = t.TensorShape.num_elements(); - if (num_t == shape.num_elements()) + var num_t = t.shape.size; + if (num_t == shape.size) return _eager_reshape(t, shape, tf.Context); if (num_t == 1) { @@ -170,7 +168,7 @@ namespace Tensorflow static Tensor convert_to_graph_tensor(object value, TF_DataType dtype, - TensorShape shape, + Shape shape, string name, bool verify_shape, bool allow_broadcast) @@ -202,14 +200,14 @@ namespace Tensorflow } /// - /// Function to convert TensorShape to Tensor. + /// Function to convert Shape to Tensor. /// /// /// /// /// /// - public static Tensor _tensor_shape_tensor_conversion_function(TensorShape s, + public static Tensor _tensor_shape_tensor_conversion_function(Shape s, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool as_ref = false) diff --git a/src/TensorFlowNET.Core/Tensors/tensor_util.cs b/src/TensorFlowNET.Core/Tensors/tensor_util.cs index 5a007695..98060436 100644 --- a/src/TensorFlowNET.Core/Tensors/tensor_util.cs +++ b/src/TensorFlowNET.Core/Tensors/tensor_util.cs @@ -180,7 +180,7 @@ namespace Tensorflow return tensor_proto; } - public static TensorShape constant_value_as_shape(Tensor tensor) + public static Shape constant_value_as_shape(Tensor tensor) { bool hasattr(Graph property, string attr) { @@ -196,12 +196,12 @@ namespace Tensorflow if (tensor.GetType() == typeof(EagerTensor)) { if(tensor.dtype == TF_DataType.TF_INT64) - return new TensorShape(tensor.ToArray()); + return new Shape(tensor.ToArray()); else - return new TensorShape(tensor.ToArray()); + return new Shape(tensor.ToArray()); } - if (tensor.TensorShape.ndim == 0) + if (tensor.shape.ndim == 0) { var value_ = constant_value(tensor); if (value_ == null) @@ -212,13 +212,13 @@ known scalar with value '-1' to describe an unknown shape."); throw new ValueError( String.Format(@"Received a scalar value {0} as shape; require a statically known scalar with value '-1' to describe an unknown shape.", value_)); - return tensor.TensorShape.unknown_shape(-1); + return tensor.shape.unknown_shape(-1); } - var shape = tensor.TensorShape.with_rank(1); - if (shape == new TensorShape(new int[] { 1 })) + var shape = tensor.shape.with_rank(1); + if (shape == new Shape(new int[] { 1 })) { - return new TensorShape(new int[] { }); + return new Shape(new int[] { }); } else if (tensor.op.type == "Cast") { @@ -227,10 +227,10 @@ scalar with value '-1' to describe an 
unknown shape.", value_)); return pre_cast; var cast_dtype = dtypes.as_tf_dtype((Type)tensor.op.get_attr("DstT")); if (!Array.Exists(new[] { dtypes.int32, dtypes.int64 }, cast_dtype_ => cast_dtype_ == cast_dtype)) - return tensor.TensorShape.unknown_shape((int)shape.dims[0]); + return tensor.shape.unknown_shape((int)shape.dims[0]); long[] x_ = { }; - foreach (var x in pre_cast.as_list()) + foreach (var x in pre_cast.dims) if (x != -1) x_[x_.Length] = x; else @@ -243,7 +243,7 @@ scalar with value '-1' to describe an unknown shape.", value_)); y_[y_.Length] = y; else y_[y_.Length] = -1; - return new TensorShape(y_); + return new Shape(y_); } else if (tensor.op.type == "Shape") { @@ -251,7 +251,7 @@ scalar with value '-1' to describe an unknown shape.", value_)); } else if (tensor.op.type == "Pack") { - var ret_ = new TensorShape(new int[] { }); + var ret_ = new Shape(new int[] { }); if ((int)tensor.op.get_attr("axis") != 0) throw new ValueError(String.Format( @"Since rank 1 inputs are expected, Pack's axis: {0} must be 0, otherwise it @@ -278,7 +278,7 @@ would not be rank 1.", tensor.op.get_attr("axis"))); } else if (tensor.op.type == "Concat") { - var ret_ = new TensorShape(new int[] { }); + var ret_ = new Shape(new int[] { }); var inputlist_ = new ArraySegment(tensor.op.inputs, 1, tensor.op.inputs.Length - 1); @@ -336,7 +336,7 @@ would not be rank 1.", tensor.op.get_attr("axis"))); if ((iter + strides) > prev_.Length) break; } - var ret_ = new TensorShape(prev); + var ret_ = new Shape(prev); return ret_; } } @@ -363,7 +363,7 @@ would not be rank 1.", tensor.op.get_attr("axis"))); } } - var ret = tensor.TensorShape.unknown_shape((int)shape.dims[0]); + var ret = tensor.shape.unknown_shape((int)shape.dims[0]); var value = constant_value(tensor); if (!(value is null)) { @@ -371,7 +371,7 @@ would not be rank 1.", tensor.op.get_attr("axis"))); foreach (var (index, d) in enumerate(value.ToArray())) d_[index] = d >= 0 ? d : -1; - ret = ret.merge_with(new TensorShape(d_)); + ret = ret.merge_with(new Shape(d_)); } return ret; } @@ -402,19 +402,14 @@ would not be rank 1.", tensor.op.get_attr("axis"))); return shape; } - public static TensorShape to_shape(long[] dims) + public static Shape to_shape(long[] dims) { - return new TensorShape(dims.Select(x => (int)x).ToArray()); + return new Shape(dims.Select(x => (int)x).ToArray()); } - public static TensorShape to_shape(int[] dims) + public static Shape to_shape(int[] dims) { - return new TensorShape(dims); - } - - public static TensorShape as_shape(this Shape shape) - { - return new TensorShape(shape.dims); + return new Shape(dims); } public static TensorShapeProto as_shape_proto(this Shape tshape) @@ -433,12 +428,12 @@ would not be rank 1.", tensor.op.get_attr("axis"))); return shape; } - public static TensorShape reshape(this Shape shape, int[] dims) + public static Shape reshape(this Shape shape, int[] dims) { - return new TensorShape(dims); + return new Shape(dims); } - public static TensorShapeProto as_proto(this TensorShape tshape) + public static TensorShapeProto as_proto(this Shape tshape) { TensorShapeProto shape = new TensorShapeProto(); @@ -518,12 +513,12 @@ would not be rank 1.", tensor.op.get_attr("axis"))); if (shape.ndim == 0) return array[0].ToString(); - var display = "["; + var display = "array(["; if (array.Length < 10) display += string.Join(", ", array); else display += string.Join(", ", array.Take(3)) + " ... 
" + string.Join(", ", array.Skip(array.Length - 3)); - return display + "]"; + return display + "])"; } diff --git a/src/TensorFlowNET.Core/Tensors/tf.constant.cs b/src/TensorFlowNET.Core/Tensors/tf.constant.cs index 3bf6614c..6a62d34a 100644 --- a/src/TensorFlowNET.Core/Tensors/tf.constant.cs +++ b/src/TensorFlowNET.Core/Tensors/tf.constant.cs @@ -28,7 +28,7 @@ namespace Tensorflow /// public Tensor constant(object value, TF_DataType dtype = TF_DataType.DtInvalid, - TensorShape shape = null, + Shape shape = null, string name = "Const") => constant_op.constant(value, dtype: dtype, @@ -37,13 +37,13 @@ namespace Tensorflow verify_shape: false, allow_broadcast: true); - public Tensor zeros(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + public Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) => array_ops.zeros(shape, dtype, name); public Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) => array_ops.zeros(shape, dtype, name); - public Tensor ones(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) + public Tensor ones(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) => array_ops.ones(shape, dtype, name); public Tensor size(Tensor input, diff --git a/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs b/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs index 21b9df91..10a85d9d 100644 --- a/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs +++ b/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs @@ -214,7 +214,7 @@ namespace Tensorflow // string tensors as "HostMemory" inputs. foreach (var saveable in saveables) { - List shapes = null; + List shapes = null; if (reshape) { throw new NotImplementedException("_AddRestoreOps"); diff --git a/src/TensorFlowNET.Core/Training/Saving/ResourceVariableSaveable.cs b/src/TensorFlowNET.Core/Training/Saving/ResourceVariableSaveable.cs index 30c3405c..167c635a 100644 --- a/src/TensorFlowNET.Core/Training/Saving/ResourceVariableSaveable.cs +++ b/src/TensorFlowNET.Core/Training/Saving/ResourceVariableSaveable.cs @@ -35,7 +35,7 @@ namespace Tensorflow this.name = name; } - public override Operation restore(Tensor[] restored_tensors, TensorShape[] restored_shapes = null) + public override Operation restore(Tensor[] restored_tensors, Shape[] restored_shapes = null) { var restored_tensor = restored_tensors[0]; restored_tensor = array_ops.identity(restored_tensor); diff --git a/src/TensorFlowNET.Core/Training/Saving/SaveableObject.cs b/src/TensorFlowNET.Core/Training/Saving/SaveableObject.cs index 60de456f..c86075f8 100644 --- a/src/TensorFlowNET.Core/Training/Saving/SaveableObject.cs +++ b/src/TensorFlowNET.Core/Training/Saving/SaveableObject.cs @@ -40,12 +40,12 @@ namespace Tensorflow this.name = name; } - public virtual Operation restore(Tensor[] restored_tensors, TensorShape[] restored_shapes = null) + public virtual Operation restore(Tensor[] restored_tensors, Shape[] restored_shapes = null) { var restored_tensor = restored_tensors[0]; return gen_state_ops.assign(op, restored_tensor, - validate_shape: restored_shapes == null && op.shape.is_fully_defined()); + validate_shape: restored_shapes == null && op.shape.IsFullyDefined); } } } diff --git a/src/TensorFlowNET.Core/Training/SlotCreator.cs b/src/TensorFlowNET.Core/Training/SlotCreator.cs index d8b597d9..df9983ab 100644 --- a/src/TensorFlowNET.Core/Training/SlotCreator.cs +++ 
b/src/TensorFlowNET.Core/Training/SlotCreator.cs @@ -32,7 +32,7 @@ namespace Tensorflow.Train /// public IVariableV1 create_slot(RefVariable primary, Tensor val, string name, bool colocate_with_primary = true) { - var validate_shape = val.TensorShape.is_fully_defined(); + var validate_shape = val.shape.IsFullyDefined; var prefix = primary.Op.name; return tf_with(tf.variable_scope(name: null, prefix + "/" + name), delegate { @@ -53,7 +53,7 @@ namespace Tensorflow.Train if (dtype == TF_DataType.DtInvalid) dtype = primary.dtype; var slot_shape = primary.shape; - if (slot_shape.is_fully_defined()) + if (slot_shape.IsFullyDefined) { var initializer = new Zeros(); return create_slot_with_initializer( @@ -70,10 +70,10 @@ namespace Tensorflow.Train /// Creates a slot initialized using an `Initializer`. /// /// - public IVariableV1 create_slot_with_initializer(IVariableV1 primary, IInitializer initializer, TensorShape shape, + public IVariableV1 create_slot_with_initializer(IVariableV1 primary, IInitializer initializer, Shape shape, TF_DataType dtype, string name, bool colocate_with_primary = true) { - var validate_shape = shape.is_fully_defined(); + var validate_shape = shape.IsFullyDefined; var prefix = primary.Op.name; return tf_with(new variable_scope(string.Empty, prefix + "/" + name), delegate { @@ -92,7 +92,7 @@ namespace Tensorflow.Train /// /// private IVariableV1 _create_slot_var(IVariableV1 primary, object val, string scope, bool validate_shape, - TensorShape shape, TF_DataType dtype) + Shape shape, TF_DataType dtype) { bool use_resource = primary is ResourceVariable; if (resource_variable_ops.is_resource_variable(primary)) diff --git a/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs b/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs index a898fed5..2c730d23 100644 --- a/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs +++ b/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs @@ -37,8 +37,8 @@ namespace Tensorflow public Tensor Handle => handle; protected Tensor _graph_element; public Tensor GraphElement => _graph_element; - protected TensorShape _shape; - public TensorShape shape => _shape; + protected Shape _shape; + public Shape shape => _shape; protected Operation initializer_op; public Operation Initializer => initializer_op; @@ -141,11 +141,11 @@ namespace Tensorflow // _maybe_set_handle_data(_dtype, _handle, result); // have to set shape when converting to substituent placeholder - if (result.TensorShape.ndim == -1) + if (result.shape.ndim == -1) { c_api.TF_GraphSetTensorShape(result.graph, result._as_tf_output(), - shape.as_list_long(), + shape.dims, shape.ndim, tf.Status.Handle); tf.Status.Check(true); diff --git a/src/TensorFlowNET.Core/Variables/IVariableV1.cs b/src/TensorFlowNET.Core/Variables/IVariableV1.cs index 01c5d2ec..f4f716c3 100644 --- a/src/TensorFlowNET.Core/Variables/IVariableV1.cs +++ b/src/TensorFlowNET.Core/Variables/IVariableV1.cs @@ -45,7 +45,7 @@ namespace Tensorflow Tensor GraphElement { get; } Graph Graph { get; } TF_DataType dtype { get; } - TensorShape shape { get; } + Shape shape { get; } Tensor assign_add(T delta, bool use_locking = false, string name = null, bool read_value = true); Tensor assign_sub(T delta, bool use_locking = false, string name = null, bool read_value = true); IVariableV1 assign_sub_lazy_load(Tensor delta, string name = null); diff --git a/src/TensorFlowNET.Core/Variables/RefVariable.cs b/src/TensorFlowNET.Core/Variables/RefVariable.cs index 36fdfed2..67c12c42 100644 --- 
a/src/TensorFlowNET.Core/Variables/RefVariable.cs +++ b/src/TensorFlowNET.Core/Variables/RefVariable.cs @@ -50,7 +50,7 @@ namespace Tensorflow public Operation Op => _variable.op; public TF_DataType dtype => _variable.dtype; - public TensorShape shape => _variable.shape; + public Shape shape => _variable.shape; public string Device => ""; public string Name => _variable.name; @@ -192,8 +192,8 @@ namespace Tensorflow // Manually overrides the variable's shape with the initial value's. if (validate_shape) { - var initial_value_shape = _initial_value.TensorShape; - if (!initial_value_shape.is_fully_defined()) + var initial_value_shape = _initial_value.shape; + if (!initial_value_shape.IsFullyDefined) throw new ValueError($"initial_value must have a shape specified: {_initial_value}"); } diff --git a/src/TensorFlowNET.Core/Variables/ResourceVariable.cs b/src/TensorFlowNET.Core/Variables/ResourceVariable.cs index 8405f26c..f4a8eb1f 100644 --- a/src/TensorFlowNET.Core/Variables/ResourceVariable.cs +++ b/src/TensorFlowNET.Core/Variables/ResourceVariable.cs @@ -36,7 +36,7 @@ namespace Tensorflow TF_DataType dtype = TF_DataType.DtInvalid, string import_scope = "", VariableAggregation aggregation = VariableAggregation.None, - TensorShape shape = null) + Shape shape = null) { if (variable_def != null) { @@ -64,7 +64,7 @@ namespace Tensorflow string name = null, TF_DataType dtype = TF_DataType.DtInvalid, VariableAggregation aggregation = VariableAggregation.None, - TensorShape shape = null) + Shape shape = null) { var init_from_fn = initial_value.GetType().Name == "Func`1" || initial_value.GetType().GetInterface("IInitializer") != null; @@ -112,7 +112,7 @@ namespace Tensorflow } }); - _shape = shape ?? _initial_value.TensorShape; + _shape = shape ?? _initial_value.shape; if (_in_graph_mode) { @@ -162,7 +162,7 @@ namespace Tensorflow handle = g.as_graph_element(prepend_name_scope) as Tensor; _handle_name = handle.name; _name = handle.name; - _shape = new TensorShape(handle.op.get_attr("shape") as TensorShapeProto); + _shape = new Shape(handle.op.get_attr("shape") as TensorShapeProto); prepend_name_scope = ops.prepend_name_scope(variable_def.InitializerName, import_scope: import_scope); initializer_op = g.as_graph_element(prepend_name_scope) as Operation; diff --git a/src/TensorFlowNET.Core/Variables/VariableArgs.cs b/src/TensorFlowNET.Core/Variables/VariableArgs.cs index b4f63779..ed1e3b98 100644 --- a/src/TensorFlowNET.Core/Variables/VariableArgs.cs +++ b/src/TensorFlowNET.Core/Variables/VariableArgs.cs @@ -8,7 +8,7 @@ namespace Tensorflow public object InitialValue { get; set; } public Func Getter { get; set; } public string Name { get; set; } - public TensorShape Shape { get; set; } + public Shape Shape { get; set; } public TF_DataType DType { get; set; } = TF_DataType.DtInvalid; public IInitializer Initializer { get; set; } public bool Trainable { get; set; } diff --git a/src/TensorFlowNET.Core/Variables/VariableScope.cs b/src/TensorFlowNET.Core/Variables/VariableScope.cs index cda2919f..c9a6fffb 100644 --- a/src/TensorFlowNET.Core/Variables/VariableScope.cs +++ b/src/TensorFlowNET.Core/Variables/VariableScope.cs @@ -49,7 +49,7 @@ namespace Tensorflow public IVariableV1 get_variable(_VariableStore var_store, string name, - TensorShape shape = null, + Shape shape = null, TF_DataType dtype = TF_DataType.DtInvalid, object initializer = null, // IInitializer or Tensor bool? 
trainable = null, diff --git a/src/TensorFlowNET.Core/Variables/_UnreadVariable.cs b/src/TensorFlowNET.Core/Variables/_UnreadVariable.cs index 22ec4c0a..f5d0504e 100644 --- a/src/TensorFlowNET.Core/Variables/_UnreadVariable.cs +++ b/src/TensorFlowNET.Core/Variables/_UnreadVariable.cs @@ -10,7 +10,7 @@ namespace Tensorflow { public override string Name => _in_graph_mode ? _parent_op.name : "UnreadVariable"; - public _UnreadVariable(Tensor handle, TF_DataType dtype, TensorShape shape, + public _UnreadVariable(Tensor handle, TF_DataType dtype, Shape shape, bool in_graph_mode, string unique_id) { _dtype = dtype; diff --git a/src/TensorFlowNET.Core/Variables/_VariableStore.cs b/src/TensorFlowNET.Core/Variables/_VariableStore.cs index e608fe94..0570fd06 100644 --- a/src/TensorFlowNET.Core/Variables/_VariableStore.cs +++ b/src/TensorFlowNET.Core/Variables/_VariableStore.cs @@ -39,7 +39,7 @@ namespace Tensorflow } public IVariableV1 get_variable(string name, - TensorShape shape = null, + Shape shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT, object initializer = null, // IInitializer or Tensor bool? reuse = null, @@ -64,7 +64,7 @@ namespace Tensorflow } private IVariableV1 _true_getter(string name, - TensorShape shape = null, + Shape shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT, object initializer = null, bool? trainable = null, @@ -113,7 +113,7 @@ namespace Tensorflow } private IVariableV1 _get_single_variable(string name, - TensorShape shape = null, + Shape shape = null, TF_DataType dtype = TF_DataType.DtInvalid, IInitializer initializer = null, Tensor init_value = null, diff --git a/src/TensorFlowNET.Core/Variables/variable_scope.py.cs b/src/TensorFlowNET.Core/Variables/variable_scope.py.cs index f21f195b..31f3285e 100644 --- a/src/TensorFlowNET.Core/Variables/variable_scope.py.cs +++ b/src/TensorFlowNET.Core/Variables/variable_scope.py.cs @@ -298,7 +298,7 @@ namespace Tensorflow // TODO for Switch/Case public static RefVariable get_variable(string embeddingMatrix, IInitializer initializer, bool use_resource, - TensorShape shape = null, + Shape shape = null, TF_DataType dtype = TF_DataType.DtInvalid, bool trainable = false, bool validate_shape = true) diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs index e86c45b9..ef4c1506 100644 --- a/src/TensorFlowNET.Core/ops.cs +++ b/src/TensorFlowNET.Core/ops.cs @@ -164,7 +164,6 @@ namespace Tensorflow ResourceVariable varVal => varVal._TensorConversionFunction(dtype: dtype, name: name, as_ref: as_ref), Axis ts => constant_op.constant(ts.axis, dtype: dtype, name: name), Shape ts => constant_op.constant(ts.dims, dtype: dtype, name: name), - TensorShape ts => constant_op.constant(ts.dims, dtype: dtype, name: name), string str => constant_op.constant(str, dtype: tf.@string, name: name), string[] str => constant_op.constant(str, dtype: tf.@string, name: name), IEnumerable objects => array_ops._autopacking_conversion_function(objects, dtype: dtype, name: name), diff --git a/src/TensorFlowNET.Core/tensorflow.cs b/src/TensorFlowNET.Core/tensorflow.cs index 60b22f71..acaa6a1a 100644 --- a/src/TensorFlowNET.Core/tensorflow.cs +++ b/src/TensorFlowNET.Core/tensorflow.cs @@ -74,7 +74,7 @@ namespace Tensorflow string name = null, TF_DataType dtype = TF_DataType.DtInvalid, VariableAggregation aggregation = VariableAggregation.None, - int[] shape = null) + Shape shape = null) => new ResourceVariable(data, trainable: trainable, validate_shape: validate_shape, @@ -83,7 +83,7 @@ namespace Tensorflow aggregation: aggregation, 
shape: shape); - public Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null) + public Tensor placeholder(TF_DataType dtype, Shape shape = null, string name = null) => array_ops.placeholder(dtype, shape, name); public void enable_eager_execution() diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs index e1563055..e439eb9d 100644 --- a/src/TensorFlowNET.Keras/BackendImpl.cs +++ b/src/TensorFlowNET.Keras/BackendImpl.cs @@ -64,7 +64,7 @@ namespace Tensorflow.Keras _GRAPH_VARIABLES[graph.graph_key] = v; } - public Tensor placeholder(TensorShape shape = null, + public Tensor placeholder(Shape shape = null, int ndim = -1, TF_DataType dtype = TF_DataType.DtInvalid, bool sparse = false, @@ -300,9 +300,9 @@ namespace Tensorflow.Keras int new_height = original_shape[rows] < 0 ? -1 : (int)original_shape[rows] * height_factor; int new_width = original_shape[cols] < 0 ? -1 : (int)original_shape[cols] * width_factor; - TensorShape output_shape = data_format == "channels_first" ? + Shape output_shape = data_format == "channels_first" ? (-1, -1, new_height, new_width) : (-1, new_height, new_width, -1); - x.set_shape(output_shape); + x.shape = output_shape; return x; } @@ -329,10 +329,10 @@ namespace Tensorflow.Keras public Tensor conv2d_transpose(Tensor x, IVariableV1 kernel, Tensor output_shape, - TensorShape strides = null, + Shape strides = null, string padding = "valid", string data_format = null, - TensorShape dilation_rate = null) + Shape dilation_rate = null) { var force_transpose = false; if (data_format == "channels_first" && !dilation_rate.Equals(new[] { 1, 1 })) @@ -340,7 +340,7 @@ namespace Tensorflow.Keras // x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose) var tf_data_format = "NHWC"; padding = padding.ToUpper(); - strides = new TensorShape(1, strides[0], strides[1], 1); + strides = new Shape(1, strides[0], strides[1], 1); if (dilation_rate.Equals(new[] { 1, 1 })) x = nn_impl.conv2d_transpose(x, kernel, output_shape, strides, padding: padding, diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs index c57c1a8e..3314f5c4 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/DataAdapter.cs @@ -21,9 +21,9 @@ namespace Tensorflow.Keras.Engine.DataAdapters public virtual (Tensor, Tensor) Expand1d(Tensor x, Tensor y) { - if (x.TensorShape.ndim == 1) + if (x.shape.ndim == 1) x = array_ops.expand_dims(x, axis: -1); - if (y.TensorShape.ndim == 1) + if (y.shape.ndim == 1) y = array_ops.expand_dims(y, axis: -1); return (x, y); } diff --git a/src/TensorFlowNET.Keras/Engine/Functional.cs b/src/TensorFlowNET.Keras/Engine/Functional.cs index 78038cff..7ce2c8d9 100644 --- a/src/TensorFlowNET.Keras/Engine/Functional.cs +++ b/src/TensorFlowNET.Keras/Engine/Functional.cs @@ -12,7 +12,7 @@ namespace Tensorflow.Keras.Engine /// public partial class Functional : Model { - TensorShape _build_input_shape; + Shape _build_input_shape; bool _compute_output_and_mask_jointly; bool _expects_training_arg; bool _expects_mask_arg; @@ -338,7 +338,7 @@ namespace Tensorflow.Keras.Engine tf.Logger.Debug($"Depth {depth}: {node.Layer}: {node.Layer.Name}"); var outputs = node.Layer.Apply(layer_inputs, is_training: training); foreach (var output in outputs.Where(x => x != null)) - tf.Logger.Information($"Depth {depth}: {node.Layer}: {node.Layer.Name} {output.TensorShape}"); + 
tf.Logger.Information($"Depth {depth}: {node.Layer}: {node.Layer.Name} {output.shape}"); // Update tensor_dict for next input foreach (var (x_id, y) in zip(node.FlatOutputIds, outputs)) tensor_dict[x_id] = new Queue(Enumerable.Range(0, tensor_usage_count[x_id]).Select(x => y)); diff --git a/src/TensorFlowNET.Keras/Engine/Layer.AddWeights.cs b/src/TensorFlowNET.Keras/Engine/Layer.AddWeights.cs index 1edbd168..feb5e8e4 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.AddWeights.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.AddWeights.cs @@ -7,7 +7,7 @@ namespace Tensorflow.Keras.Engine public partial class Layer { protected virtual IVariableV1 add_weight(string name, - TensorShape shape, + Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, IInitializer initializer = null, IRegularizer regularizer = null, diff --git a/src/TensorFlowNET.Keras/Engine/Layer.Layers.cs b/src/TensorFlowNET.Keras/Engine/Layer.Layers.cs index ceb3afa4..32535838 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.Layers.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.Layers.cs @@ -13,7 +13,7 @@ namespace Tensorflow.Keras.Engine _layers.AddRange(layers); } - public virtual TensorShape ComputeOutputShape(TensorShape input_shape) + public virtual Shape ComputeOutputShape(Shape input_shape) => throw new NotImplementedException(""); } } diff --git a/src/TensorFlowNET.Keras/Engine/Layer.cs b/src/TensorFlowNET.Keras/Engine/Layer.cs index 3c936a8b..50d0fbe9 100644 --- a/src/TensorFlowNET.Keras/Engine/Layer.cs +++ b/src/TensorFlowNET.Keras/Engine/Layer.cs @@ -72,7 +72,7 @@ namespace Tensorflow.Keras.Engine protected bool computePreviousMask; protected List updates; - public TensorShape BatchInputShape => args.BatchInputShape; + public Shape BatchInputShape => args.BatchInputShape; List inboundNodes; public List InboundNodes => inboundNodes; @@ -84,7 +84,7 @@ namespace Tensorflow.Keras.Engine public CallContext CallContext => callContext.Value; public Tensor[] input => inboundNodes[0].input_tensors; public Dictionary> NodesByDepth { get; set; } - public TensorShape output_shape => inboundNodes[0].Outputs.shape; + public Shape output_shape => inboundNodes[0].Outputs.shape; public Layer(LayerArgs args) { this.args = args; diff --git a/src/TensorFlowNET.Keras/Engine/LossesContainer.cs b/src/TensorFlowNET.Keras/Engine/LossesContainer.cs index 974aa6ca..6a91450d 100644 --- a/src/TensorFlowNET.Keras/Engine/LossesContainer.cs +++ b/src/TensorFlowNET.Keras/Engine/LossesContainer.cs @@ -54,7 +54,7 @@ namespace Tensorflow.Keras.Engine else { // Ok for a model to have no compiled loss. - return array_ops.zeros(new TensorShape()); + return array_ops.zeros(Shape.Null); } } diff --git a/src/TensorFlowNET.Keras/Engine/Node.cs b/src/TensorFlowNET.Keras/Engine/Node.cs index f1651a0f..9a5302ce 100644 --- a/src/TensorFlowNET.Keras/Engine/Node.cs +++ b/src/TensorFlowNET.Keras/Engine/Node.cs @@ -37,8 +37,8 @@ namespace Tensorflow.Keras.Engine public int[] tensor_indices; public Tensors input_tensors => is_input ? 
Outputs : args.InputTensors; public Tensors Outputs => args.Outputs; - public TensorShape[] input_shapes; - public TensorShape[] output_shapes; + public Shape[] input_shapes; + public Shape[] output_shapes; public List KerasInputs { get; set; } = new List(); ILayer _layer; public ILayer Layer => _layer; diff --git a/src/TensorFlowNET.Keras/Engine/Sequential.cs b/src/TensorFlowNET.Keras/Engine/Sequential.cs index 2b37d2bf..d41a5572 100644 --- a/src/TensorFlowNET.Keras/Engine/Sequential.cs +++ b/src/TensorFlowNET.Keras/Engine/Sequential.cs @@ -33,10 +33,10 @@ namespace Tensorflow.Keras.Engine bool _compute_output_and_mask_jointly; bool _auto_track_sub_layers; - TensorShape _inferred_input_shape; + Shape _inferred_input_shape; bool _has_explicit_input_shape; - public TensorShape output_shape => outputs[0].TensorShape; + public Shape output_shape => outputs[0].shape; public Sequential(SequentialArgs args) : base(args.Inputs, args.Outputs, name: args.Name) diff --git a/src/TensorFlowNET.Keras/KerasInterface.cs b/src/TensorFlowNET.Keras/KerasInterface.cs index b5209e76..b2fe5747 100644 --- a/src/TensorFlowNET.Keras/KerasInterface.cs +++ b/src/TensorFlowNET.Keras/KerasInterface.cs @@ -65,9 +65,9 @@ namespace Tensorflow.Keras /// If set, the layer will not create a placeholder tensor. /// /// - public Tensor Input(TensorShape shape = null, + public Tensor Input(Shape shape = null, int batch_size = -1, - TensorShape batch_input_shape = null, + Shape batch_input_shape = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool sparse = false, diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs b/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs index c5deeb33..9ef4db18 100644 --- a/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs +++ b/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs @@ -37,7 +37,7 @@ namespace Tensorflow.Keras.Layers var channel_axis = _get_channel_axis(); var input_dim = input_shape[-1]; - var kernel_shape = new TensorShape(kernel_size[0], kernel_size[1], filters, input_dim); + var kernel_shape = new Shape(kernel_size[0], kernel_size[1], filters, input_dim); kernel = add_weight(name: "kernel", shape: kernel_shape, @@ -62,7 +62,7 @@ namespace Tensorflow.Keras.Layers if (data_format == "channels_first") (h_axis, w_axis) = (2, 3); var (height, width) = (-1, -1); - if(inputs.shape.rank > -1) + if(inputs.shape.ndim > -1) { var dims = inputs.shape.dims; (height, width) = ((int)dims[h_axis], (int)dims[w_axis]); @@ -105,7 +105,7 @@ namespace Tensorflow.Keras.Layers if (!tf.Context.executing_eagerly()) { var out_shape = ComputeOutputShape(inputs.shape); - outputs.set_shape(out_shape); + outputs.shape = out_shape; } if (use_bias) @@ -117,7 +117,7 @@ namespace Tensorflow.Keras.Layers return outputs; } - public override TensorShape ComputeOutputShape(TensorShape input_shape) + public override Shape ComputeOutputShape(Shape input_shape) { var output_shape = input_shape.dims; var (c_axis, h_axis, w_axis) = (3, 1, 2); @@ -144,7 +144,7 @@ namespace Tensorflow.Keras.Layers stride: (int)stride_w, dilation: (int)dilation_rate[1]); - return new TensorShape(output_shape); + return new Shape(output_shape); } } } diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs b/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs index 7c25b8db..3c5e0d5d 100644 --- a/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs +++ b/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs @@ -30,11 +30,11 @@ 
namespace Tensorflow.Keras.Layers ConvolutionalArgs args; protected int rank => args.Rank; protected int filters => args.Filters; - protected TensorShape kernel_size => args.KernelSize; - protected TensorShape strides => args.Strides; + protected Shape kernel_size => args.KernelSize; + protected Shape strides => args.Strides; protected string padding => args.Padding; protected string data_format => args.DataFormat; - protected TensorShape dilation_rate => args.DilationRate; + protected Shape dilation_rate => args.DilationRate; protected Activation activation => args.Activation; protected bool use_bias => args.UseBias; protected IInitializer kernel_initializer => args.KernelInitializer; @@ -59,12 +59,12 @@ namespace Tensorflow.Keras.Layers protected override void build(Tensors inputs) { - TensorShape input_shape = inputs.shape; + Shape input_shape = inputs.shape; int channel_axis = data_format == "channels_first" ? 1 : -1; var input_channel = channel_axis < 0 ? input_shape.dims[input_shape.ndim + channel_axis] : input_shape.dims[channel_axis]; - TensorShape kernel_shape = kernel_size.dims.concat(new long[] { input_channel / args.Groups, filters }); + Shape kernel_shape = kernel_size.dims.concat(new long[] { input_channel / args.Groups, filters }); kernel = add_weight(name: "kernel", shape: kernel_shape, initializer: kernel_initializer, diff --git a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs index 166bf2f9..fb813455 100644 --- a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs +++ b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs @@ -43,21 +43,21 @@ namespace Tensorflow.Keras.Layers protected override void build(Tensors inputs) { - TensorShape input_shape = inputs.shape; + Shape input_shape = inputs.shape; var last_dim = input_shape.dims.Last(); var axes = new Dictionary(); axes[-1] = (int)last_dim; inputSpec = new InputSpec(min_ndim: 2, axes: axes); kernel = add_weight( "kernel", - shape: new TensorShape(last_dim, args.Units), + shape: new Shape(last_dim, args.Units), initializer: args.KernelInitializer, dtype: DType, trainable: true); if (args.UseBias) bias = add_weight( "bias", - shape: new TensorShape(args.Units), + shape: new Shape(args.Units), initializer: args.BiasInitializer, dtype: DType, trainable: true); diff --git a/src/TensorFlowNET.Keras/Layers/Core/InputLayer.cs b/src/TensorFlowNET.Keras/Layers/Core/InputLayer.cs index 1b9c9633..6b064716 100644 --- a/src/TensorFlowNET.Keras/Layers/Core/InputLayer.cs +++ b/src/TensorFlowNET.Keras/Layers/Core/InputLayer.cs @@ -96,7 +96,7 @@ namespace Tensorflow.Keras.Layers }); node.Connect(this); - typeSpec = new TensorSpec(args.InputTensor.TensorShape, + typeSpec = new TensorSpec(args.InputTensor.shape, dtype: args.InputTensor.dtype, name: Name); } diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.Reshaping.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.Reshaping.cs index 81494408..71f9ef3b 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.Reshaping.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.Reshaping.cs @@ -27,7 +27,7 @@ namespace Tensorflow.Keras.Layers /// /// /// - public UpSampling2D UpSampling2D(TensorShape size = null, + public UpSampling2D UpSampling2D(Shape size = null, string data_format = null, string interpolation = "nearest") => new UpSampling2D(new UpSampling2DArgs @@ -40,7 +40,7 @@ namespace Tensorflow.Keras.Layers /// /// /// - public Reshape Reshape(TensorShape target_shape) + public Reshape Reshape(Shape target_shape) => new Reshape(new ReshapeArgs { TargetShape = 
target_shape diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs index 1b5e9c24..ed2f91d9 100644 --- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs +++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs @@ -195,11 +195,11 @@ namespace Tensorflow.Keras.Layers /// Regularizer function applied to the output of the layer (its "activation") (see keras.regularizers). /// A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias). public Conv2D Conv2D(int filters, - TensorShape kernel_size = null, - TensorShape strides = null, + Shape kernel_size = null, + Shape strides = null, string padding = "valid", string data_format = null, - TensorShape dilation_rate = null, + Shape dilation_rate = null, int groups = 1, Activation activation = null, bool use_bias = true, @@ -248,11 +248,11 @@ namespace Tensorflow.Keras.Layers /// The name of the regularizer function applied to the output of the layer (its "activation") (see keras.regularizers). /// A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias). public Conv2D Conv2D(int filters, - TensorShape kernel_size = null, - TensorShape strides = null, + Shape kernel_size = null, + Shape strides = null, string padding = "valid", string data_format = null, - TensorShape dilation_rate = null, + Shape dilation_rate = null, int groups = 1, string activation = null, bool use_bias = true, @@ -292,11 +292,11 @@ namespace Tensorflow.Keras.Layers /// The name of the regularizer function applied to the output of the layer (its "activation") (see keras.regularizers). /// A tensor of rank 4+ representing activation(conv2d(inputs, kernel) + bias). public Conv2DTranspose Conv2DTranspose(int filters, - TensorShape kernel_size = null, - TensorShape strides = null, + Shape kernel_size = null, + Shape strides = null, string output_padding = "valid", string data_format = null, - TensorShape dilation_rate = null, + Shape dilation_rate = null, string activation = null, bool use_bias = true, string kernel_initializer = null, @@ -338,7 +338,7 @@ namespace Tensorflow.Keras.Layers IInitializer kernel_initializer = null, bool use_bias = true, IInitializer bias_initializer = null, - TensorShape input_shape = null) + Shape input_shape = null) => new Dense(new DenseArgs { Units = units, @@ -377,7 +377,7 @@ namespace Tensorflow.Keras.Layers /// N-D tensor with shape: (batch_size, ..., units). For instance, for a 2D input with shape (batch_size, input_dim), the output would have shape (batch_size, units). public Dense Dense(int units, string activation = null, - TensorShape input_shape = null) + Shape input_shape = null) => new Dense(new DenseArgs { Units = units, @@ -438,7 +438,7 @@ namespace Tensorflow.Keras.Layers /// /// An integer to use as random seed. /// - public Dropout Dropout(float rate, TensorShape noise_shape = null, int? seed = null) + public Dropout Dropout(float rate, Shape noise_shape = null, int? seed = null) => new Dropout(new DropoutArgs { Rate = rate, @@ -461,7 +461,7 @@ namespace Tensorflow.Keras.Layers int output_dim, IInitializer embeddings_initializer = null, bool mask_zero = false, - TensorShape input_shape = null, + Shape input_shape = null, int input_length = -1) => new Embedding(new EmbeddingArgs { @@ -502,7 +502,7 @@ namespace Tensorflow.Keras.Layers /// In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see this guide. /// /// A tensor. 
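
Editorial note: with the LayersApi overloads below now taking Shape, callers construct shapes directly. A minimal usage sketch under the new signatures; the `keras` entry point (the KerasInterface instance exposing this LayersApi) and the literal dimensions are assumptions for illustration only:

    // Hypothetical caller after the rename; only the Shape-typed parameters change.
    var inputs = keras.Input(shape: new Shape(28, 28, 1));
    var conv = keras.layers.Conv2D(filters: 32, kernel_size: new Shape(3, 3));
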
- public Tensors Input(TensorShape shape, + public Tensors Input(Shape shape, string name = null, bool sparse = false, bool ragged = false) @@ -518,7 +518,7 @@ namespace Tensorflow.Keras.Layers return input_layer.InboundNodes[0].Outputs; } - public InputLayer InputLayer(TensorShape input_shape, + public InputLayer InputLayer(Shape input_shape, string name = null, bool sparse = false, bool ragged = false) @@ -580,8 +580,8 @@ namespace Tensorflow.Keras.Layers /// It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. /// If you never set it, then it will be "channels_last" /// - public MaxPooling2D MaxPooling2D(TensorShape pool_size = null, - TensorShape strides = null, + public MaxPooling2D MaxPooling2D(Shape pool_size = null, + Shape strides = null, string padding = "valid", string data_format = null) => new MaxPooling2D(new MaxPooling2DArgs @@ -752,7 +752,7 @@ namespace Tensorflow.Keras.Layers /// public Rescaling Rescaling(float scale, float offset = 0, - TensorShape input_shape = null) + Shape input_shape = null) => new Rescaling(new RescalingArgs { Scale = scale, diff --git a/src/TensorFlowNET.Keras/Layers/Merging/Concatenate.cs b/src/TensorFlowNET.Keras/Layers/Merging/Concatenate.cs index a4309949..676d5752 100644 --- a/src/TensorFlowNET.Keras/Layers/Merging/Concatenate.cs +++ b/src/TensorFlowNET.Keras/Layers/Merging/Concatenate.cs @@ -25,12 +25,12 @@ namespace Tensorflow.Keras.Layers protected override void build(Tensors inputs) { - /*var shape_set = new HashSet(); + /*var shape_set = new HashSet(); var reduced_inputs_shapes = inputs.Select(x => x.shape).ToArray(); for (var i = 0; i < reduced_inputs_shapes.Length; i++) { int seq = -1; - TensorShape shape = reduced_inputs_shapes[i].Where(x => + Shape shape = reduced_inputs_shapes[i].Where(x => { seq++; return seq != i; diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs index 4ec40b2c..1a29badf 100644 --- a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs +++ b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs @@ -36,7 +36,7 @@ namespace Tensorflow.Keras.Layers bool fused; int[] axis; string _data_format; - TensorShape kernel_size; + Shape kernel_size; IInitializer beta_initializer => args.BetaInitializer; IInitializer gamma_initializer => args.GammaInitializer; IInitializer moving_mean_initializer => args.MovingMeanInitializer; @@ -55,7 +55,7 @@ namespace Tensorflow.Keras.Layers protected override void build(Tensors inputs) { - TensorShape input_shape = inputs.shape; + Shape input_shape = inputs.shape; var ndims = input_shape.ndim; foreach (var (idx, x) in enumerate(axis)) if (x < 0) @@ -121,7 +121,7 @@ namespace Tensorflow.Keras.Layers built = true; } - public override TensorShape ComputeOutputShape(TensorShape input_shape) + public override Shape ComputeOutputShape(Shape input_shape) { return input_shape; } @@ -148,7 +148,7 @@ namespace Tensorflow.Keras.Layers { Tensor outputs = null; var training_tensor = training == null - ? tf.placeholder(tf.@bool, TensorShape.Scalar) + ? tf.placeholder(tf.@bool, Shape.Scalar) : tf.logical_and(training.Value, Trainable); if (fused) { @@ -198,13 +198,13 @@ namespace Tensorflow.Keras.Layers outputs = nn_impl.batch_normalization(inputs, mean, variance, offset_tensor, scale_tensor, epsilon); // If some components of the shape got lost due to adjustments, fix that. 
- outputs.set_shape(input_shape); + outputs.shape = input_shape; return outputs; } private Tensor _fused_batch_norm(Tensor inputs, Tensor training) { - TensorShape input_batch_size = null; + Shape input_batch_size = null; var use_fused_avg_updates = true; float exponential_avg_factor = 0; if (use_fused_avg_updates) diff --git a/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs b/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs index 9d0589bc..603e2b07 100644 --- a/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs +++ b/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs @@ -24,9 +24,9 @@ namespace Tensorflow.Keras.Layers return image_ops_impl.resize_images_v2(inputs, new[] { args.Height, args.Width }, method: args.Interpolation); } - public override TensorShape ComputeOutputShape(TensorShape input_shape) + public override Shape ComputeOutputShape(Shape input_shape) { - return new TensorShape(input_shape.dims[0], args.Height, args.Width, input_shape.dims[3]); + return new Shape(input_shape.dims[0], args.Height, args.Width, input_shape.dims[3]); } public static Resizing from_config(JObject config) diff --git a/src/TensorFlowNET.Keras/Layers/Preprocessing/TextVectorization.cs b/src/TensorFlowNET.Keras/Layers/Preprocessing/TextVectorization.cs index 038f419b..6d37eaa1 100644 --- a/src/TensorFlowNET.Keras/Layers/Preprocessing/TextVectorization.cs +++ b/src/TensorFlowNET.Keras/Layers/Preprocessing/TextVectorization.cs @@ -33,7 +33,7 @@ namespace Tensorflow.Keras.Layers public override void adapt(IDatasetV2 data, bool reset_state = true) { var shape = data.output_shapes[0]; - if (shape.rank == 1) + if (shape.ndim == 1) data = data.map(tensor => array_ops.expand_dims(tensor, -1)); build(data.variant_tensor); var preprocessed_inputs = data.map(_preprocess); diff --git a/src/TensorFlowNET.Keras/Layers/RNN.cs b/src/TensorFlowNET.Keras/Layers/RNN.cs index 411869e4..293c27fb 100644 --- a/src/TensorFlowNET.Keras/Layers/RNN.cs +++ b/src/TensorFlowNET.Keras/Layers/RNN.cs @@ -44,14 +44,14 @@ namespace Tensorflow.Keras.Layers // false case, output from previous timestep is returned for masked timestep. var zeroOutputForMask = (bool)args.Kwargs.Get("zero_output_for_mask", false); - TensorShape input_shape; - var propIS = (TensorShape)args.Kwargs.Get("input_shape", null); + Shape input_shape; + var propIS = (Shape)args.Kwargs.Get("input_shape", null); var propID = (int?)args.Kwargs.Get("input_dim", null); var propIL = (int?)args.Kwargs.Get("input_length", null); if (propIS == null && (propID != null || propIL != null)) { - input_shape = new TensorShape( + input_shape = new Shape( propIL ?? -1, propID ?? 
-1); args.Kwargs["input_shape"] = input_shape; @@ -113,7 +113,7 @@ namespace Tensorflow.Keras.Layers var myIndexerProperty = state_size.GetType().GetProperty("Item"); return myIndexerProperty != null && myIndexerProperty.GetIndexParameters().Length == 1 - && !(state_size.GetType() == typeof(TensorShape)); + && !(state_size.GetType() == typeof(Shape)); } } } diff --git a/src/TensorFlowNET.Keras/Layers/Rescaling/Rescaling.cs b/src/TensorFlowNET.Keras/Layers/Rescaling/Rescaling.cs index 10609c6b..bcdc222d 100644 --- a/src/TensorFlowNET.Keras/Layers/Rescaling/Rescaling.cs +++ b/src/TensorFlowNET.Keras/Layers/Rescaling/Rescaling.cs @@ -24,7 +24,7 @@ namespace Tensorflow.Keras.Layers return math_ops.cast(inputs, args.DType) * scale + offset; } - public override TensorShape ComputeOutputShape(TensorShape input_shape) + public override Shape ComputeOutputShape(Shape input_shape) { return input_shape; } diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs index 1b59ca82..539b5f62 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs @@ -37,7 +37,7 @@ namespace Tensorflow.Keras.Layers else { var input_shape = inputs.shape; - var rank = inputs.shape.rank; + var rank = inputs.shape.ndim; if (rank == 1) return array_ops.expand_dims(inputs, axis: 1); var batch_dim = tensor_shape.dimension_value(input_shape[0]); diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs index ecabc8f1..e73537b3 100644 --- a/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs +++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs @@ -31,11 +31,11 @@ namespace Tensorflow.Keras.Layers var result = array_ops.reshape(inputs, shape); if (!tf.Context.executing_eagerly()) - result.set_shape(ComputeOutputShape(inputs.shape)); + result.shape = ComputeOutputShape(inputs.shape); return result; } - public override TensorShape ComputeOutputShape(TensorShape input_shape) + public override Shape ComputeOutputShape(Shape input_shape) { if (input_shape.dims.Skip(1).Contains(-1)) { @@ -43,7 +43,7 @@ namespace Tensorflow.Keras.Layers } else { - input_shape = input_shape.dims[0]; + input_shape = new Shape(input_shape.dims[0]); var output_shape = input_shape.concatenate(args.TargetShape.dims); return output_shape; } diff --git a/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs b/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs index 2cf24fc3..0f6e4645 100644 --- a/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs +++ b/src/TensorFlowNET.Keras/Losses/SparseCategoricalCrossentropy.cs @@ -16,8 +16,8 @@ namespace Tensorflow.Keras.Losses // Try to adjust the shape so that rank of labels = rank of logits - 1. 
var output_shape = array_ops.shape_v2(output); - var output_rank = output.TensorShape.ndim; - var target_rank = target.TensorShape.ndim; + var output_rank = output.shape.ndim; + var target_rank = target.shape.ndim; var update_shape = target_rank != output_rank - 1; if (update_shape) { diff --git a/src/TensorFlowNET.Keras/Metrics/Metric.cs b/src/TensorFlowNET.Keras/Metrics/Metric.cs index 2a34ef53..21457f15 100644 --- a/src/TensorFlowNET.Keras/Metrics/Metric.cs +++ b/src/TensorFlowNET.Keras/Metrics/Metric.cs @@ -28,7 +28,7 @@ namespace Tensorflow.Keras.Metrics } protected override IVariableV1 add_weight(string name, - TensorShape shape = null, + Shape shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT, IInitializer initializer = null, IRegularizer regularizer = null, @@ -38,7 +38,7 @@ namespace Tensorflow.Keras.Metrics Func getter = null) { if (shape == null) - shape = new TensorShape(new int[0]); + shape = new Shape(new int[0]); return tf_with(ops.init_scope(), delegate { diff --git a/src/TensorFlowNET.Keras/Metrics/MetricsApi.cs b/src/TensorFlowNET.Keras/Metrics/MetricsApi.cs index 592d2568..c8d54fc9 100644 --- a/src/TensorFlowNET.Keras/Metrics/MetricsApi.cs +++ b/src/TensorFlowNET.Keras/Metrics/MetricsApi.cs @@ -25,8 +25,8 @@ namespace Tensorflow.Keras.Metrics /// Sparse categorical accuracy values. public Tensor sparse_categorical_accuracy(Tensor y_true, Tensor y_pred) { - var y_pred_rank = y_pred.TensorShape.ndim; - var y_true_rank = y_true.TensorShape.ndim; + var y_pred_rank = y_pred.shape.ndim; + var y_true_rank = y_true.shape.ndim; // If the shape of y_true is (num_samples, 1), squeeze to (num_samples,) if (y_true_rank != -1 && y_pred_rank != -1 && y_true.shape.ndim == y_pred.shape.ndim) diff --git a/src/TensorFlowNET.Keras/Optimizers/OptimizerV2.cs b/src/TensorFlowNET.Keras/Optimizers/OptimizerV2.cs index 2383c5d1..73e35d02 100644 --- a/src/TensorFlowNET.Keras/Optimizers/OptimizerV2.cs +++ b/src/TensorFlowNET.Keras/Optimizers/OptimizerV2.cs @@ -257,7 +257,7 @@ namespace Tensorflow.Keras.Optimizers } ResourceVariable add_weight(string name, - TensorShape shape, + Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, IInitializer initializer = null, bool trainable = false, diff --git a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs index ca69924e..ded952bc 100644 --- a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs +++ b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs @@ -32,7 +32,7 @@ namespace Tensorflow.Keras string[] class_names = null, string color_mode = "rgb", int batch_size = 32, - TensorShape image_size = null, + Shape image_size = null, bool shuffle = true, int? 
seed = null, float validation_split = 0.2f, diff --git a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs index dba2cded..184d7d5b 100644 --- a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs +++ b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs @@ -7,7 +7,7 @@ namespace Tensorflow.Keras public partial class Preprocessing { public IDatasetV2 paths_and_labels_to_dataset(string[] image_paths, - TensorShape image_size, + Shape image_size, int num_channels, int[] labels, string label_mode, @@ -26,7 +26,7 @@ namespace Tensorflow.Keras return img_ds; } - Tensor path_to_image(Tensor path, TensorShape image_size, int num_channels, string interpolation) + Tensor path_to_image(Tensor path, Shape image_size, int num_channels, string interpolation) { var img = tf.io.read_file(path); img = tf.image.decode_image( diff --git a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs index 621d79c5..fc8cab0c 100644 --- a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs +++ b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs @@ -157,7 +157,7 @@ namespace Tensorflow.Keras.Saving } } - bool _try_build_layer(Model obj, int node_id, TensorShape build_input_shape) + bool _try_build_layer(Model obj, int node_id, Shape build_input_shape) { if (obj.Built) return true; @@ -165,7 +165,7 @@ namespace Tensorflow.Keras.Saving return false; } - bool _try_build_layer(Layer obj, int node_id, TensorShape build_input_shape) + bool _try_build_layer(Layer obj, int node_id, Shape build_input_shape) { if (obj.Built) return true; diff --git a/src/TensorFlowNET.Keras/Saving/TensorShapeConfig.cs b/src/TensorFlowNET.Keras/Saving/TensorShapeConfig.cs index dd5f49c5..4c2ecc0d 100644 --- a/src/TensorFlowNET.Keras/Saving/TensorShapeConfig.cs +++ b/src/TensorFlowNET.Keras/Saving/TensorShapeConfig.cs @@ -9,7 +9,7 @@ namespace Tensorflow.Keras.Saving public string ClassName { get; set; } public int?[] Items { get; set; } - public static implicit operator TensorShape(TensorShapeConfig shape) - => shape == null ? null : new TensorShape(shape.Items.Select(x => x.HasValue ? x.Value : -1).ToArray()); + public static implicit operator Shape(TensorShapeConfig shape) + => shape == null ? null : new Shape(shape.Items.Select(x => x.HasValue ? 
x.Value : -1).ToArray()); } } diff --git a/src/TensorFlowNET.Keras/Utils/losses_utils.cs b/src/TensorFlowNET.Keras/Utils/losses_utils.cs index ec6f6e4e..8a8772fd 100644 --- a/src/TensorFlowNET.Keras/Utils/losses_utils.cs +++ b/src/TensorFlowNET.Keras/Utils/losses_utils.cs @@ -45,7 +45,7 @@ namespace Tensorflow.Keras.Utils public static (Tensor, Tensor) squeeze_or_expand_dimensions(Tensor y_pred, Tensor sample_weight) { - var weights_shape = sample_weight.TensorShape; + var weights_shape = sample_weight.shape; var weights_rank = weights_shape.ndim; if (weights_rank == 0) return (y_pred, sample_weight); diff --git a/test/TensorFlowNET.Graph.UnitTest/Basics/QueueTest.cs b/test/TensorFlowNET.Graph.UnitTest/Basics/QueueTest.cs index 4fa1a7da..f0a4ea84 100644 --- a/test/TensorFlowNET.Graph.UnitTest/Basics/QueueTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/Basics/QueueTest.cs @@ -12,7 +12,7 @@ namespace TensorFlowNET.UnitTest.Basics public void PaddingFIFOQueue() { var numbers = tf.placeholder(tf.int32); - var queue = tf.PaddingFIFOQueue(10, tf.int32, new TensorShape(-1)); + var queue = tf.PaddingFIFOQueue(10, tf.int32, new Shape(-1)); var enqueue = queue.enqueue(numbers); var dequeue_many = queue.dequeue_many(n: 3); diff --git a/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/ShapeTestCase.cs b/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/ShapeTestCase.cs index dc7d5af8..667f336f 100644 --- a/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/ShapeTestCase.cs +++ b/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/ShapeTestCase.cs @@ -14,9 +14,9 @@ namespace TensorFlowNET.UnitTest.ControlFlowTest public void testShape() { var tensor = constant_op.constant(new[] { 1.0, 2.0 }); - self.assertEquals(new int[] { 2 }, tensor.shape); - self.assertEquals(new int[] { 2 }, - control_flow_ops.with_dependencies(new[] { constant_op.constant(1.0).op }, tensor).shape); + self.assertEquals(new long[] { 2 }, tensor.shape.dims); + self.assertEquals(new long[] { 2 }, + control_flow_ops.with_dependencies(new[] { constant_op.constant(1.0).op }, tensor).shape.dims); } } diff --git a/test/TensorFlowNET.Graph.UnitTest/FunctionalOpsTest/ScanTestCase.cs b/test/TensorFlowNET.Graph.UnitTest/FunctionalOpsTest/ScanTestCase.cs index 6e9c707a..88b0b0b7 100644 --- a/test/TensorFlowNET.Graph.UnitTest/FunctionalOpsTest/ScanTestCase.cs +++ b/test/TensorFlowNET.Graph.UnitTest/FunctionalOpsTest/ScanTestCase.cs @@ -19,7 +19,7 @@ namespace TensorFlowNET.UnitTest.FunctionalOpsTest var sess = tf.Session().as_default(); - var input = tf.placeholder(TF_DataType.TF_INT32, new TensorShape(6)); + var input = tf.placeholder(TF_DataType.TF_INT32, new Shape(6)); var scan = functional_ops.scan(fn, input); var result = sess.run(scan, (input, np.array(1, 2, 3, 4, 5, 6))); Assert.AreEqual(result, np.array(1, 3, 6, 10, 15, 21)); @@ -32,7 +32,7 @@ namespace TensorFlowNET.UnitTest.FunctionalOpsTest var sess = tf.Session().as_default(); - var input = tf.placeholder(TF_DataType.TF_INT32, new TensorShape(6)); + var input = tf.placeholder(TF_DataType.TF_INT32, new Shape(6)); var scan = functional_ops.scan(fn, input, reverse: true); var result = sess.run(scan, (input, np.array(1, 2, 3, 4, 5, 6))); Assert.AreEqual(result, np.array(21, 20, 18, 15, 11, 6)); diff --git a/test/TensorFlowNET.Graph.UnitTest/OperationsTest.cs b/test/TensorFlowNET.Graph.UnitTest/OperationsTest.cs index df34d51d..ac0c6b18 100644 --- a/test/TensorFlowNET.Graph.UnitTest/OperationsTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/OperationsTest.cs @@ -188,8 +188,8 @@ namespace 
TensorFlowNET.UnitTest.Basics var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray(); var intResult = firstIntFeed.Sum() + secondIntFeed.Sum(); - var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); - var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var a = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); var c = tf.reduce_sum(tf.reduce_sum(tf.add(a, b), 1)); using (var sess = tf.Session()) @@ -237,8 +237,8 @@ namespace TensorFlowNET.UnitTest.Basics var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray(); var floatResult = firstFloatFeed.Sum() + secondFloatFeed.Sum(); - a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.add(a, b), 1)); using (var sess = tf.Session()) @@ -286,8 +286,8 @@ namespace TensorFlowNET.UnitTest.Basics var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray(); var doubleResult = firstDoubleFeed.Sum() + secondDoubleFeed.Sum(); - a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.add(a, b), 1)); using (var sess = tf.Session()) @@ -343,8 +343,8 @@ namespace TensorFlowNET.UnitTest.Basics var intResult = firstIntFeed.Sum() - secondIntFeed.Sum(); var intResultTwo = -firstIntFeed.Sum(); - var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); - var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var a = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); var c = tf.reduce_sum(tf.reduce_sum(tf.sub(a, b), 1)); using (var sess = tf.Session()) @@ -402,8 +402,8 @@ namespace TensorFlowNET.UnitTest.Basics var floatResult = firstFloatFeed.Sum() - secondFloatFeed.Sum(); var floatResultTwo = -firstFloatFeed.Sum(); - a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.sub(a, b), 1)); using (var sess = tf.Session()) @@ -461,8 +461,8 @@ namespace TensorFlowNET.UnitTest.Basics var doubleResult = firstDoubleFeed.Sum() - secondDoubleFeed.Sum(); var doubleResultTwo = -firstDoubleFeed.Sum(); - a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.sub(a, b), 1)); using (var sess = tf.Session()) @@ -584,8 +584,8 @@ namespace TensorFlowNET.UnitTest.Basics var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray(); var intResult = MultiplyArray(firstIntFeed, secondIntFeed).Sum(); - var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); - var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + 
var a = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); var c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); using (var sess = tf.Session()) @@ -633,8 +633,8 @@ namespace TensorFlowNET.UnitTest.Basics var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray(); var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed).Sum(); - a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); using (var sess = tf.Session()) @@ -682,8 +682,8 @@ namespace TensorFlowNET.UnitTest.Basics var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray(); var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed).Sum(); - a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1)); using (var sess = tf.Session()) @@ -739,8 +739,8 @@ namespace TensorFlowNET.UnitTest.Basics var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray(); var intResult = (int)(firstIntFeed.Sum() / (float)secondIntVal); - var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); - var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var a = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); var c = tf.reduce_sum(tf.reduce_sum(gen_math_ops.floor_div(a, b), 1)); using (var sess = tf.Session()) @@ -788,8 +788,8 @@ namespace TensorFlowNET.UnitTest.Basics var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray(); var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed.Select(x => 1 / x).ToArray()).Sum(); - a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.divide(a, b), 1)); using (var sess = tf.Session()) @@ -837,8 +837,8 @@ namespace TensorFlowNET.UnitTest.Basics var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray(); var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed.Select(x => 1 / x).ToArray()).Sum(); - a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.divide(a, b), 1)); using (var sess = tf.Session()) @@ -893,8 +893,8 @@ namespace TensorFlowNET.UnitTest.Basics var intResult = firstIntFeed.Count(elem => elem > intThreshold); var intResultTwo = firstIntFeed.Count(elem => elem < intThreshold); - var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); - var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var a = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); + var b = 
tf.placeholder(tf.int32, shape: new Shape(rows, cols)); var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -942,8 +942,8 @@ namespace TensorFlowNET.UnitTest.Basics var floatResult = firstFloatFeed.Count(elem => elem > floatThreshold); var floatResultTwo = firstFloatFeed.Count(elem => elem < floatThreshold); - a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -991,8 +991,8 @@ namespace TensorFlowNET.UnitTest.Basics var doubleResult = firstDoubleFeed.Count(elem => elem > doubleThreshold); var doubleResultTwo = firstDoubleFeed.Count(elem => elem < doubleThreshold); - a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1047,8 +1047,8 @@ namespace TensorFlowNET.UnitTest.Basics var intResult = firstIntFeed.Count(elem => elem < intThreshold); var intResultTwo = firstIntFeed.Count(elem => elem > intThreshold); - var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); - var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var a = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1096,8 +1096,8 @@ namespace TensorFlowNET.UnitTest.Basics var floatResult = firstFloatFeed.Count(elem => elem < floatThreshold); var floatResultTwo = firstFloatFeed.Count(elem => elem > floatThreshold); - a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1145,8 +1145,8 @@ namespace TensorFlowNET.UnitTest.Basics var doubleResult = firstDoubleFeed.Count(elem => elem < doubleThreshold); var doubleResultTwo = firstDoubleFeed.Count(elem => elem > doubleThreshold); - a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1201,8 +1201,8 @@ namespace TensorFlowNET.UnitTest.Basics var intResult = firstIntFeed.Count(elem => elem >= intThreshold); var intResultTwo = firstIntFeed.Count(elem => elem <= intThreshold); - var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); - var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var a = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); var c = 
tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1250,8 +1250,8 @@ namespace TensorFlowNET.UnitTest.Basics var floatResult = firstFloatFeed.Count(elem => elem >= floatThreshold); var floatResultTwo = firstFloatFeed.Count(elem => elem <= floatThreshold); - a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1299,8 +1299,8 @@ namespace TensorFlowNET.UnitTest.Basics var doubleResult = firstDoubleFeed.Count(elem => elem >= doubleThreshold); var doubleResultTwo = firstDoubleFeed.Count(elem => elem <= doubleThreshold); - a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1355,8 +1355,8 @@ namespace TensorFlowNET.UnitTest.Basics var intResult = firstIntFeed.Count(elem => elem <= intThreshold); var intResultTwo = firstIntFeed.Count(elem => elem >= intThreshold); - var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); - var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols)); + var a = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); + var b = tf.placeholder(tf.int32, shape: new Shape(rows, cols)); var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1404,8 +1404,8 @@ namespace TensorFlowNET.UnitTest.Basics var floatResult = firstFloatFeed.Count(elem => elem <= floatThreshold); var floatResultTwo = firstFloatFeed.Count(elem => elem >= floatThreshold); - a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float32, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1)); using (var sess = tf.Session()) @@ -1453,8 +1453,8 @@ namespace TensorFlowNET.UnitTest.Basics var doubleResult = firstDoubleFeed.Count(elem => elem <= doubleThreshold); var doubleResultTwo = firstDoubleFeed.Count(elem => elem >= doubleThreshold); - a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); - b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols)); + a = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); + b = tf.placeholder(tf.float64, shape: new Shape(rows, cols)); c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1)); using (var sess = tf.Session()) diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs index a2221aa0..865d7520 100644 --- a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs +++ b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs @@ -39,7 +39,7 @@ namespace TensorFlowNET.Keras.UnitTest var layers = keras.layers; var inputs = keras.Input(shape: 784); - Assert.AreEqual((-1, 784), inputs.TensorShape); + Assert.AreEqual((-1, 784), inputs.shape); var 
dense = layers.Dense(64, activation: keras.activations.Relu); var x = dense.Apply(inputs); @@ -73,7 +73,7 @@ namespace TensorFlowNET.Keras.UnitTest model.summary(); Assert.AreEqual(model.Layers.Count, 8); var result = model.predict(tf.constant(np.arange(24).astype(np.float32)[np.newaxis, Slice.All])); - Assert.AreEqual(result.shape, new TensorShape(1, 24)); + Assert.AreEqual(result.shape, new Shape(1, 24)); model.fit(np.arange(24).astype(np.float32)[np.newaxis, Slice.All], np.arange(24).astype(np.float32)[np.newaxis, Slice.All], verbose: 0); } @@ -86,7 +86,7 @@ namespace TensorFlowNET.Keras.UnitTest var emb = keras.layers.Embedding(256, 12, input_length: 4); var input_array = np.arange(12).reshape((3, 4)).astype(np.float32); var output = emb.Apply(input_array); - Assert.AreEqual(new TensorShape(3, 4, 12), output.shape); + Assert.AreEqual(new Shape(3, 4, 12), output.shape); } /// diff --git a/test/TensorFlowNET.UnitTest/Basics/SessionTest.cs b/test/TensorFlowNET.UnitTest/Basics/SessionTest.cs index 6b642cdd..3694fd8e 100644 --- a/test/TensorFlowNET.UnitTest/Basics/SessionTest.cs +++ b/test/TensorFlowNET.UnitTest/Basics/SessionTest.cs @@ -77,7 +77,7 @@ namespace TensorFlowNET.UnitTest public void Autocast_Case1() { var sess = tf.Session().as_default(); - var input = tf.placeholder(tf.float32, shape: new TensorShape(6)); + var input = tf.placeholder(tf.float32, shape: new Shape(6)); var op = tf.reshape(input, new int[] { 2, 3 }); sess.run(tf.global_variables_initializer()); var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6))); @@ -91,7 +91,7 @@ namespace TensorFlowNET.UnitTest public void Autocast_Case2() { var sess = tf.Session().as_default(); - var input = tf.placeholder(tf.float64, shape: new TensorShape(6)); + var input = tf.placeholder(tf.float64, shape: new Shape(6)); var op = tf.reshape(input, new int[] { 2, 3 }); sess.run(tf.global_variables_initializer()); var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6).astype(np.float32) + 0.1f)); @@ -105,7 +105,7 @@ namespace TensorFlowNET.UnitTest public void Autocast_Case3() { var sess = tf.Session().as_default(); - var input = tf.placeholder(tf.int64, shape: new TensorShape(6)); + var input = tf.placeholder(tf.int64, shape: new Shape(6)); var op = tf.reshape(input, new int[] { 2, 3 }); sess.run(tf.global_variables_initializer()); var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6).astype(np.float32) + 0.1f)); @@ -119,7 +119,7 @@ namespace TensorFlowNET.UnitTest public void Autocast_Case4() { var sess = tf.Session().as_default(); - var input = tf.placeholder(tf.byte8, shape: new TensorShape(6)); + var input = tf.placeholder(tf.byte8, shape: new Shape(6)); var op = tf.reshape(input, new int[] { 2, 3 }); sess.run(tf.global_variables_initializer()); var ret = sess.run(op, feed_dict: (input, np.array(1, 2, 3, 4, 5, 6).astype(np.float32) + 0.1f)); diff --git a/test/TensorFlowNET.UnitTest/Basics/TrainSaverTest.cs b/test/TensorFlowNET.UnitTest/Basics/TrainSaverTest.cs index 17ea37c3..60020744 100644 --- a/test/TensorFlowNET.UnitTest/Basics/TrainSaverTest.cs +++ b/test/TensorFlowNET.UnitTest/Basics/TrainSaverTest.cs @@ -75,8 +75,8 @@ namespace TensorFlowNET.UnitTest.Basics public void Save2() { - var v1 = tf.compat.v1.get_variable("v1", shape: new TensorShape(3), initializer: tf.zeros_initializer); - var v2 = tf.compat.v1.get_variable("v2", shape: new TensorShape(5), initializer: tf.zeros_initializer); + var v1 = tf.compat.v1.get_variable("v1", shape: new Shape(3), initializer: 
tf.zeros_initializer); + var v2 = tf.compat.v1.get_variable("v2", shape: new Shape(5), initializer: tf.zeros_initializer); var inc_v1 = v1.assign(v1.AsTensor() + 1.0f); var dec_v2 = v2.assign(v2.AsTensor() - 1.0f); diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs index 6e699bf4..8366c070 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs @@ -54,7 +54,7 @@ namespace TensorFlowNET.UnitTest.ManagedAPI tf.compat.v1.disable_eager_execution(); Func fn = (prev, current) => tf.add(prev, current); - var input = tf.placeholder(TF_DataType.TF_FLOAT, new TensorShape(6)); + var input = tf.placeholder(TF_DataType.TF_FLOAT, new Shape(6)); var scan = tf.scan(fn, input); using (var sess = tf.Session()) diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/LinalgTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/LinalgTest.cs index 73c6415b..6594651e 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/LinalgTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/LinalgTest.cs @@ -11,7 +11,7 @@ namespace TensorFlowNET.UnitTest.ManagedAPI { var tensor = tf.linalg.eye(3); - Assert.AreEqual((3, 3), tensor.TensorShape); + Assert.AreEqual(tensor.shape, (3, 3)); Assert.AreEqual(0.0f, (double)tensor[2, 0]); Assert.AreEqual(0.0f, (double)tensor[2, 1]); diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/MathApiTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/MathApiTest.cs index 78f57b20..42ac641b 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/MathApiTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/MathApiTest.cs @@ -43,7 +43,7 @@ namespace TensorFlowNET.UnitTest.ManagedAPI Assert.IsTrue(Enumerable.SequenceEqual(new[] { 3.9f, -8.6f }, x3.ToArray())); var x4 = tf.reduce_sum(b, 1, keepdims: true); - Assert.AreEqual((2, 1), x4.TensorShape); + Assert.AreEqual((2, 1), x4.shape); var x5 = tf.reduce_sum(b, (0, 1)); Assert.AreEqual(-4.7f, (float)x5); diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/TensorOperate.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/TensorOperate.cs index 8f38f45c..43c6c429 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/TensorOperate.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/TensorOperate.cs @@ -73,7 +73,7 @@ namespace TensorFlowNET.UnitTest.ManagedAPI } })); #endregion - Assert.AreEqual((4, 2, 1, 2), actual_transposed_a.TensorShape); + Assert.AreEqual((4, 2, 1, 2), actual_transposed_a.shape); Assert.AreEqual(expected_transposed_a.numpy(), actual_transposed_a.numpy()); } diff --git a/test/TensorFlowNET.UnitTest/Utilities/FluentExtension.cs b/test/TensorFlowNET.UnitTest/Utilities/FluentExtension.cs index ba7b3829..7be72297 100644 --- a/test/TensorFlowNET.UnitTest/Utilities/FluentExtension.cs +++ b/test/TensorFlowNET.UnitTest/Utilities/FluentExtension.cs @@ -42,13 +42,13 @@ namespace TensorFlowNET.UnitTest public AndConstraint BeOfSize(int size, string because = null, params object[] becauseArgs) { - Subject.size.Should().Be((ulong)size, because, becauseArgs); + Subject.size.Should().Be(size, because, becauseArgs); return new AndConstraint(this); } public AndConstraint NotBeOfSize(int size, string because = null, params object[] becauseArgs) { - Subject.size.Should().NotBe((ulong)size, because, becauseArgs); + Subject.size.Should().NotBe(size, because, becauseArgs); return new AndConstraint(this); }
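
For downstream callers the rename shown above is mechanical: shapes are constructed as Shape instead of TensorShape, the TensorShape property on tensors gives way to shape, and rank is read as ndim. A minimal, illustrative before/after sketch of caller code affected by this change (hypothetical user code based on the call sites touched in this patch, not part of the patch itself):

    // before this change (TensorShape-based API)
    var x = tf.placeholder(tf.float32, shape: new TensorShape(2, 3));
    var rank = x.TensorShape.ndim;

    // after this change (Shape-based API)
    var y = tf.placeholder(tf.float32, shape: new Shape(2, 3));
    var yRank = y.shape.ndim;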