
Merge branch 'rnn-dev' of github.com:Wanglongzhi2001/TensorFlow.NET into rnn-dev

pull/1106/head
Wanglongzhi2001 · 2 years ago · commit da86e67a5b
57 changed files with 604 additions and 0 deletions
  1. +12 -0  src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs
  2. +8 -0  src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs
  3. +6 -0  src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
  4. +11 -0  src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs
  5. +60 -0  src/TensorFlowNET.Keras/BackendImpl.cs
  6. +4 -0  src/TensorFlowNET.Keras/Engine/Functional.cs
  7. +4 -0  src/TensorFlowNET.Keras/Engine/Layer.Apply.cs
  8. +4 -0  src/TensorFlowNET.Keras/Engine/Layer.cs
  9. +4 -0  src/TensorFlowNET.Keras/Engine/Sequential.cs
  10. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/ELU.cs
  11. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs
  12. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs
  13. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs
  14. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/SELU.cs
  15. +5 -0  src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs
  16. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs
  17. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs
  18. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/Swish.cs
  19. +4 -0  src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs
  20. +4 -0  src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs
  21. +4 -0  src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs
  22. +4 -0  src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs
  23. +4 -0  src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
  24. +4 -0  src/TensorFlowNET.Keras/Layers/Core/Dense.cs
  25. +4 -0  src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs
  26. +4 -0  src/TensorFlowNET.Keras/Layers/Core/Embedding.cs
  27. +76 -0  src/TensorFlowNET.Keras/Layers/LayersApi.cs
  28. +4 -0  src/TensorFlowNET.Keras/Layers/Merging/Merge.cs
  29. +4 -0  src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs
  30. +4 -0  src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs
  31. +4 -0  src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs
  32. +4 -0  src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs
  33. +4 -0  src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs
  34. +4 -0  src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs
  35. +4 -0  src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs
  36. +4 -0  src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs
  37. +4 -0  src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs
  38. +4 -0  src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs
  39. +4 -0  src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs
  40. +4 -0  src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs
  41. +4 -0  src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs
  42. +4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs
  43. +4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs
  44. +4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs
  45. +4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs
  46. +4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs
  47. +4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs
  48. +4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs
  49. +4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs
  50. +43 -0  src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs
  51. +6 -0  src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs
  52. +142 -0  src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs
  53. +38 -0  src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs
  54. +18 -0  src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs
  55. +4 -0  src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs
  56. +4 -0  src/TensorflowNET.Hub/KerasLayer.cs
  57. +3 -0  test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs

+12 -0  src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs

@@ -2,18 +2,23 @@
using OneOf;
using System.Collections.Generic;
<<<<<<< HEAD
<<<<<<< HEAD
using Tensorflow.Keras.Layers.Rnn;
=======
using Tensorflow.Keras.Layers;
using Tensorflow.Keras.ArgsDefinition.Rnn;
using Tensorflow.NumPy;
>>>>>>> master
=======
using Tensorflow.Keras.Layers.Rnn;
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

namespace Tensorflow.Keras.ArgsDefinition.Rnn
{
// TODO(Rinne): add regularizers.
public class RNNArgs : AutoSerializeLayerArgs
{
<<<<<<< HEAD
<<<<<<< HEAD
[JsonProperty("cell")]
// TODO: the cell should be serialized with `serialize_keras_object`.
@@ -31,6 +36,13 @@ namespace Tensorflow.Keras.ArgsDefinition.Rnn
// TODO: the cell should be serialized with `serialize_keras_object`.
public OneOf<IList<IRnnArgCell>, IRnnArgCell> Cell { get; set; }
>>>>>>> master
=======
[JsonProperty("cell")]
// TODO: the cell should be serialized with `serialize_keras_object`.
public IRnnCell Cell { get; set; } = null;
[JsonProperty("cells")]
public IList<IRnnCell> Cells { get; set; } = null;
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

[JsonProperty("return_sequences")]
public bool ReturnSequences { get; set; } = false;
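
For orientation, resolving the markers above in favor of the HEAD / 90a65d7d side leaves the cell-related members of RNNArgs reading roughly as follows. This is a minimal sketch reassembled from the hunk, not the committed file:

using Newtonsoft.Json;
using System.Collections.Generic;
using Tensorflow.Keras.Layers.Rnn;

namespace Tensorflow.Keras.ArgsDefinition.Rnn
{
    // TODO(Rinne): add regularizers.
    public class RNNArgs : AutoSerializeLayerArgs
    {
        [JsonProperty("cell")]
        // TODO: the cell should be serialized with `serialize_keras_object`.
        public IRnnCell Cell { get; set; } = null;

        [JsonProperty("cells")]
        public IList<IRnnCell> Cells { get; set; } = null;

        [JsonProperty("return_sequences")]
        public bool ReturnSequences { get; set; } = false;
    }
}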


+8 -0  src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs

@@ -1,19 +1,27 @@
using System.Collections.Generic;
<<<<<<< HEAD
<<<<<<< HEAD
using Tensorflow.Keras.Layers.Rnn;
=======
using static Tensorflow.Keras.ArgsDefinition.Rnn.RNNArgs;
>>>>>>> master
=======
using Tensorflow.Keras.Layers.Rnn;
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

namespace Tensorflow.Keras.ArgsDefinition.Rnn
{
public class StackedRNNCellsArgs : LayerArgs
{
<<<<<<< HEAD
<<<<<<< HEAD
public IList<IRnnCell> Cells { get; set; }
=======
public IList<IRnnArgCell> Cells { get; set; }
>>>>>>> master
=======
public IList<IRnnCell> Cells { get; set; }
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
public Dictionary<string, object> Kwargs { get; set; } = null;
}
}

+6 -0  src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs

@@ -227,6 +227,9 @@ namespace Tensorflow.Keras.Layers
bool return_state = false);

<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
public ILayer RNN(
IRnnCell cell,
bool return_sequences = false,
@@ -246,6 +249,7 @@ namespace Tensorflow.Keras.Layers
bool unroll = false,
bool time_major = false
);
<<<<<<< HEAD
=======
public ILayer SimpleRNNCell(
int units,
@@ -257,6 +261,8 @@ namespace Tensorflow.Keras.Layers
float dropout = 0f,
float recurrent_dropout = 0f);
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

public ILayer Subtract();
}
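
Taking the HEAD-side declarations, callers reach the RNN layer through this interface. A hypothetical call site (the LSTMCell factory is the one added in the LayersApi.cs hunk further down):

using static Tensorflow.KerasApi;

// Hypothetical call site for the RNN factory declared above.
var cell = keras.layers.LSTMCell(4);   // IRnnCell factory (see LayersApi.cs below)
var rnn  = keras.layers.RNN(cell, return_sequences: true, return_state: false);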


+11 -0  src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs

@@ -53,12 +53,17 @@ namespace Tensorflow
/// matching structure of Tensors having shape `[batch_size].concatenate(s)`
/// for each `s` in `self.batch_size`.
/// </summary>
<<<<<<< HEAD
<<<<<<< HEAD
[Obsolete("This is an incompleted tf v1 api, pleas use keras RNNs instead.")]
public abstract class RnnCell : ILayer, IRnnCell
=======
public abstract class RnnCell : ILayer
>>>>>>> master
=======
[Obsolete("This is an incompleted tf v1 api, pleas use keras RNNs instead.")]
public abstract class RnnCell : ILayer, IRnnCell
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
/// <summary>
/// Attribute that indicates whether the cell is a TF RNN cell, due the slight
@@ -185,6 +190,9 @@ namespace Tensorflow
}

<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
public (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null)
{
throw new NotImplementedException();
@@ -193,11 +201,14 @@ namespace Tensorflow
public GeneralizedTensorShape OutputSize => throw new NotImplementedException();
public bool IsTFRnnCell => throw new NotImplementedException();
public bool SupportOptionalArgs => throw new NotImplementedException();
<<<<<<< HEAD
=======
public Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
{
throw new NotImplementedException();
}
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
}
}
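
The throwing stubs above imply the shape of the IRnnCell contract. Reconstructed here purely for illustration — the member set is taken from this hunk, and the real interface may differ:

// Illustrative reconstruction, not the actual IRnnCell source.
public interface IRnnCellSketch
{
    (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null);
    GeneralizedTensorShape OutputSize { get; }
    bool IsTFRnnCell { get; }
    bool SupportOptionalArgs { get; }
}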

+60 -0  src/TensorFlowNET.Keras/BackendImpl.cs

@@ -25,11 +25,15 @@ using static Tensorflow.Binding;
using static Tensorflow.Graphs.SubGraphUtility;
using Tensorflow.Util;
<<<<<<< HEAD
<<<<<<< HEAD
using Tensorflow.Common.Types;
=======
using Tensorflow.Operations;
using OneOf;
>>>>>>> master
=======
using Tensorflow.Common.Types;
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

namespace Tensorflow.Keras
{
@@ -460,6 +464,9 @@ namespace Tensorflow.Keras
}

<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
public (Tensors, Tensors, Tensors) rnn(
Func<Tensors, Tensors, (Tensors, Tensors)> step_function, // args:inputs, states, return:output, new_states
Tensors inputs, // inputs is a tuple of tensors (one per input sequence)
@@ -475,6 +482,7 @@ namespace Tensorflow.Keras
{

Tensor swap_batch_timestep(Tensor input_t)
<<<<<<< HEAD
=======
public static (Tensors, Tensors) convert_inputs_if_ragged(OneOf<Tensor, RaggedTensor> inputs)
{
@@ -498,6 +506,8 @@ namespace Tensorflow.Keras

Tensors swap_batch_timestep(Tensors input_t)
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
var axes = Enumerable.Range(0, input_t.rank).ToArray();
axes[0] = 1;
@@ -508,6 +518,9 @@ namespace Tensorflow.Keras
if (!time_major)
{
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
inputs = Nest.MapStructure(swap_batch_timestep, inputs).ToTensors();
}

@@ -516,6 +529,7 @@ namespace Tensorflow.Keras
var time_steps = first_flatted_input.shape[0];
var batch = first_flatted_input.shape[1];
var time_steps_t = (int)first_flatted_input.shape[0];
<<<<<<< HEAD
=======
inputs = nest.map_structure(swap_batch_timestep, inputs);
}
@@ -525,6 +539,8 @@ namespace Tensorflow.Keras
var batch = flatted_inptus[0].shape[1];
var time_step_t = tf.shape(flatted_inptus[0])[0];
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

foreach (var input_ in flatted_inptus)
{
@@ -550,6 +566,7 @@ namespace Tensorflow.Keras

}
<<<<<<< HEAD
<<<<<<< HEAD
=======

@@ -559,6 +576,9 @@ namespace Tensorflow.Keras
}

>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
// tf.where needs its condition tensor to be the same shape as its two
// result tensors, but in our case the condition (mask) tensor is
// (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
@@ -568,20 +588,28 @@ namespace Tensorflow.Keras

Tensors _expand_mask(Tensors mask_t, Tensors input_t, int fixed_dim = 1)
{
<<<<<<< HEAD
<<<<<<< HEAD
if (!mask_t.IsSingle())
=======
if (nest.is_nested(mask_t))
>>>>>>> master
=======
if (!mask_t.IsSingle())
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
throw new ValueError($"mask_t is expected to be tensor, but got {mask_t}");
}

<<<<<<< HEAD
<<<<<<< HEAD
if (!input_t.IsSingle())
=======
if (nest.is_nested(input_t))
>>>>>>> master
=======
if (!input_t.IsSingle())
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
throw new ValueError($"input_t is expected to be tensor, but got {input_t}");
}
@@ -591,11 +619,15 @@ namespace Tensorflow.Keras
{
mask_t = tf.expand_dims(mask_t, -1);
}
<<<<<<< HEAD
<<<<<<< HEAD
var multiples = Enumerable.Repeat(1, fixed_dim).ToArray().concat(input_t.shape.as_int_list().Skip(fixed_dim).ToArray());
=======
var multiples = Enumerable.Repeat(1, fixed_dim).ToArray().concat(input_t.shape.as_int_list().ToList().GetRange(fixed_dim, input_t.rank));
>>>>>>> master
=======
var multiples = Enumerable.Repeat(1, fixed_dim).ToArray().concat(input_t.shape.as_int_list().Skip(fixed_dim).ToArray());
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
return tf.tile(mask_t, multiples);
}

@@ -631,6 +663,9 @@ namespace Tensorflow.Keras
// the item in tuple is list of the tensor with shape (batch, feature)

<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
Tensors _process_single_input_t(Tensor input_t)
{
var unstaked_input_t = array_ops.unstack(input_t); // unstack for time_step dim
@@ -639,6 +674,7 @@ namespace Tensorflow.Keras
unstaked_input_t = unstaked_input_t.Reverse().ToArray();
}
return unstaked_input_t;
<<<<<<< HEAD
=======


@@ -652,10 +688,13 @@ namespace Tensorflow.Keras
}
return input_t;
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
}

// TODO(Wanglongzhi2001)
Tensors processed_input;
<<<<<<< HEAD
<<<<<<< HEAD
if (!inputs.IsSingle())
{
@@ -665,6 +704,11 @@ namespace Tensorflow.Keras
{
processed_input = nest.map_structure(_process_single_input_t, inputs);
>>>>>>> master
=======
if (!inputs.IsSingle())
{
processed_input = inputs.MapStructure(_process_single_input_t).ReduceTo<Tensors, Tensor>().ToTensors();
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
}
else
{
@@ -679,6 +723,9 @@ namespace Tensorflow.Keras
inp.Add(t_[time]);
}
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
return Nest.PackSequenceAs(inputs, inp);
}

@@ -1006,6 +1053,7 @@ namespace Tensorflow.Keras
last_output = Nest.PackSequenceAs(output_time_zero, last_output).ToTensors();

}
<<<<<<< HEAD
=======
return nest.pack_sequence_as(inputs, inp);
}
@@ -1336,6 +1384,8 @@ namespace Tensorflow.Keras

//}
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

Func<Tensor, Tensor> set_shape;
set_shape = (output_) =>
@@ -1352,16 +1402,23 @@ namespace Tensorflow.Keras
shape[0] = 1;
}
shape[1] = (int)batch;
<<<<<<< HEAD
<<<<<<< HEAD
output_.shape = shape;
=======
output_.set_shape(new Tensor(shape));
>>>>>>> master
=======
output_.shape = shape;
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
}
return output_;
};

<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
outputs = Nest.MapStructure(set_shape, outputs).ToTensors();
if (!time_major)
{
@@ -1389,6 +1446,7 @@ namespace Tensorflow.Keras
}

throw new NotImplementedException("Not implemented currently, please submit an issue to https://github.com/SciSharp/TensorFlow.NET/issues");
<<<<<<< HEAD
=======
var Outputs = (Tensors)nest.map_structure(set_shape, outputs);
if (!time_major)
@@ -1420,6 +1478,8 @@ namespace Tensorflow.Keras
//}
throw new NotImplementedException();
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
}
}
}
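
The swap_batch_timestep helper in this hunk permutes the first two axes so the backend rnn loop can iterate over time as the leading dimension. A standalone sketch of that permutation — the axes[1] = 0 line and the tf.transpose call are assumed from context, since the hunk only shows axes[0] = 1:

using System.Linq;
using static Tensorflow.Binding;

// Exchange axis 0 (batch) and axis 1 (time); all other axes keep their order.
Tensor SwapBatchTimestep(Tensor input_t)
{
    var axes = Enumerable.Range(0, input_t.rank).ToArray();
    axes[0] = 1;   // new axis 0 takes old axis 1
    axes[1] = 0;   // new axis 1 takes old axis 0
    return tf.transpose(input_t, axes);
}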

+4 -0  src/TensorFlowNET.Keras/Engine/Functional.cs

@@ -326,11 +326,15 @@ namespace Tensorflow.Keras.Engine
nodes_in_decreasing_depth.append(node);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
var tensor_dict = new Dictionary<long, Queue<Tensor>>();
// map input values


+4 -0  src/TensorFlowNET.Keras/Engine/Layer.Apply.cs

@@ -31,11 +31,15 @@ namespace Tensorflow.Keras.Engine
if (!built)
MaybeBuild(inputs);

<<<<<<< HEAD
<<<<<<< HEAD
var outputs = Call(inputs, state: states, training: training);
=======
var outputs = Call(inputs, initial_state: state, training: training);
>>>>>>> master
=======
var outputs = Call(inputs, state: states, training: training);
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

// memory leak
// _set_connectivity_metadata_(inputs, outputs);


+4 -0  src/TensorFlowNET.Keras/Engine/Layer.cs

@@ -336,11 +336,15 @@ namespace Tensorflow.Keras.Engine
/// <param name="state"></param>
/// <param name="training"></param>
/// <returns></returns>
<<<<<<< HEAD
<<<<<<< HEAD
protected virtual Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected virtual Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected virtual Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (ReplacedCall is not null)
{
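
Under the HEAD-side signature, a derived layer overrides Call with a single Tensors state parameter instead of the master side's separate mask/initial_state/constants. A hypothetical minimal layer, shown only to illustrate the shape of the override:

using Tensorflow.Common.Types;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Engine;

// Hypothetical layer; the body just doubles its input.
class Doubler : Layer
{
    public Doubler(LayerArgs args) : base(args) { }

    protected override Tensors Call(Tensors inputs, Tensors state = null,
        bool? training = null, IOptionalArgs? optional_args = null)
    {
        return inputs[0] * 2;   // Tensor -> Tensors via implicit conversion
    }
}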


+4 -0  src/TensorFlowNET.Keras/Engine/Sequential.cs

@@ -144,11 +144,15 @@ namespace Tensorflow.Keras.Engine
}
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (!_has_explicit_input_shape)
{


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/ELU.cs

@@ -30,11 +30,15 @@ namespace Tensorflow.Keras.Layers {
base.build(input_shape);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor output = inputs;
output = tf.where(output > 0f, output,


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs

@@ -17,11 +17,15 @@ namespace Tensorflow.Keras.Layers {
{
base.build(input_shape);
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor output = inputs;
return tf.exp(output);


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs

@@ -11,12 +11,16 @@ namespace Tensorflow.Keras.Layers {
public HardSigmoid ( LayerArgs args ) : base(args) {
// hard sigmoid has no arguments
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null ) {
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
{
>>>>>>> master
=======
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null ) {
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
Tensor x = inputs;
return tf.clip_by_value(
tf.add(tf.multiply(x, 0.2f), 0.5f), 0f, 1f);


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs

@@ -20,11 +20,15 @@ namespace Tensorflow.Keras.Layers
this.args = args;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
return tf.nn.leaky_relu(inputs, alpha: alpha);
}


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/SELU.cs

@@ -23,12 +23,16 @@ namespace Tensorflow.Keras.Layers {
}
base.build(input_shape);
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
{
>>>>>>> master
=======
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
Tensor output = inputs;
return tf.where(output > 0f,
tf.multiply(scale, output),


+5 -0  src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs

@@ -12,6 +12,7 @@ namespace Tensorflow.Keras.Layers {
public Softmax ( SoftmaxArgs args ) : base(args) {
axis = args.axis;
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
Tensor x = inputs.Length == 2 ? inputs[0] + ((1.0 - tf.cast(inputs[1], inputs.dtype)) * 1e-9)
@@ -20,6 +21,10 @@ namespace Tensorflow.Keras.Layers {
{
Tensor x = inputs.Length == 2 ? inputs + ((1.0 - tf.cast(inputs[1], inputs.dtype)) * 1e-9)
>>>>>>> master
=======
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
Tensor x = inputs.Length == 2 ? inputs[0] + ((1.0 - tf.cast(inputs[1], inputs.dtype)) * 1e-9)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
: inputs;
Tensor e = tf.exp(tf.sub(x, tf.reduce_max(x, axis: this.axis, keepdims: true)));
Tensor s = tf.reduce_sum(e, axis: this.axis, keepdims: true);
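
The last two lines are the numerically stable softmax: subtracting the per-axis maximum before exponentiating cannot overflow, and the result is unchanged because exp(x - m) / Σ exp(x - m) = exp(x) / Σ exp(x). The same computation on a plain float array, for illustration:

using System;
using System.Linq;

static float[] Softmax(float[] x)
{
    float m = x.Max();                                   // reduce_max
    var e = x.Select(v => MathF.Exp(v - m)).ToArray();   // exp(x - max)
    float s = e.Sum();                                   // reduce_sum
    return e.Select(v => v / s).ToArray();               // e / s
}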


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs

@@ -11,12 +11,16 @@ namespace Tensorflow.Keras.Layers {
public Softplus ( LayerArgs args ) : base(args) {
// Softplus has no arguments
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
{
>>>>>>> master
=======
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
Tensor x = inputs;
return tf.log(
tf.add(tf.exp(x), 1f));


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs

@@ -11,12 +11,16 @@ namespace Tensorflow.Keras.Layers {
public Softsign ( LayerArgs args ) : base(args) {
// Softsign has no arguments
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
{
>>>>>>> master
=======
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
Tensor x = inputs;
// x / (abs(x) + 1)
return tf.div(x, tf.add(1f, tf.abs(x)));


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/Swish.cs

@@ -11,12 +11,16 @@ namespace Tensorflow.Keras.Layers {
public Swish ( LayerArgs args ) : base(args) {
// Swish has no arguments
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
{
>>>>>>> master
=======
protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
Tensor x = inputs;

// x / (1 + exp(-x))


+4 -0  src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs

@@ -14,11 +14,15 @@ namespace Tensorflow.Keras.Layers
{
// Tanh has no arguments
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor x = inputs;



+4 -0  src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs

@@ -115,11 +115,15 @@ namespace Tensorflow.Keras.Layers
return (tf.linalg.einsum("bij,bjk->bik", (weights, value)), weights);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensors _inp;
Tensors _mask = null;


+4 -0  src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs

@@ -253,11 +253,15 @@ namespace Tensorflow.Keras.Layers
return (attention_output, attention_scores);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensors _inp;
Tensor _mask = null;


+4 -0  src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs

@@ -84,11 +84,15 @@ namespace Tensorflow.Keras.Layers
_buildInputShape = input_shape;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
var inputs_shape = array_ops.shape(inputs);
var batch_size = inputs_shape[0];


+4 -0  src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs

@@ -104,11 +104,15 @@ namespace Tensorflow.Keras.Layers
_buildInputShape = input_shape;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = false, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = false, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
var outputs = _convolution_op.Apply(inputs, kernel.AsTensor());
if (use_bias)


+4 -0  src/TensorFlowNET.Keras/Layers/Core/Dense.cs

@@ -70,11 +70,15 @@ namespace Tensorflow.Keras.Layers
built = true;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor outputs = null;
var rank = inputs.rank;


+4 -0  src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs

@@ -190,11 +190,15 @@ namespace Tensorflow.Keras.Layers
// return new dict(base_config.items().ToList() + config.items().ToList());
//}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
var ret = tf.linalg.einsum(this.equation, (inputs, this.kernel.AsTensor()));
if (this.bias != null)


+4 -0  src/TensorFlowNET.Keras/Layers/Core/Embedding.cs

@@ -67,11 +67,15 @@ namespace Tensorflow.Keras.Layers
_buildInputShape = input_shape;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
var dtype = inputs.dtype;
if (dtype != tf.int32 && dtype != tf.int64)


+76 -0  src/TensorFlowNET.Keras/Layers/LayersApi.cs

@@ -809,6 +809,82 @@ namespace Tensorflow.Keras.Layers
});


public IRnnCell LSTMCell(int uints,
string activation = "tanh",
string recurrent_activation = "sigmoid",
bool use_bias = true,
string kernel_initializer = "glorot_uniform",
string recurrent_initializer = "orthogonal", // TODO(Wanglongzhi2001),glorot_uniform has not been developed.
string bias_initializer = "zeros",
bool unit_forget_bias = true,
float dropout = 0f,
float recurrent_dropout = 0f,
int implementation = 2)
=> new LSTMCell(new LSTMCellArgs
{
Units = uints,
Activation = keras.activations.GetActivationFromName(activation),
RecurrentActivation = keras.activations.GetActivationFromName(recurrent_activation),
UseBias = use_bias,
KernelInitializer = GetInitializerByName(kernel_initializer),
RecurrentInitializer = GetInitializerByName(recurrent_initializer),
BiasInitializer = GetInitializerByName(bias_initializer),
UnitForgetBias = unit_forget_bias,
Dropout = dropout,
RecurrentDropout = recurrent_dropout,
Implementation = implementation
});

/// <summary>
///
/// </summary>
/// <param name="cell"></param>
/// <param name="return_sequences"></param>
/// <param name="return_state"></param>
/// <param name="go_backwards"></param>
/// <param name="stateful"></param>
/// <param name="unroll"></param>
/// <param name="time_major"></param>
/// <returns></returns>
public ILayer RNN(
IRnnCell cell,
bool return_sequences = false,
bool return_state = false,
bool go_backwards = false,
bool stateful = false,
bool unroll = false,
bool time_major = false)
=> new RNN(new RNNArgs
{
Cell = cell,
ReturnSequences = return_sequences,
ReturnState = return_state,
GoBackwards = go_backwards,
Stateful = stateful,
Unroll = unroll,
TimeMajor = time_major
});

public ILayer RNN(
IEnumerable<IRnnCell> cell,
bool return_sequences = false,
bool return_state = false,
bool go_backwards = false,
bool stateful = false,
bool unroll = false,
bool time_major = false)
=> new RNN(new RNNArgs
{
Cells = cell.ToList(),
ReturnSequences = return_sequences,
ReturnState = return_state,
GoBackwards = go_backwards,
Stateful = stateful,
Unroll = unroll,
TimeMajor = time_major
});


public IRnnCell LSTMCell(int uints,
string activation = "tanh",
string recurrent_activation = "sigmoid",
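
A hypothetical call site exercising the factories added in this hunk: the single-cell RNN overload, and the IEnumerable<IRnnCell> overload that wraps the list in StackedRNNCells (parameter spelling `uints` as in the diff):

using System.Collections.Generic;
using static Tensorflow.KerasApi;
using Tensorflow.Keras.Layers.Rnn;

// Single cell: RNN(IRnnCell, ...)
var rnn = keras.layers.RNN(keras.layers.LSTMCell(16), return_sequences: true);

// Stacked cells: RNN(IEnumerable<IRnnCell>, ...) -> StackedRNNCells internally
var stacked = keras.layers.RNN(new List<IRnnCell>
{
    keras.layers.LSTMCell(16),
    keras.layers.LSTMCell(8),
});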


+4 -0  src/TensorFlowNET.Keras/Layers/Merging/Merge.cs

@@ -22,11 +22,15 @@ namespace Tensorflow.Keras.Layers
_buildInputShape = input_shape;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
return _merge_function(inputs);
}


+4 -0  src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs

@@ -147,11 +147,15 @@ namespace Tensorflow.Keras.Layers
return false;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor outputs = null;
var training_tensor = training == null


+4 -0  src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs

@@ -102,11 +102,15 @@ namespace Tensorflow.Keras.Layers
return input_shape;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor outputs = null;
var inputs_dtype = inputs.dtype.as_base_dtype();


+4 -0  src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs

@@ -158,11 +158,15 @@ namespace Tensorflow.Keras.Layers
base.adapt(data, batch_size: batch_size, steps: steps);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (_args.Invert)
{


+4 -0  src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs

@@ -13,11 +13,15 @@ namespace Tensorflow.Keras.Layers
{
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (data_format == "channels_last")
return math_ops.reduce_mean(inputs, 1, false);


+4 -0  src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs

@@ -13,11 +13,15 @@ namespace Tensorflow.Keras.Layers
{
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (data_format == "channels_last")
return math_ops.reduce_mean(inputs, (1, 2), false);


+4 -0  src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs

@@ -13,11 +13,15 @@ namespace Tensorflow.Keras.Layers
{
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (data_format == "channels_last")
return math_ops.reduce_max(inputs, 1, false);


+4 -0  src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs

@@ -13,11 +13,15 @@ namespace Tensorflow.Keras.Layers
{
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (data_format == "channels_last")
return math_ops.reduce_max(inputs, (1, 2), false);


+4 -0  src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs

@@ -37,11 +37,15 @@ namespace Tensorflow.Keras.Layers
input_spec = new InputSpec(ndim: 3);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
int pad_axis = args.DataFormat == "channels_first" ? 2 : 3;
inputs = tf.expand_dims(inputs, pad_axis);


+4 -0  src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs

@@ -37,11 +37,15 @@ namespace Tensorflow.Keras.Layers
input_spec = new InputSpec(ndim: 4);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
int[] pool_shape;
int[] strides;


+4 -0  src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs

@@ -15,11 +15,15 @@ namespace Tensorflow.Keras.Layers
this.args = args;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
var depth = args.NumTokens;
var max_value = tf.reduce_max(inputs);


+4 -0  src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs

@@ -18,11 +18,15 @@ namespace Tensorflow.Keras.Layers
this.args = args;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
scale = constant_op.constant(args.Scale, args.DType);
offset = constant_op.constant(args.Offset, args.DType);


+4 -0  src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs

@@ -20,11 +20,15 @@ namespace Tensorflow.Keras.Layers
this.args = args;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
return image_ops_impl.resize_images_v2(inputs, new[] { args.Height, args.Width }, method: args.Interpolation);
}


+4 -0  src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs

@@ -16,11 +16,15 @@ namespace Tensorflow.Keras.Layers
this.args = args;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (training == null)
training = false;


+4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs

@@ -29,11 +29,15 @@ namespace Tensorflow.Keras.Layers.Reshaping
_buildInputShape = input_shape;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor output = inputs;
if (output.rank != 3)


+4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs

@@ -22,11 +22,15 @@ namespace Tensorflow.Keras.Layers.Reshaping
built = true;
_buildInputShape = input_shape;
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor output = inputs;
if (output.rank != 4)


+4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs

@@ -22,11 +22,15 @@ namespace Tensorflow.Keras.Layers.Reshaping
_buildInputShape = input_shape;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor output = inputs;
if (output.rank != 5)


+4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs

@@ -24,11 +24,15 @@ namespace Tensorflow.Keras.Layers
_channels_first = args.DataFormat == "channels_first";
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (_channels_first)
{


+4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs

@@ -29,11 +29,15 @@ namespace Tensorflow.Keras.Layers {
built = true;
_buildInputShape = input_shape;
}
<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
Tensor outputs = inputs;
return tf.transpose(outputs, new Axis(permute));


+4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs

@@ -20,11 +20,15 @@ namespace Tensorflow.Keras.Layers
this.args = args;
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
var shapes = new List<Tensor>();
shapes.Add(array_ops.shape(inputs)[0]);


+4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs

@@ -25,11 +25,15 @@ namespace Tensorflow.Keras.Layers
inputSpec = new InputSpec(ndim: 4);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
return keras.backend.resize_images(inputs,
size[0], size[1],


+4 -0  src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs

@@ -27,11 +27,15 @@ namespace Tensorflow.Keras.Layers
this.input_spec = new InputSpec(ndim: 4);
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
=======
protected override Tensors Call(Tensors inputs, Tensor mask = null, bool? training = null, Tensors initial_state = null, Tensors constants = null)
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
return keras.backend.spatial_2d_padding(inputs,
padding: padding,


+43 -0  src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs

@@ -1,5 +1,6 @@
using System;
using System.Collections.Generic;
<<<<<<< HEAD
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.ArgsDefinition.Rnn;
using Tensorflow.Keras.Engine;
@@ -14,6 +15,41 @@ namespace Tensorflow.Keras.Layers.Rnn
public float recurrent_dropout;
// Get the dropout mask for RNN cell's input.
public Tensors? get_dropout_maskcell_for_cell(Tensors input, bool training, int count = 1)
=======
using System.Text;
using Tensorflow.Common.Types;
using Tensorflow.Keras.ArgsDefinition;
using Tensorflow.Keras.Engine;

namespace Tensorflow.Keras.Layers.Rnn
{
public abstract class DropoutRNNCellMixin: RnnCellBase
{
public float dropout;
public float recurrent_dropout;
// TODO(Rinne): deal with cache.
public DropoutRNNCellMixin(LayerArgs args): base(args)
{

}

protected void _create_non_trackable_mask_cache()
{
}

public void reset_dropout_mask()
{

}

public void reset_recurrent_dropout_mask()
{

}

public Tensors? get_dropout_mask_for_cell(Tensors input, bool training, int count = 1)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (dropout == 0f)
return null;
@@ -25,7 +61,11 @@ namespace Tensorflow.Keras.Layers.Rnn
}

// Get the recurrent dropout mask for RNN cell.
<<<<<<< HEAD
public Tensors? get_recurrent_dropout_maskcell_for_cell(Tensors input, bool training, int count = 1)
=======
public Tensors? get_recurrent_dropout_mask_for_cell(Tensors input, bool training, int count = 1)
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
if (dropout == 0f)
return null;
@@ -78,6 +118,9 @@ namespace Tensorflow.Keras.Layers.Rnn
return dropped_inputs();
}
}
<<<<<<< HEAD


=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
}
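
Note the early return above: when dropout == 0f no mask is materialized at all. A trimmed sketch of that control flow — everything past the early return is assumed, since the hunk elides the mask construction, and the recurrent variant presumably tests recurrent_dropout rather than dropout:

// Control-flow sketch only; the real mask construction is elided in the hunk.
public Tensors? GetDropoutMaskForCell(Tensors input, bool training, int count = 1)
{
    if (dropout == 0f)
        return null;   // no dropout configured -> no mask
    // Otherwise: build `count` binary masks shaped like `input`, scaled by
    // 1 / (1 - dropout), and cache them for reuse across time steps.
    throw new NotImplementedException("illustrative sketch");
}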

+6 -0  src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs

@@ -27,6 +27,7 @@ namespace Tensorflow.Keras.Layers.Rnn
.ToArray();
}

<<<<<<< HEAD
<<<<<<< HEAD
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
{
@@ -36,6 +37,11 @@ namespace Tensorflow.Keras.Layers.Rnn
{
return base.Call(inputs, initial_state: initial_state, training: training);
>>>>>>> master
=======
protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
{
return base.Call(inputs, initial_state: state, training: training);
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
}
}
}
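
After resolution, LSTM.Call simply forwards its state argument to the base RNN.Call as initial_state, so from user code the layer is applied as usual. A hypothetical application with made-up shapes:

using static Tensorflow.Binding;
using static Tensorflow.KerasApi;

// (batch=32, time=10, features=8) -> LSTM with 4 units, full sequence out.
var inputs  = tf.ones((32, 10, 8));
var lstm    = keras.layers.LSTM(4, return_sequences: true);
var outputs = lstm.Apply(inputs);   // routed through base.Call above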

+142 -0  src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs

@@ -1,4 +1,7 @@
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
using OneOf;
using System;
using System.Collections.Generic;
@@ -16,15 +19,21 @@ using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Saving;
using Tensorflow.Util;
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
using Tensorflow.Common.Extensions;
using System.Linq.Expressions;
using Tensorflow.Keras.Utils;
using Tensorflow.Common.Types;
<<<<<<< HEAD
=======
using OneOf;
using OneOf.Types;
using Tensorflow.Common.Extensions;
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
// from tensorflow.python.distribute import distribution_strategy_context as ds_context;

namespace Tensorflow.Keras.Layers.Rnn
@@ -37,6 +46,9 @@ namespace Tensorflow.Keras.Layers.Rnn
public class RNN : RnnBase
{
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
private RNNArgs _args;
private object _input_spec = null; // or NoneValue??
private object _state_spec = null;
@@ -46,6 +58,7 @@ namespace Tensorflow.Keras.Layers.Rnn
protected IVariableV1 _kernel;
protected IVariableV1 _bias;
protected IRnnCell _cell;
<<<<<<< HEAD
=======
private RNNArgs args;
private object input_spec = null; // or NoneValue??
@@ -57,6 +70,8 @@ namespace Tensorflow.Keras.Layers.Rnn
protected IVariableV1 bias;
protected ILayer cell;
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

public RNN(RNNArgs args) : base(PreConstruct(args))
{
@@ -65,11 +80,15 @@ namespace Tensorflow.Keras.Layers.Rnn

// if is StackedRnncell
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
if (args.Cells != null)
{
_cell = new StackedRNNCells(new StackedRNNCellsArgs
{
Cells = args.Cells
<<<<<<< HEAD
=======
if (args.Cell.IsT0)
{
@@ -77,10 +96,13 @@ namespace Tensorflow.Keras.Layers.Rnn
{
Cells = args.Cell.AsT0,
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
});
}
else
{
<<<<<<< HEAD
<<<<<<< HEAD
_cell = args.Cell;
}
@@ -109,6 +131,10 @@ namespace Tensorflow.Keras.Layers.Rnn
// the input spec will be the list of specs for nested inputs, the structure
// of the input_spec will be the same as the input.
>>>>>>> master
=======
_cell = args.Cell;
}
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8

// get input_shape
_args = PreConstruct(args);
@@ -227,6 +253,7 @@ namespace Tensorflow.Keras.Layers.Rnn
{
return output_mask;
}
<<<<<<< HEAD
}

// States is a tuple consist of cell states_size, like (cell1.state_size, cell2.state_size,...)
@@ -337,12 +364,15 @@ namespace Tensorflow.Keras.Layers.Rnn
{
return output_mask;
}
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
}

public override void build(KerasShapesWrapper input_shape)
{
object get_input_spec(Shape shape)
<<<<<<< HEAD
<<<<<<< HEAD
=======
{
var input_spec_shape = shape.as_int_list();
@@ -391,6 +421,11 @@ namespace Tensorflow.Keras.Layers.Rnn
{
var input_spec_shape = shape.as_int_list();

=======
{
var input_spec_shape = shape.as_int_list();

>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
var (batch_index, time_step_index) = _args.TimeMajor ? (1, 0) : (0, 1);
if (!_args.Stateful)
{
@@ -437,6 +472,9 @@ namespace Tensorflow.Keras.Layers.Rnn
}

<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
/// <summary>
///
/// </summary>
@@ -460,6 +498,7 @@ namespace Tensorflow.Keras.Layers.Rnn
//var (inputs_padded, row_length) = BackendImpl.convert_inputs_if_ragged(inputs);
// For now, ragged tensors are not accepted
int row_length = 0; // TODO(Rinne): support this param.
<<<<<<< HEAD
=======
// inputs: Tensors
// mask: Binary tensor of shape [batch_size, timesteps] indicating whether a given timestep should be masked
@@ -472,17 +511,23 @@ namespace Tensorflow.Keras.Layers.Rnn
// For now, ragged tensors are not accepted
int? row_length = null;
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
bool is_ragged_input = false;
_validate_args_if_ragged(is_ragged_input, mask);

(inputs, initial_state, constants) = _process_inputs(inputs, initial_state, constants);

<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
_maybe_reset_cell_dropout_mask(_cell);
if (_cell is StackedRNNCells)
{
var stack_cell = _cell as StackedRNNCells;
foreach (IRnnCell cell in stack_cell.Cells)
<<<<<<< HEAD
=======
_maybe_reset_cell_dropout_mask(cell);
if (cell is StackedRNNCells)
@@ -490,6 +535,8 @@ namespace Tensorflow.Keras.Layers.Rnn
var stack_cell = cell as StackedRNNCells;
foreach (var cell in stack_cell.Cells)
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
{
_maybe_reset_cell_dropout_mask(cell);
}
@@ -499,11 +546,15 @@ namespace Tensorflow.Keras.Layers.Rnn
{
// Time step masks must be the same for each input.
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
mask = mask.Flatten().First();
}

Shape input_shape;
if (inputs.IsNested())
{
// In the case of nested input, use the first element for shape check
// input_shape = nest.flatten(inputs)[0].shape;
// TODO(Wanglongzhi2001)
input_shape = inputs.Flatten().First().shape;
}
else
{
input_shape = inputs.shape;
}

var timesteps = _args.TimeMajor ? input_shape[0] : input_shape[1];

if (_args.Unroll && timesteps == null)
{
throw new ValueError(
"Cannot unroll a RNN if the " +
@@ -553,6 +616,9 @@ namespace Tensorflow.Keras.Layers.Rnn

// cell_call_fn = (self.cell.__call__ if callable(self.cell) else self.cell.call)
Func<Tensors, Tensors, (Tensors, Tensors)> step;
bool is_tf_rnn_cell = _cell.IsTFRnnCell;
if (constants is not null)
@@ -561,6 +627,7 @@ namespace Tensorflow.Keras.Layers.Rnn
{
throw new ValueError(
$"RNN cell {_cell} does not support constants." +
<<<<<<< HEAD
=======
var cell_call_fn = cell.Call;
Func<Tensors, Tensors, (Tensors, Tensors)> step;
@@ -573,17 +640,23 @@ namespace Tensorflow.Keras.Layers.Rnn
throw new ValueError(
$"RNN cell {cell} does not support constants." +
>>>>>>> master
=======
>>>>>>> 90a65d7d98b92f26574ac32392ed802a57d4d2c8
$"Received: constants={constants}");
}

step = (inputs, states) =>
{
constants = new Tensors(states.TakeLast(_num_constants));
states = new Tensors(states.SkipLast(_num_constants));
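// e.g. with _num_constants == 2 and incoming states [h, c, k1, k2],
// constants becomes [k1, k2] and states becomes [h, c].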
states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states[0]) : states;
var (output, new_states) = _cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants });
return (output, new_states.Single);
};
}
else
@@ -606,6 +681,9 @@ namespace Tensorflow.Keras.Layers.Rnn
step = (inputs, states) =>
{
states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states.First()) : states;
var (output, new_states) = _cell.Apply(inputs, states);
return (output, new_states);
@@ -635,6 +713,7 @@ namespace Tensorflow.Keras.Layers.Rnn
{
// TODO(Rinne): add go_backwards parameter and revise the `row_length` param
output = keras.backend.maybe_convert_to_ragged(is_ragged_input, outputs, row_length, false);
}
else
{
output = last_output;
}

if (_args.ReturnState)
{
foreach (var state in states)
{
output.Add(state);
@@ -697,6 +783,9 @@ namespace Tensorflow.Keras.Layers.Rnn
}

public override Tensors Apply(Tensors inputs, Tensors initial_states = null, bool training = false, IOptionalArgs? optional_args = null)
{
RnnOptionalArgs? rnn_optional_args = optional_args as RnnOptionalArgs;
@@ -728,6 +817,7 @@ namespace Tensorflow.Keras.Layers.Rnn
{
initial_state = new Tensors(inputs.Skip(1).SkipLast(_num_constants));
constants = new Tensors(inputs.TakeLast(_num_constants));
}
if (len(initial_state) == 0)
initial_state = null;
inputs = inputs[0];
}

if (_args.Stateful)
{
if (initial_state != null)
{
@@ -762,11 +860,15 @@ namespace Tensorflow.Keras.Layers.Rnn
foreach (var s in nest.flatten(States))
{
tmp.add(tf.math.count_nonzero(s.Single()));
}
var non_zero_count = tf.add_n(tmp);
//initial_state = tf.cond(non_zero_count > 0, () => States, () => initial_state);
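// For a stateful layer, keep the recorded States whenever any of them is non-zero.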
if ((int)non_zero_count.numpy() > 0)
{
initial_state = States;
}
@@ -783,6 +887,9 @@ namespace Tensorflow.Keras.Layers.Rnn
initial_state = States;
}
// TODO(Wanglongzhi2001),
// initial_state = tf.nest.map_structure(
//# When the layer has an inferred dtype, use the dtype from the
@@ -795,17 +902,21 @@ namespace Tensorflow.Keras.Layers.Rnn

}
else if (initial_state is null)
{
initial_state = get_initial_state(inputs);
}

if (initial_state.Length != States.Length)
{
throw new ValueError($"Layer {this} expects {States.Length} state(s), " +
$"but it received {initial_state.Length} " +
$"initial state(s). Input received: {inputs}");
}

return (inputs, initial_state, constants);
@@ -823,20 +939,28 @@ namespace Tensorflow.Keras.Layers.Rnn

private void _validate_args_if_ragged(bool is_ragged_input, Tensors mask)
{
if (!is_ragged_input)
{
return;
}

if (_args.Unroll)
{
throw new ValueError("The input received contains RaggedTensors and does " +
"not support unrolling. Disable unrolling by passing " +
@@ -855,11 +979,15 @@ namespace Tensorflow.Keras.Layers.Rnn
void _maybe_reset_cell_dropout_mask(ILayer cell)
{
if (cell is DropoutRNNCellMixin CellDRCMixin)
{
CellDRCMixin.reset_dropout_mask();
CellDRCMixin.reset_recurrent_dropout_mask();
}
}

private static RNNArgs PreConstruct(RNNArgs args)
@@ -900,7 +1030,10 @@ namespace Tensorflow.Keras.Layers.Rnn
{
throw new NotImplementedException();
}

// It seems the cell cannot be passed as an interface type.
@@ -941,6 +1074,7 @@ namespace Tensorflow.Keras.Layers.Rnn
// });


protected Tensors get_initial_state(Tensors inputs)
{
var get_initial_state_fn = _cell.GetType().GetMethod("get_initial_state");

var input = inputs[0];
@@ -1043,11 +1181,15 @@ namespace Tensorflow.Keras.Layers.Rnn
}

// Check whether the state_size contains multiple states.
public static bool is_multiple_state(GeneralizedTensorShape state_size)
{
return state_size.Shapes.Length > 1;
}
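
For orientation, here is a minimal, framework-free sketch of the flat-list convention that `_process_inputs` and the constants-aware `step` function above rely on: the tensor list is laid out as [inputs, initial_state..., constants...], and the trailing `_num_constants` entries are split off the tail. The `Split` helper and its string stand-ins are illustrative only, not part of TensorFlow.NET.

using System;
using System.Linq;

class ProcessInputsSketch
{
    // Split a flat list [input, state..., constant...] the same way
    // RNN._process_inputs does: constants come off the tail.
    static (string input, string[] states, string[] constants) Split(string[] flat, int numConstants)
    {
        var input = flat[0];
        var states = flat.Skip(1).SkipLast(numConstants).ToArray();
        var constants = flat.TakeLast(numConstants).ToArray();
        return (input, states, constants);
    }

    static void Main()
    {
        var (input, states, constants) = Split(new[] { "x", "h", "c", "k1", "k2" }, numConstants: 2);
        // Prints: input=x states=h,c constants=k1,k2
        Console.WriteLine($"input={input} states={string.Join(",", states)} constants={string.Join(",", constants)}");
    }
}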


+ 38
- 0
src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs View File

@@ -5,12 +5,18 @@ using Tensorflow.Keras.ArgsDefinition.Rnn;
using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Saving;
using Tensorflow.Common.Types;
using Tensorflow.Common.Extensions;
using Tensorflow.Keras.Utils;

namespace Tensorflow.Keras.Layers.Rnn
{
@@ -24,6 +30,9 @@ namespace Tensorflow.Keras.Layers.Rnn
public class SimpleRNNCell : DropoutRNNCellMixin
{
SimpleRNNCellArgs _args;
IVariableV1 _kernel;
IVariableV1 _recurrent_kernel;
@@ -37,6 +46,7 @@ namespace Tensorflow.Keras.Layers.Rnn
public override bool SupportOptionalArgs => false;

public SimpleRNNCell(SimpleRNNCellArgs args) : base(args)
{
this._args = args;
if (args.Units <= 0)
{
throw new ValueError(
$"units must be a positive integer, got {args.Units}");
}
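// Clamp dropout and recurrent_dropout to the valid range [0, 1].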
this._args.Dropout = Math.Min(1f, Math.Max(0f, this._args.Dropout));
this._args.RecurrentDropout = Math.Min(1f, Math.Max(0f, this._args.RecurrentDropout));
_state_size = new GeneralizedTensorShape(args.Units);
_output_size = new GeneralizedTensorShape(args.Units);
}

public override void build(KerasShapesWrapper input_shape)
@@ -96,6 +116,9 @@ namespace Tensorflow.Keras.Layers.Rnn
}

// TODO(Rinne): revise the training param (with refactoring of the framework)
protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? optional_args = null)
{
@@ -103,6 +126,7 @@ namespace Tensorflow.Keras.Layers.Rnn
Tensors prev_output = Nest.IsNested(states) ? new Tensors(states[0]) : states;
var dp_mask = get_dropout_mask_for_cell(inputs, training.Value);
var rec_dp_mask = get_recurrent_dropout_mask_for_cell(prev_output, training.Value);

Tensor h;
var ranks = inputs.rank;
if (dp_mask != null)
{
h = math_ops.matmul(math_ops.multiply(inputs.Single, dp_mask.Single), _kernel.AsTensor());
}
@@ -128,6 +157,7 @@ namespace Tensorflow.Keras.Layers.Rnn
if (_bias != null)
{
h = tf.nn.bias_add(h, _bias);
}

if (rec_dp_mask != null)
{
prev_output = math_ops.multiply(prev_output, rec_dp_mask);
}

@@ -184,6 +219,7 @@ namespace Tensorflow.Keras.Layers.Rnn
public Tensors get_initial_state(Tensors inputs = null, long? batch_size = null, TF_DataType? dtype = null)
{
return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size.Value, dtype.Value);
}
}
}
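
The cell above implements the standard SimpleRNN recurrence h_t = tanh(x_t · W + h_(t-1) · U + b), where W is `_kernel`, U is `_recurrent_kernel`, and b is `_bias`; when dropout masks are present they are multiplied into the inputs and the previous output before the matmuls. Below is a minimal array-based sketch of one step, assuming the default tanh activation and omitting dropout; it is independent of TensorFlow.NET.

using System;

class SimpleRnnStepSketch
{
    // One step of h_t = tanh(x * W + hPrev * U + b) on plain arrays.
    static double[] Step(double[] x, double[] hPrev, double[,] W, double[,] U, double[] b)
    {
        int units = b.Length;
        var h = new double[units];
        for (int j = 0; j < units; j++)
        {
            double acc = b[j];
            for (int i = 0; i < x.Length; i++) acc += x[i] * W[i, j];
            for (int i = 0; i < hPrev.Length; i++) acc += hPrev[i] * U[i, j];
            h[j] = Math.Tanh(acc);
        }
        return h;
    }

    static void Main()
    {
        var x = new[] { 1.0, 0.5 };
        var h0 = new[] { 0.0, 0.0 };                   // zero-filled initial state
        var W = new[,] { { 0.1, 0.2 }, { 0.3, 0.4 } }; // kernel: input -> units
        var U = new[,] { { 0.5, 0.0 }, { 0.0, 0.5 } }; // recurrent kernel: units -> units
        var b = new[] { 0.0, 0.0 };
        var h1 = Step(x, h0, W, U, b);                 // output == next state
        Console.WriteLine(string.Join(", ", h1));
    }
}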

+ 18
- 0
src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs View File

@@ -9,6 +9,9 @@ using static Tensorflow.Keras.ArgsDefinition.Rnn.RNNArgs;
using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Saving;
using Tensorflow.Keras.Utils;

namespace Tensorflow.Keras.Layers.Rnn
@@ -16,6 +19,7 @@ namespace Tensorflow.Keras.Layers.Rnn
public class StackedRNNCells : Layer, IRnnCell
{
public IList<IRnnCell> Cells { get; set; }
public bool reverse_state_order;

public StackedRNNCells(StackedRNNCellsArgs args) : base(args)
@@ -96,11 +102,15 @@ namespace Tensorflow.Keras.Layers.Rnn
{
return lastCell.OutputSize;
}
else if (RNN.is_multiple_state(lastCell.StateSize))
{
return lastCell.StateSize.First();
//throw new NotImplementedException("");
@@ -112,12 +122,16 @@ namespace Tensorflow.Keras.Layers.Rnn
}
}

public Tensors get_initial_state(Tensors inputs = null, long? batch_size = null, TF_DataType? dtype = null)
{
var cells = reverse_state_order ? Cells.Reverse() : Cells;
Tensors initial_states = new Tensors();
@@ -137,11 +151,15 @@ namespace Tensorflow.Keras.Layers.Rnn
return initial_states;
}

protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
{
// Recover per-cell states.
var state_size = reverse_state_order ? StateSize.Reverse() : StateSize;
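
Recovering per-cell states here amounts to walking the flat state list and handing each cell as many tensors as its state size declares, honoring `reverse_state_order`. A standalone sketch of that bookkeeping, with strings standing in for state tensors and hypothetical per-cell counts (2 for an LSTM-like cell, 1 for a SimpleRNN-like cell):

using System;
using System.Collections.Generic;
using System.Linq;

class StackedStateSketch
{
    // Slice a flat state list into per-cell groups of the declared sizes.
    static List<string[]> SplitStates(string[] flat, int[] perCellCounts, bool reverseStateOrder)
    {
        var counts = reverseStateOrder ? perCellCounts.Reverse() : perCellCounts.AsEnumerable();
        var result = new List<string[]>();
        int offset = 0;
        foreach (var count in counts)
        {
            result.Add(flat.Skip(offset).Take(count).ToArray());
            offset += count;
        }
        return result;
    }

    static void Main()
    {
        var slices = SplitStates(new[] { "h0", "c0", "h1" }, new[] { 2, 1 }, reverseStateOrder: false);
        foreach (var s in slices) Console.WriteLine(string.Join(",", s)); // "h0,c0" then "h1"
    }
}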


+ 4
- 0
src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs View File

@@ -35,11 +35,15 @@ namespace Tensorflow.Keras.Layers
built = true;
}

protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
{
if (tf.Context.executing_eagerly())
return DeFunCall(inputs);


+ 4
- 0
src/TensorflowNET.Hub/KerasLayer.cs View File

@@ -90,11 +90,15 @@ namespace Tensorflow.Hub
}
}

protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optionalArgs = null)
{
_check_trainability();



+ 3
- 0
test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs View File

@@ -144,6 +144,7 @@ namespace Tensorflow.Keras.UnitTest.Layers
Assert.AreEqual(expected_output, actual_output);
}

[TestMethod]
public void Resizing()
{

