diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/ActivationTest.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/ActivationTest.cs
index 75fcc023..cc99f4a0 100644
--- a/test/TensorFlowNET.Keras.UnitTest/Layers/ActivationTest.cs
+++ b/test/TensorFlowNET.Keras.UnitTest/Layers/ActivationTest.cs
@@ -49,7 +49,7 @@ namespace Tensorflow.Keras.UnitTest.Layers
             Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
             Tensor output = keras.layers.Softplus().Apply(input);
             NDArray expected = new NDArray(new float[] { 0.04858733f, 0.12692805f, 0.31326166f, 0.6931472f, 1.3132616f, 2.126928f });
-            Assert.AreEqual(expected, output.numpy());
+            Assert.IsTrue(expected == output.numpy());
         }

         [TestMethod]
diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/AttentionTest.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/AttentionTest.cs
index 162a10d2..95ef923e 100644
--- a/test/TensorFlowNET.Keras.UnitTest/Layers/AttentionTest.cs
+++ b/test/TensorFlowNET.Keras.UnitTest/Layers/AttentionTest.cs
@@ -94,7 +94,7 @@ namespace Tensorflow.Keras.UnitTest.Layers
                 { 7.6400003f, 12.24f, 16.84f },
                 { 14.24f, 22.84f, 31.439999f } } }, dtype: np.float32);

-            Assert.AreEqual(expected, actual.numpy());
+            Assert.IsTrue(expected == actual.numpy());
         }

         [TestMethod]
diff --git a/test/TensorFlowNET.Keras.UnitTest/Losses/LossesTest.cs b/test/TensorFlowNET.Keras.UnitTest/Losses/LossesTest.cs
index 3bec2f17..0bb1d011 100644
--- a/test/TensorFlowNET.Keras.UnitTest/Losses/LossesTest.cs
+++ b/test/TensorFlowNET.Keras.UnitTest/Losses/LossesTest.cs
@@ -39,7 +39,7 @@ public class LossesTest : EagerModeTestBase
         // Using 'none' reduction type.
         bce = tf.keras.losses.BinaryCrossentropy(from_logits: true, reduction: Reduction.NONE);
         loss = bce.Call(y_true, y_pred);
-        Assert.AreEqual(new float[] { 0.23515666f, 1.4957594f }, loss.numpy());
+        Assert.IsTrue(new NDArray(new float[] { 0.23515666f, 1.4957594f }) == loss.numpy());
     }

     ///
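
For reference, each hunk swaps MSTest's Assert.AreEqual for NDArray's overloaded == operator. A minimal sketch of the distinction, assuming (as the changed assertions imply) that Tensorflow.NumPy's NDArray == returns a bool reflecting element-wise value equality, while Assert.AreEqual falls back to Object.Equals and can fail for equal values held in different runtime types (e.g. float[] vs NDArray); the class and method names below are hypothetical:

    using Microsoft.VisualStudio.TestTools.UnitTesting;
    using Tensorflow.NumPy;

    [TestClass]
    public class NDArrayComparisonSketch
    {
        [TestMethod]
        public void ValueEquality()
        {
            NDArray expected = new NDArray(new float[] { 1f, 2f, 3f });
            NDArray actual = new NDArray(new float[] { 1f, 2f, 3f });

            // Passes: the overloaded operator compares contents, not references.
            Assert.IsTrue(expected == actual);
        }
    }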