diff --git a/README.md b/README.md
index d157e09d..4d87a8da 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@
[](https://996.icu/#/en_US)
[](https://mybinder.org/v2/gh/javiercp/BinderTF.NET/master?urlpath=lab)
-*master branch is based on tensorflow v2.4, v0.3x branch is based on tensorflow v2.3, v0.15-tensorflow1.15 is from tensorflow1.15.*
+*master branch is based on tensorflow v2.x, v0.6x branch is based on tensorflow v2.6, v0.15-tensorflow1.15 is from tensorflow1.15.*

@@ -249,10 +249,6 @@ Follow us on [Twitter](https://twitter.com/ScisharpStack), [Facebook](https://ww
Join our chat on [Gitter](https://gitter.im/sci-sharp/community).
-Scan QR code to join Tencent TIM group:
-
-
-
WeChat Sponsor 微信打赏:

diff --git a/docs/TIM.jpg b/docs/TIM.jpg
deleted file mode 100644
index a436aa30..00000000
Binary files a/docs/TIM.jpg and /dev/null differ
diff --git a/src/TensorFlowNET.Core/APIs/c_api_lite.cs b/src/TensorFlowNET.Core/APIs/c_api_lite.cs
new file mode 100644
index 00000000..5a437d26
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/c_api_lite.cs
@@ -0,0 +1,91 @@
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using System.Text;
+using Tensorflow.Lite;
+
+namespace Tensorflow
+{
+ public class c_api_lite
+ {
+ public const string TensorFlowLibName = "tensorflowlite_c";
+
+ public static string StringPiece(IntPtr handle)
+ {
+ return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle);
+ }
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteVersion();
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteModelHandle TfLiteModelCreateFromFile(string model_path);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteModelDelete(IntPtr model);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteInterpreterOptionsHandle TfLiteInterpreterOptionsCreate();
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterOptionsDelete(IntPtr options);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterOptionsSetNumThreads(SafeTfLiteInterpreterOptionsHandle options, int num_threads);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteInterpreterHandle TfLiteInterpreterCreate(SafeTfLiteModelHandle model, SafeTfLiteInterpreterOptionsHandle optional_options);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterDelete(IntPtr interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterAllocateTensors(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteInterpreterGetInputTensorCount(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteInterpreterGetOutputTensorCount(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterResizeInputTensor(SafeTfLiteInterpreterHandle interpreter,
+ int input_index, int[] input_dims, int input_dims_size);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteTensor TfLiteInterpreterGetInputTensor(SafeTfLiteInterpreterHandle interpreter, int input_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteDataType TfLiteTensorType(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorNumDims(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorDim(TfLiteTensor tensor, int dim_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorByteSize(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteTensorData(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteTensorName(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteQuantizationParams TfLiteTensorQuantizationParams(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteTensorCopyFromBuffer(TfLiteTensor tensor, IntPtr input_data, int input_data_size);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterInvoke(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteInterpreterGetOutputTensor(SafeTfLiteInterpreterHandle interpreter, int output_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteTensorCopyToBuffer(TfLiteTensor output_tensor, IntPtr output_data, int output_data_size);
+ }
+}
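
The SmokeTest added further down exercises these bindings end to end inside a test harness; as a quick orientation, here is a minimal sketch of the same call sequence in plain C#, assuming the native `tensorflowlite_c` library is resolvable and a model whose single float32 output has the same shape as its single float32 input (like the `add.bin` fixture used in the tests; the model path below is a placeholder):

```csharp
using System;
using System.Runtime.InteropServices;
using Tensorflow;
using Tensorflow.Lite;

class TfLiteQuickStart
{
    static void Main()
    {
        // Load the model and build an interpreter with 2 threads.
        var model = c_api_lite.TfLiteModelCreateFromFile("model.tflite"); // placeholder path
        var options = c_api_lite.TfLiteInterpreterOptionsCreate();
        c_api_lite.TfLiteInterpreterOptionsSetNumThreads(options, 2);
        var interpreter = c_api_lite.TfLiteInterpreterCreate(model, options);

        c_api_lite.TfLiteInterpreterAllocateTensors(interpreter);

        // Copy a float32 buffer into the first input tensor.
        var input = new[] { 1f, 3f };
        var bytes = input.Length * sizeof(float);
        var buffer = Marshal.AllocHGlobal(bytes);
        Marshal.Copy(input, 0, buffer, input.Length);
        var input_tensor = c_api_lite.TfLiteInterpreterGetInputTensor(interpreter, 0);
        c_api_lite.TfLiteTensorCopyFromBuffer(input_tensor, buffer, bytes);

        // Run inference and read back the first output tensor.
        c_api_lite.TfLiteInterpreterInvoke(interpreter);
        var output_tensor = c_api_lite.TfLiteInterpreterGetOutputTensor(interpreter, 0);
        var output = new float[input.Length];
        c_api_lite.TfLiteTensorCopyToBuffer(output_tensor, buffer, bytes);
        Marshal.Copy(buffer, output, 0, output.Length);
        Marshal.FreeHGlobal(buffer);

        Console.WriteLine(string.Join(", ", output));
        // The Safe*Handle wrappers below call the matching TfLite*Delete functions
        // in ReleaseHandle when they are disposed or finalized.
    }
}
```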
diff --git a/src/TensorFlowNET.Core/Lite/SafeTfLiteInterpreterHandle.cs b/src/TensorFlowNET.Core/Lite/SafeTfLiteInterpreterHandle.cs
new file mode 100644
index 00000000..feb65711
--- /dev/null
+++ b/src/TensorFlowNET.Core/Lite/SafeTfLiteInterpreterHandle.cs
@@ -0,0 +1,26 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Util;
+
+namespace Tensorflow.Lite
+{
+ public class SafeTfLiteInterpreterHandle : SafeTensorflowHandle
+ {
+ protected SafeTfLiteInterpreterHandle()
+ {
+ }
+
+ public SafeTfLiteInterpreterHandle(IntPtr handle)
+ : base(handle)
+ {
+ }
+
+ protected override bool ReleaseHandle()
+ {
+ c_api_lite.TfLiteInterpreterDelete(handle);
+ SetHandle(IntPtr.Zero);
+ return true;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Lite/SafeTfLiteInterpreterOptionsHandle.cs b/src/TensorFlowNET.Core/Lite/SafeTfLiteInterpreterOptionsHandle.cs
new file mode 100644
index 00000000..72893646
--- /dev/null
+++ b/src/TensorFlowNET.Core/Lite/SafeTfLiteInterpreterOptionsHandle.cs
@@ -0,0 +1,26 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Util;
+
+namespace Tensorflow.Lite
+{
+ public class SafeTfLiteInterpreterOptionsHandle : SafeTensorflowHandle
+ {
+ protected SafeTfLiteInterpreterOptionsHandle()
+ {
+ }
+
+ public SafeTfLiteInterpreterOptionsHandle(IntPtr handle)
+ : base(handle)
+ {
+ }
+
+ protected override bool ReleaseHandle()
+ {
+ c_api_lite.TfLiteInterpreterOptionsDelete(handle);
+ SetHandle(IntPtr.Zero);
+ return true;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Lite/SafeTfLiteModelHandle.cs b/src/TensorFlowNET.Core/Lite/SafeTfLiteModelHandle.cs
new file mode 100644
index 00000000..bdae1543
--- /dev/null
+++ b/src/TensorFlowNET.Core/Lite/SafeTfLiteModelHandle.cs
@@ -0,0 +1,26 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Util;
+
+namespace Tensorflow.Lite
+{
+ public class SafeTfLiteModelHandle : SafeTensorflowHandle
+ {
+ protected SafeTfLiteModelHandle()
+ {
+ }
+
+ public SafeTfLiteModelHandle(IntPtr handle)
+ : base(handle)
+ {
+ }
+
+ protected override bool ReleaseHandle()
+ {
+ c_api_lite.TfLiteModelDelete(handle);
+ SetHandle(IntPtr.Zero);
+ return true;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Lite/TfLiteDataType.cs b/src/TensorFlowNET.Core/Lite/TfLiteDataType.cs
new file mode 100644
index 00000000..7b3aa102
--- /dev/null
+++ b/src/TensorFlowNET.Core/Lite/TfLiteDataType.cs
@@ -0,0 +1,27 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Lite
+{
+ public enum TfLiteDataType
+ {
+ kTfLiteNoType = 0,
+ kTfLiteFloat32 = 1,
+ kTfLiteInt32 = 2,
+ kTfLiteUInt8 = 3,
+ kTfLiteInt64 = 4,
+ kTfLiteString = 5,
+ kTfLiteBool = 6,
+ kTfLiteInt16 = 7,
+ kTfLiteComplex64 = 8,
+ kTfLiteInt8 = 9,
+ kTfLiteFloat16 = 10,
+ kTfLiteFloat64 = 11,
+ kTfLiteComplex128 = 12,
+ kTfLiteUInt64 = 13,
+ kTfLiteResource = 14,
+ kTfLiteVariant = 15,
+ kTfLiteUInt32 = 16,
+ }
+}
diff --git a/src/TensorFlowNET.Core/Lite/TfLiteQuantizationParams.cs b/src/TensorFlowNET.Core/Lite/TfLiteQuantizationParams.cs
new file mode 100644
index 00000000..e564392c
--- /dev/null
+++ b/src/TensorFlowNET.Core/Lite/TfLiteQuantizationParams.cs
@@ -0,0 +1,12 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Lite
+{
+ public struct TfLiteQuantizationParams
+ {
+ public float scale;
+ public int zero_point;
+ }
+}
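
These parameters follow the usual affine quantization convention, real ≈ scale × (quantized − zero_point), which is also how the quantization test further down dequantizes its outputs; a tiny sketch using the values that test asserts:

```csharp
using Tensorflow.Lite;

class DequantizeExample
{
    static void Main()
    {
        // Values matching the asserts in QuantizationParamsTest below.
        var p = new TfLiteQuantizationParams { scale = 0.003922f, zero_point = 0 };
        byte q = 3;
        float real = p.scale * (q - p.zero_point);   // 0.003922 * (3 - 0) ≈ 0.011766
        System.Console.WriteLine(real);
    }
}
```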
diff --git a/src/TensorFlowNET.Core/Lite/TfLiteStatus.cs b/src/TensorFlowNET.Core/Lite/TfLiteStatus.cs
new file mode 100644
index 00000000..06612125
--- /dev/null
+++ b/src/TensorFlowNET.Core/Lite/TfLiteStatus.cs
@@ -0,0 +1,31 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Lite
+{
+ public enum TfLiteStatus
+ {
+ kTfLiteOk = 0,
+
+ // Generally referring to an error in the runtime (i.e. interpreter)
+ kTfLiteError = 1,
+
+ // Generally referring to an error from a TfLiteDelegate itself.
+ kTfLiteDelegateError = 2,
+
+ // Generally referring to an error in applying a delegate due to
+ // incompatibility between runtime and delegate, e.g., this error is returned
+ // when trying to apply a TfLite delegate onto a model graph that's already
+ // immutable.
+ kTfLiteApplicationError = 3,
+
+ // Generally referring to serialized delegate data not being found.
+ // See tflite::delegates::Serialization.
+ kTfLiteDelegateDataNotFound = 4,
+
+ // Generally referring to data-writing issues in delegate serialization.
+ // See tflite::delegates::Serialization.
+ kTfLiteDelegateDataWriteError = 5,
+ }
+}
diff --git a/src/TensorFlowNET.Core/Lite/TfLiteTensor.cs b/src/TensorFlowNET.Core/Lite/TfLiteTensor.cs
new file mode 100644
index 00000000..5a43f58f
--- /dev/null
+++ b/src/TensorFlowNET.Core/Lite/TfLiteTensor.cs
@@ -0,0 +1,21 @@
+using System;
+
+namespace Tensorflow.Lite
+{
+ public struct TfLiteTensor
+ {
+ IntPtr _handle;
+
+ public TfLiteTensor(IntPtr handle)
+ => _handle = handle;
+
+ public static implicit operator TfLiteTensor(IntPtr handle)
+ => new TfLiteTensor(handle);
+
+ public static implicit operator IntPtr(TfLiteTensor tensor)
+ => tensor._handle;
+
+ public override string ToString()
+ => $"TfLiteTensor 0x{_handle.ToString("x16")}";
+ }
+}
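
Because the struct is a thin blittable wrapper around the native pointer, the implicit conversions let raw `IntPtr` values returned by the bindings (for example `TfLiteInterpreterGetOutputTensor`, declared above to return `IntPtr`) flow directly into the methods that expect a `TfLiteTensor`. A small fragment, assuming an interpreter handle obtained as in the earlier sketch:

```csharp
using System;
using Tensorflow;
using Tensorflow.Lite;

static class TensorHandleInterop
{
    static TfLiteDataType OutputDType(SafeTfLiteInterpreterHandle interpreter)
    {
        IntPtr raw = c_api_lite.TfLiteInterpreterGetOutputTensor(interpreter, 0);
        TfLiteTensor tensor = raw;                   // implicit IntPtr -> TfLiteTensor
        return c_api_lite.TfLiteTensorType(tensor);  // struct marshals as a single pointer
    }
}
```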
diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
index a1b466cc..c0a71239 100644
--- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
+++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
@@ -5,7 +5,7 @@
TensorFlow.NET
Tensorflow
2.2.0
- 0.60.3
+ 0.60.4
9.0
enable
Haiping Chen, Meinrad Recheis, Eli Belash
@@ -20,7 +20,7 @@
Google's TensorFlow full binding in .NET Standard.
Building, training and inferring deep learning models.
https://tensorflownet.readthedocs.io
- 0.60.3.0
+ 0.60.4.0
tf.net 0.60.x and above are based on tensorflow native 2.6.0
* Eager Mode is added finally.
@@ -35,7 +35,7 @@ Keras API is a separate package released as TensorFlow.Keras.
tf.net 0.4x.x aligns with TensorFlow v2.4.1 native library.
tf.net 0.5x.x aligns with TensorFlow v2.5.x native library.
tf.net 0.6x.x aligns with TensorFlow v2.6.x native library.
- 0.60.3.0
+ 0.60.4.0
LICENSE
true
true
diff --git a/src/TensorFlowNET.Core/Tensors/dtypes.cs b/src/TensorFlowNET.Core/Tensors/dtypes.cs
index 7c3a291c..372ac676 100644
--- a/src/TensorFlowNET.Core/Tensors/dtypes.cs
+++ b/src/TensorFlowNET.Core/Tensors/dtypes.cs
@@ -189,7 +189,10 @@ namespace Tensorflow
{
TF_DataType.TF_STRING => "string",
TF_DataType.TF_UINT8 => "uint8",
+ TF_DataType.TF_INT8 => "int8",
+ TF_DataType.TF_UINT32 => "uint32",
TF_DataType.TF_INT32 => "int32",
+ TF_DataType.TF_UINT64 => "uint64",
TF_DataType.TF_INT64 => "int64",
TF_DataType.TF_FLOAT => "float32",
TF_DataType.TF_DOUBLE => "float64",
@@ -204,9 +207,12 @@ namespace Tensorflow
{
TF_DataType.TF_BOOL => sizeof(bool),
TF_DataType.TF_UINT8 => sizeof(byte),
- TF_DataType.TF_INT8 => sizeof(byte),
+ TF_DataType.TF_INT8 => sizeof(sbyte),
+ TF_DataType.TF_UINT16 => sizeof(ushort),
TF_DataType.TF_INT16 => sizeof(short),
+ TF_DataType.TF_UINT32 => sizeof(uint),
TF_DataType.TF_INT32 => sizeof(int),
+ TF_DataType.TF_UINT64 => sizeof(ulong),
TF_DataType.TF_INT64 => sizeof(long),
TF_DataType.TF_FLOAT => sizeof(float),
TF_DataType.TF_DOUBLE => sizeof(double),
diff --git a/src/TensorFlowNet.Benchmarks/Leak/SavedModelCleanup.cs b/src/TensorFlowNet.Benchmarks/Leak/SavedModelCleanup.cs
index 3ed37316..09e20058 100644
--- a/src/TensorFlowNet.Benchmarks/Leak/SavedModelCleanup.cs
+++ b/src/TensorFlowNet.Benchmarks/Leak/SavedModelCleanup.cs
@@ -11,7 +11,9 @@ using static Tensorflow.Binding;
namespace Tensorflow.Benchmark.Leak
{
-
+ /// <summary>
+ /// https://github.com/SciSharp/TensorFlow.NET/issues/418
+ /// </summary>
public class SavedModelCleanup
{
[Benchmark]
diff --git a/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj b/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj
index ceba6cbb..51d972ad 100644
--- a/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj
+++ b/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj
@@ -36,7 +36,7 @@
-
+
diff --git a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj
index db6a71b1..eb5f9099 100644
--- a/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj
+++ b/test/TensorFlowNET.Graph.UnitTest/TensorFlowNET.Graph.UnitTest.csproj
@@ -24,14 +24,14 @@
-
-
-
-
+
+
+
+
all
runtime; build; native; contentfiles; analyzers; buildtransitive
-
+
diff --git a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj
index cfb48dc6..04e7a5e7 100644
--- a/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj
+++ b/test/TensorFlowNET.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj
@@ -14,14 +14,14 @@
-
-
-
-
+
+
+
+
all
runtime; build; native; contentfiles; analyzers; buildtransitive
-
+
diff --git a/test/TensorFlowNET.Native.UnitTest/Lite/TfLiteTest.cs b/test/TensorFlowNET.Native.UnitTest/Lite/TfLiteTest.cs
new file mode 100644
index 00000000..e1665557
--- /dev/null
+++ b/test/TensorFlowNET.Native.UnitTest/Lite/TfLiteTest.cs
@@ -0,0 +1,138 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+using Tensorflow.Lite;
+
+namespace Tensorflow.Native.UnitTest
+{
+ [TestClass]
+ public class TfLiteTest
+ {
+ [TestMethod]
+ public void TfLiteVersion()
+ {
+ var ver = c_api_lite.StringPiece(c_api_lite.TfLiteVersion());
+ Assert.IsNotNull(ver);
+ }
+
+ [TestMethod]
+ public unsafe void SmokeTest()
+ {
+ var model = c_api_lite.TfLiteModelCreateFromFile("Lite/testdata/add.bin");
+ var options = c_api_lite.TfLiteInterpreterOptionsCreate();
+ c_api_lite.TfLiteInterpreterOptionsSetNumThreads(options, 2);
+
+ var interpreter = c_api_lite.TfLiteInterpreterCreate(model, options);
+
+ c_api_lite.TfLiteInterpreterOptionsDelete(options.DangerousGetHandle());
+ c_api_lite.TfLiteModelDelete(model.DangerousGetHandle());
+
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk, c_api_lite.TfLiteInterpreterAllocateTensors(interpreter));
+ Assert.AreEqual(1, c_api_lite.TfLiteInterpreterGetInputTensorCount(interpreter));
+ Assert.AreEqual(1, c_api_lite.TfLiteInterpreterGetOutputTensorCount(interpreter));
+
+ var input_dims = new int[] { 2 };
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk, c_api_lite.TfLiteInterpreterResizeInputTensor(interpreter, 0, input_dims, input_dims.Length));
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk, c_api_lite.TfLiteInterpreterAllocateTensors(interpreter));
+
+ var input_tensor = c_api_lite.TfLiteInterpreterGetInputTensor(interpreter, 0);
+ Assert.AreEqual(TfLiteDataType.kTfLiteFloat32, c_api_lite.TfLiteTensorType(input_tensor));
+ Assert.AreEqual(1, c_api_lite.TfLiteTensorNumDims(input_tensor));
+ Assert.AreEqual(2, c_api_lite.TfLiteTensorDim(input_tensor, 0));
+ Assert.AreEqual(sizeof(float) * 2, c_api_lite.TfLiteTensorByteSize(input_tensor));
+ Assert.IsNotNull(c_api_lite.TfLiteTensorData(input_tensor));
+ Assert.AreEqual("input", c_api_lite.StringPiece(c_api_lite.TfLiteTensorName(input_tensor)));
+
+ var input_params = c_api_lite.TfLiteTensorQuantizationParams(input_tensor);
+ Assert.AreEqual(0f, input_params.scale);
+ Assert.AreEqual(0, input_params.zero_point);
+
+ var input = new[] { 1f, 3f };
+ fixed (float* addr = &input[0])
+ {
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk,
+ c_api_lite.TfLiteTensorCopyFromBuffer(input_tensor, new IntPtr(addr), 2 * sizeof(float)));
+ }
+
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk, c_api_lite.TfLiteInterpreterInvoke(interpreter));
+
+ var output_tensor = c_api_lite.TfLiteInterpreterGetOutputTensor(interpreter, 0);
+ Assert.AreEqual(TfLiteDataType.kTfLiteFloat32, c_api_lite.TfLiteTensorType(output_tensor));
+ Assert.AreEqual(1, c_api_lite.TfLiteTensorNumDims(output_tensor));
+ Assert.AreEqual(2, c_api_lite.TfLiteTensorDim(output_tensor, 0));
+ Assert.AreEqual(sizeof(float) * 2, c_api_lite.TfLiteTensorByteSize(output_tensor));
+ Assert.IsNotNull(c_api_lite.TfLiteTensorData(output_tensor));
+ Assert.AreEqual("output", c_api_lite.StringPiece(c_api_lite.TfLiteTensorName(output_tensor)));
+
+ var output_params = c_api_lite.TfLiteTensorQuantizationParams(output_tensor);
+ Assert.AreEqual(0f, output_params.scale);
+ Assert.AreEqual(0, output_params.zero_point);
+
+ var output = new float[2];
+ fixed (float* addr = &output[0])
+ {
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk,
+ c_api_lite.TfLiteTensorCopyToBuffer(output_tensor, new IntPtr(addr), 2 * sizeof(float)));
+ }
+ Assert.AreEqual(3f, output[0]);
+ Assert.AreEqual(9f, output[1]);
+
+ c_api_lite.TfLiteInterpreterDelete(interpreter.DangerousGetHandle());
+ }
+
+ [TestMethod]
+ public unsafe void QuantizationParamsTest()
+ {
+ var model = c_api_lite.TfLiteModelCreateFromFile("Lite/testdata/add_quantized.bin");
+ var interpreter = c_api_lite.TfLiteInterpreterCreate(model, new SafeTfLiteInterpreterOptionsHandle(IntPtr.Zero));
+ c_api_lite.TfLiteModelDelete(model.DangerousGetHandle());
+ var input_dims = new[] { 2 };
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk, c_api_lite.TfLiteInterpreterResizeInputTensor(interpreter, 0, input_dims, 1));
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk, c_api_lite.TfLiteInterpreterAllocateTensors(interpreter));
+
+ var input_tensor = c_api_lite.TfLiteInterpreterGetInputTensor(interpreter, 0);
+ Assert.IsNotNull(input_tensor);
+
+ Assert.AreEqual(TfLiteDataType.kTfLiteUInt8, c_api_lite.TfLiteTensorType(input_tensor));
+ Assert.AreEqual(1, c_api_lite.TfLiteTensorNumDims(input_tensor));
+ Assert.AreEqual(2, c_api_lite.TfLiteTensorDim(input_tensor, 0));
+
+ var input_params = c_api_lite.TfLiteTensorQuantizationParams(input_tensor);
+ Assert.AreEqual((0.003922f, 0), (input_params.scale, input_params.zero_point));
+
+ var input = new byte[] { 1, 3 };
+ fixed (byte* addr = &input[0])
+ {
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk,
+ c_api_lite.TfLiteTensorCopyFromBuffer(input_tensor, new IntPtr(addr), 2 * sizeof(byte)));
+ }
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk, c_api_lite.TfLiteInterpreterInvoke(interpreter));
+
+ var output_tensor = c_api_lite.TfLiteInterpreterGetOutputTensor(interpreter, 0);
+ Assert.IsNotNull(output_tensor);
+
+ var output_params = c_api_lite.TfLiteTensorQuantizationParams(output_tensor);
+ Assert.AreEqual((0.003922f, 0), (output_params.scale, output_params.zero_point));
+
+ var output = new byte[2];
+ fixed (byte* addr = &output[0])
+ {
+ Assert.AreEqual(TfLiteStatus.kTfLiteOk,
+ c_api_lite.TfLiteTensorCopyToBuffer(output_tensor, new IntPtr(addr), 2 * sizeof(byte)));
+ }
+ Assert.AreEqual(3f, output[0]);
+ Assert.AreEqual(9f, output[1]);
+
+ var dequantizedOutput0 = output_params.scale * (output[0] - output_params.zero_point);
+ var dequantizedOutput1 = output_params.scale * (output[1] - output_params.zero_point);
+ Assert.AreEqual(dequantizedOutput0, 0.011766f);
+ Assert.AreEqual(dequantizedOutput1, 0.035298f);
+
+ c_api_lite.TfLiteInterpreterDelete(interpreter.DangerousGetHandle());
+ }
+ }
+}
diff --git a/test/TensorFlowNET.Native.UnitTest/Lite/testdata/add.bin b/test/TensorFlowNET.Native.UnitTest/Lite/testdata/add.bin
new file mode 100644
index 00000000..b4c02350
Binary files /dev/null and b/test/TensorFlowNET.Native.UnitTest/Lite/testdata/add.bin differ
diff --git a/test/TensorFlowNET.Native.UnitTest/Lite/testdata/add_quantized.bin b/test/TensorFlowNET.Native.UnitTest/Lite/testdata/add_quantized.bin
new file mode 100644
index 00000000..07d48b93
Binary files /dev/null and b/test/TensorFlowNET.Native.UnitTest/Lite/testdata/add_quantized.bin differ
diff --git a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj
index 39678f9f..78189777 100644
--- a/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj
+++ b/test/TensorFlowNET.Native.UnitTest/Tensorflow.Native.UnitTest.csproj
@@ -24,16 +24,31 @@
true
+
+
+
+
+
+
+
+ PreserveNewest
+
+
+ PreserveNewest
+
+
+
-
-
-
-
+
+
+
+
all
runtime; build; native; contentfiles; analyzers; buildtransitive
-
+
+
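
The ItemGroup added here evidently copies the new Lite/testdata binaries next to the test assembly (only the `PreserveNewest` values survive in this view). A hypothetical reconstruction, with element and attribute names assumed, might look like:

```xml
<!-- Hypothetical sketch: element/attribute names are assumed; only "PreserveNewest" is visible above. -->
<ItemGroup>
  <None Include="Lite\testdata\add.bin">
    <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
  </None>
  <None Include="Lite\testdata\add_quantized.bin">
    <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
  </None>
</ItemGroup>
```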
diff --git a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj
index 4c989e05..01a0bfea 100644
--- a/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj
+++ b/test/TensorFlowNET.UnitTest/Tensorflow.Binding.UnitTest.csproj
@@ -48,10 +48,10 @@
-
-
-
-
+
+
+
+