Compare commits

...

13 Commits

Author SHA1 Message Date
  Oceania2018 954bbe597a disable TF_UpdateEdge. 5 years ago
  Oceania2018 a93636b158 Add MethodBoundaryAspect.Fody. 5 years ago
  Oceania2018 4726a7dbd4 Fix get operation by pointer. 5 years ago
  Oceania2018 2431f840dd fix image resize. 5 years ago
  Oceania2018 050fd2c63e Add ConcreteFunction. 5 years ago
  Oceania2018 6c72af1503 Add ConcreteFunction to support dataset map. 5 years ago
  Oceania2018 e7da957bcb add FuncGraph. 5 years ago
  Oceania2018 8579575706 add function test. 5 years ago
  Oceania2018 c7642ab08e update docs of constant. 5 years ago
  Oceania2018 293f202589 update document. 5 years ago
  Oceania2018 375fe28f57 Add TF_UpdateEdge APIs. 5 years ago
  Oceania2018 f226ad704f add overload for Layer call function, be able to input array and return array. 5 years ago
  Oceania2018 c6c1125f54 Chagne return type to Tensor for assign_add. 5 years ago
79 changed files with 1502 additions and 291 deletions
  1. +45 -9 docs/source/Constant.md
  2. +2 -1 docs/source/EagerMode.md
  3. +1 -1 docs/source/Graph.md
  4. +14 -21 docs/source/HelloWorld.md
  5. +10 -9 docs/source/Tensor.md
  6. +41 -0 docs/source/_static/constant/n-index-formula-offset.svg
  7. +33 -0 docs/source/_static/constant/n-index-formula.svg
  8. BIN docs/source/_static/contiguous-block-of-memory-ndarray-example-1.png
  9. BIN docs/source/_static/contiguous-block-of-memory.png
  10. BIN docs/source/_static/tensor-constant-ndarray.png
  11. BIN docs/source/_static/tensor-naming.png
  12. +17 -0 src/TensorFlowNET.Console/MemoryTestingCases.cs
  13. +3 -0 src/TensorFlowNET.Console/Program.cs
  14. +1 -1 src/TensorFlowNET.Console/TensorFlowNET.Console.csproj
  15. +4 -0 src/TensorFlowNET.Core/APIs/tf.array.cs
  16. +26 -0 src/TensorFlowNET.Core/APIs/tf.autograph.cs
  17. +29 -15 src/TensorFlowNET.Core/APIs/tf.control_flow.cs
  18. +3 -0 src/TensorFlowNET.Core/APIs/tf.image.cs
  19. +1 -6 src/TensorFlowNET.Core/APIs/tf.nn.cs
  20. +3 -0 src/TensorFlowNET.Core/APIs/tf.train.cs
  21. +3 -0 src/TensorFlowNET.Core/Data/DatasetManager.cs
  22. +1 -1 src/TensorFlowNET.Core/Data/DatasetV2.cs
  23. +6 -4 src/TensorFlowNET.Core/Data/MapDataset.cs
  24. +10 -0 src/TensorFlowNET.Core/Data/TensorSliceDataset.cs
  25. +5 -1 src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
  26. +1 -1 src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs
  27. +6 -0 src/TensorFlowNET.Core/Eager/EagerTensor.cs
  28. +31 -0 src/TensorFlowNET.Core/Eager/c_api.eager.cs
  29. +58 -0 src/TensorFlowNET.Core/Functions/ConcreteFunction.cs
  30. +4 -1 src/TensorFlowNET.Core/Functions/c_api.function.cs
  31. +1 -0 src/TensorFlowNET.Core/Gradients/gradients_util.cs
  32. +45 -0 src/TensorFlowNET.Core/Graphs/AutoGraph.cs
  33. +80 -0 src/TensorFlowNET.Core/Graphs/AutoGraphAttribute.cs
  34. +61 -0 src/TensorFlowNET.Core/Graphs/FuncGraph.cs
  35. +1 -1 src/TensorFlowNET.Core/Graphs/c_api.graph.cs
  36. +1 -1 src/TensorFlowNET.Core/Keras/Engine/Flatten.cs
  37. +37 -3 src/TensorFlowNET.Core/Keras/Engine/Layer.cs
  38. +1 -1 src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs
  39. +1 -1 src/TensorFlowNET.Core/Keras/Layers/Conv.cs
  40. +1 -1 src/TensorFlowNET.Core/Keras/Layers/Dense.cs
  41. +1 -1 src/TensorFlowNET.Core/Keras/Layers/Dropout.cs
  42. +1 -1 src/TensorFlowNET.Core/Keras/Layers/Embedding.cs
  43. +2 -2 src/TensorFlowNET.Core/Keras/Layers/LSTM.cs
  44. +1 -1 src/TensorFlowNET.Core/Keras/Layers/Pooling2D.cs
  45. +1 -1 src/TensorFlowNET.Core/Keras/Layers/Rescaling.cs
  46. +6 -3 src/TensorFlowNET.Core/Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs
  47. +43 -6 src/TensorFlowNET.Core/Layers/Layer.cs
  48. +1 -1 src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs
  49. +2 -2 src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
  50. +3 -3 src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
  51. +1 -1 src/TensorFlowNET.Core/Operations/NnOps/rnn.cs
  52. +1 -1 src/TensorFlowNET.Core/Operations/Operation.Instance.cs
  53. +3 -1 src/TensorFlowNET.Core/Operations/Operation.cs
  54. +25 -2 src/TensorFlowNET.Core/Operations/control_flow_ops.cs
  55. +3 -2 src/TensorFlowNET.Core/Operations/dataset_ops.cs
  56. +44 -25 src/TensorFlowNET.Core/Operations/image_ops_impl.cs
  57. +13 -5 src/TensorFlowNET.Core/Tensorflow.Binding.csproj
  58. +2 -0 src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
  59. +1 -1 src/TensorFlowNET.Core/Tensors/Tensor.Explicit.cs
  60. +1 -2 src/TensorFlowNET.Core/Tensors/Tensor.cs
  61. +12 -3 src/TensorFlowNET.Core/Tensors/tensor_util.cs
  62. +1 -1 src/TensorFlowNET.Core/Training/Optimizer.cs
  63. +2 -2 src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs
  64. +2 -2 src/TensorFlowNET.Core/Variables/IVariableV1.cs
  65. +2 -2 src/TensorFlowNET.Core/Variables/RefVariable.cs
  66. +67 -64 src/TensorFlowNET.Core/Variables/ResourceVariable.cs
  67. +1 -1 src/TensorFlowNET.Core/Variables/state_ops.cs
  68. +2 -0 src/TensorFlowNET.Core/ops.cs
  69. +1 -0 src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj
  70. +4 -4 test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs
  71. +1 -2 test/TensorFlowNET.UnitTest/EagerModeTestBase.cs
  72. +70 -3 test/TensorFlowNET.UnitTest/ImageTest.cs
  73. +54 -0 test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
  74. +70 -0 test/TensorFlowNET.UnitTest/ManagedAPI/FunctionApiTest.cs
  75. +456 -9 test/TensorFlowNET.UnitTest/NativeAPI/CApiFunctionTest.cs
  76. +2 -1 test/TensorFlowNET.UnitTest/NativeAPI/CSession.cs
  77. +7 -0 test/TensorFlowNET.UnitTest/NativeAPI/c_test_util.cs
  78. +6 -1 test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj
  79. +0 -56 test/TensorFlowNET.UnitTest/img_test/TestCrop.cs

+45 -9 docs/source/Constant.md

@@ -1,6 +1,6 @@
# Chapter. Constant
# Chapter 2. Constant


In TensorFlow, a constant is a special Tensor that cannot be modified while the graph is running. Like in a linear model $\tilde{y_i}=\boldsymbol{w}x_i+b$, constant $b$ can be represented as a Constant Tensor. Since the constant is a Tensor, it also has all the data characteristics of Tensor, including:
In TensorFlow, a constant is a special Tensor that cannot be modified while the graph is running. Like in a linear model `y = ax + b`, constant `b` can be represented as a `Constant` Tensor. Since the constant is a Tensor, it also has all the data characteristics of Tensor, including:


* value: scalar value or constant list matching the data type defined in TensorFlow; * value: scalar value or constant list matching the data type defined in TensorFlow;
* dtype: data type; * dtype: data type;
@@ -9,9 +9,9 @@ In TensorFlow, a constant is a special Tensor that cannot be modified while the






##### How to create a Constant
### How to create a Constant


TensorFlow provides a handy function to create a Constant. In TF.NET, you can use the same function name `tf.constant` to create it. TF.NET takes the same name as python binding to the API. Naming, although this will make developers who are used to C# naming habits feel uncomfortable, but after careful consideration, I decided to give up the C# convention naming method.
TensorFlow provides a handy function to create a Constant. In TF.NET, you can use the same function name `tf.constant` to create it. TF.NET takes the same API names as the Python binding. Although this will make developers who are used to C# naming conventions feel uncomfortable, after careful consideration I decided to give up the C# naming convention. One reason is that model developers don't have to learn a totally different set of APIs.


Initialize a scalar constant: Initialize a scalar constant:


@@ -24,21 +24,57 @@ var c4 = tf.constant("Big Tree"); // string


Initialize a constant through ndarray: Initialize a constant through ndarray:


TF.NET works very well with `NumSharp`'s `NDArray`. You can create a tensor from .NET primitive data types and from an NDArray as well. An `ndarray` is a (usually fixed-size) multidimensional container of items of the same type and size. The number of dimensions and items in an array is defined by its `shape`, which is a tuple of N non-negative integers that specify the size of each dimension.

```csharp ```csharp
// dtype=int, shape=(2, 3) // dtype=int, shape=(2, 3)
var nd = np.array(new int[][]
var nd = np.array(new int[,]
{ {
new int[]{3, 1, 1},
new int[]{2, 3, 1}
{1, 2, 3},
{4, 5, 6}
}); });
var tensor = tf.constant(nd); var tensor = tf.constant(nd);
``` ```


##### Dive in Constant
### Dive in Constant

Now let's explore how `constant` works in `eager` mode inside the black box.

Let's continue with the last example: we're going to initialize a tensor from an ndarray of `[shape(2, 3), int32]`.

##### NDArray

The first thing we need to know is the `ndarray`'s memory model. The ndarray memory model is a very important data structure, and almost all underlying computations are inseparable from this data structure. One fundamental aspect of the ndarray is that an array is seen as a "chunk" of memory starting at some location. The interpretation of this memory depends on the stride information. A segment of memory is inherently 1-dimensional, and there are many different schemes for arranging the items of an N-dimensional array in a 1-dimensional block. `ndarray` objects can accommodate any strided indexing scheme. In a strided scheme, the N-dimensional index <img src="_static\constant\n-index-formula.svg"/> corresponds to the offset (in bytes): <img src="_static\constant\n-index-formula-offset.svg" />.

<img src="_static\contiguous-block-of-memory.png" />

If we take a look at the real memory allocation in Visual Studio, the diagram below helps us understand the data structure more intuitively. The strides keep track of the size of every single dimension and help identify the actual offset in heap memory. The formula to calculate the offset is: `offset = i * strides[0] + j * strides[1]`.

For example: if you want to look up the value at `[1, 1]`, you just need to calculate `1 * 3 + 1 * 1 = 4`; converted to a pointer this is `0x000002556B194260 + 4 = 0x000002556B194264`, which holds the value `05`.

<img src="_static\contiguous-block-of-memory-ndarray-example-1.png"/>


Now let's explore how `constant` works.
Through the above diagram, we know how the data is stored in memory; next we will look at how the data is transferred to `TensorFlow`.


##### Tensor

If you don't understand very well what a `Tensor` is, you can go back to the `Tensor` chapter; there is plenty of explanation there if you skipped it. A Tensor is essentially an NDArray, possibly with more than 2 dimensions.

TensorFlow will decide whether to copy the data or use the same pointer. Generally speaking, it's safer to copy the data for the subsequent processing, especially when interoperating between the .NET runtime and the C++ runtime: each has its own garbage collection (GC) mechanism, and the application will crash if something accesses a block of destroyed memory. `TF_STRING` and `TF_RESOURCE` tensors have a different representation in `TF_Tensor` than they do in `tensorflow::Tensor`. Other types have the same representation, so copy only if it is safe to do so.

<img src="_static\tensor-constant-ndarray.png" />

Before TensorFlow creates the `TF_Tensor`, it checks the shape and data size. If the size doesn't match, it will return a `nullptr` pointer.

##### Get the data of Tensor

For `eager` mode, it's pretty simple to view the actual value in a `tensor`.

```csharp
var data = tensor.numpy();
```


The `data` will be an `ndarray` variable.


##### Other functions to create a Constant ##### Other functions to create a Constant




+2 -1 docs/source/EagerMode.md

@@ -1,2 +1,3 @@
# Chapter. Eager Mode
# Chapter 4. Eager Mode


TensorFlow's eager execution is an imperative programming environment that evaluates operations immediately, without building graphs: operations return concrete values instead of constructing a computational graph to run later. This makes it easy to get started with TensorFlow and debug models, and it reduces boilerplate as well.

+1 -1 docs/source/Graph.md

@@ -1,4 +1,4 @@
# Chapter. Graph
# Chapter 3. Graph


TensorFlow uses a **dataflow graph** to represent your computation in terms of the dependencies between individual operations. A graph defines the computation. It doesn't compute anything, it doesn't hold any values, it just defines the operations that you specified in your code. TensorFlow uses a **dataflow graph** to represent your computation in terms of the dependencies between individual operations. A graph defines the computation. It doesn't compute anything, it doesn't hold any values, it just defines the operations that you specified in your code.




+14 -21 docs/source/HelloWorld.md

@@ -10,7 +10,7 @@ Let's run a classic HelloWorld program first and see if TensorFlow is running on


### Install the TensorFlow.NET SDK ### Install the TensorFlow.NET SDK


TensorFlow.NET uses the .NET Standard 2.0 standard, so your new project Target Framework can be .NET Framework or .NET Core. All the examples in this book are using .NET Core 2.2 and Microsoft Visual Studio Community 2017. To start building TensorFlow program you just need to download and install the .NET SDK (Software Development Kit). You have to download the latest .NET Core SDK from offical website: https://dotnet.microsoft.com/download.
TensorFlow.NET uses the .NET Standard 2.0 standard, so your new project Target Framework can be .NET Framework or .NET Core / .NET 5. All the examples in this book use .NET Core 3.1 and Microsoft Visual Studio Community 2019. To start building a TensorFlow program you just need to download and install the .NET SDK (Software Development Kit). You have to download the latest .NET Core SDK from the official website: https://dotnet.microsoft.com/download.






@@ -38,9 +38,9 @@ PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU


### Start coding Hello World ### Start coding Hello World


After installing the TensorFlow.NET package, you can use the `using Tensorflow` to introduce the TensorFlow library.

After installing the TensorFlow.NET package, you can add `using static Tensorflow.Binding` to introduce the TensorFlow.NET library.


TensorFlow 2.x enables `Eager Mode` by default. I will introduce what eager mode is in detail in the following chapters.


```csharp ```csharp
using System; using System;
@@ -51,33 +51,26 @@ namespace TensorFlowNET.Examples
/// <summary> /// <summary>
/// Simple hello world using TensorFlow /// Simple hello world using TensorFlow
/// </summary> /// </summary>
public class HelloWorld : IExample
class Program
{ {
public void Run()
static void Main(string[] args)
{ {
/* Create a Constant op
The op is added as a node to the default graph.
The value returned by the constructor represents the output
of the Constant op. */
var hello = tf.constant("Hello, TensorFlow!"); var hello = tf.constant("Hello, TensorFlow!");

// Start tf session
using (var sess = tf.Session())
{
// Run the op
var result = sess.run(hello);
Console.WriteLine(result);
}
Console.WriteLine(hello);
} }
} }
} }
``` ```
After CTRL + F5 run, you will get the output. After CTRL + F5 run, you will get the output.
```cmd ```cmd
2019-01-05 10:53:42.145931: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Hello, TensorFlow!
Press any key to continue . . .
9/20/2020 2:15:09 AM Starting Hello World
tf.Tensor: shape=(), dtype=string, numpy=Hello, TensorFlow.NET!
9/20/2020 2:15:09 AM Completed Hello World
Example: Hello World in 0.1273463s is OK!
TensorFlow.NET v0.20.1.0
TensorFlow Binary v2.3.0
1 of 21 example(s) are completed.
Press [Enter] to continue...
``` ```


This sample code can be found at [here](https://github.com/SciSharp/SciSharp-Stack-Examples/blob/master/src/TensorFlowNET.Examples/HelloWorld.cs). This sample code can be found at [here](https://github.com/SciSharp/SciSharp-Stack-Examples/blob/master/src/TensorFlowNET.Examples/HelloWorld.cs).


+10 -9 docs/source/Tensor.md

@@ -1,4 +1,4 @@
# Chapter. Tensor
# Chapter 1. Tensor


### Represents one of the outputs of an Operation ### Represents one of the outputs of an Operation


@@ -6,13 +6,13 @@


##### What is Tensor? ##### What is Tensor?


Tensor holds a multi-dimensional array of elements of a single data type which is very similar with numpy's ndarray. When the dimension is zero, it can be called a scalar. When the dimension is 2, it can be called a matrix. When the dimension is greater than 2, it is usually called a tensor. If you are very familiar with numpy, then understanding Tensor will be quite easy.

Tensor holds a multi-dimensional array of elements of a single data type which is very similar to `NumPy`'s `ndarray`. When the dimension is zero, it can be called a scalar. When the dimension is 2, it can be called a matrix. When the dimension is greater than 2, it is usually called a tensor. If you are very familiar with `NumPy`, then understanding Tensor will be quite easy.


<img src="_static\tensor-naming.png">


##### How to create a Tensor? ##### How to create a Tensor?


There are many ways to initialize a Tensor object in TF.NET. It can be initialized from a scalar, string, matrix or tensor.
There are many ways to initialize a Tensor object in TF.NET. It can be initialized from a scalar, string, matrix or tensor. But the best way to create a Tensor is using high-level APIs like `tf.constant`, `tf.zeros` and `tf.ones`. We'll talk about constants in more detail in the next chapter.


```csharp ```csharp
// Create a tensor holds a scalar value // Create a tensor holds a scalar value
@@ -32,13 +32,9 @@ Console.WriteLine($"t1: {t1}, t2: {t2}, t3: {t3}");


##### Data Structure of Tensor ##### Data Structure of Tensor






TF uses column major order. If we use NumSharp to generate a 2 x 3 matrix, if we access the data from 0 to 5 in order, we won't get a number of 1-6, but we get the order of 1, 4, 2, 5, 3, 6. a set of numbers. TF uses column major order. If we use NumSharp to generate a 2 x 3 matrix, if we access the data from 0 to 5 in order, we won't get a number of 1-6, but we get the order of 1, 4, 2, 5, 3, 6. a set of numbers.


```cs
```csharp
// Generate a matrix:[[1, 2, 3], [4, 5, 6]] // Generate a matrix:[[1, 2, 3], [4, 5, 6]]
var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape(2, 3); var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape(2, 3);
// The index will be 0 2 4 1 3 5, it's column-major order. // The index will be 0 2 4 1 3 5, it's column-major order.
@@ -49,3 +45,8 @@ var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape(2, 3);
![column-major order](_static/column-major-order.png) ![column-major order](_static/column-major-order.png)


![row-major order](_static/row-major-order.png) ![row-major order](_static/row-major-order.png)

##### Index/ Slice of Tensor

Tensor elements can be accessed by `index` and `slice` related operations. Through some high-level APIs, we can easily access a specific dimension's data.
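A minimal sketch of what such access can look like (the exact indexer overloads are an assumption and may differ between TF.NET versions):

```csharp
using NumSharp;
using static Tensorflow.Binding;

var nd = np.array(new int[,]
{
    {1, 2, 3},
    {4, 5, 6}
});
var tensor = tf.constant(nd);

// Index a single row and a single element (assumed indexer overloads).
var row = tensor[0];        // first row: [1, 2, 3]
var element = tensor[1, 2]; // element at row 1, column 2: 6
```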


+41 -0 docs/source/_static/constant/n-index-formula-offset.svg

@@ -0,0 +1,41 @@
<?xml version='1.0' encoding='UTF-8'?>
<!-- This file was generated by dvisvgm 2.9.1 -->
<svg version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' width='85.429852pt' height='34.493011pt' viewBox='190.161786 79.900371 85.429852 34.493011'>
<defs>
<path id='g2-78' d='M6.312329-4.574844C6.40797-4.96538 6.583313-5.156663 7.157161-5.180573C7.236862-5.180573 7.300623-5.228394 7.300623-5.332005C7.300623-5.379826 7.260772-5.443587 7.181071-5.443587C7.12528-5.443587 6.973848-5.419676 6.38406-5.419676C5.746451-5.419676 5.642839-5.443587 5.571108-5.443587C5.443587-5.443587 5.419676-5.355915 5.419676-5.292154C5.419676-5.188543 5.523288-5.180573 5.595019-5.180573C6.081196-5.164633 6.081196-4.94944 6.081196-4.837858C6.081196-4.798007 6.081196-4.758157 6.049315-4.630635L5.172603-1.139726L3.251806-5.300125C3.188045-5.443587 3.172105-5.443587 2.980822-5.443587H1.944707C1.801245-5.443587 1.697634-5.443587 1.697634-5.292154C1.697634-5.180573 1.793275-5.180573 1.960648-5.180573C2.024408-5.180573 2.263512-5.180573 2.446824-5.132752L1.378829-.852802C1.283188-.454296 1.075965-.278954 .541968-.263014C.494147-.263014 .398506-.255044 .398506-.111582C.398506-.063761 .438356 0 .518057 0C.549938 0 .73325-.02391 1.307098-.02391C1.936737-.02391 2.056289 0 2.12802 0C2.1599 0 2.279452 0 2.279452-.151432C2.279452-.247073 2.191781-.263014 2.13599-.263014C1.849066-.270984 1.609963-.318804 1.609963-.597758C1.609963-.637609 1.633873-.749191 1.633873-.757161L2.677958-4.917559H2.685928L4.901619-.143462C4.95741-.01594 4.96538 0 5.053051 0C5.164633 0 5.172603-.03188 5.204483-.167372L6.312329-4.574844Z'/>
<path id='g2-107' d='M2.327273-5.292154C2.335243-5.308095 2.359153-5.411706 2.359153-5.419676C2.359153-5.459527 2.327273-5.531258 2.231631-5.531258C2.199751-5.531258 1.952677-5.507347 1.769365-5.491407L1.323039-5.459527C1.147696-5.443587 1.067995-5.435616 1.067995-5.292154C1.067995-5.180573 1.179577-5.180573 1.275218-5.180573C1.657783-5.180573 1.657783-5.132752 1.657783-5.061021C1.657783-5.037111 1.657783-5.021171 1.617933-4.877709L.486177-.342715C.454296-.223163 .454296-.175342 .454296-.167372C.454296-.03188 .565878 .079701 .71731 .079701C.988294 .079701 1.052055-.175342 1.083935-.286924C1.163636-.621669 1.370859-1.466501 1.458531-1.801245C1.896887-1.753425 2.430884-1.601993 2.430884-1.147696C2.430884-1.107846 2.430884-1.067995 2.414944-.988294C2.391034-.884682 2.375093-.773101 2.375093-.73325C2.375093-.263014 2.725778 .079701 3.188045 .079701C3.52279 .079701 3.730012-.167372 3.833624-.318804C4.024907-.613699 4.152428-1.091905 4.152428-1.139726C4.152428-1.219427 4.088667-1.243337 4.032877-1.243337C3.937235-1.243337 3.921295-1.195517 3.889415-1.052055C3.785803-.67746 3.57858-.143462 3.203985-.143462C2.996762-.143462 2.948941-.318804 2.948941-.533998C2.948941-.637609 2.956912-.73325 2.996762-.916563C3.004732-.948443 3.036613-1.075965 3.036613-1.163636C3.036613-1.817186 2.215691-1.960648 1.809215-2.016438C2.10411-2.191781 2.375093-2.462765 2.470735-2.566376C2.909091-2.996762 3.267746-3.291656 3.650311-3.291656C3.753923-3.291656 3.849564-3.267746 3.913325-3.188045C3.482939-3.132254 3.482939-2.757659 3.482939-2.749689C3.482939-2.574346 3.618431-2.454795 3.793773-2.454795C4.008966-2.454795 4.24807-2.630137 4.24807-2.956912C4.24807-3.227895 4.056787-3.514819 3.658281-3.514819C3.196015-3.514819 2.781569-3.164134 2.327273-2.709838C1.865006-2.255542 1.665753-2.16787 1.538232-2.11208L2.327273-5.292154Z'/>
<path id='g0-88' d='M15.135243 16.737235L16.581818 12.911582H16.282939C15.816687 14.154919 14.54944 14.96787 13.174595 15.326526C12.923537 15.386301 11.75193 15.697136 9.456538 15.697136H2.247572L8.332752 8.5599C8.416438 8.464259 8.440349 8.428394 8.440349 8.368618C8.440349 8.344707 8.440349 8.308842 8.356663 8.18929L2.785554 .573848H9.336986C10.938979 .573848 12.026899 .74122 12.134496 .765131C12.780075 .860772 13.820174 1.06401 14.764633 1.661768C15.063512 1.853051 15.876463 2.391034 16.282939 3.359402H16.581818L15.135243 0H1.004234C.729265 0 .71731 .011955 .681445 .083686C.669489 .119552 .669489 .3467 .669489 .478207L6.993773 9.133748L.800996 16.390535C.681445 16.533998 .681445 16.593773 .681445 16.605729C.681445 16.737235 .789041 16.737235 1.004234 16.737235H15.135243Z'/>
<path id='g1-0' d='M5.571108-1.809215C5.69863-1.809215 5.873973-1.809215 5.873973-1.992528S5.69863-2.175841 5.571108-2.175841H1.004234C.876712-2.175841 .70137-2.175841 .70137-1.992528S.876712-1.809215 1.004234-1.809215H5.571108Z'/>
<path id='g5-61' d='M8.069738-3.873474C8.237111-3.873474 8.452304-3.873474 8.452304-4.088667C8.452304-4.315816 8.249066-4.315816 8.069738-4.315816H1.028144C.860772-4.315816 .645579-4.315816 .645579-4.100623C.645579-3.873474 .848817-3.873474 1.028144-3.873474H8.069738ZM8.069738-1.649813C8.237111-1.649813 8.452304-1.649813 8.452304-1.865006C8.452304-2.092154 8.249066-2.092154 8.069738-2.092154H1.028144C.860772-2.092154 .645579-2.092154 .645579-1.876961C.645579-1.649813 .848817-1.649813 1.028144-1.649813H8.069738Z'/>
<path id='g4-11' d='M3.809714-3.172105H4.774097V-3.435118H3.785803V-4.327771C3.785803-5.045081 4.184309-5.387796 4.534994-5.387796C4.558904-5.387796 4.654545-5.387796 4.766127-5.347945C4.622665-5.276214 4.582814-5.140722 4.582814-5.037111C4.582814-4.829888 4.734247-4.678456 4.941469-4.678456C5.156663-4.678456 5.300125-4.829888 5.300125-5.037111C5.300125-5.379826 4.96538-5.610959 4.542964-5.610959C4.24807-5.610959 3.905355-5.507347 3.642341-5.276214C3.379328-5.587049 2.909091-5.610959 2.693898-5.610959C1.896887-5.610959 .884682-5.212453 .884682-4.311831V-3.435118H.231133V-3.172105H.884682V-.621669C.884682-.263014 .789041-.263014 .278954-.263014V0C.589788-.02391 1.036115-.02391 1.171606-.02391C1.331009-.02391 1.761395-.02391 2.072229 0V-.263014C1.562142-.263014 1.466501-.263014 1.466501-.621669V-3.172105H3.227895V-.621669C3.227895-.263014 3.132254-.263014 2.622167-.263014V0C2.933001-.02391 3.371357-.02391 3.56264-.02391C4.032877-.02391 4.048817-.02391 4.566874 0V-.263014H4.407472C3.825654-.263014 3.809714-.350685 3.809714-.637609V-3.172105ZM1.44259-3.435118V-4.303861C1.44259-5.076961 2.13599-5.387796 2.677958-5.387796C2.749689-5.387796 3.052553-5.387796 3.283686-5.260274C3.076463-5.180573 3.052553-5.00523 3.052553-4.925529C3.052553-4.861768 3.068493-4.686426 3.251806-4.606725C3.227895-4.511083 3.227895-4.367621 3.227895-4.327771V-3.435118H1.44259Z'/>
<path id='g4-48' d='M3.897385-2.542466C3.897385-3.395268 3.809714-3.913325 3.5467-4.423412C3.196015-5.124782 2.550436-5.300125 2.11208-5.300125C1.107846-5.300125 .74122-4.550934 .629639-4.327771C.342715-3.745953 .326775-2.956912 .326775-2.542466C.326775-2.016438 .350685-1.211457 .73325-.573848C1.099875 .01594 1.689664 .167372 2.11208 .167372C2.494645 .167372 3.180075 .047821 3.57858-.74122C3.873474-1.315068 3.897385-2.024408 3.897385-2.542466ZM2.11208-.055791C1.841096-.055791 1.291158-.183313 1.123786-1.020174C1.036115-1.474471 1.036115-2.223661 1.036115-2.638107C1.036115-3.188045 1.036115-3.745953 1.123786-4.184309C1.291158-4.99726 1.912827-5.076961 2.11208-5.076961C2.383064-5.076961 2.933001-4.941469 3.092403-4.216189C3.188045-3.777833 3.188045-3.180075 3.188045-2.638107C3.188045-2.16787 3.188045-1.45056 3.092403-1.004234C2.925031-.167372 2.375093-.055791 2.11208-.055791Z'/>
<path id='g4-49' d='M2.502615-5.076961C2.502615-5.292154 2.486675-5.300125 2.271482-5.300125C1.944707-4.98132 1.522291-4.790037 .765131-4.790037V-4.527024C.980324-4.527024 1.41071-4.527024 1.872976-4.742217V-.653549C1.872976-.358655 1.849066-.263014 1.091905-.263014H.812951V0C1.139726-.02391 1.825156-.02391 2.183811-.02391S3.235866-.02391 3.56264 0V-.263014H3.283686C2.526526-.263014 2.502615-.358655 2.502615-.653549V-5.076961Z'/>
<path id='g4-61' d='M5.826152-2.654047C5.945704-2.654047 6.105106-2.654047 6.105106-2.83736S5.913823-3.020672 5.794271-3.020672H.781071C.661519-3.020672 .470237-3.020672 .470237-2.83736S.629639-2.654047 .749191-2.654047H5.826152ZM5.794271-.964384C5.913823-.964384 6.105106-.964384 6.105106-1.147696S5.945704-1.331009 5.826152-1.331009H.749191C.629639-1.331009 .470237-1.331009 .470237-1.147696S.661519-.964384 .781071-.964384H5.794271Z'/>
<path id='g4-101' d='M3.291656-1.817186C3.466999-1.817186 3.514819-1.817186 3.514819-2.000498C3.514819-2.709838 3.124284-3.55467 2.000498-3.55467C1.012204-3.55467 .239103-2.733748 .239103-1.745455C.239103-.71731 1.099875 .079701 2.10411 .079701C3.116314 .079701 3.514819-.773101 3.514819-.956413C3.514819-.988294 3.490909-1.067995 3.387298-1.067995C3.299626-1.067995 3.283686-1.012204 3.267746-.964384C2.980822-.191283 2.295392-.167372 2.15193-.167372C1.793275-.167372 1.42665-.334745 1.187547-.70137S.948443-1.578082 .948443-1.817186H3.291656ZM.956413-2.024408C1.028144-3.140224 1.705604-3.331507 2.000498-3.331507C2.933001-3.331507 2.964882-2.207721 2.972852-2.024408H.956413Z'/>
<path id='g4-111' d='M3.985056-1.697634C3.985056-2.693898 3.164134-3.55467 2.11208-3.55467S.239103-2.693898 .239103-1.697634S1.091905 .079701 2.11208 .079701C3.140224 .079701 3.985056-.70137 3.985056-1.697634ZM2.11208-.167372C1.681694-.167372 1.346949-.374595 1.171606-.653549C.972354-.980324 .948443-1.370859 .948443-1.769365C.948443-2.072229 .948443-2.550436 1.195517-2.893151C1.40274-3.172105 1.737484-3.331507 2.11208-3.331507C2.526526-3.331507 2.86924-3.132254 3.052553-2.8533C3.267746-2.518555 3.275716-2.088169 3.275716-1.769365C3.275716-1.40274 3.259776-.964384 3.036613-.629639C2.82142-.310834 2.462765-.167372 2.11208-.167372Z'/>
<path id='g4-115' d='M2.83736-3.347447C2.83736-3.474969 2.83736-3.55467 2.733748-3.55467C2.693898-3.55467 2.669988-3.55467 2.542466-3.427148C2.526526-3.419178 2.454795-3.347447 2.430884-3.347447C2.422914-3.347447 2.406974-3.347447 2.359153-3.379328C2.231631-3.466999 2.000498-3.55467 1.641843-3.55467C.526027-3.55467 .278954-2.948941 .278954-2.566376C.278954-2.16787 .573848-1.936737 .597758-1.912827C.916563-1.673724 1.099875-1.641843 1.633873-1.546202C2.008468-1.474471 2.622167-1.362889 2.622167-.820922C2.622167-.510087 2.414944-.143462 1.681694-.143462C.876712-.143462 .645579-.765131 .541968-1.187547C.510087-1.291158 .502117-1.331009 .406476-1.331009C.278954-1.331009 .278954-1.267248 .278954-1.115816V-.127522C.278954 0 .278954 .079701 .382565 .079701C.430386 .079701 .438356 .071731 .581818-.079701C.621669-.119552 .70934-.223163 .749191-.263014C1.107846 .063761 1.482441 .079701 1.689664 .079701C2.701868 .079701 3.052553-.502117 3.052553-1.028144C3.052553-1.41071 2.82142-1.968618 1.872976-2.14396C1.809215-2.1599 1.362889-2.239601 1.331009-2.239601C1.083935-2.295392 .70934-2.462765 .70934-2.781569C.70934-3.020672 .884682-3.355417 1.641843-3.355417C2.534496-3.355417 2.574346-2.701868 2.590286-2.478705C2.598257-2.414944 2.654047-2.391034 2.709838-2.391034C2.83736-2.391034 2.83736-2.446824 2.83736-2.598257V-3.347447Z'/>
<path id='g4-116' d='M1.482441-3.172105H2.669988V-3.435118H1.482441V-4.901619H1.235367C1.227397-4.176339 .900623-3.419178 .159402-3.395268V-3.172105H.876712V-.996264C.876712-.063761 1.594022 .079701 1.960648 .079701C2.494645 .079701 2.81345-.398506 2.81345-.996264V-1.44259H2.566376V-1.012204C2.566376-.462267 2.319303-.167372 2.016438-.167372C1.482441-.167372 1.482441-.852802 1.482441-.980324V-3.172105Z'/>
<path id='g3-110' d='M2.462765-3.502864C2.486675-3.574595 2.785554-4.172354 3.227895-4.554919C3.53873-4.841843 3.945205-5.033126 4.411457-5.033126C4.889664-5.033126 5.057036-4.674471 5.057036-4.196264C5.057036-3.514819 4.566874-2.15193 4.327771-1.506351C4.220174-1.219427 4.160399-1.06401 4.160399-.848817C4.160399-.310834 4.531009 .119552 5.104857 .119552C6.216687 .119552 6.635118-1.637858 6.635118-1.709589C6.635118-1.769365 6.587298-1.817186 6.515567-1.817186C6.40797-1.817186 6.396015-1.78132 6.336239-1.578082C6.06127-.597758 5.606974-.119552 5.140722-.119552C5.021171-.119552 4.829888-.131507 4.829888-.514072C4.829888-.812951 4.961395-1.171606 5.033126-1.338979C5.272229-1.996513 5.774346-3.335492 5.774346-4.016936C5.774346-4.734247 5.355915-5.272229 4.447323-5.272229C3.383313-5.272229 2.82142-4.519054 2.606227-4.220174C2.570361-4.901619 2.080199-5.272229 1.554172-5.272229C1.171606-5.272229 .908593-5.045081 .705355-4.638605C.490162-4.208219 .32279-3.490909 .32279-3.443088S.37061-3.335492 .454296-3.335492C.549938-3.335492 .561893-3.347447 .633624-3.622416C.824907-4.351681 1.0401-5.033126 1.518306-5.033126C1.793275-5.033126 1.888917-4.841843 1.888917-4.483188C1.888917-4.220174 1.769365-3.753923 1.685679-3.383313L1.350934-2.092154C1.303113-1.865006 1.171606-1.327024 1.111831-1.111831C1.028144-.800996 .896638-.239103 .896638-.179328C.896638-.011955 1.028144 .119552 1.207472 .119552C1.350934 .119552 1.518306 .047821 1.613948-.131507C1.637858-.191283 1.745455-.609714 1.80523-.848817L2.068244-1.924782L2.462765-3.502864Z'/>
<path id='g3-115' d='M2.725778-2.391034C2.929016-2.355168 3.251806-2.283437 3.323537-2.271482C3.478954-2.223661 4.016936-2.032379 4.016936-1.458531C4.016936-1.08792 3.682192-.119552 2.295392-.119552C2.044334-.119552 1.147696-.155417 .908593-.812951C1.3868-.753176 1.625903-1.123786 1.625903-1.3868C1.625903-1.637858 1.458531-1.769365 1.219427-1.769365C.956413-1.769365 .609714-1.566127 .609714-1.028144C.609714-.32279 1.327024 .119552 2.283437 .119552C4.100623 .119552 4.638605-1.219427 4.638605-1.841096C4.638605-2.020423 4.638605-2.355168 4.25604-2.737733C3.957161-3.024658 3.670237-3.084433 3.024658-3.21594C2.701868-3.287671 2.187796-3.395268 2.187796-3.93325C2.187796-4.172354 2.402989-5.033126 3.53873-5.033126C4.040847-5.033126 4.531009-4.841843 4.65056-4.411457C4.124533-4.411457 4.100623-3.957161 4.100623-3.945205C4.100623-3.694147 4.327771-3.622416 4.435367-3.622416C4.60274-3.622416 4.937484-3.753923 4.937484-4.25604S4.483188-5.272229 3.550685-5.272229C1.984558-5.272229 1.566127-4.040847 1.566127-3.550685C1.566127-2.642092 2.450809-2.450809 2.725778-2.391034Z'/>
</defs>
<g id='page1'>
<use x='190.161786' y='100.290628' xlink:href='#g3-110'/>
<use x='197.149392' y='102.083891' xlink:href='#g4-111'/>
<use x='201.383575' y='102.083891' xlink:href='#g4-11'/>
<use x='206.323455' y='102.083891' xlink:href='#g4-115'/>
<use x='209.663758' y='102.083891' xlink:href='#g4-101'/>
<use x='213.427476' y='102.083891' xlink:href='#g4-116'/>
<use x='220.539691' y='100.290628' xlink:href='#g5-61'/>
<use x='232.965172' y='85.346607' xlink:href='#g2-78'/>
<use x='240.535477' y='85.346607' xlink:href='#g1-0'/>
<use x='247.121983' y='85.346607' xlink:href='#g4-49'/>
<use x='233.52636' y='88.933163' xlink:href='#g0-88'/>
<use x='234.439516' y='114.393382' xlink:href='#g2-107'/>
<use x='239.061132' y='114.393382' xlink:href='#g4-61'/>
<use x='245.647638' y='114.393382' xlink:href='#g4-48'/>
<use x='253.348664' y='100.290628' xlink:href='#g3-115'/>
<use x='258.86267' y='102.083891' xlink:href='#g2-107'/>
<use x='263.982417' y='100.290628' xlink:href='#g3-110'/>
<use x='270.970023' y='102.083891' xlink:href='#g2-107'/>
</g>
</svg>

+33 -0 docs/source/_static/constant/n-index-formula.svg

@@ -0,0 +1,33 @@
<?xml version='1.0' encoding='UTF-8'?>
<!-- This file was generated by dvisvgm 2.9.1 -->
<svg version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' width='83.908685pt' height='11.955168pt' viewBox='56.413267 56.787049 83.908685 11.955168'>
<defs>
<path id='g0-0' d='M5.571108-1.809215C5.69863-1.809215 5.873973-1.809215 5.873973-1.992528S5.69863-2.175841 5.571108-2.175841H1.004234C.876712-2.175841 .70137-2.175841 .70137-1.992528S.876712-1.809215 1.004234-1.809215H5.571108Z'/>
<path id='g3-48' d='M3.897385-2.542466C3.897385-3.395268 3.809714-3.913325 3.5467-4.423412C3.196015-5.124782 2.550436-5.300125 2.11208-5.300125C1.107846-5.300125 .74122-4.550934 .629639-4.327771C.342715-3.745953 .326775-2.956912 .326775-2.542466C.326775-2.016438 .350685-1.211457 .73325-.573848C1.099875 .01594 1.689664 .167372 2.11208 .167372C2.494645 .167372 3.180075 .047821 3.57858-.74122C3.873474-1.315068 3.897385-2.024408 3.897385-2.542466ZM2.11208-.055791C1.841096-.055791 1.291158-.183313 1.123786-1.020174C1.036115-1.474471 1.036115-2.223661 1.036115-2.638107C1.036115-3.188045 1.036115-3.745953 1.123786-4.184309C1.291158-4.99726 1.912827-5.076961 2.11208-5.076961C2.383064-5.076961 2.933001-4.941469 3.092403-4.216189C3.188045-3.777833 3.188045-3.180075 3.188045-2.638107C3.188045-2.16787 3.188045-1.45056 3.092403-1.004234C2.925031-.167372 2.375093-.055791 2.11208-.055791Z'/>
<path id='g3-49' d='M2.502615-5.076961C2.502615-5.292154 2.486675-5.300125 2.271482-5.300125C1.944707-4.98132 1.522291-4.790037 .765131-4.790037V-4.527024C.980324-4.527024 1.41071-4.527024 1.872976-4.742217V-.653549C1.872976-.358655 1.849066-.263014 1.091905-.263014H.812951V0C1.139726-.02391 1.825156-.02391 2.183811-.02391S3.235866-.02391 3.56264 0V-.263014H3.283686C2.526526-.263014 2.502615-.358655 2.502615-.653549V-5.076961Z'/>
<path id='g2-58' d='M2.199751-.573848C2.199751-.920548 1.912827-1.159651 1.625903-1.159651C1.279203-1.159651 1.0401-.872727 1.0401-.585803C1.0401-.239103 1.327024 0 1.613948 0C1.960648 0 2.199751-.286924 2.199751-.573848Z'/>
<path id='g2-59' d='M2.331258 .047821C2.331258-.645579 2.10411-1.159651 1.613948-1.159651C1.231382-1.159651 1.0401-.848817 1.0401-.585803S1.219427 0 1.625903 0C1.78132 0 1.912827-.047821 2.020423-.155417C2.044334-.179328 2.056289-.179328 2.068244-.179328C2.092154-.179328 2.092154-.011955 2.092154 .047821C2.092154 .442341 2.020423 1.219427 1.327024 1.996513C1.195517 2.139975 1.195517 2.163885 1.195517 2.187796C1.195517 2.247572 1.255293 2.307347 1.315068 2.307347C1.41071 2.307347 2.331258 1.422665 2.331258 .047821Z'/>
<path id='g2-110' d='M2.462765-3.502864C2.486675-3.574595 2.785554-4.172354 3.227895-4.554919C3.53873-4.841843 3.945205-5.033126 4.411457-5.033126C4.889664-5.033126 5.057036-4.674471 5.057036-4.196264C5.057036-3.514819 4.566874-2.15193 4.327771-1.506351C4.220174-1.219427 4.160399-1.06401 4.160399-.848817C4.160399-.310834 4.531009 .119552 5.104857 .119552C6.216687 .119552 6.635118-1.637858 6.635118-1.709589C6.635118-1.769365 6.587298-1.817186 6.515567-1.817186C6.40797-1.817186 6.396015-1.78132 6.336239-1.578082C6.06127-.597758 5.606974-.119552 5.140722-.119552C5.021171-.119552 4.829888-.131507 4.829888-.514072C4.829888-.812951 4.961395-1.171606 5.033126-1.338979C5.272229-1.996513 5.774346-3.335492 5.774346-4.016936C5.774346-4.734247 5.355915-5.272229 4.447323-5.272229C3.383313-5.272229 2.82142-4.519054 2.606227-4.220174C2.570361-4.901619 2.080199-5.272229 1.554172-5.272229C1.171606-5.272229 .908593-5.045081 .705355-4.638605C.490162-4.208219 .32279-3.490909 .32279-3.443088S.37061-3.335492 .454296-3.335492C.549938-3.335492 .561893-3.347447 .633624-3.622416C.824907-4.351681 1.0401-5.033126 1.518306-5.033126C1.793275-5.033126 1.888917-4.841843 1.888917-4.483188C1.888917-4.220174 1.769365-3.753923 1.685679-3.383313L1.350934-2.092154C1.303113-1.865006 1.171606-1.327024 1.111831-1.111831C1.028144-.800996 .896638-.239103 .896638-.179328C.896638-.011955 1.028144 .119552 1.207472 .119552C1.350934 .119552 1.518306 .047821 1.613948-.131507C1.637858-.191283 1.745455-.609714 1.80523-.848817L2.068244-1.924782L2.462765-3.502864Z'/>
<path id='g1-78' d='M6.312329-4.574844C6.40797-4.96538 6.583313-5.156663 7.157161-5.180573C7.236862-5.180573 7.300623-5.228394 7.300623-5.332005C7.300623-5.379826 7.260772-5.443587 7.181071-5.443587C7.12528-5.443587 6.973848-5.419676 6.38406-5.419676C5.746451-5.419676 5.642839-5.443587 5.571108-5.443587C5.443587-5.443587 5.419676-5.355915 5.419676-5.292154C5.419676-5.188543 5.523288-5.180573 5.595019-5.180573C6.081196-5.164633 6.081196-4.94944 6.081196-4.837858C6.081196-4.798007 6.081196-4.758157 6.049315-4.630635L5.172603-1.139726L3.251806-5.300125C3.188045-5.443587 3.172105-5.443587 2.980822-5.443587H1.944707C1.801245-5.443587 1.697634-5.443587 1.697634-5.292154C1.697634-5.180573 1.793275-5.180573 1.960648-5.180573C2.024408-5.180573 2.263512-5.180573 2.446824-5.132752L1.378829-.852802C1.283188-.454296 1.075965-.278954 .541968-.263014C.494147-.263014 .398506-.255044 .398506-.111582C.398506-.063761 .438356 0 .518057 0C.549938 0 .73325-.02391 1.307098-.02391C1.936737-.02391 2.056289 0 2.12802 0C2.1599 0 2.279452 0 2.279452-.151432C2.279452-.247073 2.191781-.263014 2.13599-.263014C1.849066-.270984 1.609963-.318804 1.609963-.597758C1.609963-.637609 1.633873-.749191 1.633873-.757161L2.677958-4.917559H2.685928L4.901619-.143462C4.95741-.01594 4.96538 0 5.053051 0C5.164633 0 5.172603-.03188 5.204483-.167372L6.312329-4.574844Z'/>
<path id='g4-40' d='M3.88543 2.905106C3.88543 2.86924 3.88543 2.84533 3.682192 2.642092C2.486675 1.43462 1.817186-.537983 1.817186-2.976837C1.817186-5.296139 2.379078-7.292653 3.765878-8.703362C3.88543-8.810959 3.88543-8.834869 3.88543-8.870735C3.88543-8.942466 3.825654-8.966376 3.777833-8.966376C3.622416-8.966376 2.642092-8.105604 2.056289-6.933998C1.446575-5.726526 1.171606-4.447323 1.171606-2.976837C1.171606-1.912827 1.338979-.490162 1.960648 .789041C2.666002 2.223661 3.646326 3.000747 3.777833 3.000747C3.825654 3.000747 3.88543 2.976837 3.88543 2.905106Z'/>
<path id='g4-41' d='M3.371357-2.976837C3.371357-3.88543 3.251806-5.36787 2.582316-6.75467C1.876961-8.18929 .896638-8.966376 .765131-8.966376C.71731-8.966376 .657534-8.942466 .657534-8.870735C.657534-8.834869 .657534-8.810959 .860772-8.607721C2.056289-7.400249 2.725778-5.427646 2.725778-2.988792C2.725778-.669489 2.163885 1.327024 .777086 2.737733C.657534 2.84533 .657534 2.86924 .657534 2.905106C.657534 2.976837 .71731 3.000747 .765131 3.000747C.920548 3.000747 1.900872 2.139975 2.486675 .968369C3.096389-.251059 3.371357-1.542217 3.371357-2.976837Z'/>
</defs>
<g id='page1'>
<use x='56.413267' y='65.753425' xlink:href='#g4-40'/>
<use x='60.965593' y='65.753425' xlink:href='#g2-110'/>
<use x='67.953199' y='67.546688' xlink:href='#g3-48'/>
<use x='72.685513' y='65.753425' xlink:href='#g2-59'/>
<use x='77.929672' y='65.753425' xlink:href='#g2-110'/>
<use x='84.917278' y='67.546688' xlink:href='#g3-49'/>
<use x='89.649593' y='65.753425' xlink:href='#g2-59'/>
<use x='94.893752' y='65.753425' xlink:href='#g2-58'/>
<use x='98.145413' y='65.753425' xlink:href='#g2-58'/>
<use x='101.397074' y='65.753425' xlink:href='#g2-58'/>
<use x='104.648735' y='65.753425' xlink:href='#g2-59'/>
<use x='109.892894' y='65.753425' xlink:href='#g2-110'/>
<use x='116.8805' y='67.546688' xlink:href='#g1-78'/>
<use x='124.450805' y='67.546688' xlink:href='#g0-0'/>
<use x='131.037312' y='67.546688' xlink:href='#g3-49'/>
<use x='135.769627' y='65.753425' xlink:href='#g4-41'/>
</g>
</svg>

BIN docs/source/_static/contiguous-block-of-memory-ndarray-example-1.png

Width: 1458  |  Height: 496  |  Size: 66 kB

BIN docs/source/_static/contiguous-block-of-memory.png

Width: 800  |  Height: 249  |  Size: 40 kB

BIN docs/source/_static/tensor-constant-ndarray.png

Width: 1573  |  Height: 915  |  Size: 131 kB

BIN docs/source/_static/tensor-naming.png

Width: 1024  |  Height: 347  |  Size: 89 kB

+17 -0 src/TensorFlowNET.Console/MemoryTestingCases.cs

@@ -1,6 +1,7 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Text; using System.Text;
using NumSharp;
using static Tensorflow.Binding; using static Tensorflow.Binding;


namespace Tensorflow namespace Tensorflow
@@ -18,6 +19,22 @@ namespace Tensorflow
var tensor = tf.constant(3112.0f); var tensor = tf.constant(3112.0f);
} }
}; };

public Action<int> Constant2x3
=> (iterate) =>
{
var nd = np.array(new byte[,]
{
{1, 2, 3},
{4, 5, 6}
});
for (int i = 0; i < iterate; i++)
{
var tensor = tf.constant(nd);
var data = tensor.numpy();
}
};

public Action<int> Variable public Action<int> Variable
=> (iterate) => => (iterate) =>
{ {


+3 -0 src/TensorFlowNET.Console/Program.cs

@@ -15,6 +15,9 @@ namespace Tensorflow


int batchSize = 1000; int batchSize = 1000;


// explanation of constant
mm.Execute(10, 100 * batchSize, cases.Constant2x3);

// 1 million float tensor 68M. // 1 million float tensor 68M.
mm.Execute(10, 100 * batchSize, cases.Constant); mm.Execute(10, 100 * batchSize, cases.Constant);




+1 -1 src/TensorFlowNET.Console/TensorFlowNET.Console.csproj

@@ -1,4 +1,4 @@
<Project Sdk="Microsoft.NET.Sdk">
<Project Sdk="Microsoft.NET.Sdk">


<PropertyGroup> <PropertyGroup>
<OutputType>Exe</OutputType> <OutputType>Exe</OutputType>


+4 -0 src/TensorFlowNET.Core/APIs/tf.array.cs

@@ -29,6 +29,10 @@ namespace Tensorflow
/// A convenient alias for None, useful for indexing arrays. /// A convenient alias for None, useful for indexing arrays.
/// </summary> /// </summary>
public Slice newaxis = Slice.NewAxis; public Slice newaxis = Slice.NewAxis;
/// <summary>
/// A convenient alias for ...
/// </summary>
public Slice ellipsis = Slice.Ellipsis;


/// <summary> /// <summary>
/// BatchToSpace for N-D tensors of type T. /// BatchToSpace for N-D tensors of type T.


+26 -0 src/TensorFlowNET.Core/APIs/tf.autograph.cs

@@ -0,0 +1,26 @@
/*****************************************************************************
Copyright 2020 Haiping Chen. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/

using Tensorflow.Graphs;
using Tensorflow.Operations;

namespace Tensorflow
{
public partial class tensorflow
{
public AutoGraph autograph = new AutoGraph();
}
}

+29 -15 src/TensorFlowNET.Core/APIs/tf.control_flow.cs

@@ -15,17 +15,22 @@
******************************************************************************/ ******************************************************************************/


using System; using System;
using static Tensorflow.Binding;


namespace Tensorflow namespace Tensorflow
{ {
public partial class tensorflow public partial class tensorflow
{ {
public Tensor cond(Tensor pred,
Tensor true_value,
Tensor false_false)
=> control_flow_ops.cond(pred, () => true_value, () => false_false);

public Tensor cond(Tensor pred, public Tensor cond(Tensor pred,
Func<ITensorOrOperation> true_fn = null, Func<ITensorOrOperation> true_fn = null,
Func<ITensorOrOperation> false_fn = null, Func<ITensorOrOperation> false_fn = null,
bool strict = false,
string name = null) string name = null)
=> control_flow_ops.cond(pred, true_fn, false_fn, strict: strict, name: name);
=> control_flow_ops.cond(pred, true_fn, false_fn, name: name);


/// <summary> /// <summary>
/// Create an op that groups multiple operations. /// Create an op that groups multiple operations.
@@ -37,22 +42,31 @@ namespace Tensorflow
public Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation public Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation
=> control_flow_ops.group(inputs, name: name); => control_flow_ops.group(inputs, name: name);


/*public Tensor while_loop(Func<Tensor, Tensor> cond, Func<Tensor, Tensor> body, Tensor[] loop_vars,
TensorShape shape_invariants = null,
public Tensor while_loop(Func<Tensor, Tensor> cond,
Func<Tensor, Tensor> body,
Tensor loop_vars,
int parallel_iterations = 10)
{
Func<Tensor[], Tensor> cond1 = x
=> cond(x[0]);

Func<Tensor[], Tensor[]> body1 = x
=> new[] { body(x[0]) };

var results = control_flow_ops.while_loop(cond1,
body1,
new[] { loop_vars });
return results[0];
}

public Tensor[] while_loop(Func<Tensor[], Tensor> cond,
Func<Tensor[], Tensor[]> body,
Tensor[] loop_vars,
int parallel_iterations = 10, int parallel_iterations = 10,
bool back_prop = true,
bool swap_memory = false,
string name = null,
int? maximum_iterations = null,
bool return_same_structure = false)
string name = null)
=> control_flow_ops.while_loop(cond, body, loop_vars, => control_flow_ops.while_loop(cond, body, loop_vars,
shape_invariants: shape_invariants,
parallel_iterations: parallel_iterations, parallel_iterations: parallel_iterations,
back_prop: back_prop,
swap_memory: swap_memory,
name: name,
maximum_iterations: maximum_iterations,
return_same_structure: return_same_structure);*/
name: name);


public _ControlDependenciesController control_dependencies(ITensorOrOperation[] control_inputs) public _ControlDependenciesController control_dependencies(ITensorOrOperation[] control_inputs)
=> ops.control_dependencies(control_inputs); => ops.control_dependencies(control_inputs);
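For illustration, a minimal sketch of how the reworked single-tensor `while_loop` overload above might be called (assuming `tf.constant`, `tf.less` and `tf.add` are available, as elsewhere in the managed API):

```csharp
using static Tensorflow.Binding;

// Count i up to 10 with the Func<Tensor, Tensor> cond/body overload added above.
var i = tf.constant(2);
var result = tf.while_loop(
    cond: x => tf.less(x, 10), // keep looping while x < 10
    body: x => tf.add(x, 1),   // x = x + 1
    loop_vars: i);
print(result); // expected to hold 10 once the loop terminates
```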


+3 -0 src/TensorFlowNET.Core/APIs/tf.image.cs

@@ -208,6 +208,9 @@ namespace Tensorflow
=> image_ops_impl.non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size, => image_ops_impl.non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size,
name, sorted_input, canonicalized_coordinates, tile_size); name, sorted_input, canonicalized_coordinates, tile_size);


public Tensor resize(Tensor image, TensorShape size)
=> image_ops_impl.resize_images_v2(image, size);

public Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, bool half_pixel_centers = false, string name = null) public Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, bool half_pixel_centers = false, string name = null)
=> gen_image_ops.resize_bilinear(images, size, align_corners: align_corners, half_pixel_centers: half_pixel_centers, name: name); => gen_image_ops.resize_bilinear(images, size, align_corners: align_corners, half_pixel_centers: half_pixel_centers, name: name);




+1 -6 src/TensorFlowNET.Core/APIs/tf.nn.cs

@@ -182,12 +182,7 @@ namespace Tensorflow
=> nn_impl.sigmoid_cross_entropy_with_logits(labels: labels, logits: logits, name: name); => nn_impl.sigmoid_cross_entropy_with_logits(labels: labels, logits: logits, name: name);


public Tensor softmax(Tensor logits, int axis = -1, string name = null) public Tensor softmax(Tensor logits, int axis = -1, string name = null)
{
if (axis == -1)
return gen_nn_ops.softmax(logits, name);
else
throw new NotImplementedException("");
}
=> gen_nn_ops.softmax(logits, name);


/// <summary> /// <summary>


+3 -0 src/TensorFlowNET.Core/APIs/tf.train.cs

@@ -44,6 +44,9 @@ namespace Tensorflow
public Optimizer AdamOptimizer(float learning_rate, TF_DataType dtype, string name = "Adam") public Optimizer AdamOptimizer(float learning_rate, TF_DataType dtype, string name = "Adam")
=> new AdamOptimizer(learning_rate, name: name, dtype: dtype); => new AdamOptimizer(learning_rate, name: name, dtype: dtype);


public Optimizer AdamOptimizer(IVariableV1 learning_rate, string name = "Adam")
=> new AdamOptimizer(learning_rate.AsTensor(), name: name);

public Optimizer AdamOptimizer(Tensor learning_rate, string name = "Adam") public Optimizer AdamOptimizer(Tensor learning_rate, string name = "Adam")
=> new AdamOptimizer(learning_rate, name: name); => new AdamOptimizer(learning_rate, name: name);




+3 -0 src/TensorFlowNET.Core/Data/DatasetManager.cs

@@ -25,6 +25,9 @@ namespace Tensorflow
public IDatasetV2 from_tensor_slices(Tensor features, Tensor labels) public IDatasetV2 from_tensor_slices(Tensor features, Tensor labels)
=> new TensorSliceDataset(features, labels); => new TensorSliceDataset(features, labels);


public IDatasetV2 from_tensor_slices(string[] array)
=> new TensorSliceDataset(array);

public IDatasetV2 from_tensor_slices(NDArray array) public IDatasetV2 from_tensor_slices(NDArray array)
=> new TensorSliceDataset(array); => new TensorSliceDataset(array);
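A hedged usage sketch of the new `string[]` overload (the `tf.data.Dataset` access path and the illustrative file paths are assumptions; the enumeration pattern follows the other dataset code in this changeset):

```csharp
using static Tensorflow.Binding;

// Build a dataset whose elements are the individual strings.
var paths = new[] { "images/a.jpg", "images/b.jpg" };
var dataset = tf.data.Dataset.from_tensor_slices(paths);

// Enumerate the dataset; each element is a tuple whose Item1 is a string tensor.
foreach (var item in dataset)
    print(item.Item1);
```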




+1 -1 src/TensorFlowNET.Core/Data/DatasetV2.cs

@@ -52,7 +52,7 @@ namespace Tensorflow


public IDatasetV2 map(Func<Tensor, Tensor> map_func, public IDatasetV2 map(Func<Tensor, Tensor> map_func,
bool use_inter_op_parallelism = true, bool use_inter_op_parallelism = true,
bool preserve_cardinality = false,
bool preserve_cardinality = true,
bool use_legacy_function = false) bool use_legacy_function = false)
=> new MapDataset(this, => new MapDataset(this,
map_func, map_func,


+6 -4 src/TensorFlowNET.Core/Data/MapDataset.cs

@@ -1,6 +1,10 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Linq;
using System.Text; using System.Text;
using Tensorflow.Functions;
using Tensorflow.Graphs;
using static Tensorflow.Binding;


namespace Tensorflow namespace Tensorflow
{ {
@@ -15,12 +19,10 @@ namespace Tensorflow
bool preserve_cardinality = false, bool preserve_cardinality = false,
bool use_legacy_function = false) : base(input_dataset) bool use_legacy_function = false) : base(input_dataset)
{ {
foreach(var input in input_dataset)
{
var data = map_func(input.Item1);
}
var func = new ConcreteFunction(map_func, input_dataset.element_spec[0].dtype);


variant_tensor = ops.map_dataset(input_dataset.variant_tensor, variant_tensor = ops.map_dataset(input_dataset.variant_tensor,
func,
output_types, output_types,
output_shapes); output_shapes);
} }


+10 -0 src/TensorFlowNET.Core/Data/TensorSliceDataset.cs

@@ -11,6 +11,16 @@ namespace Tensorflow.Data
{ {
public class TensorSliceDataset : DatasetSource public class TensorSliceDataset : DatasetSource
{ {
public TensorSliceDataset(string[] array)
{
var element = tf.constant(array);
_tensors = new[] { element };
var batched_spec = new[] { element.ToTensorSpec() };
structure = batched_spec.Select(x => x._unbatch()).ToArray();

variant_tensor = ops.tensor_slice_dataset(_tensors, output_shapes);
}

public TensorSliceDataset(NDArray array) public TensorSliceDataset(NDArray array)
{ {
var element = tf.constant(array); var element = tf.constant(array);


+5 -1 src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs

@@ -6,6 +6,7 @@ using static Tensorflow.Binding;
using Tensorflow.Util; using Tensorflow.Util;
using System.Runtime.InteropServices; using System.Runtime.InteropServices;
using Tensorflow.Contexts; using Tensorflow.Contexts;
using Tensorflow.Functions;


namespace Tensorflow.Eager namespace Tensorflow.Eager
{ {
@@ -385,7 +386,10 @@ namespace Tensorflow.Eager
status.Check(true); status.Check(true);
break; break;
case TF_AttrType.TF_ATTR_FUNC: case TF_AttrType.TF_ATTR_FUNC:
c_api.TFE_OpSetAttrFunctionName(op, key, value.ToString(), value.ToString().Length);
if (value is ConcreteFunction func)
c_api.TFE_OpSetAttrFunctionName(op, key, func.Name, func.Name.Length);
else
throw new NotImplementedException("TF_AttrType.TF_ATTR_FUNC");
break; break;
default: default:
throw new NotImplementedException($"SetOpAttrScalar for {type}"); throw new NotImplementedException($"SetOpAttrScalar for {type}");


+1 -1 src/TensorFlowNET.Core/Eager/EagerTensor.Creation.cs

@@ -70,8 +70,8 @@ namespace Tensorflow.Eager


protected override void DisposeUnmanagedResources(IntPtr handle) protected override void DisposeUnmanagedResources(IntPtr handle)
{ {
base.DisposeUnmanagedResources(handle);
//print($"deleting DeleteTensorHandle {Id} {_handle.ToString("x16")}"); //print($"deleting DeleteTensorHandle {Id} {_handle.ToString("x16")}");
c_api.TF_DeleteTensor(_handle);
} }
} }
} }

+6 -0 src/TensorFlowNET.Core/Eager/EagerTensor.cs

@@ -19,6 +19,12 @@ namespace Tensorflow.Eager


public override int rank => c_api.TFE_TensorHandleNumDims(EagerTensorHandle, tf.Status.Handle); public override int rank => c_api.TFE_TensorHandleNumDims(EagerTensorHandle, tf.Status.Handle);


public override void set_shape(TensorShape shape)
{
if (!shape.is_compatible_with(this.shape))
throw new ValueError($"Tensor's shape is not compatible.");
}

public static int GetRank(IntPtr handle) public static int GetRank(IntPtr handle)
{ {
var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle); var tfe_tensor_handle = c_api.TFE_EagerTensorHandle(handle);


+31 -0 src/TensorFlowNET.Core/Eager/c_api.eager.cs

@@ -78,6 +78,37 @@ namespace Tensorflow
[DllImport(TensorFlowLibName)] [DllImport(TensorFlowLibName)]
public static extern SafeContextHandle TFE_NewContext(SafeContextOptionsHandle opts, SafeStatusHandle status); public static extern SafeContextHandle TFE_NewContext(SafeContextOptionsHandle opts, SafeStatusHandle status);


/// <summary>
/// Adds a function (created from TF_GraphToFunction or
/// TF_FunctionImportFunctionDef) to the context, allowing it to be executed with
/// TFE_Execute by creating an op with the same name as the function.
/// </summary>
/// <param name="ctx"></param>
/// <param name="function"></param>
/// <param name="status"></param>
[DllImport(TensorFlowLibName)]
public static extern void TFE_ContextAddFunction(SafeContextHandle ctx, IntPtr function, SafeStatusHandle status);

/// <summary>
/// Removes a function from the context. Once removed, you can no longer
/// TFE_Execute it or TFE_Execute any TFE_Op which has it as an attribute or any
/// other function which calls it as an attribute.
/// </summary>
/// <param name="ctx"></param>
/// <param name="name"></param>
/// <param name="status"></param>
[DllImport(TensorFlowLibName)]
public static extern void TFE_ContextRemoveFunction(SafeContextHandle ctx, string name, SafeStatusHandle status);

/// <summary>
/// Checks whether a function is registered under `name`.
/// </summary>
/// <param name="ctx"></param>
/// <param name="name"></param>
/// <returns></returns>
[DllImport(TensorFlowLibName)]
public static extern bool TFE_ContextHasFunction(SafeContextHandle ctx, string name);

[DllImport(TensorFlowLibName)]
public static extern void TFE_ContextStartStep(SafeContextHandle ctx);
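Taken together, the three new bindings cover the registration lifecycle of a graph function on the eager context. A rough sketch, where func_handle stands for a valid TF_Function* (for example the value returned by TF_GraphToFunction) and "my_func" is a placeholder name:

    using var status = new Status();
    c_api.TFE_ContextAddFunction(tf.Context.Handle, func_handle, status.Handle);
    status.Check(true);

    bool registered = c_api.TFE_ContextHasFunction(tf.Context.Handle, "my_func");

    c_api.TFE_ContextRemoveFunction(tf.Context.Handle, "my_func", status.Handle);
    status.Check(true);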




+ 58
- 0
src/TensorFlowNET.Core/Functions/ConcreteFunction.cs View File

@@ -0,0 +1,58 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Tensorflow.Graphs;
using static Tensorflow.Binding;

namespace Tensorflow.Functions
{
/// <summary>
///
/// </summary>
public class ConcreteFunction : IDisposable
{
public string Name => _handle == IntPtr.Zero ? string.Empty : c_api.StringPiece(c_api.TF_FunctionName(_handle));
IntPtr _handle;

public ConcreteFunction(Func<Tensor, Tensor> func, TF_DataType dtype)
{
string func_name = $"autograph_{Guid.NewGuid()}_{func.Method.Name}";

tf.compat.v1.disable_eager_execution();

// IntPtr func_handle;
using (var graph = new FuncGraph(func_name))
{
graph.as_default();
var input = tf.placeholder(dtype);
var output = func(input);

var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray();
_handle = graph.ToGraph(opers,
new Operation[] { input },
new Operation[] { output },
null);
}

tf.enable_eager_execution();
}

public Tensor Execute(Tensor arg)
{
var result = tf.Runner.TFE_Execute(tf.Context,
tf.Context.DeviceName,
Name,
new[] { arg },
null,
1);
return result[0];
}

public void Dispose()
{
c_api.TFE_ContextRemoveFunction(tf.Context.Handle, Name, tf.Status.Handle);
c_api.TF_DeleteFunction(_handle);
}
}
}
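A hedged usage sketch of the class above: wrap a lambda, execute it eagerly once, then dispose so the function is deregistered from the context (assumes `using static Tensorflow.Binding;`):

    using Tensorflow.Functions;

    using (var square = new ConcreteFunction(x => x * x, TF_DataType.TF_INT32))
    {
        // Execute runs the registered function through TFE_Execute.
        var y = square.Execute(tf.constant(3));
        Console.WriteLine(y);   // expected to hold 9
    }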

+ 4
- 1
src/TensorFlowNET.Core/Functions/c_api.function.cs View File

@@ -21,6 +21,9 @@ namespace Tensorflow
{
public partial class c_api
{
[DllImport(TensorFlowLibName)]
public static extern void TF_DeleteFunction(IntPtr handle);

/// <summary>
/// Write out a serialized representation of `func` (as a FunctionDef protocol
/// message) to `output_func_def` (allocated by TF_NewBuffer()).
@@ -39,7 +42,7 @@ namespace Tensorflow
int num_opers, IntPtr[] opers,
int ninputs, TF_Output[] inputs,
int noutputs, TF_Output[] outputs,
IntPtr output_names,
string[] output_names,
IntPtr opts,
string description,
SafeStatusHandle status);


+ 1
- 0
src/TensorFlowNET.Core/Gradients/gradients_util.cs View File

@@ -311,6 +311,7 @@ namespace Tensorflow
while (queue.Count > 0)
{
var op = queue.Dequeue();

if (reached_ops.Contains(op))
{
between_ops.Add(op);


+ 45
- 0
src/TensorFlowNET.Core/Graphs/AutoGraph.cs View File

@@ -0,0 +1,45 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Linq.Expressions;
using System.Text;
using static Tensorflow.Binding;

namespace Tensorflow.Graphs
{
public class AutoGraph
{
public Func<Tensor, Tensor, Tensor> to_graph(Func<Tensor, Tensor, Tensor> func)
{
string func_name = $"autograph_{Guid.NewGuid()}_{func.Method.Name}";
tf.compat.v1.disable_eager_execution();
// IntPtr func_handle;
using(var graph = new FuncGraph(func_name))
{
graph.as_default();
var input1 = tf.placeholder(tf.int32);
var input2 = tf.placeholder(tf.int32);
var output = func(input1, input2);

var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray();
var func_handle = graph.ToGraph(opers,
new Operation[] { input1, input2 },
new Operation[] { output },
null);
}

tf.enable_eager_execution();

return (Tensor a, Tensor b) =>
{
var result = tf.Runner.TFE_Execute(tf.Context,
tf.Context.DeviceName,
func_name,
new[] { a, b },
null,
1);
return result[0];
};
}
}
}
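An illustrative call pattern for to_graph, mirroring FunctionApiTest further down in this diff (assumes `using static Tensorflow.Binding;`):

    var add = new AutoGraph().to_graph((a, b) => a + b);
    var sum = add(tf.constant(1), tf.constant(2));   // expected: 3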

+ 80
- 0
src/TensorFlowNET.Core/Graphs/AutoGraphAttribute.cs View File

@@ -0,0 +1,80 @@
using MethodBoundaryAspect.Fody.Attributes;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Tensorflow.Eager;
using static Tensorflow.Binding;

namespace Tensorflow.Graphs
{
[AllowChangingInputArguments]
public sealed class AutoGraphAttribute : OnMethodBoundaryAspect
{
FuncGraph graph;
Tensor[] originalInputs;
string func_name;
static Dictionary<string, Func<Tensor[], Tensor>> functions = new Dictionary<string, Func<Tensor[], Tensor>>();

public override void OnEntry(MethodExecutionArgs args)
{
func_name = $"autograph_{args.Instance}.{args.Method.Name}";

if (functions.ContainsKey(func_name))
{
args.ReturnValue = functions[func_name](args.Arguments.Select(x => x as Tensor).ToArray());
args.FlowBehavior = FlowBehavior.Return;
return;
}
tf.compat.v1.disable_eager_execution();

// make function as an Operation by autograph
graph = new FuncGraph(func_name);
graph.as_default();

originalInputs = new Tensor[args.Arguments.Length];
// convert args to placeholder
for (var i = 0; i < args.Arguments.Length; i++)
{
if (args.Arguments[i] is EagerTensor tensor)
{
originalInputs[i] = tensor;
args.Arguments[i] = tf.placeholder(tensor.dtype, shape: tensor.TensorShape);
}
}
}

public override void OnExit(MethodExecutionArgs args)
{
var output = (Tensor)args.ReturnValue;
var inputs = args.Arguments.Select(x => x as Tensor).ToArray();
var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray();

graph.ToGraph(opers,
inputs.Select(x => x.op).ToArray(),
new Operation[] { output.op },
null);

graph.Dispose();
tf.enable_eager_execution();

Func<Tensor[], Tensor> function = (x) =>
{
var result = tf.Runner.TFE_Execute(tf.Context,
tf.Context.DeviceName,
func_name,
x,
null,
1);

return result[0];
};
// cache function.
functions[func_name] = function;

// run function
args.ReturnValue = function(originalInputs);
}
}
}
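A sketch of how the aspect is meant to be applied: MethodBoundaryAspect.Fody weaves OnEntry/OnExit around the decorated method, so the first eager call traces the body into a FuncGraph, caches the resulting function, and re-runs it on the original inputs; later calls hit the cache. The class and method names here are made up:

    public class MyOps
    {
        [AutoGraph]
        public Tensor Mul(Tensor a, Tensor b) => a * b;
    }

    var m = new MyOps();
    var product = m.Mul(tf.constant(3), tf.constant(2));   // expected: 6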

+ 61
- 0
src/TensorFlowNET.Core/Graphs/FuncGraph.cs View File

@@ -0,0 +1,61 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using Tensorflow.Functions;
using static Tensorflow.Binding;

namespace Tensorflow.Graphs
{
/// <summary>
/// Graph representing a function body.
/// </summary>
public class FuncGraph : Graph
{
List<Operation> inputs;
List<Operation> outputs;
Graph outer_graph;
string func_name;
IntPtr func_handle;
public string FuncName => c_api.StringPiece(c_api.TF_FunctionName(func_handle));

/// <summary>
/// Construct a new FuncGraph.
/// </summary>
public FuncGraph(string name) : base()
{
outer_graph = ops.get_default_graph();
func_name = name;
}

public IntPtr ToGraph(Operation[] opers,
Operation[] inputs, Operation[] outputs,
string[] output_names)
{
using var status = new Status();
func_handle = c_api.TF_GraphToFunction(_handle,
func_name,
false,
opers.Length,
opers.Select(x => (IntPtr)x).ToArray(),
inputs.Length,
inputs.Select(x => new TF_Output(x, 0)).ToArray(),
outputs.Length,
outputs.Select(x => new TF_Output(x, 0)).ToArray(),
output_names == null || output_names.Length == 0 ? null : output_names,
IntPtr.Zero,
null,
status.Handle);
status.Check(true);

c_api.TF_GraphCopyFunction(outer_graph, func_handle, IntPtr.Zero, status.Handle);
status.Check(true);

c_api.TFE_ContextAddFunction(tf.Context.Handle, func_handle, status.Handle);
status.Check(true);

return func_handle;
}
}
}

+ 1
- 1
src/TensorFlowNET.Core/Graphs/c_api.graph.cs View File

@@ -321,6 +321,6 @@ namespace Tensorflow
/// <param name="status">TF_Status*</param> /// <param name="status">TF_Status*</param>
[DllImport(TensorFlowLibName)] [DllImport(TensorFlowLibName)]
public static extern void UpdateEdge(IntPtr graph, TF_Output new_src, TF_Input dst, SafeStatusHandle status);
public static extern void TF_UpdateEdge(IntPtr graph, TF_Output new_src, TF_Input dst, SafeStatusHandle status);
} }
} }

+ 1
- 1
src/TensorFlowNET.Core/Keras/Engine/Flatten.cs View File

@@ -21,7 +21,7 @@ namespace Tensorflow.Keras.Engine
_channels_first = args.DataFormat == "channels_first"; _channels_first = args.DataFormat == "channels_first";
} }


protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool is_training = false)
{ {
if (_channels_first) if (_channels_first)
{ {


+ 37
- 3
src/TensorFlowNET.Core/Keras/Engine/Layer.cs View File

@@ -121,7 +121,7 @@ namespace Tensorflow.Keras.Engine
/// <param name="input"></param> /// <param name="input"></param>
/// <param name="is_training"></param> /// <param name="is_training"></param>
/// <returns></returns> /// <returns></returns>
public Tensor Apply(Tensor inputs, bool is_training = false, Tensor state = null)
public Tensor Apply(Tensor inputs, bool is_training = false)
{ {
Tensor outputs = null; Tensor outputs = null;


@@ -148,7 +148,7 @@ namespace Tensorflow.Keras.Engine
if (!built)
MaybeBuild(inputs);

outputs = call(inputs, is_training: is_training, state: state);
outputs = call(inputs, is_training: is_training);

outputs = _set_connectivity_metadata_(inputs, outputs);
_handle_activity_regularization(inputs, outputs);
@@ -161,6 +161,35 @@
return outputs;
}


public Tensor[] Apply(Tensor[] inputs, Tensor state, bool is_training = false)
{
Tensor[] outputs = null;

callContext = callContext ?? new ThreadLocal<CallContext>()
{
Value = new CallContext()
};

var eager = tf.executing_eagerly();
using var ctxManager = CallContext.enter();

string nameScope = "";
if (eager)
nameScope = name;
else
nameScope = _name_scope();

tf_with(ops.name_scope(nameScope), scope =>
{
if (!built)
MaybeBuild(inputs[0]);

outputs = call(inputs, is_training: is_training, state: state);
});

return outputs;
}

private Tensor _set_connectivity_metadata_(Tensor inputs, Tensor outputs) private Tensor _set_connectivity_metadata_(Tensor inputs, Tensor outputs)
{ {
/*var returnOutputs = new List<Tensor>(); /*var returnOutputs = new List<Tensor>();
@@ -200,7 +229,12 @@ namespace Tensorflow.Keras.Engine
return null; return null;
} }


protected virtual Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected virtual Tensor call(Tensor inputs, bool is_training = false)
{
throw new NotImplementedException("");
}

protected virtual Tensor[] call(Tensor[] inputs, Tensor state, bool is_training = false)
{ {
throw new NotImplementedException(""); throw new NotImplementedException("");
} }
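The array-based Apply/call overloads exist so RNN-style cells can thread state through the layer and hand back both the output and the new state. The caller-side shape, a fragment using the locals from the rnn.cs change later in this diff (cell, input_t_t, state1 are defined there):

    // pass inputs plus state, get { output, new_state } back
    var outputs = cell.__call__(new[] { input_t_t }, state: state1);
    var (output, new_state) = (outputs[0], outputs[1]);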


+ 1
- 1
src/TensorFlowNET.Core/Keras/Layers/BatchNormalization.cs View File

@@ -143,7 +143,7 @@ namespace Tensorflow.Keras.Layers
built = true; built = true;
} }


protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool is_training = false)
{ {
Tensor outputs = null; Tensor outputs = null;




+ 1
- 1
src/TensorFlowNET.Core/Keras/Layers/Conv.cs View File

@@ -95,7 +95,7 @@ namespace Tensorflow.Keras.Layers
built = true; built = true;
} }


protected override Tensor call(Tensor inputs, bool training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool training = false)
{ {
var outputs = _convolution_op.__call__(inputs, kernel); var outputs = _convolution_op.__call__(inputs, kernel);
if (use_bias) if (use_bias)


+ 1
- 1
src/TensorFlowNET.Core/Keras/Layers/Dense.cs View File

@@ -65,7 +65,7 @@ namespace Tensorflow.Keras.Layers
built = true; built = true;
} }


protected override Tensor call(Tensor inputs, bool training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool training = false)
{ {
Tensor outputs = null; Tensor outputs = null;
var rank = inputs.rank; var rank = inputs.rank;


+ 1
- 1
src/TensorFlowNET.Core/Keras/Layers/Dropout.cs View File

@@ -18,7 +18,7 @@ namespace Tensorflow.Keras.Layers
this.args = args; this.args = args;
} }


protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool is_training = false)
{ {
var output = tf_utils.smart_cond(is_training, var output = tf_utils.smart_cond(is_training,
() => tf.nn.dropout(inputs, () => tf.nn.dropout(inputs,


+ 1
- 1
src/TensorFlowNET.Core/Keras/Layers/Embedding.cs View File

@@ -62,7 +62,7 @@ namespace Tensorflow.Keras.Layers
built = true; built = true;
} }


protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool is_training = false)
{ {
var dtype = inputs.dtype; var dtype = inputs.dtype;
if (dtype != tf.int32 && dtype != tf.int64) if (dtype != tf.int32 && dtype != tf.int64)


+ 2
- 2
src/TensorFlowNET.Core/Keras/Layers/LSTM.cs View File

@@ -29,9 +29,9 @@ namespace Tensorflow.Keras.Layers
.ToArray(); .ToArray();
} }


protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool is_training = false)
{ {
return base.call(inputs, is_training, state);
return base.call(inputs, is_training);
} }
} }
} }

+ 1
- 1
src/TensorFlowNET.Core/Keras/Layers/Pooling2D.cs View File

@@ -36,7 +36,7 @@ namespace Tensorflow.Keras.Layers
input_spec = new InputSpec(ndim: 4); input_spec = new InputSpec(ndim: 4);
} }


protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool is_training = false)
{ {
int[] pool_shape; int[] pool_shape;
int[] strides; int[] strides;


+ 1
- 1
src/TensorFlowNET.Core/Keras/Layers/Rescaling.cs View File

@@ -20,7 +20,7 @@ namespace Tensorflow.Keras.Layers
this.args = args; this.args = args;
} }


protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool is_training = false)
{ {
scale = math_ops.cast(args.Scale, args.DType); scale = math_ops.cast(args.Scale, args.DType);
offset = math_ops.cast(args.Offset, args.DType); offset = math_ops.cast(args.Offset, args.DType);


+ 6
- 3
src/TensorFlowNET.Core/Keras/Preprocessings/Preprocessing.paths_and_labels_to_dataset.cs View File

@@ -16,7 +16,10 @@ namespace Tensorflow.Keras
int num_classes, int num_classes,
string interpolation) string interpolation)
{ {
Shape shape = (image_paths.Length, image_size.dims[0], image_size.dims[1], num_channels);
var path_ds = tf.data.Dataset.from_tensor_slices(image_paths);
var img_ds = path_ds.map(x => path_to_image(x, image_size, num_channels, interpolation));

/*Shape shape = (image_paths.Length, image_size.dims[0], image_size.dims[1], num_channels);
Console.WriteLine($"Allocating memory for shape{shape}, {NPTypeCode.Float}"); Console.WriteLine($"Allocating memory for shape{shape}, {NPTypeCode.Float}");
var data = np.zeros(shape, NPTypeCode.Float); var data = np.zeros(shape, NPTypeCode.Float);


@@ -35,13 +38,13 @@ namespace Tensorflow.Keras
var label_ds = tf.keras.preprocessing.dataset_utils.labels_to_dataset(labels, label_mode, num_classes); var label_ds = tf.keras.preprocessing.dataset_utils.labels_to_dataset(labels, label_mode, num_classes);
img_ds = tf.data.Dataset.zip(img_ds, label_ds); img_ds = tf.data.Dataset.zip(img_ds, label_ds);
} }
else
else*/
throw new NotImplementedException(""); throw new NotImplementedException("");


return img_ds; return img_ds;
} }


Tensor path_to_image(string path, TensorShape image_size, int num_channels, string interpolation)
Tensor path_to_image(Tensor path, TensorShape image_size, int num_channels, string interpolation)
{ {
var img = tf.io.read_file(path); var img = tf.io.read_file(path);
img = tf.image.decode_image( img = tf.image.decode_image(


+ 43
- 6
src/TensorFlowNET.Core/Layers/Layer.cs View File

@@ -61,9 +61,8 @@ namespace Tensorflow.Layers
return (results[0], results[1]); return (results[0], results[1]);
} }


public Tensor[] __call__(Tensor inputs,
public Tensor __call__(Tensor inputs,
Tensor training = null, Tensor training = null,
Tensor state = null,
VariableScope scope = null) VariableScope scope = null)
{ {
_set_scope(scope); _set_scope(scope);
@@ -88,16 +87,54 @@ namespace Tensorflow.Layers
{ {
_current_scope = scope2; _current_scope = scope2;
// Actually call layer // Actually call layer
outputs = base.Apply(inputs,
is_training: training == null ? false : false,
state: state);
outputs = base.Apply(inputs[0],
is_training: training == null ? false : false);
});


// Update global default collections.
_add_elements_to_collection(updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS });

return outputs;
}

public Tensor[] __call__(Tensor[] inputs,
Tensor state = null,
Tensor training = null,
VariableScope scope = null)
{
_set_scope(scope);
_graph = ops._get_graph_from_inputs(inputs, graph: _graph);

variable_scope scope_context_manager = null;
if (built)
{
scope_context_manager = tf.variable_scope(_scope,
reuse: true,
auxiliary_name_scope: false);
}
else
{
scope_context_manager = tf.variable_scope(_scope,
reuse: _reuse,
auxiliary_name_scope: false);
}

Tensor[] outputs = null;
tf_with(scope_context_manager, scope2 =>
{
_current_scope = scope2;
// Actually call layer
outputs = base.Apply(inputs,
state,
is_training: training == null ? false : false);
}); });




// Update global default collections. // Update global default collections.
_add_elements_to_collection(updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS }); _add_elements_to_collection(updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS });


return new Tensor[] { outputs };
return outputs;
} }


protected virtual void _add_elements_to_collection(Operation[] elements, string[] collection_list) protected virtual void _add_elements_to_collection(Operation[] elements, string[] collection_list)


+ 1
- 1
src/TensorFlowNET.Core/Operations/ControlFlows/WhileContext.cs View File

@@ -326,7 +326,7 @@ namespace Tensorflow.Operations


protected override void _AddOpInternal(Operation op) protected override void _AddOpInternal(Operation op)
{ {
if (op.name == "gradients/rnn/while/basic_rnn_cell/Tanh_grad/TanhGrad")
if (op.name == "rnn/basic_rnn_cell/kernel/Initializer/random_uniform/shape")
{ {


} }


+ 2
- 2
src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs View File

@@ -61,7 +61,7 @@ namespace Tensorflow
built = true; built = true;
} }


public Tensor[] __call__(Tensor inputs, LSTMStateTuple state)
public Tensor __call__(Tensor inputs, LSTMStateTuple state)
{ {
_state = state; _state = state;
return base.__call__(inputs); return base.__call__(inputs);
@@ -74,7 +74,7 @@ namespace Tensorflow
/// <param name="training"></param> /// <param name="training"></param>
/// <param name="state"></param> /// <param name="state"></param>
/// <returns></returns> /// <returns></returns>
protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor call(Tensor inputs, bool is_training = false)
{ {
var one = constant_op.constant(1, dtype: dtypes.int32); var one = constant_op.constant(1, dtype: dtypes.int32);
// Parameters of gates are concatenated into one multiply for efficiency. // Parameters of gates are concatenated into one multiply for efficiency.


+ 3
- 3
src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs View File

@@ -67,14 +67,14 @@ namespace Tensorflow
built = true; built = true;
} }


protected override Tensor call(Tensor inputs, bool is_training = false, Tensor state = null)
protected override Tensor[] call(Tensor[] inputs, Tensor state, bool is_training = false)
{ {
// Most basic RNN: output = new_state = act(W * input + U * state + B). // Most basic RNN: output = new_state = act(W * input + U * state + B).
var concat = array_ops.concat(new[] { inputs, state }, 1);
var concat = array_ops.concat(new[] { inputs[0], state }, 1);
var gate_inputs = math_ops.matmul(concat, _kernel.AsTensor()); var gate_inputs = math_ops.matmul(concat, _kernel.AsTensor());
gate_inputs = nn_ops.bias_add(gate_inputs, _bias.AsTensor()); gate_inputs = nn_ops.bias_add(gate_inputs, _bias.AsTensor());
var output = _activation(gate_inputs, null); var output = _activation(gate_inputs, null);
return output;
return new[] { output, output };
} }
} }
} }

+ 1
- 1
src/TensorFlowNET.Core/Operations/NnOps/rnn.cs View File

@@ -364,7 +364,7 @@ namespace Tensorflow.Operations
if (sequence_length != null) if (sequence_length != null)
throw new NotImplementedException("sequence_length != null"); throw new NotImplementedException("sequence_length != null");
else else
outputs = cell.__call__(input_t_t, state: state1);
outputs = cell.__call__(new[] { input_t_t }, state: state1);


var (output, new_state) = (outputs[0], outputs[1]); var (output, new_state) = (outputs[0], outputs[1]);
// Keras cells always wrap state as list, even if it's a single tensor. // Keras cells always wrap state as list, even if it's a single tensor.


+ 1
- 1
src/TensorFlowNET.Core/Operations/Operation.Instance.cs View File

@@ -40,7 +40,7 @@ namespace Tensorflow
} }
} }


return null;
return new Operation(handle);
} }
} }
} }

+ 3
- 1
src/TensorFlowNET.Core/Operations/Operation.cs View File

@@ -322,11 +322,13 @@ namespace Tensorflow


// Reset cached inputs. // Reset cached inputs.
_inputs_val = null; _inputs_val = null;
_node_def = null;
// after the c_api call next time _inputs is accessed // after the c_api call next time _inputs is accessed
// the updated inputs are reloaded from the c_api // the updated inputs are reloaded from the c_api
lock (Locks.ProcessWide) lock (Locks.ProcessWide)
{ {
// c_api.UpdateEdge(_graph, output, input, tf.Status.Handle);
// disable
// c_api.TF_UpdateEdge(_graph, output, input, tf.Status.Handle);
//var updated_inputs = inputs; //var updated_inputs = inputs;
tf.Status.Check(); tf.Status.Check();
} }


+ 25
- 2
src/TensorFlowNET.Core/Operations/control_flow_ops.cs View File

@@ -22,6 +22,7 @@ using Tensorflow.Operations.ControlFlows;
using util = Tensorflow.control_flow_util; using util = Tensorflow.control_flow_util;
using static Tensorflow.Binding; using static Tensorflow.Binding;
using Tensorflow.Util; using Tensorflow.Util;
using System.Data;


namespace Tensorflow namespace Tensorflow
{ {
@@ -420,14 +421,13 @@ namespace Tensorflow
public static Tensor cond(Tensor pred, public static Tensor cond(Tensor pred,
Func<ITensorOrOperation> true_fn = null, Func<ITensorOrOperation> true_fn = null,
Func<ITensorOrOperation> false_fn = null, Func<ITensorOrOperation> false_fn = null,
bool strict = false,
string name = null) string name = null)
{ {
return tf_with(ops.name_scope(name, "cond", new { pred }), delegate
{
if (tf.Context.executing_eagerly())
{
if (pred.ToArray<bool>()[0])
if ((bool)pred)
return true_fn() as Tensor;
else
return false_fn() as Tensor;
@@ -676,6 +676,29 @@ namespace Tensorflow
} }
} }


public static Tensor[] while_loop(Func<Tensor[], Tensor> cond,
Func<Tensor[], Tensor[]> body,
Tensor[] loop_vars,
int parallel_iterations = 10,
string name = null)
{
var executing_eagerly = tf.Context.executing_eagerly();
if (!executing_eagerly)
{
throw new NotImplementedException("");
}

return tf_with(ops.name_scope("name", "while"), delegate
{
while ((bool)cond(loop_vars))
{
loop_vars = body(loop_vars);
}

return loop_vars;
});
}

/// <summary>
/// Repeat `body` while the condition `cond` is true.
/// </summary>
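Eager-mode usage of the multi-variable overload added above, following the same pattern exercised by ControlFlowApiTest.WhileLoopTwoInputsEagerMode (assumes `using System;` and `using static Tensorflow.Binding;`):

    tf.enable_eager_execution();

    var i = tf.constant(2);
    var j = tf.constant(3);
    Func<Tensor[], Tensor> cond = x => tf.less(x[0] + x[1], 10);
    Func<Tensor[], Tensor[]> body = x => new[] { tf.add(x[0], 1), tf.add(x[1], 1) };
    var r = tf.while_loop(cond, body, new[] { i, j });
    // loop stops once the sum reaches 10: r[0] == 5, r[1] == 6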


+ 3
- 2
src/TensorFlowNET.Core/Operations/dataset_ops.cs View File

@@ -2,6 +2,7 @@
using System.Collections.Generic; using System.Collections.Generic;
using System.Text; using System.Text;
using Tensorflow.Framework.Models; using Tensorflow.Framework.Models;
using Tensorflow.Functions;
using static Tensorflow.Binding; using static Tensorflow.Binding;


namespace Tensorflow namespace Tensorflow
@@ -419,7 +420,7 @@ namespace Tensorflow
/// <param name="iterator"></param> /// <param name="iterator"></param>
/// <param name="name"></param> /// <param name="name"></param>
/// <returns></returns> /// <returns></returns>
public Tensor map_dataset(Tensor dataset, TF_DataType[] output_types, TensorShape[] output_shapes,
public Tensor map_dataset(Tensor dataset, ConcreteFunction f, TF_DataType[] output_types, TensorShape[] output_shapes,
bool use_inter_op_parallelism = true, bool preserve_cardinality = false, string name = null) bool use_inter_op_parallelism = true, bool preserve_cardinality = false, string name = null)
{ {
if (tf.Context.executing_eagerly()) if (tf.Context.executing_eagerly())
@@ -428,7 +429,7 @@ namespace Tensorflow
"MapDataset", name, "MapDataset", name,
null, null,
dataset, new Tensor[0], dataset, new Tensor[0],
"f", "MapDataset",
"f", f,
"output_types", output_types, "output_types", output_types,
"output_shapes", output_shapes, "output_shapes", output_shapes,
"use_inter_op_parallelism", use_inter_op_parallelism, "use_inter_op_parallelism", use_inter_op_parallelism,


+ 44
- 25
src/TensorFlowNET.Core/Operations/image_ops_impl.cs View File

@@ -672,10 +672,8 @@ or rank = 4. Had rank = {0}", rank));
internal static Tensor _resize_images_common(Tensor images, Func<Tensor, Tensor, Tensor> resizer_fn, internal static Tensor _resize_images_common(Tensor images, Func<Tensor, Tensor, Tensor> resizer_fn,
Tensor size, bool preserve_aspect_ratio, string name, bool skip_resize_if_same) Tensor size, bool preserve_aspect_ratio, string name, bool skip_resize_if_same)
{ {
using (ops.name_scope(name, "resize", new [] {images, size}))
return tf_with(ops.name_scope(name, "resize", new [] {images, size}), delegate
return tf_with(ops.name_scope(name, "resize", new[] {images, size}), delegate
{ {
images = ops.convert_to_tensor(images, name: "images");
if (images.TensorShape.ndim == Unknown) if (images.TensorShape.ndim == Unknown)
throw new ValueError("\'images\' contains no shape."); throw new ValueError("\'images\' contains no shape.");
bool is_batch = true; bool is_batch = true;
@@ -688,18 +686,6 @@ or rank = 4. Had rank = {0}", rank));


var (height, width) = (images.dims[1], images.dims[2]); var (height, width) = (images.dims[1], images.dims[2]);


try
{
size = ops.convert_to_tensor(size, dtypes.int32, name: "size");
}
catch (Exception ex)
{
if (ex is TypeError || ex is ValueError)
throw new ValueError("\'size\' must be a 1-D int32 Tensor");
else
throw;
}

if (!size.TensorShape.is_compatible_with(new [] {2})) if (!size.TensorShape.is_compatible_with(new [] {2}))
throw new ValueError(@"\'size\' must be a 1-D Tensor of 2 elements: throw new ValueError(@"\'size\' must be a 1-D Tensor of 2 elements:
new_height, new_width"); new_height, new_width");
@@ -756,7 +742,7 @@ new_height, new_width");
images = resizer_fn(images, size); images = resizer_fn(images, size);


// images.set_shape(new TensorShape(new int[] { -1, new_height_const, new_width_const, -1 }));
images.set_shape(new TensorShape(new int[] { Unknown, new_height_const, new_width_const, Unknown }));
if (!is_batch) if (!is_batch)
images = array_ops.squeeze(images, axis: new int[] {0}); images = array_ops.squeeze(images, axis: new int[] {0});
@@ -1668,8 +1654,6 @@ new_height, new_width");
public static Tensor decode_image(Tensor contents, int channels = 0, TF_DataType dtype = TF_DataType.TF_UINT8, public static Tensor decode_image(Tensor contents, int channels = 0, TF_DataType dtype = TF_DataType.TF_UINT8,
string name = null, bool expand_animations = true) string name = null, bool expand_animations = true)
{ {
Tensor substr = null;

Func<ITensorOrOperation> _jpeg = () => Func<ITensorOrOperation> _jpeg = () =>
{ {
int jpeg_channels = channels; int jpeg_channels = channels;
@@ -1695,8 +1679,7 @@ new_height, new_width");
{ {
var result = convert_image_dtype(gen_image_ops.decode_gif(contents), dtype); var result = convert_image_dtype(gen_image_ops.decode_gif(contents), dtype);
if (!expand_animations) if (!expand_animations)
// result = array_ops.gather(result, 0);
throw new NotImplementedException("");
result = array_ops.gather(result, 0);
return result; return result;
}); });
}; };
@@ -1728,18 +1711,16 @@ new_height, new_width");


Func<ITensorOrOperation> check_gif = () => Func<ITensorOrOperation> check_gif = () =>
{ {
var is_gif = math_ops.equal(substr, "\x47\x49\x46", name: "is_gif");
return control_flow_ops.cond(is_gif, _gif, _bmp, name: "cond_gif");
return control_flow_ops.cond(is_gif(contents), _gif, _bmp, name: "cond_gif");
}; };


Func<ITensorOrOperation> check_png = () => Func<ITensorOrOperation> check_png = () =>
{ {
return control_flow_ops.cond(_is_png(contents), _png, check_gif, name: "cond_png");
return control_flow_ops.cond(is_png(contents), _png, check_gif, name: "cond_png");
}; };


return tf_with(ops.name_scope(name, "decode_image"), scope => return tf_with(ops.name_scope(name, "decode_image"), scope =>
{ {
substr = tf.strings.substr(contents, 0, 3);
return control_flow_ops.cond(is_jpeg(contents), _jpeg, check_png, name: "cond_jpeg"); return control_flow_ops.cond(is_jpeg(contents), _jpeg, check_png, name: "cond_jpeg");
}); });
} }
@@ -2089,7 +2070,7 @@ new_height, new_width");
}); });
} }


public static Tensor _is_png(Tensor contents, string name = null)
static Tensor is_png(Tensor contents, string name = null)
{ {
return tf_with(ops.name_scope(name, "is_png"), scope => return tf_with(ops.name_scope(name, "is_png"), scope =>
{ {
@@ -2098,6 +2079,17 @@ new_height, new_width");
}); });
} }


static Tensor is_gif(Tensor contents, string name = null)
{
return tf_with(ops.name_scope(name, "is_gif"), scope =>
{
var substr = tf.strings.substr(contents, 0, 3);
var gif = tf.constant(new byte[] { 0x47, 0x49, 0x46 }, TF_DataType.TF_STRING);
var result = math_ops.equal(substr, gif, name: name);
return result;
});
}

public static Tensor convert_image_dtype(Tensor image, TF_DataType dtype, bool saturate = false, public static Tensor convert_image_dtype(Tensor image, TF_DataType dtype, bool saturate = false,
string name = null) string name = null)
{ {
@@ -2157,6 +2149,33 @@ new_height, new_width");
}); });
} }


/// <summary>
/// Resize `images` to `size` using the specified `method`.
/// </summary>
/// <param name="images"></param>
/// <param name="size"></param>
/// <param name="method"></param>
/// <param name="preserve_aspect_ratio"></param>
/// <param name="antialias"></param>
/// <param name="name"></param>
/// <returns></returns>
public static Tensor resize_images_v2(Tensor images, TensorShape size, string method = ResizeMethod.BILINEAR,
bool preserve_aspect_ratio = false,
bool antialias = false,
string name = null)
{
Func<Tensor, Tensor, Tensor> resize_fn = (images, size) =>
{
if (method == ResizeMethod.BILINEAR)
return gen_image_ops.resize_bilinear(images, size, half_pixel_centers: true);
throw new NotImplementedException("");
};
return _resize_images_common(images, resize_fn, ops.convert_to_tensor(size),
preserve_aspect_ratio: preserve_aspect_ratio,
skip_resize_if_same: false,
name: name);
}
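Eager usage of this new bilinear resize path, condensed from ImageTest.resize_image later in this diff (assumes `using static Tensorflow.Binding;`):

    var image = tf.constant(new int[5, 5]
    {
        {1, 0, 0, 0, 0 },
        {0, 1, 0, 0, 0 },
        {0, 0, 1, 0, 0 },
        {0, 0, 0, 1, 0 },
        {0, 0, 0, 0, 1 }
    });
    image = image[tf.newaxis, tf.ellipsis, tf.newaxis];   // add batch and channel dims
    image = tf.image.resize(image, (3, 5));               // bilinear, half_pixel_centers: true
    image = image[0, tf.ellipsis, 0];                     // first row ≈ [0.667, 0.333, 0, 0, 0]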

/// <summary> /// <summary>
/// Resize `images` to `size` using nearest neighbor interpolation. /// Resize `images` to `size` using nearest neighbor interpolation.
/// </summary> /// </summary>


+ 13
- 5
src/TensorFlowNET.Core/Tensorflow.Binding.csproj View File

@@ -5,7 +5,7 @@
<AssemblyName>TensorFlow.NET</AssemblyName> <AssemblyName>TensorFlow.NET</AssemblyName>
<RootNamespace>Tensorflow</RootNamespace> <RootNamespace>Tensorflow</RootNamespace>
<TargetTensorFlow>2.2.0</TargetTensorFlow> <TargetTensorFlow>2.2.0</TargetTensorFlow>
<Version>0.20.0</Version>
<Version>0.21.0</Version>
<LangVersion>8.0</LangVersion> <LangVersion>8.0</LangVersion>
<Authors>Haiping Chen, Meinrad Recheis, Eli Belash</Authors> <Authors>Haiping Chen, Meinrad Recheis, Eli Belash</Authors>
<Company>SciSharp STACK</Company> <Company>SciSharp STACK</Company>
@@ -19,16 +19,17 @@
<Description>Google's TensorFlow full binding in .NET Standard.
Building, training and inferring deep learning models.
https://tensorflownet.readthedocs.io</Description>
<AssemblyVersion>0.20.0.0</AssemblyVersion>
<AssemblyVersion>0.21.0.0</AssemblyVersion>
<PackageReleaseNotes>tf.net 0.20.x and above are based on tensorflow native 2.x. <PackageReleaseNotes>tf.net 0.20.x and above are based on tensorflow native 2.x.


* Eager Mode is added finally. * Eager Mode is added finally.
* tf.keras is partially working. * tf.keras is partially working.
* tf.data is added.</PackageReleaseNotes>
<FileVersion>0.20.0.0</FileVersion>
* tf.data is added.
* autograph works partially.</PackageReleaseNotes>
<FileVersion>0.21.0.0</FileVersion>
<PackageLicenseFile>LICENSE</PackageLicenseFile> <PackageLicenseFile>LICENSE</PackageLicenseFile>
<PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance> <PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance>
<SignAssembly>true</SignAssembly>
<SignAssembly>false</SignAssembly>
<AssemblyOriginatorKeyFile>Open.snk</AssemblyOriginatorKeyFile> <AssemblyOriginatorKeyFile>Open.snk</AssemblyOriginatorKeyFile>
<Platforms>AnyCPU;x64</Platforms> <Platforms>AnyCPU;x64</Platforms>
</PropertyGroup> </PropertyGroup>
@@ -76,6 +77,7 @@ https://tensorflownet.readthedocs.io</Description>


<ItemGroup> <ItemGroup>
<PackageReference Include="Google.Protobuf" Version="3.11.4" /> <PackageReference Include="Google.Protobuf" Version="3.11.4" />
<PackageReference Include="MethodBoundaryAspect.Fody" Version="2.0.138" />
<PackageReference Include="NumSharp.Lite" Version="0.1.8" /> <PackageReference Include="NumSharp.Lite" Version="0.1.8" />
<PackageReference Include="Protobuf.Text" Version="0.4.0" /> <PackageReference Include="Protobuf.Text" Version="0.4.0" />
</ItemGroup> </ItemGroup>
@@ -83,4 +85,10 @@ https://tensorflownet.readthedocs.io</Description>
<ItemGroup> <ItemGroup>
<Folder Include="Keras\Initializers\" /> <Folder Include="Keras\Initializers\" />
</ItemGroup> </ItemGroup>

<ItemGroup>
<None Update="FodyWeavers.xml">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project> </Project>

+ 2
- 0
src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs View File

@@ -50,6 +50,8 @@ namespace Tensorflow
/// </summary> /// </summary>
public AllocationType AllocationType { get; protected set; } public AllocationType AllocationType { get; protected set; }


public IntPtr TensorDataPointer => _handle == IntPtr.Zero ? IntPtr.Zero : TF_TensorData(_handle);

/// <summary> /// <summary>
/// Create a Tensor object from an existing TF handle /// Create a Tensor object from an existing TF handle
/// </summary> /// </summary>


+ 1
- 1
src/TensorFlowNET.Core/Tensors/Tensor.Explicit.cs View File

@@ -11,7 +11,7 @@ namespace Tensorflow
{ {
EnsureScalar(tensor); EnsureScalar(tensor);
EnsureDType(tensor, TF_DataType.TF_BOOL); EnsureDType(tensor, TF_DataType.TF_BOOL);
return *(bool*) tensor.buffer;
return *(bool*)tensor.buffer;
} }
} }




+ 1
- 2
src/TensorFlowNET.Core/Tensors/Tensor.cs View File

@@ -149,7 +149,7 @@ namespace Tensorflow
/// <summary> /// <summary>
/// Updates the shape of this tensor. /// Updates the shape of this tensor.
/// </summary> /// </summary>
public void set_shape(TensorShape shape)
public virtual void set_shape(TensorShape shape)
{ {
this.shape = shape.rank >= 0 ? shape.dims : null; this.shape = shape.rank >= 0 ? shape.dims : null;
} }
@@ -261,7 +261,6 @@ namespace Tensorflow
protected override void DisposeUnmanagedResources(IntPtr handle) protected override void DisposeUnmanagedResources(IntPtr handle)
{ {
c_api.TF_DeleteTensor(handle); c_api.TF_DeleteTensor(handle);

if (AllocationHandle == null) if (AllocationHandle == null)
return; return;




+ 12
- 3
src/TensorFlowNET.Core/Tensors/tensor_util.cs View File

@@ -148,9 +148,18 @@ namespace Tensorflow
// If shape is not given, get the shape from the numpy array. // If shape is not given, get the shape from the numpy array.
if (shape == null) if (shape == null)
{ {
shape = nparray.shape;
is_same_size = true;
shape_size = nparray.size;
if(numpy_dtype == TF_DataType.TF_STRING)
{
// scalar string
shape = new int[0];
shape_size = 0;
}
else
{
shape = nparray.shape;
is_same_size = true;
shape_size = nparray.size;
}
} }
else else
{ {


+ 1
- 1
src/TensorFlowNET.Core/Training/Optimizer.cs View File

@@ -207,7 +207,7 @@ namespace Tensorflow
{ {
apply_updates = state_ops.assign_add(global_step, apply_updates = state_ops.assign_add(global_step,
ops.convert_to_tensor(1, dtype: global_step.dtype), ops.convert_to_tensor(1, dtype: global_step.dtype),
name: name) as Operation;
name: name);
} }
}); });
} }


+ 2
- 2
src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs View File

@@ -73,7 +73,7 @@ namespace Tensorflow
// handle_deleter // handle_deleter
} }


public ITensorOrOperation assign<T>(T value, bool use_locking = false, string name = null, bool read_value = true)
public Tensor assign<T>(T value, bool use_locking = false, string name = null, bool read_value = true)
{ {
if(value.GetType() == typeof(Tensor)) if(value.GetType() == typeof(Tensor))
{ {
@@ -134,7 +134,7 @@ namespace Tensorflow
return array_ops.identity(value); return array_ops.identity(value);
}); });


public ITensorOrOperation assign_add<T>(T delta, bool use_locking = false, string name = null, bool read_value = true)
public Tensor assign_add<T>(T delta, bool use_locking = false, string name = null, bool read_value = true)
{ {
var assign_add_op = gen_resource_variable_ops.assign_add_variable_op(Handle, var assign_add_op = gen_resource_variable_ops.assign_add_variable_op(Handle,
ops.convert_to_tensor(delta, dtype: dtype), name: name); ops.convert_to_tensor(delta, dtype: dtype), name: name);


+ 2
- 2
src/TensorFlowNET.Core/Variables/IVariableV1.cs View File

@@ -47,8 +47,8 @@ namespace Tensorflow
public Graph Graph { get; } public Graph Graph { get; }
public TF_DataType dtype { get; } public TF_DataType dtype { get; }
public TensorShape shape { get; } public TensorShape shape { get; }
ITensorOrOperation assign_add<T>(T delta, bool use_locking = false, string name = null, bool read_value = true);
ITensorOrOperation assign<T>(T value, bool use_locking = false, string name = null, bool read_value = true);
Tensor assign_add<T>(T delta, bool use_locking = false, string name = null, bool read_value = true);
Tensor assign<T>(T value, bool use_locking = false, string name = null, bool read_value = true);
Tensor AsTensor(); Tensor AsTensor();
} }
} }

+ 2
- 2
src/TensorFlowNET.Core/Variables/RefVariable.cs View File

@@ -335,7 +335,7 @@ namespace Tensorflow
/// A `Tensor` that will hold the new value of this variable after /// A `Tensor` that will hold the new value of this variable after
/// the assignment has completed. /// the assignment has completed.
/// </returns> /// </returns>
public ITensorOrOperation assign<T>(T value, bool use_locking = false, string name = null, bool read_value = true)
public Tensor assign<T>(T value, bool use_locking = false, string name = null, bool read_value = true)
{ {
var assign = gen_state_ops.assign(_variable, value, use_locking: use_locking, name: name); var assign = gen_state_ops.assign(_variable, value, use_locking: use_locking, name: name);
if (read_value) if (read_value)
@@ -418,7 +418,7 @@ namespace Tensorflow
// name: A name for the operation(optional). // name: A name for the operation(optional).
// Returns: // Returns:
// A mutable `Tensor`. Has the same type as `ref`. // A mutable `Tensor`. Has the same type as `ref`.
public ITensorOrOperation assign_add<T>(T value, bool use_locking = false, string name = null, bool read_value = true)
public Tensor assign_add<T>(T value, bool use_locking = false, string name = null, bool read_value = true)
{ {
var variable = this; var variable = this;
var _op = tf.OpDefLib._apply_op_helper("AssignAdd", name: name, args: new { variable, value, use_locking }); var _op = tf.OpDefLib._apply_op_helper("AssignAdd", name: name, args: new { variable, value, use_locking });


+ 67
- 64
src/TensorFlowNET.Core/Variables/ResourceVariable.cs View File

@@ -88,80 +88,83 @@ namespace Tensorflow


if (trainable && !collections.Contains(tf.GraphKeys.TRAINABLE_VARIABLES)) if (trainable && !collections.Contains(tf.GraphKeys.TRAINABLE_VARIABLES))
collections.Add(tf.GraphKeys.TRAINABLE_VARIABLES); collections.Add(tf.GraphKeys.TRAINABLE_VARIABLES);

ops.init_scope();
_in_graph_mode = !tf.Context.executing_eagerly(); _in_graph_mode = !tf.Context.executing_eagerly();
tf_with(ops.name_scope(name, "Variable"), scope =>
tf_with(ops.init_scope2(), delegate
{ {
name = scope;
var handle_name = ops.name_from_scope_name(name);
string unique_id = "";
string shared_name = "";

if (_in_graph_mode)
{
shared_name = handle_name;
unique_id = shared_name;
}
else
var values = init_from_fn ? new object[0] : new object[] { initial_value };
tf_with(ops.name_scope(name, "Variable", values), scope =>
{ {
unique_id = $"{handle_name}_{ops.uid()}";
shared_name = tf.Context.shared_name();
}

var attr = new AttrValue();
attr.List = new AttrValue.Types.ListValue();
attr.List.S.Add(ByteString.CopyFromUtf8($"loc:@{handle_name}"));
tf_with(ops.name_scope("Initializer"), delegate
{
if (initial_value.GetType().GetInterface("IInitializer") != null)
initial_value = ops.convert_to_tensor((initial_value as IInitializer).Apply(new InitializerArgs(shape, dtype: dtype)));
name = scope;
var handle_name = ops.name_from_scope_name(name);
string unique_id = "";
string shared_name = "";

if (_in_graph_mode)
{
shared_name = handle_name;
unique_id = shared_name;
}
else else
{ {
var value = init_from_fn ? (initial_value as Func<Tensor>)() : initial_value;
initial_value = ops.convert_to_tensor(value,
name: "initial_value",
dtype: dtype);
unique_id = $"{handle_name}_{ops.uid()}";
shared_name = tf.Context.shared_name();
} }
});
_shape = shape ?? (initial_value as Tensor).TensorShape;
_initial_value = initial_value as Tensor;


var attr = new AttrValue();
attr.List = new AttrValue.Types.ListValue();
attr.List.S.Add(ByteString.CopyFromUtf8($"loc:@{handle_name}"));
tf_with(ops.name_scope("Initializer"), delegate
{
if (initial_value.GetType().GetInterface("IInitializer") != null)
initial_value = ops.convert_to_tensor((initial_value as IInitializer).Apply(new InitializerArgs(shape, dtype: dtype)));
else
{
var value = init_from_fn ? (initial_value as Func<Tensor>)() : initial_value;
initial_value = ops.convert_to_tensor(value,
name: "initial_value",
dtype: dtype);
}
});
_shape = shape ?? (initial_value as Tensor).TensorShape;
_initial_value = initial_value as Tensor;



if (_in_graph_mode)
{
handle = state_ops.variable_op_v2(_initial_value.shape, _initial_value.dtype.as_base_dtype(), name: name);
initializer_op = gen_state_ops.assign(handle, _initial_value, true).op;


if (_in_graph_mode)
{
handle = state_ops.variable_op_v2(_initial_value.shape, _initial_value.dtype.as_base_dtype(), name: name);
initializer_op = gen_state_ops.assign(handle, _initial_value, true).op;
ops.colocate_with(initializer_op);


ops.colocate_with(initializer_op);
_graph_element = gen_array_ops.identity(handle, name = "read");
ops.add_to_collections<IVariableV1>(collections, this);
_dtype = handle.dtype;
}
else
{
handle = resource_variable_ops.eager_safe_variable_handle(
initial_value: _initial_value,
shape: _shape,
shared_name: shared_name,
name: name,
graph_mode: _in_graph_mode);

gen_resource_variable_ops.assign_variable_op(handle, _initial_value);
is_initialized_op = null;
initializer_op = null;
_graph_element = null;
_dtype = _initial_value.dtype.as_base_dtype();
initial_value = _in_graph_mode ? initial_value : null;
}


_graph_element = gen_array_ops.identity(handle, name = "read");
ops.add_to_collections<IVariableV1>(collections, this);
_dtype = handle.dtype;
}
else
{
handle = resource_variable_ops.eager_safe_variable_handle(
initial_value: _initial_value,
shape: _shape,
shared_name: shared_name,
name: name,
graph_mode: _in_graph_mode);

gen_resource_variable_ops.assign_variable_op(handle, _initial_value);
is_initialized_op = null;
initializer_op = null;
_graph_element = null;
_dtype = _initial_value.dtype.as_base_dtype();
initial_value = _in_graph_mode ? initial_value : null;
}

base.__init__(trainable: trainable,
handle: handle,
name: name,
unique_id: unique_id,
handle_name: handle_name);
base.__init__(trainable: trainable,
handle: handle,
name: name,
unique_id: unique_id,
handle_name: handle_name);
});
}); });
} }




+ 1
- 1
src/TensorFlowNET.Core/Variables/state_ops.cs View File

@@ -95,7 +95,7 @@ namespace Tensorflow
// Returns: // Returns:
// Same as "ref". Returned as a convenience for operations that want // Same as "ref". Returned as a convenience for operations that want
// to use the new value after the variable has been updated. // to use the new value after the variable has been updated.
public static ITensorOrOperation assign_add<T>(IVariableV1 @ref,
public static Tensor assign_add<T>(IVariableV1 @ref,
T value,
bool use_locking = false,
string name = null)
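With assign_add now returning Tensor instead of ITensorOrOperation, call sites such as Optimizer no longer need an `as Operation` cast. An illustrative call (variable creation details elided):

    var global_step = tf.Variable(0);
    Tensor updated = state_ops.assign_add(global_step,
        ops.convert_to_tensor(1, dtype: global_step.dtype));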


+ 2
- 0
src/TensorFlowNET.Core/ops.cs View File

@@ -470,6 +470,8 @@ namespace Tensorflow
return varVal._TensorConversionFunction(dtype: dtype, name: name, as_ref: as_ref);
case TensorShape ts:
return constant_op.constant(ts.dims, dtype: dtype, name: name);
case string str:
return constant_op.constant(value, dtype: tf.@string, name: name);
case int[] dims:
return constant_op.constant(dims, dtype: dtype, name: name);
case object[] objects:
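The new `case string str` branch means a plain C# string should now convert to a scalar TF_STRING tensor (paired with the tensor_util change above that gives scalar strings an empty shape). Illustrative only:

    var t = ops.convert_to_tensor("hello");    // scalar string tensor via the new branch
    var s = tf.constant("TensorFlow.NET");     // scalar string constant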


+ 1
- 0
src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj View File

@@ -30,6 +30,7 @@
<ItemGroup> <ItemGroup>
<PackageReference Include="BenchmarkDotNet" Version="0.12.1" /> <PackageReference Include="BenchmarkDotNet" Version="0.12.1" />
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.3.0" /> <PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.3.0" />
<PackageReference Include="TensorFlow.NET" Version="0.20.0" />
</ItemGroup> </ItemGroup>


<ItemGroup> <ItemGroup>


+ 4
- 4
test/TensorFlowNET.UnitTest/Dataset/DatasetTest.cs View File

@@ -118,17 +118,17 @@ namespace TensorFlowNET.UnitTest.Dataset
} }
} }


[TestMethod, Ignore]
[TestMethod]
public void Map()
{
long value = 0;

var dataset = tf.data.Dataset.range(3);
var dataset1 = dataset.map(x => x);
var dataset = tf.data.Dataset.range(0, 2);
dataset = dataset.map(x => x + 10);

foreach (var item in dataset)
{
Assert.AreEqual(value, (long)item.Item1);
Assert.AreEqual(value + 10, (long)item.Item1);
value++;
}
} }


+ 1
- 2
test/TensorFlowNET.UnitTest/EagerModeTestBase.cs View File

@@ -2,10 +2,9 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Text; using System.Text;
using TensorFlowNET.UnitTest;
using static Tensorflow.Binding; using static Tensorflow.Binding;


namespace Tensorflow.UnitTest
namespace TensorFlowNET.UnitTest
{ {
public class EagerModeTestBase : PythonTest public class EagerModeTestBase : PythonTest
{ {


+ 70
- 3
test/TensorFlowNET.UnitTest/ImageTest.cs View File

@@ -1,7 +1,10 @@
using Microsoft.VisualStudio.TestTools.UnitTesting;
using FluentAssertions;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using NumSharp;
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.IO; using System.IO;
using System.Linq;
using System.Reflection; using System.Reflection;
using System.Text; using System.Text;
using Tensorflow; using Tensorflow;
@@ -14,7 +17,7 @@ namespace TensorFlowNET.UnitTest.Basics
/// Find more examples in https://www.programcreek.com/python/example/90444/tensorflow.read_file /// Find more examples in https://www.programcreek.com/python/example/90444/tensorflow.read_file
/// </summary> /// </summary>
[TestClass] [TestClass]
public class ImageTest
public class ImageTest : GraphModeTestBase
{ {
string imgPath = "shasta-daisy.jpg"; string imgPath = "shasta-daisy.jpg";
Tensor contents; Tensor contents;
@@ -26,12 +29,76 @@ namespace TensorFlowNET.UnitTest.Basics
contents = tf.io.read_file(imgPath); contents = tf.io.read_file(imgPath);
} }


[Ignore]
[TestMethod] [TestMethod]
public void decode_image() public void decode_image()
{ {
var img = tf.image.decode_image(contents); var img = tf.image.decode_image(contents);
Assert.AreEqual(img.name, "decode_image/cond_jpeg/Merge:0"); Assert.AreEqual(img.name, "decode_image/cond_jpeg/Merge:0");
} }

[TestMethod]
public void resize_image()
{
tf.enable_eager_execution();
var image = tf.constant(new int[5, 5]
{
{1, 0, 0, 0, 0 },
{0, 1, 0, 0, 0 },
{0, 0, 1, 0, 0 },
{0, 0, 0, 1, 0 },
{0, 0, 0, 0, 1 }
});
image = image[tf.newaxis, tf.ellipsis, tf.newaxis];
image = tf.image.resize(image, (3, 5));
image = image[0, tf.ellipsis, 0];
Assert.IsTrue(Enumerable.SequenceEqual(new float[] { 0.6666667f, 0.3333333f, 0, 0, 0 },
image[0].ToArray<float>()));
Assert.IsTrue(Enumerable.SequenceEqual(new float[] { 0, 0, 1, 0, 0 },
image[1].ToArray<float>()));
Assert.IsTrue(Enumerable.SequenceEqual(new float[] { 0, 0, 0, 0.3333335f, 0.6666665f },
image[2].ToArray<float>()));
tf.compat.v1.disable_eager_execution();
}

[TestMethod]
public void TestCropAndResize()
{
var graph = tf.Graph().as_default();

// 3x3 'Image' with numbered coordinates
var input = np.array(0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f);
var image = tf.reshape(input, new int[] { 1, 3, 3, 1 });

// 4x4 'Image' with numbered coordinates
var input2 = np.array(0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f);
var image2 = tf.reshape(input2, new int[] { 1, 4, 4, 1 });
// create one box over the full image that flips it (y1 > y2)
var box = tf.reshape(np.array(1f, 0f, 0f, 1f), new int[] { 1, 4 });
var boxInd = tf.Variable(np.array(0));
// crop first 3x3 image to size 1x1
var cropSize1_1 = tf.Variable(np.array(1, 1));
// don't crop second 4x4 image
var cropSize2_2 = tf.Variable(np.array(4, 4));

var init = tf.global_variables_initializer();
using (Session sess = tf.Session())
{
sess.run(init);

var cropped = tf.image.crop_and_resize(image, box, boxInd, cropSize1_1);

var result = sess.run(cropped);
// check if cropping to the 1x1 center was successful
result.size.Should().Be(1);
result[0, 0, 0, 0].Should().Be(4f);

cropped = tf.image.crop_and_resize(image2, box, boxInd, cropSize2_2);
result = sess.run(cropped);
// check if flipped and no cropping occurred
result.size.Should().Be(16);
result[0, 0, 0, 0].Should().Be(12f);

}
}
} }
} }

+ 54
- 0
test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs View File

@@ -0,0 +1,54 @@
using Microsoft.VisualStudio.TestTools.UnitTesting;
using System;
using System.Collections.Generic;
using System.Linq.Expressions;
using System.Runtime.CompilerServices;
using System.Security.Cryptography.X509Certificates;
using System.Text;
using Tensorflow;
using static Tensorflow.Binding;

namespace TensorFlowNET.UnitTest.ManagedAPI
{
[TestClass]
public class ControlFlowApiTest
{
[TestMethod]
public void WhileLoopOneInputEagerMode()
{
tf.enable_eager_execution();

var i = tf.constant(2);
Func<Tensor, Tensor> c = (x) => tf.less(x, 10);
Func<Tensor, Tensor> b = (x) => tf.add(x, 1);
var r = tf.while_loop(c, b, i);
Assert.AreEqual(10, (int)r);
}

[TestMethod]
public void WhileLoopTwoInputsEagerMode()
{
tf.enable_eager_execution();

var i = tf.constant(2);
var j = tf.constant(3);
Func<Tensor[], Tensor> c = (x) => tf.less(x[0] + x[1], 10);
Func<Tensor[], Tensor[]> b = (x) => new[] { tf.add(x[0], 1), tf.add(x[1], 1) };
var r = tf.while_loop(c, b, new[] { i, j });
Assert.AreEqual(5, (int)r[0]);
Assert.AreEqual(6, (int)r[1]);
}

[TestMethod, Ignore]
public void WhileLoopGraphMode()
{
tf.compat.v1.disable_eager_execution();

var i = tf.constant(2);
Func<Tensor, Tensor> c = (x) => tf.less(x, 10);
Func<Tensor, Tensor> b = (x) => tf.add(x, 1);
var r = tf.while_loop(c, b, i);
Assert.AreEqual(10, (int)r);
}
}
}

+ 70
- 0
test/TensorFlowNET.UnitTest/ManagedAPI/FunctionApiTest.cs View File

@@ -0,0 +1,70 @@
using Microsoft.VisualStudio.TestTools.UnitTesting;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Tensorflow;
using Tensorflow.Graphs;
using static Tensorflow.Binding;

namespace TensorFlowNET.UnitTest.ManagedAPI
{
[TestClass]
public class FunctionApiTest : TFNetApiTest
{
[TestMethod]
public void TwoInputs_OneOutput()
{
var func = tf.autograph.to_graph(Add);
var a = tf.constant(1);
var b = tf.constant(2);
var output = func(a, b);
Assert.AreEqual(3, (int)output);
}

Tensor Add(Tensor a, Tensor b)
{
return a + b;
}

[TestMethod]
public void TwoInputs_OneOutput_Condition()
{
var func = tf.autograph.to_graph(Condition);
var a = tf.constant(3);
var b = tf.constant(2);
var output = func(a, b);
Assert.AreEqual(2, (int)output);
}

Tensor Condition(Tensor a, Tensor b)
{
return tf.cond(a < b, a, b);
}

[TestMethod]
public void TwoInputs_OneOutput_Lambda()
{
var func = tf.autograph.to_graph((x, y) => x * y);
var output = func(tf.constant(3), tf.constant(2));
Assert.AreEqual(6, (int)output);
}

[TestMethod]
public void TwoInputs_OneOutput_WhileLoop()
{
var func = tf.autograph.to_graph((x, y) => x * y);
var output = func(tf.constant(3), tf.constant(2));
Assert.AreEqual(6, (int)output);
}

Tensor WhileLoop()
{
var i = tf.constant(0);
Func<Tensor, Tensor> c = i => tf.less(i, 10);
Func<Tensor, Tensor> b = i => tf.add(i, 1);
//var r = tf.(c, b, [i])
throw new NotImplementedException("");
}
}
}

+ 456
- 9
test/TensorFlowNET.UnitTest/NativeAPI/CApiFunctionTest.cs View File

@@ -2,6 +2,7 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Linq; using System.Linq;
using System.Reflection;
using System.Runtime.InteropServices; using System.Runtime.InteropServices;
using Tensorflow; using Tensorflow;
using Tensorflow.Functions; using Tensorflow.Functions;
@@ -41,7 +42,351 @@ namespace TensorFlowNET.UnitTest.NativeAPI
// Use, run, and verify // Use, run, and verify
var func_op = Use(new Operation[0]); var func_op = Use(new Operation[0]);
Run(new KeyValuePair<Operation, Tensor>[0], func_op, 10); Run(new KeyValuePair<Operation, Tensor>[0], func_op, 10);
VerifyFDef(new[] { "scalar10_0" });
VerifyFDef(new[] { "scalar10_0" },
new List<IOSpec>(),
new List<IOSpec> { new IOSpec("scalar10", DataType.DtInt32) },
new List<EdgeSpec> { new EdgeSpec("scalar10_0:output:0", "scalar10") },
new List<EdgeSpec>());
}

[TestMethod]
public void OneOp_OneInput_OneOutput()
{
// Define
var feed = Placeholder(func_graph_, s_);
var neg = Neg(feed, func_graph_, s_);
Define(-1, new Operation[0], new[] { feed }, new[] { neg }, new string[0]);

// Use, run, and verify
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) }, func_op, -3);
VerifyFDef(new string[] { "neg_0" },
new List<IOSpec> { new IOSpec("feed", DataType.DtInt32) },
new List<IOSpec> { new IOSpec("neg", DataType.DtInt32) },
new List<EdgeSpec> { new EdgeSpec("feed", "neg_0:0"), new EdgeSpec("neg_0:y:0", "neg") },
new List<EdgeSpec>());
}

[TestMethod]
public void OneOutput_OutputNames()
{
// Define
var feed = Placeholder(func_graph_, s_);
var neg = Neg(feed, func_graph_, s_);
Define(-1,
new Operation[0],
new[] { feed },
new[] { neg },
new[] { "negated_num" });

// Use, run, and verify
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) }, func_op, -3);
VerifyFDef(new string[] { "neg" },
new List<IOSpec> { new IOSpec("feed", DataType.DtInt32) },
new List<IOSpec> { new IOSpec("negated_num", DataType.DtInt32) },
new List<EdgeSpec> { new EdgeSpec("feed", "neg:0"), new EdgeSpec("neg:y:0", "negated_num") },
new List<EdgeSpec>());
}

[TestMethod]
public void OutputNames_SameNameAsInput()
{
// Define
var feed = Placeholder(func_graph_, s_, "negation");
var neg = Neg(feed, func_graph_, s_, "neg");
Define(-1,
new Operation[0],
new[] { feed },
new[] { neg },
new[] { "negation" });

// Use, run, and verify
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) }, func_op, -3);
VerifyFDef(new string[] { "neg" },
new List<IOSpec> { new IOSpec("negation_0", DataType.DtInt32) },
new List<IOSpec> { new IOSpec("negation", DataType.DtInt32) },
new List<EdgeSpec> { new EdgeSpec("negation_0", "neg:0"), new EdgeSpec("neg:y:0", "negation") },
new List<EdgeSpec>());
}

[TestMethod]
public void ZeroOps_Identity()
{
// Define
var feed = Placeholder(func_graph_, s_);
Define(-1,
new Operation[0],
new[] { feed },
new[] { feed },
new string[0]);

// Use, run, and verify
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) }, func_op, 3);
VerifyFDef(new string[0],
new List<IOSpec> { new IOSpec("feed_0", DataType.DtInt32) },
new List<IOSpec> { new IOSpec("feed", DataType.DtInt32) },
new List<EdgeSpec> { new EdgeSpec("feed_0", "feed") },
new List<EdgeSpec>());
}

[TestMethod]
public void ZeroOps_Permutation()
{
// Define
var feed1 = Placeholder(func_graph_, s_, "feed1");
var feed2 = Placeholder(func_graph_, s_, "feed2");
Define(-1,
null,
new[] { feed1, feed2 },
new[] { feed2, feed1 },
null);

// Use, run, and verify
var two = ScalarConst(2, host_graph_, s_);
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { two, func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) },
new[] { new TF_Output(func_op, 0), new TF_Output(func_op, 1) },
new[] { 3, 2 });
VerifyFDef(new string[0],
new List<IOSpec> { new IOSpec("feed1_0"), new IOSpec("feed2_0") },
new List<IOSpec> { new IOSpec("feed2"), new IOSpec("feed1") },
new List<EdgeSpec> { new EdgeSpec("feed1_0", "feed1"), new EdgeSpec("feed2_0", "feed2") },
new List<EdgeSpec>());
}

[TestMethod]
public void ZeroOps_Permutation_OutputNames()
{
// Define
var feed1 = Placeholder(func_graph_, s_, "feed1");
var feed2 = Placeholder(func_graph_, s_, "feed2");
Define(-1,
null,
new[] { feed1, feed2 },
new[] { feed2, feed1 },
new[] { "first", "second" });

// Use, run, and verify
var two = ScalarConst(2, host_graph_, s_);
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { two, func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) },
new[] { new TF_Output(func_op, 0), new TF_Output(func_op, 1) },
new[] { 3, 2 });
VerifyFDef(new string[0],
new List<IOSpec> { new IOSpec("feed1"), new IOSpec("feed2") },
new List<IOSpec> { new IOSpec("first"), new IOSpec("second") },
new List<EdgeSpec> { new EdgeSpec("feed1", "second"), new EdgeSpec("feed2", "first") },
new List<EdgeSpec>());
}

[TestMethod]
public void OneOp_TwoInputs_OneOutput()
{
// Define
var feed1 = Placeholder(func_graph_, s_, "feed1");
var feed2 = Placeholder(func_graph_, s_, "feed2");
var add = Add(feed1, feed2, func_graph_, s_);
Define(-1,
null,
new[] { feed1, feed2 },
new[] { add },
null);

// Use, run, and verify
var two = ScalarConst(2, host_graph_, s_);
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { two, func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) },
func_op,
2 + 3);
VerifyFDef(new string[] { "add_0" },
new List<IOSpec> { new IOSpec("feed1"), new IOSpec("feed2") },
new List<IOSpec> { new IOSpec("add") },
new List<EdgeSpec>
{
new EdgeSpec("feed1", "add_0:0"),
new EdgeSpec("feed2", "add_0:1"),
new EdgeSpec("add_0:sum:0", "add")
},
new List<EdgeSpec>());
}

[TestMethod]
public void OneOp_TwoInputs_ZeroOutputs()
{
// Define
var feed1 = Placeholder(func_graph_, s_, "feed1");
var feed2 = Placeholder(func_graph_, s_, "feed2");
var add = Add(feed1, feed2, func_graph_, s_);
Define(-1,
null,
new[] { feed1, feed2 },
new Operation[0],
null);

// Use, run, and verify
var two = ScalarConst(2, host_graph_, s_);
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { two, func_feed });
VerifyFDef(new string[] { "add" },
new List<IOSpec> { new IOSpec("feed1"), new IOSpec("feed2") },
new List<IOSpec>(),
new List<EdgeSpec>
{
new EdgeSpec("feed1", "add:0"),
new EdgeSpec("feed2", "add:1")
},
new List<EdgeSpec>());
}

[TestMethod]
public void TwoOps_ThreeInputs_OneOutput()
{
// Define
var feed1 = Placeholder(func_graph_, s_, "feed1");
var feed2 = Placeholder(func_graph_, s_, "feed2");
var feed3 = Placeholder(func_graph_, s_, "feed3");
var add1 = Add(feed1, feed2, func_graph_, s_, "add1");
var add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1,
null,
new[] { feed1, feed2, feed3 },
new[] { add2 },
null);

// Use, run, and verify
var two = ScalarConst(2, host_graph_, s_, "two");
var ten = ScalarConst(10, host_graph_, s_, "ten");
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { two, ten, func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) },
func_op,
2 + 10 + 3);
VerifyFDef(new string[] { "add1", "add2_0" },
new List<IOSpec> { new IOSpec("feed1"), new IOSpec("feed2"), new IOSpec("feed3") },
new List<IOSpec> { new IOSpec("add2") },
new List<EdgeSpec>
{
new EdgeSpec("feed1", "add1:0"),
new EdgeSpec("feed2", "add1:1"),
new EdgeSpec("add1:sum:0", "add2_0:0"),
new EdgeSpec("feed3", "add2_0:1"),
new EdgeSpec("add2_0:sum:0", "add2"),
},
new List<EdgeSpec>());
}

[TestMethod]
public void OneOp_TwoInputs_TwoDuplicateOutputs()
{
// Define
var feed1 = Placeholder(func_graph_, s_, "feed1");
var feed2 = Placeholder(func_graph_, s_, "feed2");
var add = Add(feed1, feed2, func_graph_, s_);
Define(-1,
null,
new[] { feed1, feed2 },
new[] { add, add },
null);

// Use, run, and verify
var two = ScalarConst(2, host_graph_, s_);
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { two, func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) },
new[] { new TF_Output(func_op, 0), new TF_Output(func_op, 1) },
new[] { 5, 5 });
VerifyFDef(new string[] { "add_1" },
new List<IOSpec> { new IOSpec("feed1"), new IOSpec("feed2") },
new List<IOSpec> { new IOSpec("add"), new IOSpec("add_0") },
new List<EdgeSpec>
{
new EdgeSpec("feed1", "add_1:0"),
new EdgeSpec("feed2", "add_1:1"),
new EdgeSpec("add_1:sum:0", "add"),
new EdgeSpec("add_1:sum:0", "add_0")
},
new List<EdgeSpec>());
}

[TestMethod]
public void TwoDuplicateOutputs_OutputNames()
{
// Define
var feed1 = Placeholder(func_graph_, s_, "feed1");
var feed2 = Placeholder(func_graph_, s_, "feed2");
var add = Add(feed1, feed2, func_graph_, s_);
Define(-1,
null,
new[] { feed1, feed2 },
new[] { add, add },
new[] { "out1", "out2" });

// Use, run, and verify
var two = ScalarConst(2, host_graph_, s_);
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { two, func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) },
new[] { new TF_Output(func_op, 0), new TF_Output(func_op, 1) },
new[] { 5, 5 });
VerifyFDef(new string[] { "add" },
new List<IOSpec> { new IOSpec("feed1"), new IOSpec("feed2") },
new List<IOSpec> { new IOSpec("out1"), new IOSpec("out2") },
new List<EdgeSpec>
{
new EdgeSpec("feed1", "add:0"),
new EdgeSpec("feed2", "add:1"),
new EdgeSpec("add:sum:0", "out1"),
new EdgeSpec("add:sum:0", "out2")
},
new List<EdgeSpec>());
}

[TestMethod]
public void TwoOps_ThreeInputs_TwoOutputs()
{
// Define
var feed1 = Placeholder(func_graph_, s_, "feed1");
var feed2 = Placeholder(func_graph_, s_, "feed2");
var feed3 = Placeholder(func_graph_, s_, "feed3");
var add1 = Add(feed1, feed2, func_graph_, s_, "add1");
var add2 = Add(add1, feed3, func_graph_, s_, "add2");
Define(-1,
null,
new[] { feed1, feed2, feed3 },
new[] { add1, add2 },
null);

// Use, run, and verify
var two = ScalarConst(2, host_graph_, s_, "two");
var ten = ScalarConst(10, host_graph_, s_, "ten");
var func_feed = Placeholder(host_graph_, s_);
var func_op = Use(new[] { two, ten, func_feed });
Run(new[] { new KeyValuePair<Operation, Tensor>(func_feed, Int32Tensor(3)) },
new[] { new TF_Output(func_op, 0), new TF_Output(func_op, 1) },
new[] { 12, 15 });
VerifyFDef(new string[] { "add1_0", "add2_0" },
new List<IOSpec> { new IOSpec("feed1"), new IOSpec("feed2"), new IOSpec("feed3") },
new List<IOSpec> { new IOSpec("add1"), new IOSpec("add2") },
new List<EdgeSpec>
{
new EdgeSpec("feed1", "add1_0:0"),
new EdgeSpec("feed2", "add1_0:1"),
new EdgeSpec("add1_0:sum:0", "add2_0:0"),
new EdgeSpec("feed3", "add2_0:1"),
new EdgeSpec("add1_0:sum:0", "add1"),
new EdgeSpec("add2_0:sum:0", "add2")
},
new List<EdgeSpec>());
}


void Define(int num_opers, Operation[] opers,
@@ -56,15 +401,12 @@ namespace TensorFlowNET.UnitTest.NativeAPI
TF_Output[] inputs, TF_Output[] outputs,
string[] output_names, bool expect_failure = false)
{
IntPtr output_names_ptr = IntPtr.Zero;

func_ = c_api.TF_GraphToFunction(func_graph_, func_name_, false,
num_opers, num_opers == -1 ? new IntPtr[0] : opers.Select(x => (IntPtr)x).ToArray(),
num_opers, num_opers == -1 ? null : opers.Select(x => (IntPtr)x).ToArray(),
inputs.Length, inputs.ToArray(),
outputs.Length, outputs.ToArray(),
output_names_ptr, IntPtr.Zero, null, s_.Handle);

// delete output_names_ptr
output_names == null || output_names.Length == 0 ? null : output_names,
IntPtr.Zero, null, s_.Handle);


if (expect_failure)
{
@@ -73,6 +415,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI
}


ASSERT_EQ(TF_OK, s_.Code, s_.Message);
ASSERT_NE(func_, IntPtr.Zero);
ASSERT_EQ(func_name_, c_api.StringPiece(c_api.TF_FunctionName(func_)));
c_api.TF_GraphCopyFunction(host_graph_, func_, IntPtr.Zero, s_.Handle);
ASSERT_EQ(TF_OK, s_.Code, s_.Message);
@@ -113,7 +456,7 @@ namespace TensorFlowNET.UnitTest.NativeAPI
for (int i = 0; i < expected_results.Length; ++i)
{
var output = csession.output_tensor(i);
ASSERT_NE(output, IntPtr.Zero);
ASSERT_TRUE(output != IntPtr.Zero);
EXPECT_EQ(TF_DataType.TF_INT32, c_api.TF_TensorType(output));
EXPECT_EQ(0, c_api.TF_NumDims(output));
ASSERT_EQ(sizeof(int), (int)c_api.TF_TensorByteSize(output));
@@ -122,21 +465,125 @@ namespace TensorFlowNET.UnitTest.NativeAPI
}
}


void VerifyFDef(string[] nodes)
void VerifyFDef(string[] nodes, List<IOSpec> inputs, List<IOSpec> outputs,
List<EdgeSpec> e_edges, List<EdgeSpec> c_edges,
bool is_exact_edges = true)
{
var fdef = GetFunctionDef(func_);
EXPECT_NE(fdef, IntPtr.Zero);
VerifyFDefNodes(fdef, nodes);
VerifyFDefInputs(fdef, inputs);
VerifyFDefOutputs(fdef, outputs);
VerifyFDefEdges(fdef, e_edges, c_edges, is_exact_edges);
}


void VerifyFDefNodes(FunctionDef fdef, string[] nodes)
{
ASSERT_EQ(nodes.Length, fdef.NodeDef.Count);
foreach(var node in fdef.NodeDef)
{
ASSERT_TRUE(nodes.Contains(node.Name), $"Got unexpected node: {node.Name} in fdef: {fdef}");
}
}

void VerifyFDefInputs(FunctionDef fdef, List<IOSpec> inputs)
{
var signature = fdef.Signature;
ASSERT_EQ(inputs.Count, signature.InputArg.Count);
for (int i = 0; i < inputs.Count; ++i)
{
var arg = signature.InputArg[i];
var input = inputs[i];
if (input.Value != DataType.DtInvalid)
ASSERT_EQ(arg.Type, input.Value, $"");
ASSERT_EQ(arg.Name, input.Key, $"Got unexpected name for input {i}. fdef: {fdef}");
}
}

void VerifyFDefOutputs(FunctionDef fdef, List<IOSpec> outputs)
{
var signature = fdef.Signature;
ASSERT_EQ(outputs.Count, signature.OutputArg.Count);
for (int i = 0; i < outputs.Count; ++i)
{
var arg = signature.OutputArg[i];
var output = outputs[i];
if (output.Value != DataType.DtInvalid)
ASSERT_EQ(arg.Type, output.Value, $"");
ASSERT_EQ(arg.Name, output.Key, $"Got unexpected name for input {i}. fdef: {fdef}");
}
}

void VerifyFDefEdges(FunctionDef fdef, List<EdgeSpec> e_edges, List<EdgeSpec> c_edges, bool is_exact_edges = true)
{
// Build a set of edges from fdef
var a_edges = new List<EdgeSpec>(); // actual edges
// Get edges from inputs to body nodes and between body nodes
foreach(var node in fdef.NodeDef)
{
for (int i = 0; i < node.Input.Count; ++i)
{
var input = node.Input[i];
a_edges.Add(new EdgeSpec(input, $"{node.Name}:{i}"));
}
}
// Get edges from body nodes to outputs and from inputs to outputs
foreach(var arg in fdef.Signature.OutputArg)
{
var iter = fdef.Ret.FirstOrDefault(x => x.Key == arg.Name);
if(iter.Key != null)
{
a_edges.Add(new EdgeSpec(iter.Value, arg.Name));
}
else
{
a_edges.Add(new EdgeSpec(arg.Name, arg.Name));
}
}
// Verify edges
foreach(var edge in e_edges)
{
ASSERT_TRUE(a_edges.Contains(edge));
}
foreach (var edge in c_edges)
{
ASSERT_TRUE(a_edges.Contains(edge));
}
// If caller specified all edges, check that we have seen all
if (is_exact_edges)
{
ASSERT_EQ(e_edges.Count + c_edges.Count, a_edges.Count,
$"Expected edges: {e_edges}, Expected Control edges: {c_edges}, Actual edges: {a_edges}");
}
}


public void Dispose()
{
}

public struct IOSpec
{
KeyValuePair<string, DataType> pair;
public string Key => pair.Key;
public DataType Value => pair.Value;

public IOSpec(string key, DataType value = DataType.DtInvalid)
{
pair = new KeyValuePair<string, DataType>(key, value);
}
}

public struct EdgeSpec
{
KeyValuePair<string, string> pair;
public string Key => pair.Key;
public string Value => pair.Value;

public EdgeSpec(string key, string value)
{
pair = new KeyValuePair<string, string>(key, value);
}
}
}
}
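
A side note on the IOSpec/EdgeSpec helpers above (a sketch, not part of the diff): VerifyFDefEdges relies on List<EdgeSpec>.Contains, which works because EdgeSpec is a struct wrapping a KeyValuePair<string, string>, so the default equality comparer falls back to field-by-field ValueType.Equals on the wrapped pair:

var expected = new EdgeSpec("feed1", "add:0");
var actual = new EdgeSpec("feed1", "add:0");
// Distinct instances compare equal because struct equality compares the wrapped pair's Key and Value.
Console.WriteLine(expected.Equals(actual)); // True
Console.WriteLine(new List<EdgeSpec> { actual }.Contains(expected)); // True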

+ 2
- 1
test/TensorFlowNET.UnitTest/NativeAPI/CSession.cs View File

@@ -90,7 +90,8 @@ namespace TensorFlowNET.UnitTest


s.Check();


output_values_[0] = output_values_ptr[0];
for (var i = 0; i < outputs_.Count; i++)
output_values_[i] = output_values_ptr[i];
}


public IntPtr output_tensor(int i)
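
For context on the one-line CSession change above (a sketch, not part of the diff): before the fix, Run() copied only output_values_ptr[0] back into output_values_, so a multi-fetch test such as ZeroOps_Permutation earlier in this diff would get a valid handle from output_tensor(0) only. Assuming the existing CSession fields, the corrected copy-back is simply:

// Copy every fetched tensor handle, not just the first, so output_tensor(i)
// returns a valid handle for each requested output rather than IntPtr.Zero for i > 0.
for (var i = 0; i < outputs_.Count; i++)
    output_values_[i] = output_values_ptr[i];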


+ 7
- 0
test/TensorFlowNET.UnitTest/NativeAPI/c_test_util.cs View File

@@ -1,6 +1,8 @@
using System;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
using Tensorflow;
using Tensorflow.Functions;
using Tensorflow.Util;
using Buffer = Tensorflow.Buffer;


@@ -230,5 +232,10 @@ namespace TensorFlowNET.UnitTest
{
return Const(new Tensor(v), graph, s, name);
}

public static Tensor Int32Tensor(int v)
{
return new Tensor(v);
}
}
}

+ 6
- 1
test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj View File

@@ -41,9 +41,14 @@
<Compile Remove="KerasTests.cs" /> <Compile Remove="KerasTests.cs" />
</ItemGroup> </ItemGroup>


<ItemGroup>
<None Remove="ManagedAPI\nn_test.py" />
</ItemGroup>

<ItemGroup>
<PackageReference Include="FluentAssertions" Version="5.10.3" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.7.0" />
<PackageReference Include="MethodBoundaryAspect.Fody" Version="2.0.138" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.7.1" />
<PackageReference Include="MSTest.TestAdapter" Version="2.1.2" /> <PackageReference Include="MSTest.TestAdapter" Version="2.1.2" />
<PackageReference Include="MSTest.TestFramework" Version="2.1.2" /> <PackageReference Include="MSTest.TestFramework" Version="2.1.2" />
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.3.0" /> <PackageReference Include="SciSharp.TensorFlow.Redist" Version="2.3.0" />


+ 0
- 56
test/TensorFlowNET.UnitTest/img_test/TestCrop.cs View File

@@ -1,56 +0,0 @@
using FluentAssertions;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using NumSharp;
using Tensorflow;
using Tensorflow.UnitTest;
using static Tensorflow.Binding;

namespace TensorFlowNET.UnitTest.img_test
{
[TestClass]
public class TestCrop : GraphModeTestBase
{
[TestMethod]
public void TestCropAndResize()
{
var graph = tf.Graph().as_default();

// 3x3 'Image' with numbered coordinates
var input = np.array(0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f);
var image = tf.reshape(input, new int[] { 1, 3, 3, 1 });

// 4x4 'Image' with numbered coordinates
var input2 = np.array(0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f);
var image2 = tf.reshape(input2, new int[] { 1, 4, 4, 1 });
// create one box over the full image that flips it (y1 > y2)
var box = tf.reshape(np.array(1f, 0f, 0f, 1f), new int[] {1, 4});
var boxInd = tf.Variable(np.array(0));
// crop first 3x3 image to size 1x1
var cropSize1_1 = tf.Variable(np.array(1, 1));
// don't crop second 4x4 image
var cropSize2_2 = tf.Variable(np.array(4, 4));
var init = tf.global_variables_initializer();
using (Session sess = tf.Session())
{
sess.run(init);

var cropped = tf.image.crop_and_resize(image, box, boxInd, cropSize1_1);
var result = sess.run(cropped);
// check if cropping to the 1x1 center was successful
result.size.Should().Be(1);
result[0, 0, 0, 0].Should().Be(4f);

cropped = tf.image.crop_and_resize(image2, box, boxInd, cropSize2_2);
result = sess.run(cropped);
// check if flipped and no cropping occurred
result.size.Should().Be(16);
result[0, 0, 0, 0].Should().Be(12f);

}

}

}
}
