From 728699d6a6956f24870ca1bd9552fa654a233066 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Sat, 15 Feb 2020 08:34:00 -0600 Subject: [PATCH 01/11] upgrade libraries. --- src/TensorFlowNET.Core/TensorFlow.Binding.csproj | 2 +- src/TensorFlowNET.Hub/Tensorflow.Hub.csproj | 2 +- .../Tensorflow.Benchmark.csproj | 2 +- .../TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj | 8 ++++---- .../Tensorflow.Keras.UnitTest.csproj | 11 +++++++---- 5 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj index 22006661..09e2674a 100644 --- a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj +++ b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj @@ -62,7 +62,7 @@ https://tensorflownet.readthedocs.io - + diff --git a/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj b/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj index 640e1515..76965c1e 100644 --- a/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj +++ b/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj @@ -22,6 +22,6 @@ Change to NumSharp compact version. DEBUG;TRACE - + \ No newline at end of file diff --git a/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj b/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj index 9f28cffa..0152683b 100644 --- a/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj +++ b/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj @@ -20,7 +20,7 @@ - + diff --git a/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj b/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj index cff48481..e7ebc23e 100644 --- a/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj +++ b/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj @@ -28,10 +28,10 @@ - - - - + + + + diff --git a/test/Tensorflow.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj b/test/Tensorflow.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj index 41dbf2e4..b646a28b 100644 --- a/test/Tensorflow.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj +++ b/test/Tensorflow.Keras.UnitTest/Tensorflow.Keras.UnitTest.csproj @@ -7,10 +7,13 @@ - - - - + + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + From 6119bd13172f85021c4bf7db1c2115eddc4cc5fc Mon Sep 17 00:00:00 2001 From: Sam Harwell Date: Tue, 4 Feb 2020 17:17:34 -0800 Subject: [PATCH 02/11] Add initial .gitattributes --- .gitattributes | 1 + 1 file changed, 1 insertion(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..176a458f --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +* text=auto From a3795c880a6630a428c96cdba3d5000d4dcaa35e Mon Sep 17 00:00:00 2001 From: Sam Harwell Date: Tue, 4 Feb 2020 17:18:12 -0800 Subject: [PATCH 03/11] Renormalize all files --- TensorFlow.NET.sln | 266 +-- docs/assets/Logo.md | 4 +- .../Graphs/Graph.Control.cs | 46 +- .../Graphs/Graph.Operation.cs | 48 +- src/TensorFlowNET.Core/Graphs/Graph.cs | 250 +-- .../Graphs/_ControlDependenciesController.cs | 56 +- .../ControlFlows/ControlFlowState.cs | 648 +++--- .../Operations/ControlFlows/GradLoopState.cs | 670 +++--- .../Operations/NnOps/gen_nn_ops.cs | 384 ++-- .../Operations/Operation.Control.cs | 136 +- .../Operations/Operation.Input.cs | 218 +- .../Operations/Operation.cs | 64 +- .../Operations/control_flow_util.py.cs | 276 +-- .../Operations/gen_math_ops.cs | 1470 ++++++------ src/TensorFlowNET.Core/Sessions/FeedDict.cs | 16 +- src/TensorFlowNET.Core/Util/nest.py.cs | 1974 ++++++++--------- .../Variables/gen_state_ops.py.cs | 312 +-- 
src/TensorFlowNET.Core/Variables/state_ops.cs | 246 +- test/TensorFlowNET.UnitTest/PythonTest.cs | 668 +++--- .../control_flow_ops_test/CondTestCases.cs | 172 +- .../control_flow_ops_test/ShapeTestCase.cs | 46 +- .../control_flow_ops_test/SwitchTestCase.cs | 346 +-- .../WhileContextTestCase.cs | 104 +- .../nest_test/NestTest.cs | 1746 +++++++-------- .../nn_test/ZeroFractionTest.cs | 174 +- .../ops_test/ControlDependenciesTest.cs | 632 +++--- .../ops_test/CreateOpFromTfOperationTest.cs | 440 ++-- .../ops_test/GraphTest.cs | 390 ++-- 28 files changed, 5901 insertions(+), 5901 deletions(-) diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 9efeddaa..f113418a 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -1,133 +1,133 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 16 -VisualStudioVersion = 16.0.29102.190 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{6268B461-486A-460B-9B3C-86493CBBAAF7}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorFlowNET.Hub\Tensorflow.Hub.csproj", "{95B077C1-E21B-486F-8BDD-1C902FE687AB}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Debug|x64 = Debug|x64 - Debug-Minimal|Any CPU = Debug-Minimal|Any CPU - Debug-Minimal|x64 = Debug-Minimal|x64 - Publish|Any CPU = Publish|Any CPU - Publish|x64 = Publish|x64 - Release|Any CPU = Release|Any CPU - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU - 
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|Any CPU - {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU - {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU - {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU - 
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU - {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU - {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.Build.0 = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.ActiveCfg = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.Build.0 = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.Build.0 = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.ActiveCfg = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.Build.0 = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.ActiveCfg = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.Build.0 = Debug|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.ActiveCfg = Release|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.Build.0 = Release|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.ActiveCfg = Release|Any CPU - {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.Build.0 = Release|Any CPU - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} - EndGlobalSection -EndGlobal + +Microsoft Visual Studio 
Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29102.190 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{6268B461-486A-460B-9B3C-86493CBBAAF7}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorFlowNET.Hub\Tensorflow.Hub.csproj", "{95B077C1-E21B-486F-8BDD-1C902FE687AB}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug-Minimal|Any CPU = Debug-Minimal|Any CPU + Debug-Minimal|x64 = Debug-Minimal|x64 + Publish|Any CPU = Publish|Any CPU + Publish|x64 = Publish|x64 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|Any CPU + {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = 
Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU + {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU + {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU + {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU + 
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU + {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.ActiveCfg = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.Build.0 = Debug|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.Build.0 = Release|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.ActiveCfg = Release|Any CPU + {95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} + EndGlobalSection +EndGlobal diff --git a/docs/assets/Logo.md b/docs/assets/Logo.md index dad45dca..21e7858a 100644 --- a/docs/assets/Logo.md +++ b/docs/assets/Logo.md @@ -1,3 +1,3 @@ -TensorFlow.NET logo (c) 2019 by Meinrad Recheis. - +TensorFlow.NET logo (c) 2019 by Meinrad Recheis. + The logo is based on the original Tensorflow logo which is copyrighted by the respective creator. \ No newline at end of file diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Control.cs b/src/TensorFlowNET.Core/Graphs/Graph.Control.cs index c6a5dee0..81c13827 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.Control.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Control.cs @@ -1,17 +1,17 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ******************************************************************************/ using System.Collections.Generic; @@ -77,8 +77,8 @@ namespace Tensorflow /// /// Use with the `with` keyword to specify that all operations constructed /// within the context should have control dependencies on - /// `control_inputs`. - /// + /// `control_inputs`. + /// public _ControlDependenciesController control_dependencies(object[] control_inputs) { if (control_inputs == null) @@ -92,20 +92,20 @@ namespace Tensorflow // TODO: implement IndexedSlices //case IndexedSlices islice: // control_ops.Add(islice.op); - // break; + // break; case Tensor t: control_ops.Add(t.op); break; case Operation op: control_ops.Add(op); - break; + break; default: var t1 = _as_graph_element(c); if (t1 == null) throw new TypeError($"Control input must be Operation or Tensor:{c}"); control_ops.Add(t1.op); - break; - } + break; + } } return new _ControlDependenciesController(this, control_ops); } @@ -138,9 +138,9 @@ namespace Tensorflow _control_dependencies_stack.RemoveAt(_control_dependencies_stack.Count-1); } - /// - /// Record that the given op depends on all registered control dependencies. - /// + /// + /// Record that the given op depends on all registered control dependencies. + /// public void _record_op_seen_by_control_dependencies(Operation op) { foreach (var controller in _control_dependencies_stack) diff --git a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs index 75f46a59..a826d2f6 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.Operation.cs @@ -1,17 +1,17 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ******************************************************************************/ using System; @@ -38,8 +38,8 @@ namespace Tensorflow public OperationDescription NewOperation(string opType, string opName) { return c_api.TF_NewOperation(_handle, opType, opName); - } - + } + public Operation[] ReturnOperations(IntPtr results) { TF_Operation return_oper_handle = new TF_Operation(); @@ -89,14 +89,14 @@ namespace Tensorflow public ITensorOrOperation[] get_operations() { return _nodes_by_name.Values.ToArray(); - } - + } + /// /// Returns the `Operation` with the given `name`. /// - /// This method may be called concurrently from multiple threads. - /// - /// The name of the `Operation` to return. + /// This method may be called concurrently from multiple threads. + /// + /// The name of the `Operation` to return. public Operation get_operation_by_name(string name) => as_graph_element(name, allow_tensor: false, allow_operation: true) as Operation; @@ -109,8 +109,8 @@ namespace Tensorflow { var op_name = Marshal.PtrToStringAnsi(c_api.TF_OperationName(tf_oper)); return _get_operation_by_name_unsafe(op_name); - } - + } + /// /// Creates an `Operation` in this graph from the supplied TF_Operation. /// @@ -125,7 +125,7 @@ namespace Tensorflow /// /// a wrapped TF_Operation /// (Optional.) If True, device functions will be executed - /// to compute the device property of the Operation. + /// to compute the device property of the Operation. /// An `Operation` object. public Operation _create_op_from_tf_operation(IntPtr c_op, bool compute_device = true) { diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs index 48420d18..e53aa02e 100644 --- a/src/TensorFlowNET.Core/Graphs/Graph.cs +++ b/src/TensorFlowNET.Core/Graphs/Graph.cs @@ -1,21 +1,21 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ******************************************************************************/ using System; -using System.Collections; +using System.Collections; using System.Collections.Generic; using System.Linq; using System.Runtime.InteropServices; @@ -75,7 +75,7 @@ namespace Tensorflow /// then create a TensorFlow session to run parts of the graph across a set of local and remote devices. /// /// https://www.tensorflow.org/guide/graphs
    /// https://www.tensorflow.org/api_docs/python/tf/Graph
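    ///
    /// A hedged usage sketch (illustrative only; the op name "c" and the
    /// session overload are assumptions, not part of this patch):
    /// <code>
    /// var g = new Graph().as_default();       // make g the default graph
    /// var c = tf.constant(3, name: "c");      // op "c" is recorded in g
    /// var op = g.get_operation_by_name("c");  // look the op back up
    /// using (var sess = tf.Session(g))
    ///     sess.run(c);                        // evaluate c inside g
    /// </code>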
- public partial class Graph : DisposableObject + public partial class Graph : DisposableObject #if !SERIALIZABLE , IEnumerable #endif @@ -105,18 +105,18 @@ namespace Tensorflow /// private Dictionary _collections = new Dictionary(); - public bool building_function; - - int _seed; - public int seed - { - get => _seed; - set - { - _seed = value; - } - } - + public bool building_function; + + int _seed; + public int seed + { + get => _seed; + set + { + _seed = value; + } + } + public Graph() { _handle = c_api.TF_NewGraph(); @@ -133,20 +133,20 @@ namespace Tensorflow _nodes_by_name = new Dictionary(); _names_in_use = new Dictionary(); _graph_key = $"grap-key-{ops.uid()}/"; - } - + } + public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true) { return _as_graph_element_locked(obj, allow_tensor, allow_operation); } - /// - /// Returns a context manager that makes this `Graph` the default graph. - /// + /// + /// Returns a context manager that makes this `Graph` the default graph. + /// /// - public Graph as_default() - { - return ops.set_default_graph(this); + public Graph as_default() + { + return ops.set_default_graph(this); } private Tensor _as_graph_element(object obj) @@ -155,8 +155,8 @@ namespace Tensorflow return var._as_graph_element(); return null; - } - + } + private ITensorOrOperation _as_graph_element_locked(object obj, bool allow_tensor = true, bool allow_operation = true) { string types_str = ""; @@ -259,8 +259,8 @@ namespace Tensorflow throw new RuntimeError("Graph is finalized and cannot be modified."); } - public Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes, - TF_DataType[] input_types = null, string name = null, + public Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes, + TF_DataType[] input_types = null, string name = null, Dictionary attrs = null, OpDef op_def = null) { if (inputs == null) @@ -272,12 +272,12 @@ namespace Tensorflow // If a names ends with a '/' it is a "name scope" and we use it as-is, // after removing the trailing '/'. name = name.EndsWith("/") ? ops.name_from_scope_name(name) : unique_name(name); - var node_def = ops._NodeDef(op_type, name, device: "", attrs: attrs); + var node_def = ops._NodeDef(op_type, name, device: "", attrs: attrs); - var input_ops = inputs.Select(x => x.op).ToArray(); + var input_ops = inputs.Select(x => x.op).ToArray(); var control_inputs = _control_dependencies_for_inputs(input_ops); - var op = new Operation(node_def, + var op = new Operation(node_def, this, inputs: inputs, output_types: dtypes, @@ -297,9 +297,9 @@ namespace Tensorflow return op; } - public void device(string device_name) - { - throw new NotImplementedException(""); + public void device(string device_name) + { + throw new NotImplementedException(""); } private void _create_op_helper(Operation op, bool compute_device = true) @@ -353,8 +353,8 @@ namespace Tensorflow _name_stack = new_stack; return String.IsNullOrEmpty(new_stack) ? "" : new_stack + "/"; - } - + } + /// /// Return a unique operation name for `name`. /// @@ -379,10 +379,10 @@ namespace Tensorflow /// A string to be passed to `create_op()` that will be used /// to name the operation being created. 
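    /// A hedged sketch of the dedup behaviour (return values inferred from
    /// the implementation below, not verified output):
    /// <code>
    /// var g = new Graph();
    /// g.unique_name("op");                       // "op"
    /// g.unique_name("op");                       // "op_1"
    /// g.unique_name("op");                       // "op_2"
    /// g.unique_name("op", mark_as_used: false);  // previews "op_3" without reserving it
    /// </code>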
public string unique_name(string name, bool mark_as_used = true) - { - if (name.EndsWith("basic_r_n_n_cell")) - { - + { + if (name.EndsWith("basic_r_n_n_cell")) + { + } if (!String.IsNullOrEmpty(_name_stack)) name = _name_stack + "/" + name; @@ -411,7 +411,7 @@ namespace Tensorflow // Return the new name with the original capitalization of the given name. name = $"{name}_{i - 1}"; - } + } return name; } @@ -424,7 +424,7 @@ namespace Tensorflow unsafe { var tf_output_ptr = (TF_Output*)return_output_handle; - for (int i = 0; i < num_return_outputs; i++) + for (int i = 0; i < num_return_outputs; i++) return_outputs[i] = *(tf_output_ptr + i); return return_outputs; } @@ -444,25 +444,25 @@ namespace Tensorflow { List t = default; var collection = _collections.ContainsKey(name) ? _collections[name] : new List(); - switch (collection) - { - case List list: - t = list.Select(x => (T)(object)x).ToList(); - break; - case List list: - t = list.Select(x => (T)(object)x).ToList(); - break; - case List list: - t = list.Select(x => (T)(object)x).ToList(); - break; - case List list: - t = list.Select(x => (T)(object)x).ToList(); - break; - case List list: - t = list.Select(x => (T)(object)x).ToList(); - break; - default: - throw new NotImplementedException($"get_collection<{typeof(T).FullName}>"); + switch (collection) + { + case List list: + t = list.Select(x => (T)(object)x).ToList(); + break; + case List list: + t = list.Select(x => (T)(object)x).ToList(); + break; + case List list: + t = list.Select(x => (T)(object)x).ToList(); + break; + case List list: + t = list.Select(x => (T)(object)x).ToList(); + break; + case List list: + t = list.Select(x => (T)(object)x).ToList(); + break; + default: + throw new NotImplementedException($"get_collection<{typeof(T).FullName}>"); } return t; } @@ -482,22 +482,22 @@ namespace Tensorflow public void prevent_fetching(Operation op) { _unfetchable_ops.Add(op); - } - - protected override void DisposeManagedResources() - { - ops.default_graph_stack.remove(this); - } - - protected override void DisposeUnmanagedResources(IntPtr handle) - { - c_api.TF_DeleteGraph(handle); } - public Tensor get_tensor_by_tf_output(TF_Output tf_output) - { - var op = _get_operation_by_tf_operation(tf_output.oper); - return op.outputs[tf_output.index]; + protected override void DisposeManagedResources() + { + ops.default_graph_stack.remove(this); + } + + protected override void DisposeUnmanagedResources(IntPtr handle) + { + c_api.TF_DeleteGraph(handle); + } + + public Tensor get_tensor_by_tf_output(TF_Output tf_output) + { + var op = _get_operation_by_tf_operation(tf_output.oper); + return op.outputs[tf_output.index]; } /// @@ -510,48 +510,48 @@ namespace Tensorflow public Tensor get_tensor_by_name(string name) { return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false); - } - - public TensorShape GetTensorShape(TF_Output output) - { - var status = new Status(); - var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status); - status.Check(); - - if (ndim == -1) - return new TensorShape(); - - var dims = new long[ndim]; - c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status); - status.Check(); - - return new TensorShape(dims.Select(x => (int)x).ToArray()); - } - - string debugString = string.Empty; - public override string ToString() - { - return $"{graph_key}, ({_handle})"; - /*if (string.IsNullOrEmpty(debugString)) - { - int len = 0; - debugString = c_api.TF_GraphDebugString(_handle, out len); - } - - return debugString;*/ - } - + } + + 
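        // A hedged usage sketch for get_tensor_by_name above. Tensor names
        // follow "<op_name>:<output_index>"; the names here are assumptions:
        //
        //   var g = new Graph().as_default();
        //   var a = tf.constant(1f, name: "a");
        //   var t = g.get_tensor_by_name("a:0");  // first output of op "a"
        //   // t is typically the very Tensor instance returned by tf.constant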
public TensorShape GetTensorShape(TF_Output output) + { + var status = new Status(); + var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status); + status.Check(); + + if (ndim == -1) + return new TensorShape(); + + var dims = new long[ndim]; + c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status); + status.Check(); + + return new TensorShape(dims.Select(x => (int)x).ToArray()); + } + + string debugString = string.Empty; + public override string ToString() + { + return $"{graph_key}, ({_handle})"; + /*if (string.IsNullOrEmpty(debugString)) + { + int len = 0; + debugString = c_api.TF_GraphDebugString(_handle, out len); + } + + return debugString;*/ + } + #if !SERIALIZABLE - private IEnumerable GetEnumerable() + private IEnumerable GetEnumerable() => c_api_util.tf_operations(this); - IEnumerator IEnumerable.GetEnumerator() - => GetEnumerable().GetEnumerator(); - - IEnumerator IEnumerable.GetEnumerator() - => throw new NotImplementedException(); + IEnumerator IEnumerable.GetEnumerator() + => GetEnumerable().GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() + => throw new NotImplementedException(); #endif - + public static implicit operator IntPtr(Graph graph) { return graph._handle; diff --git a/src/TensorFlowNET.Core/Graphs/_ControlDependenciesController.cs b/src/TensorFlowNET.Core/Graphs/_ControlDependenciesController.cs index 63285bae..3472db29 100644 --- a/src/TensorFlowNET.Core/Graphs/_ControlDependenciesController.cs +++ b/src/TensorFlowNET.Core/Graphs/_ControlDependenciesController.cs @@ -1,17 +1,17 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ******************************************************************************/ using System.Collections.Generic; @@ -32,8 +32,8 @@ namespace Tensorflow private bool _new_stack; private ControlFlowContext _old_control_flow_context; - public ITensorOrOperation[] control_inputs => _control_inputs_val.ToArray(); - + public ITensorOrOperation[] control_inputs => _control_inputs_val.ToArray(); + /// /// Create a new `_ControlDependenciesController`. 
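    /// Typically obtained through Graph.control_dependencies rather than
    /// constructed directly. A hedged sketch (names are illustrative):
    /// <code>
    /// var g = new Graph().as_default();
    /// var a = tf.constant(1, name: "a");
    /// tf_with(g.control_dependencies(new object[] { a.op }), ctrl =>
    /// {
    ///     var b = tf.constant(2, name: "b");  // b.op gains a control input on a.op
    /// });
    /// </code>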
/// @@ -69,7 +69,7 @@ namespace Tensorflow _new_stack = false; } - _seen_nodes = new List(); + _seen_nodes = new List(); _old_stack = null; _old_control_flow_context = null; } @@ -113,16 +113,16 @@ namespace Tensorflow public void Dispose() { - } - - public void __init__() - { - - } - - public void __del__() - { - - } + } + + public void __init__() + { + + } + + public void __del__() + { + + } } } diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs index 9351cab4..d04eefe2 100644 --- a/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs +++ b/src/TensorFlowNET.Core/Operations/ControlFlows/ControlFlowState.cs @@ -1,324 +1,324 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Linq; -using System.Collections.Generic; -using util = Tensorflow.control_flow_util; -using static Tensorflow.Binding; - -namespace Tensorflow.Operations.ControlFlows -{ - /// - /// Maintain the mapping from the loops to their grad states. - /// - public class ControlFlowState - { - Dictionary _map; - //class ControlFlowState(object): - // """Maintain the mapping from the loops to their grad states.""" - - // def __init__(self): - // self._map = {} # maps forward loop context to GradLoopState - - // def GetGradState(self, op, before): - // """Return the grad state for this op if it's in a forward loop context.""" - // if before and util.IsLoopExit(op): - // forward_ctxt = op._get_control_flow_context() - // forward_ctxt = forward_ctxt.outer_context - // if forward_ctxt: - // forward_ctxt = forward_ctxt.GetWhileContext() - // else: - // forward_ctxt = _GetWhileContext(op) - // if forward_ctxt: - // return self._map.get(forward_ctxt) - // return None - - public ControlFlowState() - { - _map = new Dictionary(); - } - - /// - /// Return the grad state for this op if it's in a forward loop context. 
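    /// A condensed sketch of the lookup (mirrors the body below): exit ops
    /// resolve through their outer while context, everything else goes
    /// through util.GetWhileContext(op), then the state map is consulted:
    /// <code>
    /// return forward_ctxt != null ? _map.get(forward_ctxt) : null;
    /// </code>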
- /// - /// - /// - /// - public GradLoopState GetGradState(Operation op, bool before) - { - ControlFlowContext forward_ctxt = null; - if (before && util.IsLoopExit(op)) - { - forward_ctxt = op._get_control_flow_context(); - forward_ctxt = forward_ctxt.outer_context; - if (forward_ctxt != null) - forward_ctxt = forward_ctxt.GetWhileContext(); - } - else - forward_ctxt = util.GetWhileContext(op); - if (forward_ctxt != null) - return _map.get(forward_ctxt); - return null; - } - - public Tensor[] ProcessUnusedLoopExits(Dictionary pending_count, List to_ops_set) - { - var loop_exits = new List(); - foreach(var grad_state in _map.Values) - { - foreach(var y in grad_state.forward_loop_exits) - { - if(!pending_count.ContainsKey(y.op.name)) - { - grad_state.pending_exits_count -= 1; - if (!to_ops_set.Contains(y.op)) - grad_state.unused_exits.append(y); - if (grad_state.pending_exits_count == 0) - loop_exits.extend(grad_state.unused_exits); - } - } - - foreach(var y in grad_state.forward_context.loop_enters) - { - if (!pending_count.ContainsKey(y.op.name)) - pending_count[y.op.name] = 1; - } - } - - return loop_exits.ToArray(); - } - - public void EnterGradWhileContext(Operation op, bool before) - { - var grad_state = GetGradState(op, before); - if (grad_state != null) - grad_state.grad_context.Enter(); - } - - public void ExitGradWhileContext(Operation op, bool before) - { - var grad_state = GetGradState(op, before); - if (grad_state != null) - grad_state.grad_context.Exit(); - } - - // def AddWhileContext(self, op, between_op_list, between_ops): - // """Add the grad state for the while loop that op belongs to. - - // Note that op is an Exit, and this method must be called in - // the control flow context where gradients() is called. - - // Note that this method modifies `between_op_list` and `between_ops`. - // """ - // forward_ctxt = _GetWhileContext(op) - // grad_state = self._map.get(forward_ctxt) - // if grad_state is None: - // # This is a new while loop so create a grad state for it. - // outer_forward_ctxt = forward_ctxt.outer_context - // if outer_forward_ctxt: - // outer_forward_ctxt = outer_forward_ctxt.GetWhileContext() - // outer_grad_state = None - // if outer_forward_ctxt: - // outer_grad_state = self._map.get(outer_forward_ctxt) - // grad_state = GradLoopState(forward_ctxt, outer_grad_state) - // self._map[forward_ctxt] = grad_state - - // # We need to include all exits of a loop for backprop. - // for loop_exit in grad_state.forward_loop_exits: - // if loop_exit.op not in between_ops: - // between_ops.add(loop_exit.op) - // between_op_list.append(loop_exit.op) - public void AddWhileContext(Operation op, List between_op_list, List between_ops) - { - var forward_ctxt = op.GetWhileContext(); - var grad_state = _map.ContainsKey(forward_ctxt) ? _map[forward_ctxt] : null; - if(grad_state == null) - { - GradLoopState outer_grad_state = null; - var outer_forward_ctxt = forward_ctxt.outer_context; - if (outer_forward_ctxt != null) - outer_forward_ctxt = outer_forward_ctxt.GetWhileContext(); - if (outer_forward_ctxt != null) - outer_grad_state = _map[outer_forward_ctxt]; - grad_state = new GradLoopState(forward_ctxt, outer_grad_state); - _map[forward_ctxt] = grad_state; - - // We need to include all exits of a loop for backprop. 
- foreach (var loop_exit in grad_state.forward_loop_exits) - { - if(!between_ops.Contains(loop_exit.op)) - { - between_ops.add(loop_exit.op); - between_op_list.append(loop_exit.op); - } - } - } - } - - // def ZerosLikeForExit(self, val): - // """Create zeros_like gradient for a loop exit. - - // If the result of a loop variable is not used but is involved in - // computing the result of some needed loop variable, we create a - // zero-valued tensor that is fed as gradient for the Exit node of that - // loop variable. Note that val.op is an Exit, and this method must be - // called in the control flow context where gradients() is called. - - // Args: - // val: The output tensor of an Exit op. - - // Returns: - // A zero tensor of the same shape of val. - // """ - // val_shape = val.get_shape() - // forward_ctxt = val.op._get_control_flow_context() - // outer_forward_ctxt = forward_ctxt.outer_context - // if outer_forward_ctxt: - // outer_forward_ctxt = outer_forward_ctxt.GetWhileContext() - // outer_grad_state = None - // if outer_forward_ctxt: - // outer_grad_state = self._map.get(outer_forward_ctxt) - // if outer_grad_state: - // # This is a nested loop. - // if val_shape.is_fully_defined(): - // # If the shape is known statically, just create a zero tensor - // # with the right shape in the right context. - // outer_grad_state.grad_context.Enter() - // result = array_ops.zeros(val_shape.dims, val.dtype) - // outer_grad_state.grad_context.Exit() - // else: - // # Only the shape of value is needed for backprop. - // forward_ctxt.outer_context.Enter() - // shape = array_ops.shape_internal(val, optimize=False) - // forward_ctxt.outer_context.Exit() - // # Save the shape to a stack. - // history_shape = outer_grad_state.AddForwardAccumulator(shape) - // # Get the shape back from the stack. - // outer_grad_ctxt = outer_grad_state.grad_context - // outer_grad_ctxt.Enter() - // real_shape = outer_grad_state.AddBackpropAccumulatedValue( - // history_shape, shape) - // result = array_ops.zeros(real_shape, val.dtype) - // outer_grad_ctxt.Exit() - // else: - // # This is not a nested loop. - // if val_shape.is_fully_defined(): - // # If the shape is known statically, just create a zero tensor - // # with the right shape. - // result = array_ops.zeros(val_shape.dims, val.dtype) - // else: - // result = array_ops.zeros_like(val, optimize=False) - // return result - - public Tensor ZerosLike(Operation op, int index) - { - if (util.IsLoopSwitch(op)) - return null; - if (op.graph.building_function) - return array_ops.zeros_like(op.outputs[index]); - var dead_branch = util.IsSwitch(op); - var forward_ctxt = util.GetWhileContext(op); - var grad_state = _map.get(forward_ctxt); - // op is not in a while loop that is part of gradients(). - if (grad_state == null) - return ZerosLikeOutsideLoop(op, index); - throw new NotImplementedException("ZerosLike"); - } - - public Tensor ZerosLikeOutsideLoop(Operation op, int index) - { - var val = op.outputs[index]; - if (!util.IsSwitch(op)) - { - if (val.dtype == dtypes.resource) - throw new NotImplementedException("ZerosLikeOutsideLoop"); - /*return array_ops.zeros( - gen_resource_variable_ops.variable_shape(val), - dtype: default_gradient.get_zeros_dtype(val));*/ - return array_ops.zeros_like(val, optimize: false); - } - else - throw new NotImplementedException("ZerosLikeOutsideLoop"); - } - - /// - /// Create zeros_like gradient for a loop exit. 
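    /// In effect (condensed from the body below): zeros of the static shape
    /// when it is fully defined, otherwise a dynamic zeros_like. Sketch:
    /// <code>
    /// result = shape_known ? array_ops.zeros(val_shape.dims, val.dtype)
    ///                      : array_ops.zeros_like(val, optimize: false);
    /// </code>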
- /// - /// - /// - public Tensor ZerosLikeForExit(Tensor val) - { - Tensor result = null; - var val_shape = val.TensorShape; - var forward_ctxt = val.op._get_control_flow_context(); - var outer_forward_ctxt = forward_ctxt.outer_context; - if (outer_forward_ctxt != null) - outer_forward_ctxt = outer_forward_ctxt.GetWhileContext(); - GradLoopState outer_grad_state = null; - if (outer_forward_ctxt != null) - outer_grad_state = _map.get(outer_forward_ctxt); - // This is a nested loop. - if (outer_grad_state != null) - { - throw new NotImplementedException("ZerosLikeForExit"); - } - else - { - // If the shape is known statically, just create a zero tensor - // with the right shape. - if (val_shape.is_fully_defined()) - result = array_ops.zeros(val_shape.dims, val.dtype); - else - result = array_ops.zeros_like(val, optimize: false); - } - return result; - } - - public void PostProcessing() - { - foreach(var grad_state in _map.Values) - { - foreach(var b_merge in grad_state.switch_map.Values) - { - if(b_merge.op.inputs[0] == b_merge.op.inputs[1]) - { - Tensor next_grad_val = null; - // The value of this loop variable at iteration i+1 doesn't - // depend on its value at iteration i. So use zeros as the - // gradients for all iterations > 0. - var dtype = b_merge.op.inputs[0].dtype; - var shape = b_merge.op.inputs[0].TensorShape; - if (shape.is_fully_defined()) - { - grad_state.grad_context.Enter(); - // Create a zeros and use it for iterations > 0. - var grad_val = constant_op.constant(0, dtype: dtype, shape: shape); - next_grad_val = control_flow_ops._NextIteration(grad_val); - grad_state.grad_context.Exit(); - } - else - { - throw new NotImplementedException("PostProcessing shape is not fully defined."); - } - - b_merge.op._update_input(1, next_grad_val); - } - } - } - } - } -} +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Linq; +using System.Collections.Generic; +using util = Tensorflow.control_flow_util; +using static Tensorflow.Binding; + +namespace Tensorflow.Operations.ControlFlows +{ + /// + /// Maintain the mapping from the loops to their grad states. 
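    /// Internal plumbing built while gradients are backpropagated through
    /// while loops. A hedged sketch of the user-level path that exercises it
    /// (overload shapes are assumptions based on the while-loop unit tests):
    /// <code>
    /// var i = tf.constant(0, name: "i");
    /// var c = new Func<Tensor, Tensor>(x => tf.less(x, 10));
    /// var b = new Func<Tensor, Tensor>(x => tf.add(x, 1));
    /// var r = control_flow_ops.while_loop(c, b, i);
    /// var dr = tf.gradients(r, i);  // drives GetGradState/AddWhileContext
    /// </code>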
+ /// + public class ControlFlowState + { + Dictionary _map; + //class ControlFlowState(object): + // """Maintain the mapping from the loops to their grad states.""" + + // def __init__(self): + // self._map = {} # maps forward loop context to GradLoopState + + // def GetGradState(self, op, before): + // """Return the grad state for this op if it's in a forward loop context.""" + // if before and util.IsLoopExit(op): + // forward_ctxt = op._get_control_flow_context() + // forward_ctxt = forward_ctxt.outer_context + // if forward_ctxt: + // forward_ctxt = forward_ctxt.GetWhileContext() + // else: + // forward_ctxt = _GetWhileContext(op) + // if forward_ctxt: + // return self._map.get(forward_ctxt) + // return None + + public ControlFlowState() + { + _map = new Dictionary(); + } + + /// + /// Return the grad state for this op if it's in a forward loop context. + /// + /// + /// + /// + public GradLoopState GetGradState(Operation op, bool before) + { + ControlFlowContext forward_ctxt = null; + if (before && util.IsLoopExit(op)) + { + forward_ctxt = op._get_control_flow_context(); + forward_ctxt = forward_ctxt.outer_context; + if (forward_ctxt != null) + forward_ctxt = forward_ctxt.GetWhileContext(); + } + else + forward_ctxt = util.GetWhileContext(op); + if (forward_ctxt != null) + return _map.get(forward_ctxt); + return null; + } + + public Tensor[] ProcessUnusedLoopExits(Dictionary pending_count, List to_ops_set) + { + var loop_exits = new List(); + foreach(var grad_state in _map.Values) + { + foreach(var y in grad_state.forward_loop_exits) + { + if(!pending_count.ContainsKey(y.op.name)) + { + grad_state.pending_exits_count -= 1; + if (!to_ops_set.Contains(y.op)) + grad_state.unused_exits.append(y); + if (grad_state.pending_exits_count == 0) + loop_exits.extend(grad_state.unused_exits); + } + } + + foreach(var y in grad_state.forward_context.loop_enters) + { + if (!pending_count.ContainsKey(y.op.name)) + pending_count[y.op.name] = 1; + } + } + + return loop_exits.ToArray(); + } + + public void EnterGradWhileContext(Operation op, bool before) + { + var grad_state = GetGradState(op, before); + if (grad_state != null) + grad_state.grad_context.Enter(); + } + + public void ExitGradWhileContext(Operation op, bool before) + { + var grad_state = GetGradState(op, before); + if (grad_state != null) + grad_state.grad_context.Exit(); + } + + // def AddWhileContext(self, op, between_op_list, between_ops): + // """Add the grad state for the while loop that op belongs to. + + // Note that op is an Exit, and this method must be called in + // the control flow context where gradients() is called. + + // Note that this method modifies `between_op_list` and `between_ops`. + // """ + // forward_ctxt = _GetWhileContext(op) + // grad_state = self._map.get(forward_ctxt) + // if grad_state is None: + // # This is a new while loop so create a grad state for it. + // outer_forward_ctxt = forward_ctxt.outer_context + // if outer_forward_ctxt: + // outer_forward_ctxt = outer_forward_ctxt.GetWhileContext() + // outer_grad_state = None + // if outer_forward_ctxt: + // outer_grad_state = self._map.get(outer_forward_ctxt) + // grad_state = GradLoopState(forward_ctxt, outer_grad_state) + // self._map[forward_ctxt] = grad_state + + // # We need to include all exits of a loop for backprop. 
+ // for loop_exit in grad_state.forward_loop_exits: + // if loop_exit.op not in between_ops: + // between_ops.add(loop_exit.op) + // between_op_list.append(loop_exit.op) + public void AddWhileContext(Operation op, List between_op_list, List between_ops) + { + var forward_ctxt = op.GetWhileContext(); + var grad_state = _map.ContainsKey(forward_ctxt) ? _map[forward_ctxt] : null; + if(grad_state == null) + { + GradLoopState outer_grad_state = null; + var outer_forward_ctxt = forward_ctxt.outer_context; + if (outer_forward_ctxt != null) + outer_forward_ctxt = outer_forward_ctxt.GetWhileContext(); + if (outer_forward_ctxt != null) + outer_grad_state = _map[outer_forward_ctxt]; + grad_state = new GradLoopState(forward_ctxt, outer_grad_state); + _map[forward_ctxt] = grad_state; + + // We need to include all exits of a loop for backprop. + foreach (var loop_exit in grad_state.forward_loop_exits) + { + if(!between_ops.Contains(loop_exit.op)) + { + between_ops.add(loop_exit.op); + between_op_list.append(loop_exit.op); + } + } + } + } + + // def ZerosLikeForExit(self, val): + // """Create zeros_like gradient for a loop exit. + + // If the result of a loop variable is not used but is involved in + // computing the result of some needed loop variable, we create a + // zero-valued tensor that is fed as gradient for the Exit node of that + // loop variable. Note that val.op is an Exit, and this method must be + // called in the control flow context where gradients() is called. + + // Args: + // val: The output tensor of an Exit op. + + // Returns: + // A zero tensor of the same shape of val. + // """ + // val_shape = val.get_shape() + // forward_ctxt = val.op._get_control_flow_context() + // outer_forward_ctxt = forward_ctxt.outer_context + // if outer_forward_ctxt: + // outer_forward_ctxt = outer_forward_ctxt.GetWhileContext() + // outer_grad_state = None + // if outer_forward_ctxt: + // outer_grad_state = self._map.get(outer_forward_ctxt) + // if outer_grad_state: + // # This is a nested loop. + // if val_shape.is_fully_defined(): + // # If the shape is known statically, just create a zero tensor + // # with the right shape in the right context. + // outer_grad_state.grad_context.Enter() + // result = array_ops.zeros(val_shape.dims, val.dtype) + // outer_grad_state.grad_context.Exit() + // else: + // # Only the shape of value is needed for backprop. + // forward_ctxt.outer_context.Enter() + // shape = array_ops.shape_internal(val, optimize=False) + // forward_ctxt.outer_context.Exit() + // # Save the shape to a stack. + // history_shape = outer_grad_state.AddForwardAccumulator(shape) + // # Get the shape back from the stack. + // outer_grad_ctxt = outer_grad_state.grad_context + // outer_grad_ctxt.Enter() + // real_shape = outer_grad_state.AddBackpropAccumulatedValue( + // history_shape, shape) + // result = array_ops.zeros(real_shape, val.dtype) + // outer_grad_ctxt.Exit() + // else: + // # This is not a nested loop. + // if val_shape.is_fully_defined(): + // # If the shape is known statically, just create a zero tensor + // # with the right shape. 
+ // result = array_ops.zeros(val_shape.dims, val.dtype) + // else: + // result = array_ops.zeros_like(val, optimize=False) + // return result + + public Tensor ZerosLike(Operation op, int index) + { + if (util.IsLoopSwitch(op)) + return null; + if (op.graph.building_function) + return array_ops.zeros_like(op.outputs[index]); + var dead_branch = util.IsSwitch(op); + var forward_ctxt = util.GetWhileContext(op); + var grad_state = _map.get(forward_ctxt); + // op is not in a while loop that is part of gradients(). + if (grad_state == null) + return ZerosLikeOutsideLoop(op, index); + throw new NotImplementedException("ZerosLike"); + } + + public Tensor ZerosLikeOutsideLoop(Operation op, int index) + { + var val = op.outputs[index]; + if (!util.IsSwitch(op)) + { + if (val.dtype == dtypes.resource) + throw new NotImplementedException("ZerosLikeOutsideLoop"); + /*return array_ops.zeros( + gen_resource_variable_ops.variable_shape(val), + dtype: default_gradient.get_zeros_dtype(val));*/ + return array_ops.zeros_like(val, optimize: false); + } + else + throw new NotImplementedException("ZerosLikeOutsideLoop"); + } + + /// + /// Create zeros_like gradient for a loop exit. + /// + /// + /// + public Tensor ZerosLikeForExit(Tensor val) + { + Tensor result = null; + var val_shape = val.TensorShape; + var forward_ctxt = val.op._get_control_flow_context(); + var outer_forward_ctxt = forward_ctxt.outer_context; + if (outer_forward_ctxt != null) + outer_forward_ctxt = outer_forward_ctxt.GetWhileContext(); + GradLoopState outer_grad_state = null; + if (outer_forward_ctxt != null) + outer_grad_state = _map.get(outer_forward_ctxt); + // This is a nested loop. + if (outer_grad_state != null) + { + throw new NotImplementedException("ZerosLikeForExit"); + } + else + { + // If the shape is known statically, just create a zero tensor + // with the right shape. + if (val_shape.is_fully_defined()) + result = array_ops.zeros(val_shape.dims, val.dtype); + else + result = array_ops.zeros_like(val, optimize: false); + } + return result; + } + + public void PostProcessing() + { + foreach(var grad_state in _map.Values) + { + foreach(var b_merge in grad_state.switch_map.Values) + { + if(b_merge.op.inputs[0] == b_merge.op.inputs[1]) + { + Tensor next_grad_val = null; + // The value of this loop variable at iteration i+1 doesn't + // depend on its value at iteration i. So use zeros as the + // gradients for all iterations > 0. + var dtype = b_merge.op.inputs[0].dtype; + var shape = b_merge.op.inputs[0].TensorShape; + if (shape.is_fully_defined()) + { + grad_state.grad_context.Enter(); + // Create a zeros and use it for iterations > 0. + var grad_val = constant_op.constant(0, dtype: dtype, shape: shape); + next_grad_val = control_flow_ops._NextIteration(grad_val); + grad_state.grad_context.Exit(); + } + else + { + throw new NotImplementedException("PostProcessing shape is not fully defined."); + } + + b_merge.op._update_input(1, next_grad_val); + } + } + } + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs b/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs index 2552df8a..2011ca56 100644 --- a/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs +++ b/src/TensorFlowNET.Core/Operations/ControlFlows/GradLoopState.cs @@ -1,335 +1,335 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; -using static Tensorflow.Binding; -using util = Tensorflow.control_flow_util; - -namespace Tensorflow.Operations.ControlFlows -{ - /// - /// The state used for constructing the gradient graph for a while loop. - /// - public class GradLoopState - { - private WhileContext _grad_context = null; - - public WhileContext grad_context => _grad_context; - - // # The loop counter added by AddBackpropLoopCounter. It is the value - // # of the loop counter for the current iteration. - // self._grad_index = None - - // # A sync op for backprop. - // self._grad_sync = None - - // # Information needed by backprop. - private Hashtable _history_map = new Hashtable(); - public Hashtable history_map => _history_map; - Dictionary _switch_map = new Dictionary(); - public Dictionary switch_map => _switch_map; - - /// - /// The while loop context for forward. - /// - WhileContext _forward_context; - public WhileContext forward_context => _forward_context; - - /// - /// The grad loop state for the outer while loop. - /// - GradLoopState _outer_grad_state; - public GradLoopState outer_grad_state => _outer_grad_state; - - Tensor _forward_index; - public Tensor forward_index => _forward_index; - Tensor _grad_index; - - Tensor[] _forward_loop_exits; - /// - /// The list of exits of the forward loop. - /// - public Tensor[] forward_loop_exits => _forward_loop_exits; - - List _deferred_exits; - public List deferred_exits => _deferred_exits; - - List _unused_exits; - public List unused_exits => _unused_exits; - - /// - /// The number of exits we expect to see but haven't. - /// - public int pending_exits_count { get; set; } - - Operation _grad_sync; - public Operation grad_sync - { - get - { - if(_grad_sync == null) - { - tf_with(ops.control_dependencies(null), delegate - { - _grad_sync = gen_control_flow_ops.control_trigger(name: "b_sync"); - }); - _grad_sync._set_control_flow_context(_grad_context); - _grad_index.op._add_control_input(_grad_sync); - if (_grad_context.outer_context != null) - _grad_context.outer_context.AddInnerOp(_grad_sync); - } - return _grad_sync; - } - } - - public GradLoopState(WhileContext forward_ctxt, GradLoopState outer_grad_state_) - { - // Information needed by backprop. - _unused_exits = new List(); - _deferred_exits = new List(); - _forward_loop_exits = list(forward_ctxt.loop_exits); - pending_exits_count = len(forward_ctxt.loop_exits); - - _outer_grad_state = outer_grad_state_; - - ControlFlowContext outer_forward_ctxt = null; - if (outer_grad_state_ != null) - outer_forward_ctxt = outer_grad_state_.forward_context; - - // Add the forward loop counter. 
- // with forward_ctxt._graph.as_default(): - Tensor cnt, forward_index; - { - if (outer_forward_ctxt != null) - outer_forward_ctxt.Enter(); - (cnt, forward_index) = forward_ctxt.AddForwardLoopCounter(outer_grad_state); - if (outer_forward_ctxt != null) - outer_forward_ctxt.Exit(); - } - _forward_context = forward_ctxt; - _forward_index = forward_index; - - // Add the backprop WhileContext, and the backprop loop counter. - if (outer_grad_state != null) - { - // This is a nested loop. Remember the iteration counts for each - // execution of this inner loop. - throw new NotImplementedException("GradLoopState"); - } - else - { - if (outer_forward_ctxt != null) - outer_forward_ctxt.Enter(); - _grad_context = new WhileContext( - maximum_iterations: forward_ctxt.maximum_iterations, - parallel_iterations: forward_ctxt.parallel_iterations, - back_prop: forward_ctxt.back_prop, - swap_memory: forward_ctxt.swap_memory, - name: forward_ctxt.name, - grad_state: this); - _grad_index = _grad_context.AddBackpropLoopCounter(cnt, outer_grad_state); - if (outer_forward_ctxt != null) - outer_forward_ctxt.Exit(); - } - } - - /// - /// Add an accumulator for each forward tensor that is needed in backprop. - /// - /// This is added to the forward loop at the first time when a tensor - /// in the forward loop is used by backprop gradient computation loop. - /// We create an accumulator that accumulates the value of tensor at each - /// iteration. Called in the control flow context where gradients() is called. - /// - /// The pseudocode is: - /// ``` - /// acc = stack(); - /// while (_pivot) { - /// acc = stack_push(acc, value); - /// } - /// ``` - /// - /// We make sure that the stack push op in one iteration is executed before - /// next iteration. This is achieved by adding a control edge from - /// `forward_index.op.inputs[0].op` to the push op, and another control - /// edge from the push op to either `forward_index.op` or `forward_sync`. - /// - /// The source tensor in forward that is to be accumulated. - /// True iff the tensor is on a dead branch of a cond. - /// The stack that contains the accumulated history of the tensor. - public Tensor AddForwardAccumulator(Tensor value, bool dead_branch = false) - { - _forward_index.graph.as_default(); - { - var curr_ctxt = ops.get_default_graph()._get_control_flow_context(); - return tf_with(ops.control_dependencies(null), delegate - { - Tensor acc = null; - Tensor push = null; - if (curr_ctxt != null) - curr_ctxt.Enter(); - ops.colocate_with(value); - { - // We only need to pass maximum_iterations to the stack if - // we're inside an XLA context. - var max_size = constant_op.constant(-1, dtypes.int32); - acc = gen_data_flow_ops.stack_v2( - max_size: max_size, elem_type: value.dtype.as_base_dtype(), name: "f_acc"); - } - if (curr_ctxt != null) - curr_ctxt.Exit(); - - // Make acc available in the forward context. - var enter_acc = forward_context.AddValue(acc); - - // Add the stack_push op in the context of value.op. - var swap_enabled = forward_context.swap_memory; - var value_ctxt = util.GetOutputContext(value.op); - if(value_ctxt == forward_context) - { - // value is not nested in the forward context. - forward_context.Enter(); - push = gen_data_flow_ops.stack_push_v2(enter_acc, value, swap_memory: swap_enabled); - forward_context.Exit(); - // Protect stack push and order it before forward_index. 
- forward_index.op._add_control_input(push.op); - } - else - { - throw new NotImplementedException("AddForwardAccumulator"); - } - - // Order stack push after the successor of forward_index - var add_op = forward_index.op.inputs[0].op; - push.op._add_control_input(add_op); - return acc; - }); - } - } - - // """Add the getter for an accumulated value in the grad context. - // - // This is added to the backprop loop. Called in the grad context to - // get the value of an accumulated value. The stack pop op must be guarded - // by the pred of the controlling cond. - // - // Args: - // history_value: The history (a stack) of a value. - // value: The value that is pushed onto the stack. - // dead_branch: True iff the tensor is on a dead branch of a cond. - // - // Returns: - // The current value (the top of the stack). - // """ - - public Tensor AddBackpropAccumulatedValue(Tensor history_value, Tensor value, bool dead_branch= false) - { - var history_ctxt = history_value.op._get_control_flow_context(); - // Find the cond context that controls history_value if any. - CondContext cond_ctxt = null; - Tensor pop = null; - var value_ctxt = value.op._get_control_flow_context(); - while(value_ctxt != null && value_ctxt != history_ctxt) - { - if (value_ctxt is CondContext cc) - cond_ctxt = cc; - value_ctxt = value_ctxt.outer_context; - } - tf_with(ops.control_dependencies(null), delegate - { - grad_context.Enter(); - if(cond_ctxt != null) - { - throw new NotImplementedException("AddBackpropAccumulatedValue"); - } - pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.as_base_dtype()); - pop.set_shape(value.TensorShape); - grad_context.Exit(); - }); - var parallel_iterations = grad_context.parallel_iterations; - if (parallel_iterations > 1) - // All pops are ordered after pivot_for_body and before grad_sync. - grad_sync._add_control_input(pop.op); - return pop; - } - - /// - /// Get the real value of `value`. - /// - /// A tensor to be captured. - /// The same tensor obtained from the saved history. - public Tensor GetRealValue(Tensor value) - { - Tensor real_value = null; - if(real_value == null) - { - var cur_value = value; - var cur_grad_state = this; - Tensor history_value = null; - while (true) - { - var enter_op = util.GetLoopConstantEnter(cur_value); - if(enter_op != null) - { - // Special case: cur_value comes from a constant Enter node. - cur_value = enter_op.inputs[0]; - cur_grad_state = cur_grad_state.outer_grad_state; - if(cur_grad_state == null) - { - // We are now outside all nested loops for this gradient(), - // so `value` is a loop invariant and there is no need to - // save the history of value. Just make cur_value to enter - // the right control flow context. - real_value = _grad_context.AddValue(cur_value); - break; - } - } - else if (constant_op.is_constant(cur_value)) - { - // We are now outside all nested loops for this gradient(), - // so `value` is a loop invariant and there is no need to - // save the history of value. Just make cur_value to enter - // the right control flow context. - real_value = constant_op.constant( - tensor_util.constant_value(cur_value), dtype: cur_value.dtype); - break; - } - else - { - // Record the history of this value in forward_ctxt. - _grad_context.Exit(); - history_value = cur_grad_state.AddForwardAccumulator(cur_value); - _grad_context.Enter(); - break; - } - } - - if(real_value == null) - { - // Add the stack pop op in the grad context. 
- real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value, cur_value); - if (cur_grad_state != this) - real_value = _grad_context.AddValue(real_value); - } - _history_map[value.name] = real_value; - } - return real_value; - } - } -} +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using static Tensorflow.Binding; +using util = Tensorflow.control_flow_util; + +namespace Tensorflow.Operations.ControlFlows +{ + /// + /// The state used for constructing the gradient graph for a while loop. + /// + public class GradLoopState + { + private WhileContext _grad_context = null; + + public WhileContext grad_context => _grad_context; + + // # The loop counter added by AddBackpropLoopCounter. It is the value + // # of the loop counter for the current iteration. + // self._grad_index = None + + // # A sync op for backprop. + // self._grad_sync = None + + // # Information needed by backprop. + private Hashtable _history_map = new Hashtable(); + public Hashtable history_map => _history_map; + Dictionary _switch_map = new Dictionary(); + public Dictionary switch_map => _switch_map; + + /// + /// The while loop context for forward. + /// + WhileContext _forward_context; + public WhileContext forward_context => _forward_context; + + /// + /// The grad loop state for the outer while loop. + /// + GradLoopState _outer_grad_state; + public GradLoopState outer_grad_state => _outer_grad_state; + + Tensor _forward_index; + public Tensor forward_index => _forward_index; + Tensor _grad_index; + + Tensor[] _forward_loop_exits; + /// + /// The list of exits of the forward loop. + /// + public Tensor[] forward_loop_exits => _forward_loop_exits; + + List _deferred_exits; + public List deferred_exits => _deferred_exits; + + List _unused_exits; + public List unused_exits => _unused_exits; + + /// + /// The number of exits we expect to see but haven't. + /// + public int pending_exits_count { get; set; } + + Operation _grad_sync; + public Operation grad_sync + { + get + { + if(_grad_sync == null) + { + tf_with(ops.control_dependencies(null), delegate + { + _grad_sync = gen_control_flow_ops.control_trigger(name: "b_sync"); + }); + _grad_sync._set_control_flow_context(_grad_context); + _grad_index.op._add_control_input(_grad_sync); + if (_grad_context.outer_context != null) + _grad_context.outer_context.AddInnerOp(_grad_sync); + } + return _grad_sync; + } + } + + public GradLoopState(WhileContext forward_ctxt, GradLoopState outer_grad_state_) + { + // Information needed by backprop. 
+            _unused_exits = new List<Tensor>();
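+            // unused_exits collects exits whose gradients are never requested;
+            // deferred_exits holds exits whose backprop is deferred while other
+            // exits of the same loop are still pending.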
+ var swap_enabled = forward_context.swap_memory; + var value_ctxt = util.GetOutputContext(value.op); + if(value_ctxt == forward_context) + { + // value is not nested in the forward context. + forward_context.Enter(); + push = gen_data_flow_ops.stack_push_v2(enter_acc, value, swap_memory: swap_enabled); + forward_context.Exit(); + // Protect stack push and order it before forward_index. + forward_index.op._add_control_input(push.op); + } + else + { + throw new NotImplementedException("AddForwardAccumulator"); + } + + // Order stack push after the successor of forward_index + var add_op = forward_index.op.inputs[0].op; + push.op._add_control_input(add_op); + return acc; + }); + } + } + + // """Add the getter for an accumulated value in the grad context. + // + // This is added to the backprop loop. Called in the grad context to + // get the value of an accumulated value. The stack pop op must be guarded + // by the pred of the controlling cond. + // + // Args: + // history_value: The history (a stack) of a value. + // value: The value that is pushed onto the stack. + // dead_branch: True iff the tensor is on a dead branch of a cond. + // + // Returns: + // The current value (the top of the stack). + // """ + + public Tensor AddBackpropAccumulatedValue(Tensor history_value, Tensor value, bool dead_branch= false) + { + var history_ctxt = history_value.op._get_control_flow_context(); + // Find the cond context that controls history_value if any. + CondContext cond_ctxt = null; + Tensor pop = null; + var value_ctxt = value.op._get_control_flow_context(); + while(value_ctxt != null && value_ctxt != history_ctxt) + { + if (value_ctxt is CondContext cc) + cond_ctxt = cc; + value_ctxt = value_ctxt.outer_context; + } + tf_with(ops.control_dependencies(null), delegate + { + grad_context.Enter(); + if(cond_ctxt != null) + { + throw new NotImplementedException("AddBackpropAccumulatedValue"); + } + pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.as_base_dtype()); + pop.set_shape(value.TensorShape); + grad_context.Exit(); + }); + var parallel_iterations = grad_context.parallel_iterations; + if (parallel_iterations > 1) + // All pops are ordered after pivot_for_body and before grad_sync. + grad_sync._add_control_input(pop.op); + return pop; + } + + /// + /// Get the real value of `value`. + /// + /// A tensor to be captured. + /// The same tensor obtained from the saved history. + public Tensor GetRealValue(Tensor value) + { + Tensor real_value = null; + if(real_value == null) + { + var cur_value = value; + var cur_grad_state = this; + Tensor history_value = null; + while (true) + { + var enter_op = util.GetLoopConstantEnter(cur_value); + if(enter_op != null) + { + // Special case: cur_value comes from a constant Enter node. + cur_value = enter_op.inputs[0]; + cur_grad_state = cur_grad_state.outer_grad_state; + if(cur_grad_state == null) + { + // We are now outside all nested loops for this gradient(), + // so `value` is a loop invariant and there is no need to + // save the history of value. Just make cur_value to enter + // the right control flow context. + real_value = _grad_context.AddValue(cur_value); + break; + } + } + else if (constant_op.is_constant(cur_value)) + { + // We are now outside all nested loops for this gradient(), + // so `value` is a loop invariant and there is no need to + // save the history of value. Just make cur_value to enter + // the right control flow context. 
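+                    // When swap_memory is set on the forward loop, the stack push
+                    // below may swap accumulated tensors out to host memory
+                    // between iterations.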
+ real_value = constant_op.constant( + tensor_util.constant_value(cur_value), dtype: cur_value.dtype); + break; + } + else + { + // Record the history of this value in forward_ctxt. + _grad_context.Exit(); + history_value = cur_grad_state.AddForwardAccumulator(cur_value); + _grad_context.Enter(); + break; + } + } + + if(real_value == null) + { + // Add the stack pop op in the grad context. + real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value, cur_value); + if (cur_grad_state != this) + real_value = _grad_context.AddValue(real_value); + } + _history_map[value.name] = real_value; + } + return real_value; + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs index fbc68dbf..cee1ffd4 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs @@ -1,17 +1,17 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ******************************************************************************/ using static Tensorflow.Binding; @@ -22,23 +22,23 @@ namespace Tensorflow.Operations { public static OpDefLibrary _op_def_lib = new OpDefLibrary(); - /// - /// Computes a 2-D convolution given 4-D `input` and `filter` tensors. - /// - /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` - /// and a filter / kernel tensor of shape - /// `[filter_height, filter_width, in_channels, out_channels]`, this op - /// performs the following: - /// - /// 1. Flattens the filter to a 2-D matrix with shape - /// `[filter_height * filter_width * in_channels, output_channels]`. - /// 2. Extracts image patches from the input tensor to form a *virtual* - /// tensor of shape `[batch, out_height, out_width, - /// filter_height * filter_width * in_channels]`. - /// 3. For each patch, right-multiplies the filter matrix and the image patch - /// vector. - /// - /// + /// + /// Computes a 2-D convolution given 4-D `input` and `filter` tensors. 
+ /// + /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + /// and a filter / kernel tensor of shape + /// `[filter_height, filter_width, in_channels, out_channels]`, this op + /// performs the following: + /// + /// 1. Flattens the filter to a 2-D matrix with shape + /// `[filter_height * filter_width * in_channels, output_channels]`. + /// 2. Extracts image patches from the input tensor to form a *virtual* + /// tensor of shape `[batch, out_height, out_width, + /// filter_height * filter_width * in_channels]`. + /// 3. For each patch, right-multiplies the filter matrix and the image patch + /// vector. + /// + /// /// public static Tensor conv2d(Conv2dParams parameters) { @@ -55,15 +55,15 @@ namespace Tensorflow.Operations }); return _op.outputs[0]; - } - - /// - /// Computes the gradients of convolution with respect to the filter. - /// - /// - /// - public static Tensor conv2d_backprop_filter(Conv2dParams parameters) - { + } + + /// + /// Computes the gradients of convolution with respect to the filter. + /// + /// + /// + public static Tensor conv2d_backprop_filter(Conv2dParams parameters) + { var _op = _op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new { input = parameters.Input, @@ -77,16 +77,16 @@ namespace Tensorflow.Operations dilations = parameters.Dilations }); - return _op.outputs[0]; + return _op.outputs[0]; } - /// - /// Computes the gradients of convolution with respect to the input. - /// - /// + /// + /// Computes the gradients of convolution with respect to the input. + /// + /// /// - public static Tensor conv2d_backprop_input(Conv2dParams parameters) - { + public static Tensor conv2d_backprop_input(Conv2dParams parameters) + { var _op = _op_def_lib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new { input_sizes = parameters.InputSizes, @@ -100,7 +100,7 @@ namespace Tensorflow.Operations dilations = parameters.Dilations }); - return _op.outputs[0]; + return _op.outputs[0]; } public static Tensor bias_add(Tensor value, @@ -135,56 +135,56 @@ namespace Tensorflow.Operations }); return _op.outputs[0]; - } - - /// - /// Computes exponential linear: exp(features) - 1 if &lt; 0, features otherwise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - /// ](http://arxiv.org/abs/1511.07289) - /// - public static Tensor elu(Tensor features, string name = "Elu") - { - var op = _op_def_lib._apply_op_helper("Elu", name: name, args: new { features }); - return op.output; } - /// - /// Gradient for batch normalization. - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// + /// + /// Computes exponential linear: exp(features) - 1 if &lt; 0, features otherwise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
+        /// </returns>
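+        /// <example>
+        /// A minimal sketch, assuming a float `Tensor` named `features` is already in scope:
+        /// ```
+        /// var activated = gen_nn_ops.elu(features);
+        /// ```
+        /// </example>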
public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null) { var _op = _op_def_lib._apply_op_helper("InTopKV2", name: name, args: new @@ -265,8 +265,8 @@ namespace Tensorflow.Operations }); return _op.output; - } - + } + public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null) { var _op = _op_def_lib._apply_op_helper("LeakyRelu", name: name, args: new @@ -297,9 +297,9 @@ namespace Tensorflow.Operations return _op.outputs[0]; } - public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, - string data_format= "NHWC", string name= null) - { + public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, + string data_format= "NHWC", string name= null) + { var _op = _op_def_lib._apply_op_helper("MaxPoolGrad", name: name, args: new { orig_input, @@ -311,7 +311,7 @@ namespace Tensorflow.Operations data_format }); - return _op.outputs[0]; + return _op.outputs[0]; } public static Tensor[] top_kv2(Tensor input, int k, bool sorted = true, string name = null) @@ -335,8 +335,8 @@ namespace Tensorflow.Operations }); return _op.outputs[0]; - } - + } + public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null) { var _op = _op_def_lib._apply_op_helper("LeakyReluGrad", name: name, args: new @@ -377,81 +377,81 @@ namespace Tensorflow.Operations return (_op.outputs[0], _op.outputs[1]); } - /// - /// Computes softmax cross entropy cost and gradients to backpropagate. - /// - /// - /// batch_size x num_classes matrix - /// - /// - /// batch_size vector with values in [0, num_classes). - /// This is the label for the given minibatch entry. - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'. - /// - /// - /// Returns a tuple with multiple values, as follows: - /// loss : Per example loss (batch_size vector). - /// backprop : backpropagated gradients (batch_size x num_classes matrix). - /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. - /// - /// - /// Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept - /// a matrix of label probabilities, but rather a single label per row - /// of features. This label is considered to have probability 1.0 for the - /// given row. - /// - /// Inputs are the logits, not probabilities. - /// - public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits") - { - var op = _op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels }); - int _idx = 0; - var loss = op.outputs[_idx++]; - var backprop = op.outputs[_idx++]; - return (loss, backprop); + /// + /// Computes softmax cross entropy cost and gradients to backpropagate. + /// + /// + /// batch_size x num_classes matrix + /// + /// + /// batch_size vector with values in [0, num_classes). + /// This is the label for the given minibatch entry. + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'. + /// + /// + /// Returns a tuple with multiple values, as follows: + /// loss : Per example loss (batch_size vector). 
+ /// backprop : backpropagated gradients (batch_size x num_classes matrix). + /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. + /// + /// + /// Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept + /// a matrix of label probabilities, but rather a single label per row + /// of features. This label is considered to have probability 1.0 for the + /// given row. + /// + /// Inputs are the logits, not probabilities. + /// + public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits") + { + var op = _op_def_lib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name: name, args: new { features, labels }); + int _idx = 0; + var loss = op.outputs[_idx++]; + var backprop = op.outputs[_idx++]; + return (loss, backprop); } - /// - /// Computes rectified linear: `max(features, 0)`. - /// - /// A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`. + /// + /// Computes rectified linear: `max(features, 0)`. + /// + /// A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`. /// A name for the operation (optional). /// A `Tensor`. Has the same type as `features`. public static Tensor relu(Tensor features, string name = null) - { - - //_ctx = _context._context - //if _ctx is not None and _ctx._eager_context.is_eager: - // try: - // _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( - // _ctx._context_handle, _ctx._eager_context.device_name, "Relu", name, - // _ctx._post_execution_callbacks, features) - // return _result - // except _core._FallbackException: - // try: - // return relu_eager_fallback( - // features, name=name, ctx=_ctx) - // except _core._SymbolicException: - // pass # Add nodes to the TensorFlow graph. - // except (TypeError, ValueError): - // result = _dispatch.dispatch( - // relu, features=features, name=name) - // if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: - // return result - // raise - // except _core._NotOkStatusException as e: - // if name is not None: - // message = e.message + " name: " + name - // else: - // message = e.message - // _six.raise_from(_core._status_to_exception(e.code, message), None) + { + + //_ctx = _context._context + //if _ctx is not None and _ctx._eager_context.is_eager: + // try: + // _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( + // _ctx._context_handle, _ctx._eager_context.device_name, "Relu", name, + // _ctx._post_execution_callbacks, features) + // return _result + // except _core._FallbackException: + // try: + // return relu_eager_fallback( + // features, name=name, ctx=_ctx) + // except _core._SymbolicException: + // pass # Add nodes to the TensorFlow graph. + // except (TypeError, ValueError): + // result = _dispatch.dispatch( + // relu, features=features, name=name) + // if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + // return result + // raise + // except _core._NotOkStatusException as e: + // if name is not None: + // message = e.message + " name: " + name + // else: + // message = e.message + // _six.raise_from(_core._status_to_exception(e.code, message), None) //# Add nodes to the TensorFlow graph. 
//try: OpDefLibrary _op_def_lib = new OpDefLibrary(); var _op = _op_def_lib._apply_op_helper("Relu", name: name, args: new { features }); - return _op.outputs[0]; + return _op.outputs[0]; //except (TypeError, ValueError): // result = _dispatch.dispatch( // relu, features=features, name=name) diff --git a/src/TensorFlowNET.Core/Operations/Operation.Control.cs b/src/TensorFlowNET.Core/Operations/Operation.Control.cs index ba7b0829..d6f73884 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Control.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Control.cs @@ -1,68 +1,68 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using Tensorflow.Operations; -using static Tensorflow.Binding; - -namespace Tensorflow -{ - public partial class Operation - { - private ControlFlowContext _control_flow_context; - - /// - /// Add this op to its control flow context. - /// - /// This may add new ops and change this op's inputs. self.inputs must be - /// available before calling this method. - /// - public void _control_flow_post_processing() - { - foreach(Tensor input_tensor in inputs) - control_flow_util.CheckInputFromValidContext(this, input_tensor.op); - - if (_control_flow_context != null) - _control_flow_context.AddOp(this); - } - - public void _add_control_input(Operation op) - { - //c_api.TF_AddControlInput(_operDesc, op); - c_api.AddControlInput(graph, _handle, op); - } - - public void _add_control_inputs(Operation[] ops) - { - foreach (var op in ops) - _add_control_input(op); - } - - public void _set_control_flow_context(ControlFlowContext ctx) - { - _control_flow_context = ctx; - } - - public ControlFlowContext _get_control_flow_context() - { - return _control_flow_context; - } - - public WhileContext GetWhileContext() - { - return _control_flow_context as WhileContext; - } - } -} +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using Tensorflow.Operations; +using static Tensorflow.Binding; + +namespace Tensorflow +{ + public partial class Operation + { + private ControlFlowContext _control_flow_context; + + /// + /// Add this op to its control flow context. 
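            // Graph-mode path: the commented block above mirrors the Python
            // eager fast path; here we always build a Relu node via the op def library.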
+ /// + /// This may add new ops and change this op's inputs. self.inputs must be + /// available before calling this method. + /// + public void _control_flow_post_processing() + { + foreach(Tensor input_tensor in inputs) + control_flow_util.CheckInputFromValidContext(this, input_tensor.op); + + if (_control_flow_context != null) + _control_flow_context.AddOp(this); + } + + public void _add_control_input(Operation op) + { + //c_api.TF_AddControlInput(_operDesc, op); + c_api.AddControlInput(graph, _handle, op); + } + + public void _add_control_inputs(Operation[] ops) + { + foreach (var op in ops) + _add_control_input(op); + } + + public void _set_control_flow_context(ControlFlowContext ctx) + { + _control_flow_context = ctx; + } + + public ControlFlowContext _get_control_flow_context() + { + return _control_flow_context; + } + + public WhileContext GetWhileContext() + { + return _control_flow_context as WhileContext; + } + } +} diff --git a/src/TensorFlowNET.Core/Operations/Operation.Input.cs b/src/TensorFlowNET.Core/Operations/Operation.Input.cs index af3c57b2..fdf92504 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Input.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Input.cs @@ -1,109 +1,109 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Linq; -using System.Runtime.InteropServices; -#if SERIALIZABLE -using Newtonsoft.Json; -#endif - -namespace Tensorflow -{ - - // from ops.py - public partial class Operation - { - public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index)); - public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index)); - - public int InputListLength(string name) - { - int num = 0; - using(var status = new Status()) - { - num = c_api.TF_OperationInputListLength(_handle, name, status); - status.Check(true); - } - return num; - } -#if SERIALIZABLE - [JsonIgnore] -#endif - public int NumInputs => c_api.TF_OperationNumInputs(_handle); - private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray(); - - private InputList _inputs_val; - public InputList inputs - { - get - { - if (_inputs_val == null) - { - var retval = new Tensor[NumInputs]; - - for (int i = 0; i < NumInputs; i++) - { - var tf_output = Input(i); - var op = GetOperation(tf_output.oper); - retval[i] = op.outputs[tf_output.index]; - } - - _inputs_val = new InputList(retval); - } - - return _inputs_val; - } - } - - public int NumControlInputs => c_api.TF_OperationNumControlInputs(_handle); - - /// - /// The `Operation` objects on which this op has a control dependency. 
- /// - /// Before this op is executed, TensorFlow will ensure that the - /// operations in `self.control_inputs` have finished executing.This - /// mechanism can be used to run ops sequentially for performance - /// reasons, or to ensure that the side effects of an op are observed - /// in the correct order. - /// - public Operation[] control_inputs - { - get - { - return GetControlInputs(); - } - } - - public unsafe Operation[] GetControlInputs() - { - var control_inputs = new Operation[NumControlInputs]; - - if (NumControlInputs > 0) - { - IntPtr control_input_handle = Marshal.AllocHGlobal(Marshal.SizeOf() * NumControlInputs); - c_api.TF_OperationGetControlInputs(_handle, control_input_handle, NumControlInputs); - for (int i = 0; i < NumControlInputs; i++) - { - var handle = control_input_handle + Marshal.SizeOf() * i; - control_inputs[i] = new Operation(*(IntPtr*)handle); - } - } - - return control_inputs; - } - } -} +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Linq; +using System.Runtime.InteropServices; +#if SERIALIZABLE +using Newtonsoft.Json; +#endif + +namespace Tensorflow +{ + + // from ops.py + public partial class Operation + { + public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index)); + public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index)); + + public int InputListLength(string name) + { + int num = 0; + using(var status = new Status()) + { + num = c_api.TF_OperationInputListLength(_handle, name, status); + status.Check(true); + } + return num; + } +#if SERIALIZABLE + [JsonIgnore] +#endif + public int NumInputs => c_api.TF_OperationNumInputs(_handle); + private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray(); + + private InputList _inputs_val; + public InputList inputs + { + get + { + if (_inputs_val == null) + { + var retval = new Tensor[NumInputs]; + + for (int i = 0; i < NumInputs; i++) + { + var tf_output = Input(i); + var op = GetOperation(tf_output.oper); + retval[i] = op.outputs[tf_output.index]; + } + + _inputs_val = new InputList(retval); + } + + return _inputs_val; + } + } + + public int NumControlInputs => c_api.TF_OperationNumControlInputs(_handle); + + /// + /// The `Operation` objects on which this op has a control dependency. + /// + /// Before this op is executed, TensorFlow will ensure that the + /// operations in `self.control_inputs` have finished executing.This + /// mechanism can be used to run ops sequentially for performance + /// reasons, or to ensure that the side effects of an op are observed + /// in the correct order. 
+        /// </summary>
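+        /// <remarks>
+        /// A minimal sketch of reading the control inputs back; the op name
+        /// "my_op" is only an assumption for illustration:
+        /// ```
+        /// var op = ops.get_default_graph().OperationByName("my_op");
+        /// foreach (var dep in op.control_inputs)
+        ///     Console.WriteLine(dep.name);
+        /// ```
+        /// </remarks>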
null : c_api.StringPiece(c_api.TF_OperationName(_handle)); public string OpType => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationOpType(_handle)); - public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle)); + public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle)); #if SERIALIZABLE [JsonIgnore] #endif - bool _is_stateful; + bool _is_stateful; #if SERIALIZABLE [JsonIgnore] #endif @@ -176,17 +176,17 @@ namespace Tensorflow } } - _id_value = _graph._next_id(); - + _id_value = _graph._next_id(); + // Dict mapping op name to file and line information for op colocation // context managers. - _control_flow_context = graph._get_control_flow_context(); - + _control_flow_context = graph._get_control_flow_context(); + // This will be set by self.inputs. if (op_def == null) - op_def = g.GetOpDef(node_def.Op); - - var grouped_inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.Attr); + op_def = g.GetOpDef(node_def.Op); + + var grouped_inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.Attr); _handle = ops._create_c_op(g, node_def, grouped_inputs, control_input_ops.ToArray()); _is_stateful = op_def.IsStateful; diff --git a/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs b/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs index 20dc0f26..f9571a8a 100644 --- a/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs +++ b/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs @@ -1,21 +1,21 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ******************************************************************************/ using System; -using System.Linq; +using System.Linq; using Tensorflow.Operations; using static Tensorflow.Binding; @@ -31,26 +31,26 @@ namespace Tensorflow public static bool IsLoopExit(Operation op) { return op.type == "Exit" || op.type == "RefExit"; - } - - /// - /// Returns true if `op` is an Enter. - /// - /// - /// + } + + /// + /// Returns true if `op` is an Enter. 
+ /// + /// + /// public static bool IsLoopEnter(Operation op) { return op.type == "Enter" || op.type == "RefEnter"; } - /// - /// Return true iff op is a loop invariant. - /// - /// + /// + /// Return true iff op is a loop invariant. + /// + /// /// - public static bool IsLoopConstantEnter(Operation op) - { - return IsLoopEnter(op) && op.get_attr("is_constant"); + public static bool IsLoopConstantEnter(Operation op) + { + return IsLoopEnter(op) && op.get_attr("is_constant"); } /// @@ -61,141 +61,141 @@ namespace Tensorflow public static bool IsSwitch(Operation op) { return op.type == "Switch" || op.type == "RefSwitch"; - } - - public static WhileContext GetWhileContext(Operation op) + } + + public static WhileContext GetWhileContext(Operation op) => op.GetWhileContext(); public static bool IsCondSwitch(Operation op) - { - if (!IsSwitch(op)) - return false; - if (op.outputs == null || op.outputs.Length == 0) - return false; - + { + if (!IsSwitch(op)) + return false; + if (op.outputs == null || op.outputs.Length == 0) + return false; + // Switch nodes are not part of the cond control flow context that they // represent, so consider the consumers of its outputs to determine if it is // cond switch or not. A switch is a cond switch iff all its consumers are in - // cond contexts. - var is_cond_switch = true; - foreach(var o in op.outputs) - { - foreach(var c in o.consumers()) - { - var ctxt = c._get_control_flow_context(); - if (IsLoopEnter(c)) - ctxt = ctxt.outer_context; - is_cond_switch = is_cond_switch &&(ctxt != null && ctxt.IsCondContext()); - } - } - - return is_cond_switch; + // cond contexts. + var is_cond_switch = true; + foreach(var o in op.outputs) + { + foreach(var c in o.consumers()) + { + var ctxt = c._get_control_flow_context(); + if (IsLoopEnter(c)) + ctxt = ctxt.outer_context; + is_cond_switch = is_cond_switch &&(ctxt != null && ctxt.IsCondContext()); + } + } + + return is_cond_switch; } - public static bool IsLoopSwitch(Operation op) - { - if (IsSwitch(op)) - { - var ctxt = op._get_control_flow_context(); - return ctxt != null && ctxt.IsWhileContext() && !IsCondSwitch(op); - } - return false; + public static bool IsLoopSwitch(Operation op) + { + if (IsSwitch(op)) + { + var ctxt = op._get_control_flow_context(); + return ctxt != null && ctxt.IsWhileContext() && !IsCondSwitch(op); + } + return false; } - /// - /// Return the control flow context for the output of an op. - /// + /// + /// Return the control flow context for the output of an op. + /// public static ControlFlowContext GetOutputContext(Operation op) { var ctxt = op._get_control_flow_context(); - // Exit nodes usually have a control flow context, except in the case where the - // exit node was imported via import_graph_def (in which case no nodes have + // Exit nodes usually have a control flow context, except in the case where the + // exit node was imported via import_graph_def (in which case no nodes have // control flow contexts). 
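            // (Sketch) e.g. for the Exit op of a tf.while_loop, this walk returns the
            // enclosing WhileContext's outer context, which is null when the loop
            // sits at the top level of the graph.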
if (ctxt != null && IsLoopExit(op)) ctxt = ctxt.outer_context; return ctxt; } - public static void CheckInputFromValidContext(Operation op, Operation input_op) - { - var op_ctxt = op._get_control_flow_context(); - var input_ctxt = GetOutputContext(input_op); - var valid = false; - if (input_ctxt == null) - valid = true; - else if (op_ctxt == input_ctxt) - valid = true; - else - { - var while_ctxt = GetContainingWhileContext(op_ctxt); - var input_while_ctxt = GetContainingWhileContext(input_ctxt); - - if (while_ctxt == null) - { + public static void CheckInputFromValidContext(Operation op, Operation input_op) + { + var op_ctxt = op._get_control_flow_context(); + var input_ctxt = GetOutputContext(input_op); + var valid = false; + if (input_ctxt == null) + valid = true; + else if (op_ctxt == input_ctxt) + valid = true; + else + { + var while_ctxt = GetContainingWhileContext(op_ctxt); + var input_while_ctxt = GetContainingWhileContext(input_ctxt); + + if (while_ctxt == null) + { // Neither op nor input_op is in a while loop, but one or both are in // conds. We allow this, although execution will fail if the branch - // corresponding to input_op's cond context isn't taken. - if (input_while_ctxt == null) - valid = true; - // Invalid if op isn't in a while loop and input_op is. Unless... - if (IsLoopEnter(op)) - // WhileContext._BuildLoop clears context for Enter nodes. - valid = true; - if (IsSwitch(op)) - // CondContext.AddValue clears context for Switch nodes. - valid = true; - } - else if (IsContainingContext(while_ctxt, input_while_ctxt)) - { - // input_op is in a while loop which contains op's while loop (or not in a - // while loop at all). - valid = true; - } - else if (while_ctxt.grad_state != null && + // corresponding to input_op's cond context isn't taken. + if (input_while_ctxt == null) + valid = true; + // Invalid if op isn't in a while loop and input_op is. Unless... + if (IsLoopEnter(op)) + // WhileContext._BuildLoop clears context for Enter nodes. + valid = true; + if (IsSwitch(op)) + // CondContext.AddValue clears context for Switch nodes. + valid = true; + } + else if (IsContainingContext(while_ctxt, input_while_ctxt)) + { + // input_op is in a while loop which contains op's while loop (or not in a + // while loop at all). + valid = true; + } + else if (while_ctxt.grad_state != null && IsContainingContext(while_ctxt.grad_state.forward_context, - input_while_ctxt)) - { - valid = true; - } - else - throw new NotImplementedException("CheckInputFromValidContext"); - } - - if (!valid) - { - throw new NotImplementedException("CheckInputFromValidContext"); - } - } - - public static Operation GetLoopConstantEnter(Tensor value) - { - var id_ops = new string[] { "Switch", "RefSwitch", "Identity", "RefIdentity" }; - var op = value.op; - while (id_ops.Contains(op.type)) - op = op.inputs[0].op; - return IsLoopConstantEnter(op) ? 
op : null; + input_while_ctxt)) + { + valid = true; + } + else + throw new NotImplementedException("CheckInputFromValidContext"); + } + + if (!valid) + { + throw new NotImplementedException("CheckInputFromValidContext"); + } } - public static bool IsContainingContext(WhileContext ctxt, WhileContext maybe_containing_ctxt) - { - while(ctxt != maybe_containing_ctxt) - { - if (ctxt == null) - return false; - ctxt = ctxt.outer_context as WhileContext; - } - return true; + public static Operation GetLoopConstantEnter(Tensor value) + { + var id_ops = new string[] { "Switch", "RefSwitch", "Identity", "RefIdentity" }; + var op = value.op; + while (id_ops.Contains(op.type)) + op = op.inputs[0].op; + return IsLoopConstantEnter(op) ? op : null; } - public static WhileContext GetContainingWhileContext(ControlFlowContext ctxt, ControlFlowContext stop_ctxt = null) - { - while (ctxt != null) - { - if (ctxt.IsWhileContext() || ctxt == stop_ctxt) - return ctxt as WhileContext; - ctxt = ctxt.outer_context; - } - return null; + public static bool IsContainingContext(WhileContext ctxt, WhileContext maybe_containing_ctxt) + { + while(ctxt != maybe_containing_ctxt) + { + if (ctxt == null) + return false; + ctxt = ctxt.outer_context as WhileContext; + } + return true; + } + + public static WhileContext GetContainingWhileContext(ControlFlowContext ctxt, ControlFlowContext stop_ctxt = null) + { + while (ctxt != null) + { + if (ctxt.IsWhileContext() || ctxt == stop_ctxt) + return ctxt as WhileContext; + ctxt = ctxt.outer_context; + } + return null; } } } diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index 62b0f1b4..5cf240e8 100644 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -1,735 +1,735 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using static Tensorflow.Binding; - -namespace Tensorflow -{ - public static class gen_math_ops - { - public static OpDefLibrary _op_def_lib = new OpDefLibrary(); - - public static Tensor _all(Tensor input, Tensor axis, bool keep_dims = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims }); - - return _op.outputs[0]; - } - - /// - /// Add all input tensors element wise. - /// - /// - /// - /// - public static Tensor add_n(Tensor[] inputs, string name = null) - { - var _op = _op_def_lib._apply_op_helper("AddN", name, args: new { inputs }); - - return _op.outputs[0]; - } - - /// - /// Returns the index with the largest value across dimensions of a tensor. 
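-        /// (Sketch) e.g. for input [[1f, 3f, 2f]], arg_max(input, 1) ==> [1].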
- /// - /// - /// - /// - /// - /// - public static Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => _op_def_lib._apply_op_helper("ArgMax", name, args: new { input, dimension, output_type }).outputs[0]; - - /// - /// Returns the index with the smallest value across dimensions of a tensor. - /// - /// - /// - /// - /// - /// - public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type= TF_DataType.TF_INT64, string name= null) - =>_op_def_lib._apply_op_helper("ArgMin", name, args: new { input, dimension, output_type }).outputs[0]; - - /// - /// Computes Psi, the derivative of Lgamma (the log of the absolute value of - /// `Gamma(x)`), element-wise. - /// - /// - /// - /// - public static Tensor digamma(Tensor x, string name = null) - => _op_def_lib._apply_op_helper("Digamma", name, args: new { x }).output; - - /// - /// Returns 0 if the denominator is zero. - /// - /// - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DivNoNan'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// - /// *NOTE*: DivNoNan supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - /// - public static Tensor div_no_nan(Tensor x, Tensor y, string name = null) - { - var op = _op_def_lib._apply_op_helper("DivNoNan", name: name, args: new { x, y }); - return op.output; - } - - /// - /// Computes the mean of elements across dimensions of a tensor. - /// Reduces `input` along the dimensions given in `axis`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. - /// - /// A `Tensor`. Must be one of the following types: - /// `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. - /// The tensor to reduce. - /// A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. - /// An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `input`. 
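-        /// (Sketch) e.g. 'x' is [[1f, 1f], [2f, 2f]]:
-        ///   mean(x, 0) ==> [1.5f, 1.5f]
-        ///   mean(x, 1) ==> [1f, 2f]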
- public static Tensor mean(T1 input, T2 axis, bool keep_dims= false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims }); - - return _op.output; - } - - public static Tensor prod(T1 input, T2 axis, bool keep_dims = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Prod", name, args: new { input, reduction_indices = axis, keep_dims }); - - return _op.outputs[0]; - } - - public static Tensor acos(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Acos", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor asin(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Asin", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor add(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); - - return _op.output; - } - - public static Tensor atan(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Atan", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor ceil(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Ceil", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor sin(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Sin", name, args: new { x }); - - return _op.outputs[0]; - } - - /// - /// Computes sigmoid of x element-wise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sigmoid'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Specifically, y = 1 / (1 + exp(-x)). - /// - public static Tensor sigmoid(Tensor x, string name = "Sigmoid") - { - var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, new { x }); - - return op.output; - } - - /// - /// Computes the gradient of the sigmoid of x wrt its input. - /// - /// - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SigmoidGrad'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Specifically, grad = dy * y * (1 - y), where y = sigmoid(x), and - /// dy is the corresponding input gradient. 
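-        /// (Sketch) with y = sigmoid(x) = 0.5f and dy = 1f, the result is 1 * 0.5 * (1 - 0.5) = 0.25.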
- /// - public static Tensor sigmoid_grad(Tensor y, Tensor dy, string name = "SigmoidGrad") - { - var op = _op_def_lib._apply_op_helper("SigmoidGrad", name: name, args: new { y, dy }); - - return op.outputs[0]; - } - - public static Tensor sign(Tensor x, string name = "Sign") - { - var op = _op_def_lib._apply_op_helper("Sign", name: name, args: new {x}); - - return op.outputs[0]; - } - - public static Tensor sinh(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Sinh", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor cos(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Cos", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor cosh(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Cosh", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor cumsum(Tensor x, T axis, bool exclusive = false, bool reverse = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Cumsum", name, args: new { x, axis, exclusive, reverse }); - - return _op.outputs[0]; - } - - /// - /// Computes the sum along segments of a tensor. - /// - /// - /// - /// - /// - /// - public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string name = null) - { - var _op = _op_def_lib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments }); - return _op.outputs[0]; - } - - public static Tensor tan(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Tan", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor tanh(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Tanh", name, args: new { x }); - - return _op.outputs[0]; - } - - /// - /// Computes the gradient for the tanh of `x` wrt its input. - /// - /// - /// - /// - /// - public static Tensor tanh_grad(Tensor y, Tensor dy, string name = null) - => _op_def_lib._apply_op_helper("TanhGrad", name: name, args: new { y, dy }).output; - - public static Tensor floor(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Floor", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null) - { - var _op = _op_def_lib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max }); - - return _op.outputs[0]; - } - - public static Tensor greater(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y }); - - return _op.outputs[0]; - } - - /// - /// Computes the log of the absolute value of `Gamma(x)` element-wise. - /// - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. - /// - /// - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
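-        /// (Sketch) since Gamma(5) = 4! = 24, lgamma evaluated at 5 is log(24), roughly 3.178.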
- /// - public static Tensor lgamma(Tensor x, string name = null) - { - var op = _op_def_lib._apply_op_helper("Lgamma", name: name, args: new { x }); - - return op.output; - } - - public static Tensor greater_equal(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("GreaterEqual", name: name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor less(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor less_equal(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("LessEqual", name: name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor log1p(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Log1p", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor logical_and(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor logical_not(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("LogicalNot", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor logical_or(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("LogicalOr", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor") - { - return logical_and( - logical_or(x, y), - logical_not(logical_and(x, y)), - name); - } - - public static Tensor squared_difference(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name }); - - return _op.outputs[0]; - } - - /// - /// Computes square of x element-wise. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `x`. - public static Tensor square(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Square", name, args: new { x }); - - return _op.outputs[0]; - } - - /// - /// Returns which elements of x are finite. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. - /// A name for the operation (optional). - /// A `Tensor` of type `bool`. - public static Tensor is_finite(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("IsFinite", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor is_nan(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("IsNan", name: name, args: new { x }); - - return _op.outputs[0]; - } - - /// - /// Computes exponential of x element-wise. \\(y = e^x\\). - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `x`. - public static Tensor exp(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Exp", name, args: new { x }); - - return _op.outputs[0]; - } - - /// - /// Computes natural logarithm of x element-wise. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. 
- /// name: A name for the operation (optional). - /// A `Tensor`. Has the same type as `x`. - public static Tensor log(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Log", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate= false, string name= "") - { - var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate }); - - return _op.outputs[0]; - } - - public static Tensor neg(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor sqrt(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Sqrt", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor sub(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y }); - - return _op.outputs[0]; - } - - /// - /// Returns the truth value of (x == y) element-wise. - /// - /// - /// - /// - /// - public static Tensor equal(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Equal", name, args: new { x, y }); - - return _op.outputs[0]; - } - - /// - /// Returns the truth value of (x != y) element-wise. - /// - /// The type of the x. - /// The type of the y. - /// The x. - /// The y. - /// The name. - /// - public static Tensor not_equal(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y }); - - return _op.outputs[0]; - } - - - public static Tensor atan2(Tensor y, Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Atan2", name, args: new { y, x }); - - return _op.outputs[0]; - } - - public static Tensor mul(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor mul_no_nan(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("MulNoNan", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor real_div(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor reciprocal(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Reciprocal", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor floor_mod(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor floor_div(Tensor x, Tensor y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y }); - - return _op.outputs[0]; - } - - /// - /// Multiply the matrix "a" by the matrix "b". - /// - /// - /// - /// - /// - /// - /// - public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b }); - - return _op.output; - } - - /// - /// Multiply slices of the two matrices "x" and "y". - /// - /// - /// The `BatchMatMul` operation is embedded into the - /// `MatMul` operation on the DLL side. 
However the expected - /// attributes are not the same, hence we need to expose this - /// method to have the right args list on the `_apply_op_helper` - /// function. - /// - /// For each rank > 2 the first rank - 2 dimensions are considered - /// as fixed, and have to be consistent across the two matrices. A - /// common matrix multiplication is then applied over the residual - /// 2 dimensions. - /// - /// e.g. - /// x is (3, 6, 12); y is (3, 12, 6) - /// batch_matmul(x, y) ==> (3, 6, 6) - /// - /// - /// - /// - /// - /// - /// - public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper( - "BatchMatMul", - name, - args: new { x, y, adj_x, adj_y }); - - return _op.outputs[0]; - } - - /// - /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. - /// - /// - /// - /// - /// - public static Tensor maximum(T1 x, T2 y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Maximum", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor minimum(T1 x, T2 y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Minimum", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor _abs(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Abs", name, args: new { x }); - - return _op.output; - } - - public static Tensor _any(Tx input, Ty axis, bool keep_dims = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims }); - - return _op.outputs[0]; - } - - public static Tensor _max(Tx input, Ty axis, bool keep_dims=false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Max", name, new { input, reduction_indices = axis, keep_dims }); - - return _op.outputs[0]; - } - - public static Tensor _min(Tx input, Ty axis, bool keep_dims = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Min", name, new { input, reduction_indices = axis, keep_dims }); - - return _op.outputs[0]; - } - - public static Tensor pow(Tx x, Ty y, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor _sum(Tx input, Ty axis = default, bool keep_dims = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims }); - - return _op.outputs[0]; - } - - /// - /// Creates a sequence of numbers. - /// - /// - /// - /// - /// - /// - public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta }); - - return _op.outputs[0]; - } - - /// - /// Rounds the values of a tensor to the nearest integer, element-wise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Round'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Rounds half to even. Also known as bankers rounding. If you want to round - /// according to the current system rounding mode use std::cint. 
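-        /// (Sketch) round([0.5f, 1.5f, 2.5f]) ==> [0f, 2f, 2f]; ties go to the even neighbor.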
- /// - public static Tensor round(Tensor x, string name = "Round") - { - var op = _op_def_lib._apply_op_helper("Round", name: name, new { x }); - - return op.output; - } - - /// - /// Computes reciprocal of square root of x element-wise. - /// - /// - /// - /// - public static Tensor rsqrt(Tensor x, string name = null) - { - var _op = _op_def_lib._apply_op_helper("Rsqrt", name, new { x }); - - return _op.outputs[0]; - } - - /// - /// Returns the fraction of zeros in value. - /// - /// A tensor of numeric type. - /// A name for the operation (optional). - /// The fraction of zeros in value, with type float32. - public static Tensor zero_fraction(Tensor value, string name = null) - { - var _op = _op_def_lib._apply_op_helper("zero_fraction", name, new { value, name }); - - return _op.outputs[0]; - } - } -} +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using static Tensorflow.Binding; + +namespace Tensorflow +{ + public static class gen_math_ops + { + public static OpDefLibrary _op_def_lib = new OpDefLibrary(); + + public static Tensor _all(Tensor input, Tensor axis, bool keep_dims = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims }); + + return _op.outputs[0]; + } + + /// + /// Add all input tensors element wise. + /// + /// + /// + /// + public static Tensor add_n(Tensor[] inputs, string name = null) + { + var _op = _op_def_lib._apply_op_helper("AddN", name, args: new { inputs }); + + return _op.outputs[0]; + } + + /// + /// Returns the index with the largest value across dimensions of a tensor. + /// + /// + /// + /// + /// + /// + public static Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) + => _op_def_lib._apply_op_helper("ArgMax", name, args: new { input, dimension, output_type }).outputs[0]; + + /// + /// Returns the index with the smallest value across dimensions of a tensor. + /// + /// + /// + /// + /// + /// + public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type= TF_DataType.TF_INT64, string name= null) + =>_op_def_lib._apply_op_helper("ArgMin", name, args: new { input, dimension, output_type }).outputs[0]; + + /// + /// Computes Psi, the derivative of Lgamma (the log of the absolute value of + /// `Gamma(x)`), element-wise. + /// + /// + /// + /// + public static Tensor digamma(Tensor x, string name = null) + => _op_def_lib._apply_op_helper("Digamma", name, args: new { x }).output; + + /// + /// Returns 0 if the denominator is zero. + /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DivNoNan'. 
+ /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// + /// *NOTE*: DivNoNan supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + public static Tensor div_no_nan(Tensor x, Tensor y, string name = null) + { + var op = _op_def_lib._apply_op_helper("DivNoNan", name: name, args: new { x, y }); + return op.output; + } + + /// + /// Computes the mean of elements across dimensions of a tensor. + /// Reduces `input` along the dimensions given in `axis`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. + /// + /// A `Tensor`. Must be one of the following types: + /// `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + /// The tensor to reduce. + /// A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. + /// An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. + /// A name for the operation (optional). + /// A `Tensor`. Has the same type as `input`. + public static Tensor mean(T1 input, T2 axis, bool keep_dims= false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims }); + + return _op.output; + } + + public static Tensor prod(T1 input, T2 axis, bool keep_dims = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Prod", name, args: new { input, reduction_indices = axis, keep_dims }); + + return _op.outputs[0]; + } + + public static Tensor acos(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Acos", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor asin(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Asin", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor add(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); + + return _op.output; + } + + public static Tensor atan(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Atan", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor ceil(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Ceil", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor sin(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Sin", name, args: new { x }); + + return _op.outputs[0]; + } + + /// + /// Computes sigmoid of x element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sigmoid'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, y = 1 / (1 + exp(-x)). + /// + public static Tensor sigmoid(Tensor x, string name = "Sigmoid") + { + var op = _op_def_lib._apply_op_helper("Sigmoid", name: name, new { x }); + + return op.output; + } + + /// + /// Computes the gradient of the sigmoid of x wrt its input. 
+ /// + /// + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SigmoidGrad'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Specifically, grad = dy * y * (1 - y), where y = sigmoid(x), and + /// dy is the corresponding input gradient. + /// + public static Tensor sigmoid_grad(Tensor y, Tensor dy, string name = "SigmoidGrad") + { + var op = _op_def_lib._apply_op_helper("SigmoidGrad", name: name, args: new { y, dy }); + + return op.outputs[0]; + } + + public static Tensor sign(Tensor x, string name = "Sign") + { + var op = _op_def_lib._apply_op_helper("Sign", name: name, args: new {x}); + + return op.outputs[0]; + } + + public static Tensor sinh(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Sinh", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor cos(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Cos", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor cosh(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Cosh", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor cumsum(Tensor x, T axis, bool exclusive = false, bool reverse = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Cumsum", name, args: new { x, axis, exclusive, reverse }); + + return _op.outputs[0]; + } + + /// + /// Computes the sum along segments of a tensor. + /// + /// + /// + /// + /// + /// + public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string name = null) + { + var _op = _op_def_lib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments }); + return _op.outputs[0]; + } + + public static Tensor tan(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Tan", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor tanh(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Tanh", name, args: new { x }); + + return _op.outputs[0]; + } + + /// + /// Computes the gradient for the tanh of `x` wrt its input. + /// + /// + /// + /// + /// + public static Tensor tanh_grad(Tensor y, Tensor dy, string name = null) + => _op_def_lib._apply_op_helper("TanhGrad", name: name, args: new { y, dy }).output; + + public static Tensor floor(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Floor", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null) + { + var _op = _op_def_lib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max }); + + return _op.outputs[0]; + } + + public static Tensor greater(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y }); + + return _op.outputs[0]; + } + + /// + /// Computes the log of the absolute value of `Gamma(x)` element-wise. + /// + /// + /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + /// + /// + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. 
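+        /// (Sketch) since Gamma(5) = 4! = 24, lgamma evaluated at 5 is log(24), roughly 3.178.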
+ /// + public static Tensor lgamma(Tensor x, string name = null) + { + var op = _op_def_lib._apply_op_helper("Lgamma", name: name, args: new { x }); + + return op.output; + } + + public static Tensor greater_equal(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("GreaterEqual", name: name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor less(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor less_equal(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("LessEqual", name: name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor log1p(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Log1p", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor logical_and(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor logical_not(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("LogicalNot", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor logical_or(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("LogicalOr", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor") + { + return logical_and( + logical_or(x, y), + logical_not(logical_and(x, y)), + name); + } + + public static Tensor squared_difference(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name }); + + return _op.outputs[0]; + } + + /// + /// Computes square of x element-wise. + /// + /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. + /// A name for the operation (optional). + /// A `Tensor`. Has the same type as `x`. + public static Tensor square(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Square", name, args: new { x }); + + return _op.outputs[0]; + } + + /// + /// Returns which elements of x are finite. + /// + /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. + /// A name for the operation (optional). + /// A `Tensor` of type `bool`. + public static Tensor is_finite(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("IsFinite", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor is_nan(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("IsNan", name: name, args: new { x }); + + return _op.outputs[0]; + } + + /// + /// Computes exponential of x element-wise. \\(y = e^x\\). + /// + /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. + /// A name for the operation (optional). + /// A `Tensor`. Has the same type as `x`. + public static Tensor exp(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Exp", name, args: new { x }); + + return _op.outputs[0]; + } + + /// + /// Computes natural logarithm of x element-wise. + /// + /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. 
+ /// name: A name for the operation (optional). + /// A `Tensor`. Has the same type as `x`. + public static Tensor log(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Log", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate= false, string name= "") + { + var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate }); + + return _op.outputs[0]; + } + + public static Tensor neg(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor sqrt(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Sqrt", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor sub(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y }); + + return _op.outputs[0]; + } + + /// + /// Returns the truth value of (x == y) element-wise. + /// + /// + /// + /// + /// + public static Tensor equal(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Equal", name, args: new { x, y }); + + return _op.outputs[0]; + } + + /// + /// Returns the truth value of (x != y) element-wise. + /// + /// The type of the x. + /// The type of the y. + /// The x. + /// The y. + /// The name. + /// + public static Tensor not_equal(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("NotEqual", name, args: new { x, y }); + + return _op.outputs[0]; + } + + + public static Tensor atan2(Tensor y, Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Atan2", name, args: new { y, x }); + + return _op.outputs[0]; + } + + public static Tensor mul(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor mul_no_nan(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("MulNoNan", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor real_div(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor reciprocal(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Reciprocal", name, args: new { x }); + + return _op.outputs[0]; + } + + public static Tensor floor_mod(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor floor_div(Tensor x, Tensor y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y }); + + return _op.outputs[0]; + } + + /// + /// Multiply the matrix "a" by the matrix "b". + /// + /// + /// + /// + /// + /// + /// + public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b }); + + return _op.output; + } + + /// + /// Multiply slices of the two matrices "x" and "y". + /// + /// + /// The `BatchMatMul` operation is embedded into the + /// `MatMul` operation on the DLL side. 
However the expected + /// attributes are not the same, hence we need to expose this + /// method to have the right args list on the `_apply_op_helper` + /// function. + /// + /// For each rank > 2 the first rank - 2 dimensions are considered + /// as fixed, and have to be consistent across the two matrices. A + /// common matrix multiplication is then applied over the residual + /// 2 dimensions. + /// + /// e.g. + /// x is (3, 6, 12); y is (3, 12, 6) + /// batch_matmul(x, y) ==> (3, 6, 6) + /// + /// + /// + /// + /// + /// + /// + public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper( + "BatchMatMul", + name, + args: new { x, y, adj_x, adj_y }); + + return _op.outputs[0]; + } + + /// + /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. + /// + /// + /// + /// + /// + public static Tensor maximum(T1 x, T2 y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Maximum", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor minimum(T1 x, T2 y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Minimum", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor _abs(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Abs", name, args: new { x }); + + return _op.output; + } + + public static Tensor _any(Tx input, Ty axis, bool keep_dims = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims }); + + return _op.outputs[0]; + } + + public static Tensor _max(Tx input, Ty axis, bool keep_dims=false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Max", name, new { input, reduction_indices = axis, keep_dims }); + + return _op.outputs[0]; + } + + public static Tensor _min(Tx input, Ty axis, bool keep_dims = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Min", name, new { input, reduction_indices = axis, keep_dims }); + + return _op.outputs[0]; + } + + public static Tensor pow(Tx x, Ty y, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y }); + + return _op.outputs[0]; + } + + public static Tensor _sum(Tx input, Ty axis = default, bool keep_dims = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims }); + + return _op.outputs[0]; + } + + /// + /// Creates a sequence of numbers. + /// + /// + /// + /// + /// + /// + public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta }); + + return _op.outputs[0]; + } + + /// + /// Rounds the values of a tensor to the nearest integer, element-wise. + /// + /// + /// + /// + /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Round'. + /// + /// + /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. + /// + /// + /// Rounds half to even. Also known as bankers rounding. If you want to round + /// according to the current system rounding mode use std::cint. 
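+        /// (Sketch) round([0.5f, 1.5f, 2.5f]) ==> [0f, 2f, 2f]; ties go to the even neighbor.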
+ /// + public static Tensor round(Tensor x, string name = "Round") + { + var op = _op_def_lib._apply_op_helper("Round", name: name, new { x }); + + return op.output; + } + + /// + /// Computes reciprocal of square root of x element-wise. + /// + /// + /// + /// + public static Tensor rsqrt(Tensor x, string name = null) + { + var _op = _op_def_lib._apply_op_helper("Rsqrt", name, new { x }); + + return _op.outputs[0]; + } + + /// + /// Returns the fraction of zeros in value. + /// + /// A tensor of numeric type. + /// A name for the operation (optional). + /// The fraction of zeros in value, with type float32. + public static Tensor zero_fraction(Tensor value, string name = null) + { + var _op = _op_def_lib._apply_op_helper("zero_fraction", name, new { value, name }); + + return _op.outputs[0]; + } + } +} diff --git a/src/TensorFlowNET.Core/Sessions/FeedDict.cs b/src/TensorFlowNET.Core/Sessions/FeedDict.cs index a95b5db1..f39a761d 100644 --- a/src/TensorFlowNET.Core/Sessions/FeedDict.cs +++ b/src/TensorFlowNET.Core/Sessions/FeedDict.cs @@ -1,8 +1,8 @@ -using System.Collections; - -namespace Tensorflow.Sessions -{ - public class FeedDict : Hashtable - { - } -} +using System.Collections; + +namespace Tensorflow.Sessions +{ + public class FeedDict : Hashtable + { + } +} diff --git a/src/TensorFlowNET.Core/Util/nest.py.cs b/src/TensorFlowNET.Core/Util/nest.py.cs index 54149fe1..3f5d78eb 100644 --- a/src/TensorFlowNET.Core/Util/nest.py.cs +++ b/src/TensorFlowNET.Core/Util/nest.py.cs @@ -1,987 +1,987 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; -using NumSharp; -using Tensorflow.Operations; - -namespace Tensorflow.Util -{ - //Functions for working with arbitrarily nested sequences of elements. - - //This module can perform operations on nested structures. A nested structure is a - //Python sequence, tuple (including `namedtuple`), or dict that can contain - //further sequences, tuples, and dicts. - - //The utilities here assume (and do not check) that the nested structures form a - //'tree', i.e., no references in the structure of the input of these functions - //should be recursive. 
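-
-    // For example, flatten(new object[] { 1, new object[] { 2, 3 } }) walks the
-    // nesting below and yields the flat list [1, 2, 3].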
- - //Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0), - // (np.array([3, 4]), tf.constant([3, 4])))` - // - - public static class nest - { - - - /// - /// Untyped implementation of zip for arbitrary data - /// - /// Converts an list of lists or arrays [[1,2,3], [4,5,6], [7,8,9]] into a list of arrays - /// representing tuples of the same index of all source arrays [[1,4,7], [2,5,9], [3,6,9]] - /// - /// one or multiple sequences to be zipped - /// - public static IEnumerable zip_many(params IEnumerable[] lists) - { - if (lists.Length == 0) - yield break; - var first = lists[0]; - if (first == null) - yield break; - var arity = first.Count(); - for (int i = 0; i < arity; i++) - { - var array = new object[lists.Length]; - for (int j = 0; j < lists.Length; j++) - array[j] = GetSequenceElementAt(lists[j], i); - yield return array; - } - } - - private static object GetSequenceElementAt(object sequence, int i) - { - switch (sequence) - { - case Array array: - return array.GetValue(i); - case IList list: - return list[i]; - default: - return _yield_value(sequence).Skip(Math.Max(0, i)).FirstOrDefault(); - } - } - - public static IEnumerable<(T1, T2)> zip(IEnumerable e1, IEnumerable e2) - => zip(e1, e2); - - public static Dictionary ConvertToDict(object dyn) - => ConvertToDict(dyn); - - //def _get_attrs_values(obj): - // """Returns the list of values from an attrs instance.""" - // attrs = getattr(obj.__class__, "__attrs_attrs__") - // return [getattr(obj, a.name) for a in attrs] - - /// - /// Returns a sorted list of the dict keys, with error if keys not sortable. - /// - private static IEnumerable _sorted(IDictionary dict_) - { - return dict_.Keys.OfType().OrderBy(x => x); - } - - - //def _is_namedtuple(instance, strict=False): - // """Returns True iff `instance` is a `namedtuple`. - - // Args: - // instance: An instance of a Python object. - // strict: If True, `instance` is considered to be a `namedtuple` only if - // it is a "plain" namedtuple. For instance, a class inheriting - // from a `namedtuple` will be considered to be a `namedtuple` - // iff `strict=False`. - - // Returns: - // True if `instance` is a `namedtuple`. - // """ - // return _pywrap_tensorflow.IsNamedtuple(instance, strict) - - - //# See the swig file (util.i) for documentation. - //_is_mapping = _pywrap_tensorflow.IsMapping - //_is_attrs = _pywrap_tensorflow.IsAttrs - - /// - /// Converts the sequence `args` to the same type as `instance`. - /// - /// an instance of `tuple`, `list`, `namedtuple`, `dict`, or - /// `collections.OrderedDict`. - /// elements to be converted to the `instance` type. - /// `args` with the type of `instance`. - private static object _sequence_like(object instance, IEnumerable args) - { - if (is_mapping(instance)) - { - //# Pack dictionaries in a deterministic order by sorting the keys. - //# Notice this means that we ignore the original order of `OrderedDict` - //# instances. This is intentional, to avoid potential bugs caused by mixing - //# ordered and plain dicts (e.g., flattening a dict but using a - //# corresponding `OrderedDict` to pack it back). 
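-            // (Sketch) e.g. _sequence_like(new object[] { 0, 0 }, args: new object[] { "a", "b" })
-            // rebuilds the array shape and returns new object[] { "a", "b" }.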
- switch (instance) - { - case Hashtable hash: - var result = new Hashtable(); - foreach ((object key, object value) in zip(_sorted(hash), args)) - result[key] = value; - return result; - } - } - //else if( _is_namedtuple(instance) || _is_attrs(instance)) - // return type(instance)(*args) - else - { - // Not a namedtuple - switch (instance) - { - case object[] array: - var result_array = new object[args.Count()]; - int i = 0; - foreach (var x in args) - { - result_array[i] = x; - i++; - } - return result_array; - case List list: - return new List(args); - default: - throw new TypeError("Type of sequence not supported (yet): " + instance.GetType()); - } - } - throw new TypeError("Type of sequence not supported (yet): " + instance.GetType()); - } - - /// - /// Yields the next value from the given iterable. - /// - private static IEnumerable _yield_value(object iterable) - { - if (is_mapping(iterable)) - { - var dict = iterable as IDictionary; - //# Iterate through dictionaries in a deterministic order by sorting the - //# keys. Notice this means that we ignore the original order of `OrderedDict` - //# instances. This is intentional, to avoid potential bugs caused by mixing - //# ordered and plain dicts (e.g., flattening a dict but using a - //# corresponding `OrderedDict` to pack it back). - foreach (var key in _sorted(dict)) - yield return dict[key]; - } - //else if (_is_attrs(iterable)) - //{ - // // for value in _get_attrs_values(iterable): - // // yield value - //} - else if (iterable is IEnumerable) - { - var enumerable = iterable as IEnumerable; - foreach (var value in enumerable) - yield return value; - } - else - { - throw new TypeError("Unexpected iterable type: " + iterable.GetType()); - //var jobj = JObject.FromObject(iterable); - //foreach (var key in _sorted()) - // yield return jobj[key]; - } - } - - //# See the swig file (util.i) for documentation. - public static bool is_sequence(object arg) - => arg is IEnumerable && !(arg is string) && !(arg is NDArray) && - !(arg.GetType().IsGenericType && arg.GetType().GetGenericTypeDefinition() == typeof(HashSet<>)); - - public static bool is_mapping(object arg) => arg is IDictionary; - - //# See the swig file (util.i) for documentation. - //flatten = _pywrap_tensorflow.Flatten - - public static List flatten(T structure) - { - var list = new List(); - _flatten_recursive(structure, list); - return list; - } - - public static object[] flatten2(ICanBeFlattened structure) - => structure.Flatten(); - - public static T[] flatten2(T[] structure) - => structure; - - private static void _flatten_recursive(T obj, List list) - { - switch(obj) - { - case IDictionary dict: - foreach (var key in _sorted(dict)) - _flatten_recursive((T)dict[key], list); - break; - case String str: - list.Add(obj); - break; - case NDArray nd: - list.Add(obj); - break; - case IEnumerable structure: - foreach (var child in structure) - _flatten_recursive((T)child, list); - break; - default: - list.Add(obj); - break; - } - } - - - //# See the swig file (util.i) for documentation. - //_same_namedtuples = _pywrap_tensorflow.SameNamedtuples - - - //class _DotString(object): - - // def __str__(self): - // return "." - - // def __repr__(self): - // return "." - - - //_DOT = _DotString() - - - //def assert_same_structure(nest1, nest2, check_types=True): - // """Asserts that two structures are nested in the same way. - - // Note that namedtuples with identical name and fields are always considered - // to have the same shallow structure (even with `check_types=True`). 
- // For intance, this code will print `True`: - - // ```python - // def nt(a, b): - // return collections.namedtuple('foo', 'a b')(a, b) - // print(assert_same_structure(nt(0, 1), nt(2, 3))) - // ``` - - // Args: - // nest1: an arbitrarily nested structure. - // nest2: an arbitrarily nested structure. - // check_types: if `True` (default) types of sequences are checked as well, - // including the keys of dictionaries. If set to `False`, for example a - // list and a tuple of objects will look the same if they have the same - // size. Note that namedtuples with identical name and fields are always - // considered to have the same shallow structure. Two types will also be - // considered the same if they are both list subtypes (which allows "list" - // and "_ListWrapper" from checkpointable dependency tracking to compare - // equal). - - // Raises: - // ValueError: If the two structures do not have the same number of elements or - // if the two structures are not nested in the same way. - // TypeError: If the two structures differ in the type of sequence in any of - // their substructures. Only possible if `check_types` is `True`. - // """ - // try: - // _pywrap_tensorflow.AssertSameStructure(nest1, nest2, check_types) - // except (ValueError, TypeError) as e: - // str1 = str(map_structure(lambda _: _DOT, nest1)) - // str2 = str(map_structure(lambda _: _DOT, nest2)) - // raise type(e)("%s\n" - // "Entire first structure:\n%s\n" - // "Entire second structure:\n%s" - // % (str(e), str1, str2)) - - - //def flatten_dict_items(dictionary): - // """Returns a dictionary with flattened keys and values. - - // This function flattens the keys and values of a dictionary, which can be - // arbitrarily nested structures, and returns the flattened version of such - // structures: - - // ```python - // example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))} - // result = {4: "a", 5: "b", 6: "c", 8: "d"} - // flatten_dict_items(example_dictionary) == result - // ``` - - // The input dictionary must satisfy two properties: - - // 1. Its keys and values should have the same exact nested structure. - // 2. The set of all flattened keys of the dictionary must not contain repeated - // keys. - - // Args: - // dictionary: the dictionary to zip - - // Returns: - // The zipped dictionary. - - // Raises: - // TypeError: If the input is not a dictionary. - // ValueError: If any key and value have not the same structure, or if keys are - // not unique. - // """ - // if not isinstance(dictionary, (dict, _collections.Mapping)): - // raise TypeError("input must be a dictionary") - // flat_dictionary = {} - // for i, v in _six.iteritems(dictionary): - // if not is_sequence(i): - // if i in flat_dictionary: - // raise ValueError( - // "Could not flatten dictionary: key %s is not unique." % i) - // flat_dictionary[i] = v - // else: - // flat_i = flatten(i) - // flat_v = flatten(v) - // if len(flat_i) != len(flat_v): - // raise ValueError( - // "Could not flatten dictionary. Key had %d elements, but value had " - // "%d elements. Key: %s, value: %s." - // % (len(flat_i), len(flat_v), flat_i, flat_v)) - // for new_i, new_v in zip(flat_i, flat_v): - // if new_i in flat_dictionary: - // raise ValueError( - // "Could not flatten dictionary: key %s is not unique." - // % (new_i)) - // flat_dictionary[new_i] = new_v - // return flat_dictionary - - /// - /// Helper function for pack_sequence_as. - /// - /// Substructure (list / tuple / dict) to mimic. - /// Flattened values to output substructure for. 
- /// Index at which to start reading from flat. - /// - /// The tuple(new_index, child), where: - /// * new_index - the updated index into `flat` having processed `structure`. - /// * packed - the subset of `flat` corresponding to `structure`, - /// having started at `index`, and packed into the same nested - /// format. - private static (int new_index, List child) _packed_nest_with_indices(object structure, List flat, - int index) - { - var packed = new List(); - foreach (var s in _yield_value(structure)) - { - if (is_sequence(s)) - { - var (new_index, child) = _packed_nest_with_indices(s, flat, index); - packed.Add(_sequence_like(s, child)); - index = new_index; - } - else - { - packed.Add(flat[index]); - index += 1; - } - } - return (index, packed); - } - - private static int len(IEnumerable x) => x.Count(); - - public static T pack_sequence_as2(T structure, object[] flat_sequence, bool expand_composites = false) - where T : IPackable - => structure.Pack(flat_sequence); - - /// - /// Returns a given flattened sequence packed into a given structure. - /// If `structure` is a scalar, `flat_sequence` must be a single-element list; - /// in this case the return value is `flat_sequence[0]`. - /// - /// If `structure` is or contains a dict instance, the keys will be sorted to - /// pack the flat sequence in deterministic order. This is true also for - /// `OrderedDict` instances: their sequence order is ignored, the sorting order of - /// keys is used instead. The same convention is followed in `flatten`. - /// This correctly repacks dicts and `OrderedDict`s after they have been - /// flattened, and also allows flattening an `OrderedDict` and then repacking it - /// back using a corresponding plain dict, or vice-versa. - /// Dictionaries with non-sortable keys cannot be flattened. - /// - /// - /// Nested structure, whose structure is given by nested lists, - /// tuples, and dicts. Note: numpy arrays and strings are considered - /// scalars. - /// - /// flat sequence to pack. - /// `flat_sequence` converted to have the same recursive structure as - /// `structure`. - /// - public static object pack_sequence_as(object structure, IEnumerable flat_sequence, bool expand_composites = false) - { - List flat = null; - if (flat_sequence is List) - flat = flat_sequence as List; - else - flat=new List(flat_sequence); - if (flat_sequence==null) - throw new ArgumentException("flat_sequence must not be null"); - // if not is_sequence(flat_sequence): - // raise TypeError("flat_sequence must be a sequence") - - if (!is_sequence(structure)) - { - if (len(flat) != 1) - throw new ValueError($"Structure is a scalar but len(flat_sequence) == {len(flat)} > 1"); - return flat.FirstOrDefault(); - } - int final_index = 0; - List packed = null; - try - { - (final_index, packed) = _packed_nest_with_indices(structure, flat, 0); - if (final_index < len(flat)) - throw new IndexOutOfRangeException( - $"Final index: {final_index} was smaller than len(flat_sequence): {len(flat)}"); - return _sequence_like(structure, packed); - } - catch (IndexOutOfRangeException) - { - var flat_structure = flatten(structure); - if (len(flat_structure) != len(flat)) - { - throw new ValueError("Could not pack sequence. Structure had {len(structure)} elements, but " + - $"flat_sequence had {len(flat_structure)} elements. 
flat_sequence had: {len(flat)}"); - } - return _sequence_like(structure, packed); - } - catch (ArgumentOutOfRangeException) - { - var flat_structure = flatten(structure); - if (len(flat_structure) != len(flat)) - { - throw new ValueError("Could not pack sequence. Structure had {len(structure)} elements, but " + - $"flat_sequence had {len(flat_structure)} elements. flat_sequence had: {len(flat)}"); - } - return _sequence_like(structure, packed); - } - } - - /// - /// Applies `func` to each entry in `structure` and returns a new structure. - /// - /// Applies `func(x[0], x[1], ...)` where x[i] is an entry in - /// `structure[i]`. All structures in `structure` must have the same arity, - /// and the return value will contain the results in the same structure. - /// - /// A callable that accepts as many arguments as there are structures. - /// one or many IEnumerable of object - /// If set to - /// `True` (default) the types of iterables within the structures have to be - /// same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` - /// exception). To allow this set this argument to `False`. - /// Note that namedtuples with identical name and fields are always - /// considered to have the same shallow structure. - /// - /// A new structure with the same arity as `structure`, whose values correspond - /// to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding - /// location in `structure[i]`. If there are different sequence types and - /// `check_types` is `False` the sequence types of the first structure will be - /// used. - /// - public static IEnumerable map_structure(Func func, params IEnumerable[] structure) - { - // TODO: check structure and types - // for other in structure[1:]: - // assert_same_structure(structure[0], other, check_types=check_types) - - if (structure.Length==1) - { - // we don't need to zip if we have only one structure - return map_structure(a => func(new object[]{a}), structure[0]); - } - var flat_structures = structure.Select(flatten).ToArray(); // ToArray is important here! - var entries = zip_many(flat_structures); - var mapped_flat_structure = entries.Select(func); - - return _yield_value(pack_sequence_as(structure[0], mapped_flat_structure)).ToList(); - } - - public static Tensor map_structure(Func func, T structure) - { - var flat_structure = flatten(structure); - var mapped_flat_structure = flat_structure.Select(func).ToList(); - - return pack_sequence_as(structure, mapped_flat_structure) as Tensor; - } - - /// - /// Same as map_structure, but with only one structure (no combining of multiple structures) - /// - /// - /// - /// - public static IEnumerable map_structure(Func func, IEnumerable structure) - { - // TODO: check structure and types - // for other in structure[1:]: - // assert_same_structure(structure[0], other, check_types=check_types) - - var flat_structure = flatten(structure); - var mapped_flat_structure = flat_structure.Select(func).ToList(); - - return _yield_value(pack_sequence_as(structure, mapped_flat_structure)).ToList(); - } - - //def map_structure_with_paths(func, *structure, **kwargs): - // """Applies `func` to each entry in `structure` and returns a new structure. - - // Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in - // `structure[i]` and `path` is the common path to x[i] in the structures. All - // structures in `structure` must have the same arity, and the return value will - // contain the results in the same structure. 
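The scalar branch of `pack_sequence_as` above returns the lone flat value directly and rejects any other length; a short sketch (values illustrative):

```csharp
// A non-sequence structure requires exactly one flat value, returned as-is.
var v = nest.pack_sequence_as("leaf", new List<object> { 42 });  // v == 42

// More than one flat value for a scalar structure raises ValueError:
// nest.pack_sequence_as("leaf", new List<object> { 1, 2 });     // throws
```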
Special kwarg `check_types` - // determines whether the types of iterables within the structure must be the - // same-- see **kwargs definition below. - - // Args: - // func: A callable with the signature func(path, *values, **kwargs) that is - // evaluated on the leaves of the structure. - // *structure: A variable number of compatible structures to process. - // **kwargs: Optional kwargs to be passed through to func. Special kwarg - // `check_types` is not passed to func, but instead determines whether the - // types of iterables within the structures have to be same (e.g., - // `map_structure(func, [1], (1,))` raises a `TypeError` exception). By - // default, the types must match. To allow iteration over structures of - // different types (but common arity), set this kwarg to `False`. - - // Returns: - // A structure of the same form as the input structures whose leaves are the - // result of evaluating func on corresponding leaves of the input structures. - - // Raises: - // TypeError: If `func` is not callable or if the structures do not match - // each other by depth tree. - // TypeError: If `check_types` is not `False` and the two structures differ in - // the type of sequence in any of their substructures. - // ValueError: If no structures are provided. - // """ - // if not callable(func): - // raise TypeError("func must be callable, got: %s" % func) - // if not structure: - // raise ValueError("Must provide at least one structure") - - // check_types = kwargs.pop("check_types", True) - // for other in structure[1:]: - // assert_same_structure(structure[0], other, check_types=check_types) - - //# First set paths_and_values to: - //# [[(p11, v11), ... (p1n, v1n)], ... [(pm1, vm1), ... (pmn, vmn)]] - // paths_and_values = [flatten_with_joined_string_paths(s) for s in structure] - - //# Now zip(*paths_and_values) would be: - //# [((p11, v11), ... (pm1, vm1)), ... ((p1n, v1n), ... (pmn, vmn))] - //# so grouped_by_path is set to: - //# [[(p11, ... pm1), (v11, ... vm1)], ... [(p1n, ... pmn), (v1n, ... vmn)]] - //# Note that p1i, ... pmi must all be equal since the structures are the same. - // grouped_by_path = [zip(*p_v) for p_v in zip(*paths_and_values)] - - // return pack_sequence_as(structure[0], [ - // func(paths[0], *values, **kwargs) for paths, values in grouped_by_path]) - - - //def _yield_flat_up_to(shallow_tree, input_tree): - // """Yields elements `input_tree` partially flattened up to `shallow_tree`.""" - // if is_sequence(shallow_tree): - // for shallow_branch, input_branch in zip(_yield_value(shallow_tree), - // _yield_value(input_tree)): - // for input_leaf in _yield_flat_up_to(shallow_branch, input_branch): - // yield input_leaf - // else: - // yield input_tree - - - //def assert_shallow_structure(shallow_tree, input_tree, check_types=True): - // """Asserts that `shallow_tree` is a shallow structure of `input_tree`. - - // That is, this function tests if the `input_tree` structure can be created from - // the `shallow_tree` structure by replacing its leaf nodes with deeper - // tree structures. - - // Examples: - - // The following code will raise an exception: - // ```python - // shallow_tree = ["a", "b"] - // input_tree = ["c", ["d", "e"], "f"] - // assert_shallow_structure(shallow_tree, input_tree) - // ``` - - // The following code will not raise an exception: - // ```python - // shallow_tree = ["a", "b"] - // input_tree = ["c", ["d", "e"]] - // assert_shallow_structure(shallow_tree, input_tree) - // ``` - - // Args: - // shallow_tree: an arbitrarily nested structure. 
- // input_tree: an arbitrarily nested structure. - // check_types: if `True` (default) the sequence types of `shallow_tree` and - // `input_tree` have to be the same. Note that even with check_types==True, - // this function will consider two different namedtuple classes with the same - // name and _fields attribute to be the same class. - - // Raises: - // TypeError: If `shallow_tree` is a sequence but `input_tree` is not. - // TypeError: If the sequence types of `shallow_tree` are different from - // `input_tree`. Only raised if `check_types` is `True`. - // ValueError: If the sequence lengths of `shallow_tree` are different from - // `input_tree`. - // """ - // if is_sequence(shallow_tree): - // if not is_sequence(input_tree): - // raise TypeError( - // "If shallow structure is a sequence, input must also be a sequence. " - // "Input has type: %s." % type(input_tree)) - - // if check_types and not isinstance(input_tree, type(shallow_tree)): - //# Duck-typing means that nest should be fine with two different - //# namedtuples with identical name and fields. - // shallow_is_namedtuple = _is_namedtuple(shallow_tree, False) - // input_is_namedtuple = _is_namedtuple(input_tree, False) - // if shallow_is_namedtuple and input_is_namedtuple: - // if not _same_namedtuples(shallow_tree, input_tree): - // raise TypeError( - // "The two namedtuples don't have the same sequence type. Input " - // "structure has type %s, while shallow structure has type %s." - // % (type(input_tree), type(shallow_tree))) - // elif not (isinstance(shallow_tree, _collections.Mapping) - // and isinstance(input_tree, _collections.Mapping)): - // raise TypeError( - // "The two structures don't have the same sequence type. Input " - // "structure has type %s, while shallow structure has type %s." - // % (type(input_tree), type(shallow_tree))) - - // if len(input_tree) != len(shallow_tree): - // raise ValueError( - // "The two structures don't have the same sequence length. Input " - // "structure has length %s, while shallow structure has length %s." - // % (len(input_tree), len(shallow_tree))) - - // if check_types and isinstance(shallow_tree, (dict, _collections.Mapping)): - // if set(input_tree) != set(shallow_tree): - // raise ValueError( - // "The two structures don't have the same keys. Input " - // "structure has keys %s, while shallow structure has keys %s." % - // (list(_six.iterkeys(input_tree)), - // list(_six.iterkeys(shallow_tree)))) - - // input_tree = list(sorted(_six.iteritems(input_tree))) - // shallow_tree = list(sorted(_six.iteritems(shallow_tree))) - - // for shallow_branch, input_branch in zip(shallow_tree, input_tree): - // assert_shallow_structure(shallow_branch, input_branch, - // check_types=check_types) - - - //def flatten_up_to(shallow_tree, input_tree): - // """Flattens `input_tree` up to `shallow_tree`. - - // Any further depth in structure in `input_tree` is retained as elements in the - // partially flatten output. - - // If `shallow_tree` and `input_tree` are not sequences, this returns a - // single-element list: `[input_tree]`. - - // Use Case: - - // Sometimes we may wish to partially flatten a nested sequence, retaining some - // of the nested structure. We achieve this by specifying a shallow structure, - // `shallow_tree`, we wish to flatten up to. - - // The input, `input_tree`, can be thought of as having the same structure as - // `shallow_tree`, but with leaf nodes that are themselves tree structures. 
- - // Examples: - - // ```python - // input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] - // shallow_tree = [[True, True], [False, True]] - - // flattened_input_tree = flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree) - - //# Output is: - //# [[2, 2], [3, 3], [4, 9], [5, 5]] - //# [True, True, False, True] - // ``` - - // ```python - // input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] - // shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] - - // input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree) - // input_tree_flattened = flatten(input_tree) - - //# Output is: - //# [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - //# ['a', 1, 'b', 2, 'c', 3, 'd', 4] - // ``` - - // Non-Sequence Edge Cases: - - // ```python - // flatten_up_to(0, 0) # Output: [0] - // flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]] - // flatten_up_to([0, 1, 2], 0) # Output: TypeError - // flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2] - // ``` - - // Args: - // shallow_tree: a possibly pruned structure of input_tree. - // input_tree: an arbitrarily nested structure or a scalar object. - // Note, numpy arrays are considered scalars. - - // Returns: - // A Python list, the partially flattened version of `input_tree` according to - // the structure of `shallow_tree`. - - // Raises: - // TypeError: If `shallow_tree` is a sequence but `input_tree` is not. - // TypeError: If the sequence types of `shallow_tree` are different from - // `input_tree`. - // ValueError: If the sequence lengths of `shallow_tree` are different from - // `input_tree`. - // """ - // assert_shallow_structure(shallow_tree, input_tree) - // return list(_yield_flat_up_to(shallow_tree, input_tree)) - - - //def map_structure_up_to(shallow_tree, func, *inputs): - // """Applies a function or op to a number of partially flattened inputs. - - // The `inputs` are flattened up to `shallow_tree` before being mapped. - - // Use Case: - - // Sometimes we wish to apply a function to a partially flattened - // sequence (for example when the function itself takes sequence inputs). We - // achieve this by specifying a shallow structure, `shallow_tree` we wish to - // flatten up to. - - // The `inputs`, can be thought of as having the same structure as - // `shallow_tree`, but with leaf nodes that are themselves tree structures. - - // This function therefore will return something with the same base structure as - // `shallow_tree`. - - // Examples: - - // ```python - // ab_tuple = collections.namedtuple("ab_tuple", "a, b") - // op_tuple = collections.namedtuple("op_tuple", "add, mul") - // inp_val = ab_tuple(a=2, b=3) - // inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3)) - // out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul, - // inp_val, inp_ops) - - //# Output is: ab_tuple(a=6, b=15) - // ``` - - // ```python - // data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] - // name_list = ['evens', ['odds', 'primes']] - // out = map_structure_up_to( - // name_list, - // lambda name, sec: "first_{}_{}".format(len(sec), name), - // name_list, data_list) - - //# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']] - // ``` - - // Args: - // shallow_tree: a shallow tree, common to all the inputs. - // func: callable which will be applied to each input individually. - // *inputs: arbitrarily nested combination of objects that are compatible with - // shallow_tree. 
The function `func` is applied to corresponding - // partially flattened elements of each input, so the function must support - // arity of `len(inputs)`. - - // Raises: - // TypeError: If `shallow_tree` is a sequence but `input_tree` is not. - // TypeError: If the sequence types of `shallow_tree` are different from - // `input_tree`. - // ValueError: If the sequence lengths of `shallow_tree` are different from - // `input_tree`. - - // Returns: - // result of repeatedly applying `func`, with same structure as - // `shallow_tree`. - // """ - // if not inputs: - // raise ValueError("Cannot map over no sequences") - // for input_tree in inputs: - // assert_shallow_structure(shallow_tree, input_tree) - - //# Flatten each input separately, apply the function to corresponding elements, - //# then repack based on the structure of the first input. - // all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree) - // for input_tree in inputs] - // results = [func(*tensors) for tensors in zip(*all_flattened_up_to)] - // return pack_sequence_as(structure=shallow_tree, flat_sequence=results) - - - //def get_traverse_shallow_structure(traverse_fn, structure): - // """Generates a shallow structure from a `traverse_fn` and `structure`. - - // `traverse_fn` must accept any possible subtree of `structure` and return - // a depth=1 structure containing `True` or `False` values, describing which - // of the top-level subtrees may be traversed. It may also - // return scalar `True` or `False` "traversal is OK / not OK for all subtrees." - - // Examples are available in the unit tests (nest_test.py). - - // Args: - // traverse_fn: Function taking a substructure and returning either a scalar - // `bool` (whether to traverse that substructure or not) or a depth=1 - // shallow structure of the same type, describing which parts of the - // substructure to traverse. - // structure: The structure to traverse. - - // Returns: - // A shallow structure containing python bools, which can be passed to - // `map_structure_up_to` and `flatten_up_to`. - - // Raises: - // TypeError: if `traverse_fn` returns a sequence for a non-sequence input, - // or a structure with depth higher than 1 for a sequence input, - // or if any leaf values in the returned structure or scalar are not type - // `bool`. - // """ - // to_traverse = traverse_fn(structure) - // if not is_sequence(structure): - // if not isinstance(to_traverse, bool): - // raise TypeError("traverse_fn returned structure: %s for non-structure: %s" - // % (to_traverse, structure)) - // return to_traverse - // level_traverse = [] - // if isinstance(to_traverse, bool): - // if not to_traverse: - //# Do not traverse this substructure at all. Exit early. - // return False - // else: - //# Traverse the entire substructure. - // for branch in _yield_value(structure): - // level_traverse.append( - // get_traverse_shallow_structure(traverse_fn, branch)) - // elif not is_sequence(to_traverse): - // raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s" - // % (to_traverse, structure)) - // else: - //# Traverse some subset of this substructure. - // assert_shallow_structure(to_traverse, structure) - // for t, branch in zip(_yield_value(to_traverse), _yield_value(structure)): - // if not isinstance(t, bool): - // raise TypeError( - // "traverse_fn didn't return a depth=1 structure of bools. 
saw: %s " - // " for structure: %s" % (to_traverse, structure)) - // if t: - // level_traverse.append( - // get_traverse_shallow_structure(traverse_fn, branch)) - // else: - // level_traverse.append(False) - // return _sequence_like(structure, level_traverse) - - - //def yield_flat_paths(nest): - // """Yields paths for some nested structure. - - // Paths are lists of objects which can be str-converted, which may include - // integers or other types which are used as indices in a dict. - - // The flat list will be in the corresponding order as if you called - // `snt.nest.flatten` on the structure. This is handy for naming Tensors such - // the TF scope structure matches the tuple structure. - - // E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))` - - // ```shell - // >>> nest.flatten(value) - // [3, 23, 42] - // >>> list(nest.yield_flat_paths(value)) - // [('a',), ('b', 'c'), ('b', 'd')] - // ``` - - // ```shell - // >>> list(nest.yield_flat_paths({'a': [3]})) - // [('a', 0)] - // >>> list(nest.yield_flat_paths({'a': 3})) - // [('a',)] - // ``` - - // Args: - // nest: the value to produce a flattened paths list for. - - // Yields: - // Tuples containing index or key values which form the path to a specific - // leaf value in the nested structure. - // """ - - //# The _maybe_add_final_path_element function is used below in order to avoid - //# adding trailing slashes when the sub-element recursed into is a leaf. - // if isinstance(nest, (dict, _collections.Mapping)): - // for key in _sorted(nest): - // value = nest[key] - // for sub_path in yield_flat_paths(value): - // yield (key,) + sub_path - // elif _is_namedtuple(nest): - // for key in nest._fields: - // value = getattr(nest, key) - // for sub_path in yield_flat_paths(value): - // yield (key,) + sub_path - // elif isinstance(nest, _six.string_types): - // yield () - // elif isinstance(nest, _collections.Sequence): - // for idx, value in enumerate(nest): - // for sub_path in yield_flat_paths(value): - // yield (idx,) + sub_path - // else: - // yield () - - - //def flatten_with_joined_string_paths(structure, separator="/"): - // """Returns a list of (string path, data element) tuples. - - // The order of tuples produced matches that of `nest.flatten`. This allows you - // to flatten a nested structure while keeping information about where in the - // structure each data element was located. See `nest.yield_flat_paths` - // for more information. - - // Args: - // structure: the nested structure to flatten. - // separator: string to separate levels of hierarchy in the results, defaults - // to '/'. - - // Returns: - // A list of (string, data element) tuples. - // """ - // flat_paths = yield_flat_paths(structure) - // def stringify_and_join(path_elements): - // return separator.join(str(path_element) for path_element in path_elements) - // flat_string_paths = [stringify_and_join(path) for path in flat_paths] - // return list(zip(flat_string_paths, flatten(structure))) - - - } -} +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using NumSharp; +using Tensorflow.Operations; + +namespace Tensorflow.Util +{ + //Functions for working with arbitrarily nested sequences of elements. + + //This module can perform operations on nested structures. A nested structure is a + //Python sequence, tuple (including `namedtuple`), or dict that can contain + //further sequences, tuples, and dicts. + + //The utilities here assume (and do not check) that the nested structures form a + //'tree', i.e., no references in the structure of the input of these functions + //should be recursive. + + //Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0), + // (np.array([3, 4]), tf.constant([3, 4])))` + // + + public static class nest + { + + + /// + /// Untyped implementation of zip for arbitrary data + /// + /// Converts an list of lists or arrays [[1,2,3], [4,5,6], [7,8,9]] into a list of arrays + /// representing tuples of the same index of all source arrays [[1,4,7], [2,5,9], [3,6,9]] + /// + /// one or multiple sequences to be zipped + /// + public static IEnumerable zip_many(params IEnumerable[] lists) + { + if (lists.Length == 0) + yield break; + var first = lists[0]; + if (first == null) + yield break; + var arity = first.Count(); + for (int i = 0; i < arity; i++) + { + var array = new object[lists.Length]; + for (int j = 0; j < lists.Length; j++) + array[j] = GetSequenceElementAt(lists[j], i); + yield return array; + } + } + + private static object GetSequenceElementAt(object sequence, int i) + { + switch (sequence) + { + case Array array: + return array.GetValue(i); + case IList list: + return list[i]; + default: + return _yield_value(sequence).Skip(Math.Max(0, i)).FirstOrDefault(); + } + } + + public static IEnumerable<(T1, T2)> zip(IEnumerable e1, IEnumerable e2) + => zip(e1, e2); + + public static Dictionary ConvertToDict(object dyn) + => ConvertToDict(dyn); + + //def _get_attrs_values(obj): + // """Returns the list of values from an attrs instance.""" + // attrs = getattr(obj.__class__, "__attrs_attrs__") + // return [getattr(obj, a.name) for a in attrs] + + /// + /// Returns a sorted list of the dict keys, with error if keys not sortable. + /// + private static IEnumerable _sorted(IDictionary dict_) + { + return dict_.Keys.OfType().OrderBy(x => x); + } + + + //def _is_namedtuple(instance, strict=False): + // """Returns True iff `instance` is a `namedtuple`. + + // Args: + // instance: An instance of a Python object. + // strict: If True, `instance` is considered to be a `namedtuple` only if + // it is a "plain" namedtuple. For instance, a class inheriting + // from a `namedtuple` will be considered to be a `namedtuple` + // iff `strict=False`. + + // Returns: + // True if `instance` is a `namedtuple`. + // """ + // return _pywrap_tensorflow.IsNamedtuple(instance, strict) + + + //# See the swig file (util.i) for documentation. 
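For the example inputs in the `zip_many` doc comment above, the expected output is `[[1,4,7], [2,5,8], [3,6,9]]`; a usage sketch (values illustrative):

```csharp
// Zips three sequences index-wise into object[] "tuples".
var zipped = nest.zip_many(
    new object[] { 1, 2, 3 },
    new object[] { 4, 5, 6 },
    new object[] { 7, 8, 9 });
// yields { 1, 4, 7 }, { 2, 5, 8 }, { 3, 6, 9 }
```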
+ //_is_mapping = _pywrap_tensorflow.IsMapping + //_is_attrs = _pywrap_tensorflow.IsAttrs + + /// + /// Converts the sequence `args` to the same type as `instance`. + /// + /// an instance of `tuple`, `list`, `namedtuple`, `dict`, or + /// `collections.OrderedDict`. + /// elements to be converted to the `instance` type. + /// `args` with the type of `instance`. + private static object _sequence_like(object instance, IEnumerable args) + { + if (is_mapping(instance)) + { + //# Pack dictionaries in a deterministic order by sorting the keys. + //# Notice this means that we ignore the original order of `OrderedDict` + //# instances. This is intentional, to avoid potential bugs caused by mixing + //# ordered and plain dicts (e.g., flattening a dict but using a + //# corresponding `OrderedDict` to pack it back). + switch (instance) + { + case Hashtable hash: + var result = new Hashtable(); + foreach ((object key, object value) in zip(_sorted(hash), args)) + result[key] = value; + return result; + } + } + //else if( _is_namedtuple(instance) || _is_attrs(instance)) + // return type(instance)(*args) + else + { + // Not a namedtuple + switch (instance) + { + case object[] array: + var result_array = new object[args.Count()]; + int i = 0; + foreach (var x in args) + { + result_array[i] = x; + i++; + } + return result_array; + case List list: + return new List(args); + default: + throw new TypeError("Type of sequence not supported (yet): " + instance.GetType()); + } + } + throw new TypeError("Type of sequence not supported (yet): " + instance.GetType()); + } + + /// + /// Yields the next value from the given iterable. + /// + private static IEnumerable _yield_value(object iterable) + { + if (is_mapping(iterable)) + { + var dict = iterable as IDictionary; + //# Iterate through dictionaries in a deterministic order by sorting the + //# keys. Notice this means that we ignore the original order of `OrderedDict` + //# instances. This is intentional, to avoid potential bugs caused by mixing + //# ordered and plain dicts (e.g., flattening a dict but using a + //# corresponding `OrderedDict` to pack it back). + foreach (var key in _sorted(dict)) + yield return dict[key]; + } + //else if (_is_attrs(iterable)) + //{ + // // for value in _get_attrs_values(iterable): + // // yield value + //} + else if (iterable is IEnumerable) + { + var enumerable = iterable as IEnumerable; + foreach (var value in enumerable) + yield return value; + } + else + { + throw new TypeError("Unexpected iterable type: " + iterable.GetType()); + //var jobj = JObject.FromObject(iterable); + //foreach (var key in _sorted()) + // yield return jobj[key]; + } + } + + //# See the swig file (util.i) for documentation. + public static bool is_sequence(object arg) + => arg is IEnumerable && !(arg is string) && !(arg is NDArray) && + !(arg.GetType().IsGenericType && arg.GetType().GetGenericTypeDefinition() == typeof(HashSet<>)); + + public static bool is_mapping(object arg) => arg is IDictionary; + + //# See the swig file (util.i) for documentation. 
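The `is_sequence` checks above deliberately treat strings, `NDArray`s and `HashSet<>`s as scalar leaves; a sketch of the contract (assumes `using NumSharp;`):

```csharp
nest.is_sequence(new List<object> { 1, 2 });    // true: ordinary collection
nest.is_sequence("tensor");                     // false: strings are leaves
nest.is_sequence(np.array(new[] { 1, 2 }));     // false: NDArray is a leaf
nest.is_sequence(new HashSet<int> { 1 });       // false: HashSet<> is excluded
```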
+ //flatten = _pywrap_tensorflow.Flatten + + public static List flatten(T structure) + { + var list = new List(); + _flatten_recursive(structure, list); + return list; + } + + public static object[] flatten2(ICanBeFlattened structure) + => structure.Flatten(); + + public static T[] flatten2(T[] structure) + => structure; + + private static void _flatten_recursive(T obj, List list) + { + switch(obj) + { + case IDictionary dict: + foreach (var key in _sorted(dict)) + _flatten_recursive((T)dict[key], list); + break; + case String str: + list.Add(obj); + break; + case NDArray nd: + list.Add(obj); + break; + case IEnumerable structure: + foreach (var child in structure) + _flatten_recursive((T)child, list); + break; + default: + list.Add(obj); + break; + } + } + + + //# See the swig file (util.i) for documentation. + //_same_namedtuples = _pywrap_tensorflow.SameNamedtuples + + + //class _DotString(object): + + // def __str__(self): + // return "." + + // def __repr__(self): + // return "." + + + //_DOT = _DotString() + + + //def assert_same_structure(nest1, nest2, check_types=True): + // """Asserts that two structures are nested in the same way. + + // Note that namedtuples with identical name and fields are always considered + // to have the same shallow structure (even with `check_types=True`). + // For intance, this code will print `True`: + + // ```python + // def nt(a, b): + // return collections.namedtuple('foo', 'a b')(a, b) + // print(assert_same_structure(nt(0, 1), nt(2, 3))) + // ``` + + // Args: + // nest1: an arbitrarily nested structure. + // nest2: an arbitrarily nested structure. + // check_types: if `True` (default) types of sequences are checked as well, + // including the keys of dictionaries. If set to `False`, for example a + // list and a tuple of objects will look the same if they have the same + // size. Note that namedtuples with identical name and fields are always + // considered to have the same shallow structure. Two types will also be + // considered the same if they are both list subtypes (which allows "list" + // and "_ListWrapper" from checkpointable dependency tracking to compare + // equal). + + // Raises: + // ValueError: If the two structures do not have the same number of elements or + // if the two structures are not nested in the same way. + // TypeError: If the two structures differ in the type of sequence in any of + // their substructures. Only possible if `check_types` is `True`. + // """ + // try: + // _pywrap_tensorflow.AssertSameStructure(nest1, nest2, check_types) + // except (ValueError, TypeError) as e: + // str1 = str(map_structure(lambda _: _DOT, nest1)) + // str2 = str(map_structure(lambda _: _DOT, nest2)) + // raise type(e)("%s\n" + // "Entire first structure:\n%s\n" + // "Entire second structure:\n%s" + // % (str(e), str1, str2)) + + + //def flatten_dict_items(dictionary): + // """Returns a dictionary with flattened keys and values. + + // This function flattens the keys and values of a dictionary, which can be + // arbitrarily nested structures, and returns the flattened version of such + // structures: + + // ```python + // example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))} + // result = {4: "a", 5: "b", 6: "c", 8: "d"} + // flatten_dict_items(example_dictionary) == result + // ``` + + // The input dictionary must satisfy two properties: + + // 1. Its keys and values should have the same exact nested structure. + // 2. The set of all flattened keys of the dictionary must not contain repeated + // keys. 
+ + // Args: + // dictionary: the dictionary to zip + + // Returns: + // The zipped dictionary. + + // Raises: + // TypeError: If the input is not a dictionary. + // ValueError: If any key and value have not the same structure, or if keys are + // not unique. + // """ + // if not isinstance(dictionary, (dict, _collections.Mapping)): + // raise TypeError("input must be a dictionary") + // flat_dictionary = {} + // for i, v in _six.iteritems(dictionary): + // if not is_sequence(i): + // if i in flat_dictionary: + // raise ValueError( + // "Could not flatten dictionary: key %s is not unique." % i) + // flat_dictionary[i] = v + // else: + // flat_i = flatten(i) + // flat_v = flatten(v) + // if len(flat_i) != len(flat_v): + // raise ValueError( + // "Could not flatten dictionary. Key had %d elements, but value had " + // "%d elements. Key: %s, value: %s." + // % (len(flat_i), len(flat_v), flat_i, flat_v)) + // for new_i, new_v in zip(flat_i, flat_v): + // if new_i in flat_dictionary: + // raise ValueError( + // "Could not flatten dictionary: key %s is not unique." + // % (new_i)) + // flat_dictionary[new_i] = new_v + // return flat_dictionary + + /// + /// Helper function for pack_sequence_as. + /// + /// Substructure (list / tuple / dict) to mimic. + /// Flattened values to output substructure for. + /// Index at which to start reading from flat. + /// + /// The tuple(new_index, child), where: + /// * new_index - the updated index into `flat` having processed `structure`. + /// * packed - the subset of `flat` corresponding to `structure`, + /// having started at `index`, and packed into the same nested + /// format. + private static (int new_index, List child) _packed_nest_with_indices(object structure, List flat, + int index) + { + var packed = new List(); + foreach (var s in _yield_value(structure)) + { + if (is_sequence(s)) + { + var (new_index, child) = _packed_nest_with_indices(s, flat, index); + packed.Add(_sequence_like(s, child)); + index = new_index; + } + else + { + packed.Add(flat[index]); + index += 1; + } + } + return (index, packed); + } + + private static int len(IEnumerable x) => x.Count(); + + public static T pack_sequence_as2(T structure, object[] flat_sequence, bool expand_composites = false) + where T : IPackable + => structure.Pack(flat_sequence); + + /// + /// Returns a given flattened sequence packed into a given structure. + /// If `structure` is a scalar, `flat_sequence` must be a single-element list; + /// in this case the return value is `flat_sequence[0]`. + /// + /// If `structure` is or contains a dict instance, the keys will be sorted to + /// pack the flat sequence in deterministic order. This is true also for + /// `OrderedDict` instances: their sequence order is ignored, the sorting order of + /// keys is used instead. The same convention is followed in `flatten`. + /// This correctly repacks dicts and `OrderedDict`s after they have been + /// flattened, and also allows flattening an `OrderedDict` and then repacking it + /// back using a corresponding plain dict, or vice-versa. + /// Dictionaries with non-sortable keys cannot be flattened. + /// + /// + /// Nested structure, whose structure is given by nested lists, + /// tuples, and dicts. Note: numpy arrays and strings are considered + /// scalars. + /// + /// flat sequence to pack. + /// `flat_sequence` converted to have the same recursive structure as + /// `structure`. 
+ /// + public static object pack_sequence_as(object structure, IEnumerable flat_sequence, bool expand_composites = false) + { + List flat = null; + if (flat_sequence is List) + flat = flat_sequence as List; + else + flat=new List(flat_sequence); + if (flat_sequence==null) + throw new ArgumentException("flat_sequence must not be null"); + // if not is_sequence(flat_sequence): + // raise TypeError("flat_sequence must be a sequence") + + if (!is_sequence(structure)) + { + if (len(flat) != 1) + throw new ValueError($"Structure is a scalar but len(flat_sequence) == {len(flat)} > 1"); + return flat.FirstOrDefault(); + } + int final_index = 0; + List packed = null; + try + { + (final_index, packed) = _packed_nest_with_indices(structure, flat, 0); + if (final_index < len(flat)) + throw new IndexOutOfRangeException( + $"Final index: {final_index} was smaller than len(flat_sequence): {len(flat)}"); + return _sequence_like(structure, packed); + } + catch (IndexOutOfRangeException) + { + var flat_structure = flatten(structure); + if (len(flat_structure) != len(flat)) + { + throw new ValueError("Could not pack sequence. Structure had {len(structure)} elements, but " + + $"flat_sequence had {len(flat_structure)} elements. flat_sequence had: {len(flat)}"); + } + return _sequence_like(structure, packed); + } + catch (ArgumentOutOfRangeException) + { + var flat_structure = flatten(structure); + if (len(flat_structure) != len(flat)) + { + throw new ValueError("Could not pack sequence. Structure had {len(structure)} elements, but " + + $"flat_sequence had {len(flat_structure)} elements. flat_sequence had: {len(flat)}"); + } + return _sequence_like(structure, packed); + } + } + + /// + /// Applies `func` to each entry in `structure` and returns a new structure. + /// + /// Applies `func(x[0], x[1], ...)` where x[i] is an entry in + /// `structure[i]`. All structures in `structure` must have the same arity, + /// and the return value will contain the results in the same structure. + /// + /// A callable that accepts as many arguments as there are structures. + /// one or many IEnumerable of object + /// If set to + /// `True` (default) the types of iterables within the structures have to be + /// same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` + /// exception). To allow this set this argument to `False`. + /// Note that namedtuples with identical name and fields are always + /// considered to have the same shallow structure. + /// + /// A new structure with the same arity as `structure`, whose values correspond + /// to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding + /// location in `structure[i]`. If there are different sequence types and + /// `check_types` is `False` the sequence types of the first structure will be + /// used. + /// + public static IEnumerable map_structure(Func func, params IEnumerable[] structure) + { + // TODO: check structure and types + // for other in structure[1:]: + // assert_same_structure(structure[0], other, check_types=check_types) + + if (structure.Length==1) + { + // we don't need to zip if we have only one structure + return map_structure(a => func(new object[]{a}), structure[0]); + } + var flat_structures = structure.Select(flatten).ToArray(); // ToArray is important here! 
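+            // ToArray() materializes the lazy Select(flatten) projection, so each
+            // structure is flattened exactly once and the result is the concrete
+            // array that zip_many's params parameter expects.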
+ var entries = zip_many(flat_structures); + var mapped_flat_structure = entries.Select(func); + + return _yield_value(pack_sequence_as(structure[0], mapped_flat_structure)).ToList(); + } + + public static Tensor map_structure(Func func, T structure) + { + var flat_structure = flatten(structure); + var mapped_flat_structure = flat_structure.Select(func).ToList(); + + return pack_sequence_as(structure, mapped_flat_structure) as Tensor; + } + + /// + /// Same as map_structure, but with only one structure (no combining of multiple structures) + /// + /// + /// + /// + public static IEnumerable map_structure(Func func, IEnumerable structure) + { + // TODO: check structure and types + // for other in structure[1:]: + // assert_same_structure(structure[0], other, check_types=check_types) + + var flat_structure = flatten(structure); + var mapped_flat_structure = flat_structure.Select(func).ToList(); + + return _yield_value(pack_sequence_as(structure, mapped_flat_structure)).ToList(); + } + + //def map_structure_with_paths(func, *structure, **kwargs): + // """Applies `func` to each entry in `structure` and returns a new structure. + + // Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in + // `structure[i]` and `path` is the common path to x[i] in the structures. All + // structures in `structure` must have the same arity, and the return value will + // contain the results in the same structure. Special kwarg `check_types` + // determines whether the types of iterables within the structure must be the + // same-- see **kwargs definition below. + + // Args: + // func: A callable with the signature func(path, *values, **kwargs) that is + // evaluated on the leaves of the structure. + // *structure: A variable number of compatible structures to process. + // **kwargs: Optional kwargs to be passed through to func. Special kwarg + // `check_types` is not passed to func, but instead determines whether the + // types of iterables within the structures have to be same (e.g., + // `map_structure(func, [1], (1,))` raises a `TypeError` exception). By + // default, the types must match. To allow iteration over structures of + // different types (but common arity), set this kwarg to `False`. + + // Returns: + // A structure of the same form as the input structures whose leaves are the + // result of evaluating func on corresponding leaves of the input structures. + + // Raises: + // TypeError: If `func` is not callable or if the structures do not match + // each other by depth tree. + // TypeError: If `check_types` is not `False` and the two structures differ in + // the type of sequence in any of their substructures. + // ValueError: If no structures are provided. + // """ + // if not callable(func): + // raise TypeError("func must be callable, got: %s" % func) + // if not structure: + // raise ValueError("Must provide at least one structure") + + // check_types = kwargs.pop("check_types", True) + // for other in structure[1:]: + // assert_same_structure(structure[0], other, check_types=check_types) + + //# First set paths_and_values to: + //# [[(p11, v11), ... (p1n, v1n)], ... [(pm1, vm1), ... (pmn, vmn)]] + // paths_and_values = [flatten_with_joined_string_paths(s) for s in structure] + + //# Now zip(*paths_and_values) would be: + //# [((p11, v11), ... (pm1, vm1)), ... ((p1n, v1n), ... (pmn, vmn))] + //# so grouped_by_path is set to: + //# [[(p11, ... pm1), (v11, ... vm1)], ... [(p1n, ... pmn), (v1n, ... vmn)]] + //# Note that p1i, ... 
pmi must all be equal since the structures are the same. + // grouped_by_path = [zip(*p_v) for p_v in zip(*paths_and_values)] + + // return pack_sequence_as(structure[0], [ + // func(paths[0], *values, **kwargs) for paths, values in grouped_by_path]) + + + //def _yield_flat_up_to(shallow_tree, input_tree): + // """Yields elements `input_tree` partially flattened up to `shallow_tree`.""" + // if is_sequence(shallow_tree): + // for shallow_branch, input_branch in zip(_yield_value(shallow_tree), + // _yield_value(input_tree)): + // for input_leaf in _yield_flat_up_to(shallow_branch, input_branch): + // yield input_leaf + // else: + // yield input_tree + + + //def assert_shallow_structure(shallow_tree, input_tree, check_types=True): + // """Asserts that `shallow_tree` is a shallow structure of `input_tree`. + + // That is, this function tests if the `input_tree` structure can be created from + // the `shallow_tree` structure by replacing its leaf nodes with deeper + // tree structures. + + // Examples: + + // The following code will raise an exception: + // ```python + // shallow_tree = ["a", "b"] + // input_tree = ["c", ["d", "e"], "f"] + // assert_shallow_structure(shallow_tree, input_tree) + // ``` + + // The following code will not raise an exception: + // ```python + // shallow_tree = ["a", "b"] + // input_tree = ["c", ["d", "e"]] + // assert_shallow_structure(shallow_tree, input_tree) + // ``` + + // Args: + // shallow_tree: an arbitrarily nested structure. + // input_tree: an arbitrarily nested structure. + // check_types: if `True` (default) the sequence types of `shallow_tree` and + // `input_tree` have to be the same. Note that even with check_types==True, + // this function will consider two different namedtuple classes with the same + // name and _fields attribute to be the same class. + + // Raises: + // TypeError: If `shallow_tree` is a sequence but `input_tree` is not. + // TypeError: If the sequence types of `shallow_tree` are different from + // `input_tree`. Only raised if `check_types` is `True`. + // ValueError: If the sequence lengths of `shallow_tree` are different from + // `input_tree`. + // """ + // if is_sequence(shallow_tree): + // if not is_sequence(input_tree): + // raise TypeError( + // "If shallow structure is a sequence, input must also be a sequence. " + // "Input has type: %s." % type(input_tree)) + + // if check_types and not isinstance(input_tree, type(shallow_tree)): + //# Duck-typing means that nest should be fine with two different + //# namedtuples with identical name and fields. + // shallow_is_namedtuple = _is_namedtuple(shallow_tree, False) + // input_is_namedtuple = _is_namedtuple(input_tree, False) + // if shallow_is_namedtuple and input_is_namedtuple: + // if not _same_namedtuples(shallow_tree, input_tree): + // raise TypeError( + // "The two namedtuples don't have the same sequence type. Input " + // "structure has type %s, while shallow structure has type %s." + // % (type(input_tree), type(shallow_tree))) + // elif not (isinstance(shallow_tree, _collections.Mapping) + // and isinstance(input_tree, _collections.Mapping)): + // raise TypeError( + // "The two structures don't have the same sequence type. Input " + // "structure has type %s, while shallow structure has type %s." + // % (type(input_tree), type(shallow_tree))) + + // if len(input_tree) != len(shallow_tree): + // raise ValueError( + // "The two structures don't have the same sequence length. Input " + // "structure has length %s, while shallow structure has length %s." 
+ // % (len(input_tree), len(shallow_tree))) + + // if check_types and isinstance(shallow_tree, (dict, _collections.Mapping)): + // if set(input_tree) != set(shallow_tree): + // raise ValueError( + // "The two structures don't have the same keys. Input " + // "structure has keys %s, while shallow structure has keys %s." % + // (list(_six.iterkeys(input_tree)), + // list(_six.iterkeys(shallow_tree)))) + + // input_tree = list(sorted(_six.iteritems(input_tree))) + // shallow_tree = list(sorted(_six.iteritems(shallow_tree))) + + // for shallow_branch, input_branch in zip(shallow_tree, input_tree): + // assert_shallow_structure(shallow_branch, input_branch, + // check_types=check_types) + + + //def flatten_up_to(shallow_tree, input_tree): + // """Flattens `input_tree` up to `shallow_tree`. + + // Any further depth in structure in `input_tree` is retained as elements in the + // partially flatten output. + + // If `shallow_tree` and `input_tree` are not sequences, this returns a + // single-element list: `[input_tree]`. + + // Use Case: + + // Sometimes we may wish to partially flatten a nested sequence, retaining some + // of the nested structure. We achieve this by specifying a shallow structure, + // `shallow_tree`, we wish to flatten up to. + + // The input, `input_tree`, can be thought of as having the same structure as + // `shallow_tree`, but with leaf nodes that are themselves tree structures. + + // Examples: + + // ```python + // input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] + // shallow_tree = [[True, True], [False, True]] + + // flattened_input_tree = flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree) + + //# Output is: + //# [[2, 2], [3, 3], [4, 9], [5, 5]] + //# [True, True, False, True] + // ``` + + // ```python + // input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] + // shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] + + // input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree) + // input_tree_flattened = flatten(input_tree) + + //# Output is: + //# [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + //# ['a', 1, 'b', 2, 'c', 3, 'd', 4] + // ``` + + // Non-Sequence Edge Cases: + + // ```python + // flatten_up_to(0, 0) # Output: [0] + // flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]] + // flatten_up_to([0, 1, 2], 0) # Output: TypeError + // flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2] + // ``` + + // Args: + // shallow_tree: a possibly pruned structure of input_tree. + // input_tree: an arbitrarily nested structure or a scalar object. + // Note, numpy arrays are considered scalars. + + // Returns: + // A Python list, the partially flattened version of `input_tree` according to + // the structure of `shallow_tree`. + + // Raises: + // TypeError: If `shallow_tree` is a sequence but `input_tree` is not. + // TypeError: If the sequence types of `shallow_tree` are different from + // `input_tree`. + // ValueError: If the sequence lengths of `shallow_tree` are different from + // `input_tree`. + // """ + // assert_shallow_structure(shallow_tree, input_tree) + // return list(_yield_flat_up_to(shallow_tree, input_tree)) + + + //def map_structure_up_to(shallow_tree, func, *inputs): + // """Applies a function or op to a number of partially flattened inputs. + + // The `inputs` are flattened up to `shallow_tree` before being mapped. 
+ + // Use Case: + + // Sometimes we wish to apply a function to a partially flattened + // sequence (for example when the function itself takes sequence inputs). We + // achieve this by specifying a shallow structure, `shallow_tree` we wish to + // flatten up to. + + // The `inputs`, can be thought of as having the same structure as + // `shallow_tree`, but with leaf nodes that are themselves tree structures. + + // This function therefore will return something with the same base structure as + // `shallow_tree`. + + // Examples: + + // ```python + // ab_tuple = collections.namedtuple("ab_tuple", "a, b") + // op_tuple = collections.namedtuple("op_tuple", "add, mul") + // inp_val = ab_tuple(a=2, b=3) + // inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3)) + // out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul, + // inp_val, inp_ops) + + //# Output is: ab_tuple(a=6, b=15) + // ``` + + // ```python + // data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] + // name_list = ['evens', ['odds', 'primes']] + // out = map_structure_up_to( + // name_list, + // lambda name, sec: "first_{}_{}".format(len(sec), name), + // name_list, data_list) + + //# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']] + // ``` + + // Args: + // shallow_tree: a shallow tree, common to all the inputs. + // func: callable which will be applied to each input individually. + // *inputs: arbitrarily nested combination of objects that are compatible with + // shallow_tree. The function `func` is applied to corresponding + // partially flattened elements of each input, so the function must support + // arity of `len(inputs)`. + + // Raises: + // TypeError: If `shallow_tree` is a sequence but `input_tree` is not. + // TypeError: If the sequence types of `shallow_tree` are different from + // `input_tree`. + // ValueError: If the sequence lengths of `shallow_tree` are different from + // `input_tree`. + + // Returns: + // result of repeatedly applying `func`, with same structure as + // `shallow_tree`. + // """ + // if not inputs: + // raise ValueError("Cannot map over no sequences") + // for input_tree in inputs: + // assert_shallow_structure(shallow_tree, input_tree) + + //# Flatten each input separately, apply the function to corresponding elements, + //# then repack based on the structure of the first input. + // all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree) + // for input_tree in inputs] + // results = [func(*tensors) for tensors in zip(*all_flattened_up_to)] + // return pack_sequence_as(structure=shallow_tree, flat_sequence=results) + + + //def get_traverse_shallow_structure(traverse_fn, structure): + // """Generates a shallow structure from a `traverse_fn` and `structure`. + + // `traverse_fn` must accept any possible subtree of `structure` and return + // a depth=1 structure containing `True` or `False` values, describing which + // of the top-level subtrees may be traversed. It may also + // return scalar `True` or `False` "traversal is OK / not OK for all subtrees." + + // Examples are available in the unit tests (nest_test.py). + + // Args: + // traverse_fn: Function taking a substructure and returning either a scalar + // `bool` (whether to traverse that substructure or not) or a depth=1 + // shallow structure of the same type, describing which parts of the + // substructure to traverse. + // structure: The structure to traverse. 
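// A companion sketch for map_structure_up_to, reusing FlattenUpTo from the
// sketch above: flatten each input up to `shallow`, zip the flat lists, and
// apply `func` elementwise. Repacking into the shallow structure is elided
// for brevity; all names here are illustrative, not part of this patch.
using System;
using System.Collections.Generic;
using System.Linq;

static class NestSketchMap
{
    public static List<TOut> MapFlatUpTo<TOut>(
        object shallow, Func<object[], TOut> func, params object[] inputs)
    {
        // One flat list per input; all lists have equal length because each
        // input is flattened against the same shallow structure.
        var flats = inputs.Select(x => NestSketch.FlattenUpTo(shallow, x)).ToList();
        var results = new List<TOut>();
        for (int i = 0; i < flats[0].Count; i++)
            results.Add(func(flats.Select(f => f[i]).ToArray()));
        return results;
    }
}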
+ + // Returns: + // A shallow structure containing python bools, which can be passed to + // `map_structure_up_to` and `flatten_up_to`. + + // Raises: + // TypeError: if `traverse_fn` returns a sequence for a non-sequence input, + // or a structure with depth higher than 1 for a sequence input, + // or if any leaf values in the returned structure or scalar are not type + // `bool`. + // """ + // to_traverse = traverse_fn(structure) + // if not is_sequence(structure): + // if not isinstance(to_traverse, bool): + // raise TypeError("traverse_fn returned structure: %s for non-structure: %s" + // % (to_traverse, structure)) + // return to_traverse + // level_traverse = [] + // if isinstance(to_traverse, bool): + // if not to_traverse: + //# Do not traverse this substructure at all. Exit early. + // return False + // else: + //# Traverse the entire substructure. + // for branch in _yield_value(structure): + // level_traverse.append( + // get_traverse_shallow_structure(traverse_fn, branch)) + // elif not is_sequence(to_traverse): + // raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s" + // % (to_traverse, structure)) + // else: + //# Traverse some subset of this substructure. + // assert_shallow_structure(to_traverse, structure) + // for t, branch in zip(_yield_value(to_traverse), _yield_value(structure)): + // if not isinstance(t, bool): + // raise TypeError( + // "traverse_fn didn't return a depth=1 structure of bools. saw: %s " + // " for structure: %s" % (to_traverse, structure)) + // if t: + // level_traverse.append( + // get_traverse_shallow_structure(traverse_fn, branch)) + // else: + // level_traverse.append(False) + // return _sequence_like(structure, level_traverse) + + + //def yield_flat_paths(nest): + // """Yields paths for some nested structure. + + // Paths are lists of objects which can be str-converted, which may include + // integers or other types which are used as indices in a dict. + + // The flat list will be in the corresponding order as if you called + // `snt.nest.flatten` on the structure. This is handy for naming Tensors such + // the TF scope structure matches the tuple structure. + + // E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))` + + // ```shell + // >>> nest.flatten(value) + // [3, 23, 42] + // >>> list(nest.yield_flat_paths(value)) + // [('a',), ('b', 'c'), ('b', 'd')] + // ``` + + // ```shell + // >>> list(nest.yield_flat_paths({'a': [3]})) + // [('a', 0)] + // >>> list(nest.yield_flat_paths({'a': 3})) + // [('a',)] + // ``` + + // Args: + // nest: the value to produce a flattened paths list for. + + // Yields: + // Tuples containing index or key values which form the path to a specific + // leaf value in the nested structure. + // """ + + //# The _maybe_add_final_path_element function is used below in order to avoid + //# adding trailing slashes when the sub-element recursed into is a leaf. 
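// Before the commented Python body that follows, a hypothetical C# rendering
// of the yield_flat_paths traversal documented above (dictionary keys sorted,
// list indices used as path elements); names are illustrative only.
using System.Collections;
using System.Collections.Generic;
using System.Linq;

static class NestSketchPaths
{
    public static IEnumerable<object[]> YieldFlatPaths(object nest)
    {
        if (nest is IDictionary dict)
        {
            foreach (var key in dict.Keys.Cast<object>().OrderBy(k => k.ToString()))
                foreach (var sub in YieldFlatPaths(dict[key]))
                    yield return new[] { key }.Concat(sub).ToArray();
        }
        else if (nest is IList list && !(nest is string))
        {
            for (int i = 0; i < list.Count; i++)
                foreach (var sub in YieldFlatPaths(list[i]))
                    yield return new object[] { i }.Concat(sub).ToArray();
        }
        else
        {
            yield return new object[0]; // a leaf contributes an empty path
        }
    }
}
// e.g. YieldFlatPaths(new Hashtable { ["a"] = new[] { 3 } })
// yields the single path ("a", 0), matching the docstring example.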
+ // if isinstance(nest, (dict, _collections.Mapping)): + // for key in _sorted(nest): + // value = nest[key] + // for sub_path in yield_flat_paths(value): + // yield (key,) + sub_path + // elif _is_namedtuple(nest): + // for key in nest._fields: + // value = getattr(nest, key) + // for sub_path in yield_flat_paths(value): + // yield (key,) + sub_path + // elif isinstance(nest, _six.string_types): + // yield () + // elif isinstance(nest, _collections.Sequence): + // for idx, value in enumerate(nest): + // for sub_path in yield_flat_paths(value): + // yield (idx,) + sub_path + // else: + // yield () + + + //def flatten_with_joined_string_paths(structure, separator="/"): + // """Returns a list of (string path, data element) tuples. + + // The order of tuples produced matches that of `nest.flatten`. This allows you + // to flatten a nested structure while keeping information about where in the + // structure each data element was located. See `nest.yield_flat_paths` + // for more information. + + // Args: + // structure: the nested structure to flatten. + // separator: string to separate levels of hierarchy in the results, defaults + // to '/'. + + // Returns: + // A list of (string, data element) tuples. + // """ + // flat_paths = yield_flat_paths(structure) + // def stringify_and_join(path_elements): + // return separator.join(str(path_element) for path_element in path_elements) + // flat_string_paths = [stringify_and_join(path) for path in flat_paths] + // return list(zip(flat_string_paths, flatten(structure))) + + + } +} diff --git a/src/TensorFlowNET.Core/Variables/gen_state_ops.py.cs b/src/TensorFlowNET.Core/Variables/gen_state_ops.py.cs index 9c006170..7cdea327 100644 --- a/src/TensorFlowNET.Core/Variables/gen_state_ops.py.cs +++ b/src/TensorFlowNET.Core/Variables/gen_state_ops.py.cs @@ -1,156 +1,156 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Collections.Generic; -using Tensorflow.Eager; - -namespace Tensorflow -{ - public class gen_state_ops - { - public static OpDefLibrary _op_def_lib = new OpDefLibrary(); - public static Execute _execute = new Execute(); - - /// - /// Holds state in the form of a tensor that persists across steps. - /// Outputs a ref to the tensor state so it may be read or modified. - /// - /// The shape of the variable tensor. - /// The type of elements in the variable tensor. 
- /// - /// - /// - /// - public static Tensor variable_v2(int[] shape, TF_DataType dtype, string name = null, string container = "", string shared_name = "") - { - var _op = _op_def_lib._apply_op_helper("VariableV2", name: name, args: new { dtype, shape, container, shared_name }); - - var _result = _op.outputs; - var _inputs_flat = _op.inputs; - - var _attrs = new Dictionary(); - _attrs["dtype"] = _op.get_attr("dtype"); - _attrs["shape"] = _op.get_attr("shape"); - _attrs["container"] = _op.get_attr("container"); - _attrs["shared_name"] = _op.get_attr("shared_name"); - - _execute.record_gradient("VariableV2", _inputs_flat, _attrs, _result, name); - - return _result[0]; - } - - /// - /// Update 'ref' by assigning 'value' to it - /// - /// - /// - /// - /// - /// - public static Tensor assign(Tensor @ref, object value, - bool validate_shape = true, - bool use_locking = true, - string name = null) - { - var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); - - var _result = _op.outputs; - var _inputs_flat = _op.inputs; - - var _attrs = new Dictionary(); - _attrs["T"] = _op.get_attr("T"); - _attrs["validate_shape"] = _op.get_attr("validate_shape"); - _attrs["use_locking"] = _op.get_attr("use_locking"); - - _execute.record_gradient("Assign", _inputs_flat, _attrs, _result, name); - - return _result[0]; - } - - public static Tensor assign(RefVariable @ref, object value, - bool validate_shape = true, - bool use_locking = true, - string name = null) - { - var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); - - var _result = _op.outputs; - var _inputs_flat = _op.inputs; - - var _attrs = new Dictionary(); - _attrs["T"] = _op.get_attr("T"); - _attrs["validate_shape"] = _op.get_attr("validate_shape"); - _attrs["use_locking"] = _op.get_attr("use_locking"); - - _execute.record_gradient("Assign", _inputs_flat, _attrs, _result, name); - - return _result[0]; - } - - public static Tensor assign_sub(RefVariable @ref, - Tensor value, - bool use_locking = false, - string name = null) - { - var _op = _op_def_lib._apply_op_helper("AssignSub", name: name, args: new { @ref, value, use_locking }); - - return _op.outputs[0]; - } - - - // Update 'ref' by adding 'value' to it. - // This operation outputs "ref" after the update is done. - // This makes it easier to chain operations that need to use the reset value. - // Args: - // ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. - // Should be from a `Variable` node. - // value: A `Tensor`. Must have the same type as `ref`. - // The value to be added to the variable. - // use_locking: An optional `bool`. Defaults to `False`. - // If True, the addition will be protected by a lock; - // otherwise the behavior is undefined, but may exhibit less contention. - // name: A name for the operation(optional). - // Returns: - // A mutable `Tensor`. Has the same type as `ref`. - public static Tensor assign_add(RefVariable @ref, T value, bool use_locking = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("AssignAdd", name: name, args: new { @ref, value, use_locking }); - return _op.outputs[0]; - } - - /// - /// Adds sparse updates to a variable reference. 
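// For reference, ScatterAdd semantics in a small worked example (values are
// hypothetical): rows of `updates` are added into the rows of `ref` selected
// by `indices`:
//   ref     = [[1, 1], [2, 2], [3, 3]]
//   indices = [0, 2]
//   updates = [[10, 10], [20, 20]]
//   after   = [[11, 11], [2, 2], [23, 23]]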
- /// - /// - /// - /// - /// - /// - /// - public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null) - { - var _op = _op_def_lib._apply_op_helper("ScatterAdd", name: name, args: new { @ref, indices, updates, use_locking }); - return _op.outputs[0]; - } - - public static Tensor is_variable_initialized(RefVariable @ref, string name = null) - { - var _op = _op_def_lib._apply_op_helper("IsVariableInitialized", name: name, args: new { @ref }); - return _op.output; - } - } -} +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; +using System.Collections.Generic; +using Tensorflow.Eager; + +namespace Tensorflow +{ + public class gen_state_ops + { + public static OpDefLibrary _op_def_lib = new OpDefLibrary(); + public static Execute _execute = new Execute(); + + /// + /// Holds state in the form of a tensor that persists across steps. + /// Outputs a ref to the tensor state so it may be read or modified. + /// + /// The shape of the variable tensor. + /// The type of elements in the variable tensor. 
+ /// + /// + /// + /// + public static Tensor variable_v2(int[] shape, TF_DataType dtype, string name = null, string container = "", string shared_name = "") + { + var _op = _op_def_lib._apply_op_helper("VariableV2", name: name, args: new { dtype, shape, container, shared_name }); + + var _result = _op.outputs; + var _inputs_flat = _op.inputs; + + var _attrs = new Dictionary(); + _attrs["dtype"] = _op.get_attr("dtype"); + _attrs["shape"] = _op.get_attr("shape"); + _attrs["container"] = _op.get_attr("container"); + _attrs["shared_name"] = _op.get_attr("shared_name"); + + _execute.record_gradient("VariableV2", _inputs_flat, _attrs, _result, name); + + return _result[0]; + } + + /// + /// Update 'ref' by assigning 'value' to it + /// + /// + /// + /// + /// + /// + public static Tensor assign(Tensor @ref, object value, + bool validate_shape = true, + bool use_locking = true, + string name = null) + { + var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); + + var _result = _op.outputs; + var _inputs_flat = _op.inputs; + + var _attrs = new Dictionary(); + _attrs["T"] = _op.get_attr("T"); + _attrs["validate_shape"] = _op.get_attr("validate_shape"); + _attrs["use_locking"] = _op.get_attr("use_locking"); + + _execute.record_gradient("Assign", _inputs_flat, _attrs, _result, name); + + return _result[0]; + } + + public static Tensor assign(RefVariable @ref, object value, + bool validate_shape = true, + bool use_locking = true, + string name = null) + { + var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); + + var _result = _op.outputs; + var _inputs_flat = _op.inputs; + + var _attrs = new Dictionary(); + _attrs["T"] = _op.get_attr("T"); + _attrs["validate_shape"] = _op.get_attr("validate_shape"); + _attrs["use_locking"] = _op.get_attr("use_locking"); + + _execute.record_gradient("Assign", _inputs_flat, _attrs, _result, name); + + return _result[0]; + } + + public static Tensor assign_sub(RefVariable @ref, + Tensor value, + bool use_locking = false, + string name = null) + { + var _op = _op_def_lib._apply_op_helper("AssignSub", name: name, args: new { @ref, value, use_locking }); + + return _op.outputs[0]; + } + + + // Update 'ref' by adding 'value' to it. + // This operation outputs "ref" after the update is done. + // This makes it easier to chain operations that need to use the reset value. + // Args: + // ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + // Should be from a `Variable` node. + // value: A `Tensor`. Must have the same type as `ref`. + // The value to be added to the variable. + // use_locking: An optional `bool`. Defaults to `False`. + // If True, the addition will be protected by a lock; + // otherwise the behavior is undefined, but may exhibit less contention. + // name: A name for the operation(optional). + // Returns: + // A mutable `Tensor`. Has the same type as `ref`. + public static Tensor assign_add(RefVariable @ref, T value, bool use_locking = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("AssignAdd", name: name, args: new { @ref, value, use_locking }); + return _op.outputs[0]; + } + + /// + /// Adds sparse updates to a variable reference. 
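// A hypothetical graph-mode usage sketch for the ops above; the tf.ones
// overload and TensorShape constructor are assumed from the rest of this
// repository, while variable_v2 and assign follow the signatures shown here:
var v = gen_state_ops.variable_v2(new[] { 2, 2 }, TF_DataType.TF_FLOAT, name: "v");
var init = gen_state_ops.assign(v, tf.ones(new TensorShape(2, 2)), name: "init_v");
// Running `init` in a Session gives `v` a 2x2 tensor of ones; assign returns
// the post-update value so further ops can chain on it.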
+ /// + /// + /// + /// + /// + /// + /// + public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null) + { + var _op = _op_def_lib._apply_op_helper("ScatterAdd", name: name, args: new { @ref, indices, updates, use_locking }); + return _op.outputs[0]; + } + + public static Tensor is_variable_initialized(RefVariable @ref, string name = null) + { + var _op = _op_def_lib._apply_op_helper("IsVariableInitialized", name: name, args: new { @ref }); + return _op.output; + } + } +} diff --git a/src/TensorFlowNET.Core/Variables/state_ops.cs b/src/TensorFlowNET.Core/Variables/state_ops.cs index cd8d4f3f..01a40bee 100644 --- a/src/TensorFlowNET.Core/Variables/state_ops.cs +++ b/src/TensorFlowNET.Core/Variables/state_ops.cs @@ -1,123 +1,123 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; - -namespace Tensorflow -{ - public class state_ops - { - /// - /// Create a variable Operation. - /// - /// - /// - /// - /// - /// - /// - public static Tensor variable_op_v2(int[] shape, - TF_DataType dtype, - string name = "Variable", - string container = "", - string shared_name = "") => gen_state_ops.variable_v2(shape, - dtype, - name: name, - container: container, - shared_name: shared_name); - - public static Tensor assign(Tensor @ref, object value, - bool validate_shape = true, - bool use_locking = true, - string name = null) - { - if (@ref.dtype.is_ref_dtype()) - return gen_state_ops.assign(@ref, - value, - validate_shape: validate_shape, - use_locking: use_locking, - name: name); - - return @ref.assign((Tensor)value, name: name); - } - - public static Tensor assign(RefVariable @ref, object value, - bool validate_shape = true, - bool use_locking = true, - string name = null) - { - return gen_state_ops.assign(@ref, - value, - validate_shape: validate_shape, - use_locking: use_locking, - name: name); - } - - public static Tensor assign_sub(RefVariable @ref, - Tensor value, - bool use_locking = false, - string name = null) => gen_state_ops.assign_sub(@ref, - value, - use_locking: use_locking, - name: name); - - //"""Update 'ref' by adding 'value' to it. - // - // This operation outputs "ref" after the update is done. - // This makes it easier to chain operations that need to use the reset value. - // - // Args: - // ref: A mutable `Tensor`. Must be one of the following types: - // `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, - // `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. - // Should be from a `Variable` node. - // value: A `Tensor`. Must have the same type as `ref`. - // The value to be added to the variable. - // use_locking: An optional `bool`. Defaults to `False`. 
- // If True, the addition will be protected by a lock; - // otherwise the behavior is undefined, but may exhibit less contention. - // name: A name for the operation (optional). - // - // Returns: - // Same as "ref". Returned as a convenience for operations that want - // to use the new value after the variable has been updated. - public static Tensor assign_add(RefVariable @ref, - T value, - bool use_locking = false, - string name = null) - { - if (@ref.dtype.is_ref_dtype()) - return gen_state_ops.assign_add(@ref, value, use_locking: use_locking, name: name); - throw new NotImplementedException("assign_add"); - } - - public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null) - { - if (@ref.dtype.is_ref_dtype()) - return gen_state_ops.scatter_add(@ref, indices, updates, use_locking: use_locking, name: name); - - throw new NotImplementedException("scatter_add"); - } - - public static Tensor is_variable_initialized(RefVariable @ref, string name = null) - { - if (@ref.dtype.is_ref_dtype()) - return gen_state_ops.is_variable_initialized(@ref: @ref, name: name); - throw new NotImplementedException(""); - //return @ref.is_initialized(name: name); - } - } -} +/***************************************************************************** + Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +******************************************************************************/ + +using System; + +namespace Tensorflow +{ + public class state_ops + { + /// + /// Create a variable Operation. + /// + /// + /// + /// + /// + /// + /// + public static Tensor variable_op_v2(int[] shape, + TF_DataType dtype, + string name = "Variable", + string container = "", + string shared_name = "") => gen_state_ops.variable_v2(shape, + dtype, + name: name, + container: container, + shared_name: shared_name); + + public static Tensor assign(Tensor @ref, object value, + bool validate_shape = true, + bool use_locking = true, + string name = null) + { + if (@ref.dtype.is_ref_dtype()) + return gen_state_ops.assign(@ref, + value, + validate_shape: validate_shape, + use_locking: use_locking, + name: name); + + return @ref.assign((Tensor)value, name: name); + } + + public static Tensor assign(RefVariable @ref, object value, + bool validate_shape = true, + bool use_locking = true, + string name = null) + { + return gen_state_ops.assign(@ref, + value, + validate_shape: validate_shape, + use_locking: use_locking, + name: name); + } + + public static Tensor assign_sub(RefVariable @ref, + Tensor value, + bool use_locking = false, + string name = null) => gen_state_ops.assign_sub(@ref, + value, + use_locking: use_locking, + name: name); + + //"""Update 'ref' by adding 'value' to it. + // + // This operation outputs "ref" after the update is done. + // This makes it easier to chain operations that need to use the reset value. + // + // Args: + // ref: A mutable `Tensor`. 
Must be one of the following types: + // `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, + // `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. + // Should be from a `Variable` node. + // value: A `Tensor`. Must have the same type as `ref`. + // The value to be added to the variable. + // use_locking: An optional `bool`. Defaults to `False`. + // If True, the addition will be protected by a lock; + // otherwise the behavior is undefined, but may exhibit less contention. + // name: A name for the operation (optional). + // + // Returns: + // Same as "ref". Returned as a convenience for operations that want + // to use the new value after the variable has been updated. + public static Tensor assign_add(RefVariable @ref, + T value, + bool use_locking = false, + string name = null) + { + if (@ref.dtype.is_ref_dtype()) + return gen_state_ops.assign_add(@ref, value, use_locking: use_locking, name: name); + throw new NotImplementedException("assign_add"); + } + + public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null) + { + if (@ref.dtype.is_ref_dtype()) + return gen_state_ops.scatter_add(@ref, indices, updates, use_locking: use_locking, name: name); + + throw new NotImplementedException("scatter_add"); + } + + public static Tensor is_variable_initialized(RefVariable @ref, string name = null) + { + if (@ref.dtype.is_ref_dtype()) + return gen_state_ops.is_variable_initialized(@ref: @ref, name: name); + throw new NotImplementedException(""); + //return @ref.is_initialized(name: name); + } + } +} diff --git a/test/TensorFlowNET.UnitTest/PythonTest.cs b/test/TensorFlowNET.UnitTest/PythonTest.cs index 5ceeb9b5..7db2eeaa 100644 --- a/test/TensorFlowNET.UnitTest/PythonTest.cs +++ b/test/TensorFlowNET.UnitTest/PythonTest.cs @@ -1,334 +1,334 @@ -using System; -using System.Collections; -using System.Linq; -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Newtonsoft.Json.Linq; -using NumSharp; -using Tensorflow; -using static Tensorflow.Binding; - -namespace TensorFlowNET.UnitTest -{ - /// - /// Use as base class for test classes to get additional assertions - /// - public class PythonTest - { - #region python compatibility layer - protected PythonTest self { get => this; } - protected object None - { - get { return null; } - } - #endregion - - #region pytest assertions - - public void assertItemsEqual(ICollection given, ICollection expected) - { - if (given is Hashtable && expected is Hashtable) - { - Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); - return; - } - Assert.IsNotNull(expected); - Assert.IsNotNull(given); - var e = expected.OfType().ToArray(); - var g = given.OfType().ToArray(); - Assert.AreEqual(e.Length, g.Length, $"The collections differ in length expected {e.Length} but got {g.Length}"); - for (int i = 0; i < e.Length; i++) - { - /*if (g[i] is NDArray && e[i] is NDArray) - assertItemsEqual((g[i] as NDArray).GetData(), (e[i] as NDArray).GetData()); - else*/ if (e[i] is ICollection && g[i] is ICollection) - assertEqual(g[i], e[i]); - else - Assert.AreEqual(e[i], g[i], $"Items differ at index {i}, expected {e[i]} but got {g[i]}"); - } - } - - public void assertAllEqual(ICollection given, ICollection expected) - { - assertItemsEqual(given, expected); - } - - public void assertFloat32Equal(float expected, float actual, string msg) - { - float eps = 1e-6f; - Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, 
Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); - } - - public void assertFloat64Equal(double expected, double actual, string msg) - { - double eps = 1e-16f; - Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); - } - - public void assertEqual(object given, object expected) - { - /*if (given is NDArray && expected is NDArray) - { - assertItemsEqual((given as NDArray).GetData(), (expected as NDArray).GetData()); - return; - }*/ - if (given is Hashtable && expected is Hashtable) - { - Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); - return; - } - if (given is ICollection && expected is ICollection) - { - assertItemsEqual(given as ICollection, expected as ICollection); - return; - } - if (given is float && expected is float) - { - assertFloat32Equal((float)expected, (float)given, ""); - return; - } - if (given is double && expected is double) - { - assertFloat64Equal((double)expected, (double)given, ""); - return; - } - Assert.AreEqual(expected, given); - } - - public void assertEquals(object given, object expected) - { - assertEqual(given, expected); - } - - public void assert(object given) - { - if (given is bool) - Assert.IsTrue((bool)given); - Assert.IsNotNull(given); - } - - public void assertIsNotNone(object given) - { - Assert.IsNotNull(given); - } - - public void assertFalse(bool cond) - { - Assert.IsFalse(cond); - } - - public void assertTrue(bool cond) - { - Assert.IsTrue(cond); - } - - public void assertAllClose(NDArray array1, NDArray array2, double eps = 1e-5) - { - Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); - } - - public void assertAllClose(double value, NDArray array2, double eps = 1e-5) - { - var array1 = np.ones_like(array2) * value; - Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); - } - - public void assertProtoEquals(object toProto, object o) - { - throw new NotImplementedException(); - } - - #endregion - - #region tensor evaluation and test session - - //protected object _eval_helper(Tensor[] tensors) - //{ - // if (tensors == null) - // return null; - // return nest.map_structure(self._eval_tensor, tensors); - //} - - protected object _eval_tensor(object tensor) - { - if (tensor == None) - return None; - //else if (callable(tensor)) - // return self._eval_helper(tensor()) - else - { - try - { - //TODO: - // if sparse_tensor.is_sparse(tensor): - // return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values, - // tensor.dense_shape) - //return (tensor as Tensor).numpy(); - } - catch (Exception) - { - throw new ValueError("Unsupported type: " + tensor.GetType()); - } - return null; - } - } - - /// - /// This function is used in many original tensorflow unit tests to evaluate tensors - /// in a test session with special settings (for instance constant folding off) - /// - /// - public T evaluate(Tensor tensor) - { - object result = null; - // if context.executing_eagerly(): - // return self._eval_helper(tensors) - // else: - { - using (var sess = tf.Session()) - { - var ndarray=tensor.eval(sess); - if (typeof(T) == typeof(double)) - { - double x = ndarray; - result=x; - } - else if (typeof(T) == typeof(int)) - { - int x = ndarray; - result = x; - } - else - { - result = ndarray; - } - } - - return (T)result; - } - } - - - public Session cached_session() - { - throw new NotImplementedException(); - } - - //Returns a TensorFlow Session for use in executing tests. 
- public Session session(Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) - { - //Note that this will set this session and the graph as global defaults. - - //Use the `use_gpu` and `force_gpu` options to control where ops are run.If - //`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if - //`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as - //possible.If both `force_gpu and `use_gpu` are False, all ops are pinned to - //the CPU. - - //Example: - //```python - //class MyOperatorTest(test_util.TensorFlowTestCase): - // def testMyOperator(self): - // with self.session(use_gpu= True): - // valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] - // result = MyOperator(valid_input).eval() - // self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] - // invalid_input = [-1.0, 2.0, 7.0] - // with self.assertRaisesOpError("negative input not supported"): - // MyOperator(invalid_input).eval() - //``` - - //Args: - // graph: Optional graph to use during the returned session. - // config: An optional config_pb2.ConfigProto to use to configure the - // session. - // use_gpu: If True, attempt to run as many ops as possible on GPU. - // force_gpu: If True, pin all ops to `/device:GPU:0`. - - //Yields: - // A Session object that should be used as a context manager to surround - // the graph building and execution code in a test case. - - Session s = null; - //if (context.executing_eagerly()) - // yield None - //else - //{ - s = self._create_session(graph, config, force_gpu); - self._constrain_devices_and_set_default(s, use_gpu, force_gpu); - //} - return s.as_default(); - } - - private IObjectLife _constrain_devices_and_set_default(Session sess, bool useGpu, bool forceGpu) - { - //def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu): - //"""Set the session and its graph to global default and constrain devices.""" - //if context.executing_eagerly(): - // yield None - //else: - // with sess.graph.as_default(), sess.as_default(): - // if force_gpu: - // # Use the name of an actual device if one is detected, or - // # '/device:GPU:0' otherwise - // gpu_name = gpu_device_name() - // if not gpu_name: - // gpu_name = "/device:GPU:0" - // with sess.graph.device(gpu_name): - // yield sess - // elif use_gpu: - // yield sess - // else: - // with sess.graph.device("/device:CPU:0"): - // yield sess - return sess; - } - - // See session() for details. - private Session _create_session(Graph graph, object cfg, bool forceGpu) - { - var prepare_config = new Func((config) => - { - // """Returns a config for sessions. - // Args: - // config: An optional config_pb2.ConfigProto to use to configure the - // session. - // Returns: - // A config_pb2.ConfigProto object. - - //TODO: config - - // # use_gpu=False. Currently many tests rely on the fact that any device - // # will be used even when a specific device is supposed to be used. 
- // allow_soft_placement = not force_gpu - // if config is None: - // config = config_pb2.ConfigProto() - // config.allow_soft_placement = allow_soft_placement - // config.gpu_options.per_process_gpu_memory_fraction = 0.3 - // elif not allow_soft_placement and config.allow_soft_placement: - // config_copy = config_pb2.ConfigProto() - // config_copy.CopyFrom(config) - // config = config_copy - // config.allow_soft_placement = False - // # Don't perform optimizations for tests so we don't inadvertently run - // # gpu ops on cpu - // config.graph_options.optimizer_options.opt_level = -1 - // # Disable Grappler constant folding since some tests & benchmarks - // # use constant input and become meaningless after constant folding. - // # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE - // # GRAPPLER TEAM. - // config.graph_options.rewrite_options.constant_folding = ( - // rewriter_config_pb2.RewriterConfig.OFF) - // config.graph_options.rewrite_options.pin_to_host_optimization = ( - // rewriter_config_pb2.RewriterConfig.OFF) - return config; - }); - //TODO: use this instead of normal session - //return new ErrorLoggingSession(graph = graph, config = prepare_config(config)) - return new Session(graph);//, config = prepare_config(config)) - } - - #endregion - - - } -} +using System; +using System.Collections; +using System.Linq; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Newtonsoft.Json.Linq; +using NumSharp; +using Tensorflow; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest +{ + /// + /// Use as base class for test classes to get additional assertions + /// + public class PythonTest + { + #region python compatibility layer + protected PythonTest self { get => this; } + protected object None + { + get { return null; } + } + #endregion + + #region pytest assertions + + public void assertItemsEqual(ICollection given, ICollection expected) + { + if (given is Hashtable && expected is Hashtable) + { + Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); + return; + } + Assert.IsNotNull(expected); + Assert.IsNotNull(given); + var e = expected.OfType().ToArray(); + var g = given.OfType().ToArray(); + Assert.AreEqual(e.Length, g.Length, $"The collections differ in length expected {e.Length} but got {g.Length}"); + for (int i = 0; i < e.Length; i++) + { + /*if (g[i] is NDArray && e[i] is NDArray) + assertItemsEqual((g[i] as NDArray).GetData(), (e[i] as NDArray).GetData()); + else*/ if (e[i] is ICollection && g[i] is ICollection) + assertEqual(g[i], e[i]); + else + Assert.AreEqual(e[i], g[i], $"Items differ at index {i}, expected {e[i]} but got {g[i]}"); + } + } + + public void assertAllEqual(ICollection given, ICollection expected) + { + assertItemsEqual(given, expected); + } + + public void assertFloat32Equal(float expected, float actual, string msg) + { + float eps = 1e-6f; + Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); + } + + public void assertFloat64Equal(double expected, double actual, string msg) + { + double eps = 1e-16f; + Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); + } + + public void assertEqual(object given, object expected) + { + /*if (given is NDArray && expected is NDArray) + { + assertItemsEqual((given as NDArray).GetData(), (expected as NDArray).GetData()); + return; + }*/ + if (given is Hashtable && expected 
is Hashtable) + { + Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); + return; + } + if (given is ICollection && expected is ICollection) + { + assertItemsEqual(given as ICollection, expected as ICollection); + return; + } + if (given is float && expected is float) + { + assertFloat32Equal((float)expected, (float)given, ""); + return; + } + if (given is double && expected is double) + { + assertFloat64Equal((double)expected, (double)given, ""); + return; + } + Assert.AreEqual(expected, given); + } + + public void assertEquals(object given, object expected) + { + assertEqual(given, expected); + } + + public void assert(object given) + { + if (given is bool) + Assert.IsTrue((bool)given); + Assert.IsNotNull(given); + } + + public void assertIsNotNone(object given) + { + Assert.IsNotNull(given); + } + + public void assertFalse(bool cond) + { + Assert.IsFalse(cond); + } + + public void assertTrue(bool cond) + { + Assert.IsTrue(cond); + } + + public void assertAllClose(NDArray array1, NDArray array2, double eps = 1e-5) + { + Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); + } + + public void assertAllClose(double value, NDArray array2, double eps = 1e-5) + { + var array1 = np.ones_like(array2) * value; + Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); + } + + public void assertProtoEquals(object toProto, object o) + { + throw new NotImplementedException(); + } + + #endregion + + #region tensor evaluation and test session + + //protected object _eval_helper(Tensor[] tensors) + //{ + // if (tensors == null) + // return null; + // return nest.map_structure(self._eval_tensor, tensors); + //} + + protected object _eval_tensor(object tensor) + { + if (tensor == None) + return None; + //else if (callable(tensor)) + // return self._eval_helper(tensor()) + else + { + try + { + //TODO: + // if sparse_tensor.is_sparse(tensor): + // return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values, + // tensor.dense_shape) + //return (tensor as Tensor).numpy(); + } + catch (Exception) + { + throw new ValueError("Unsupported type: " + tensor.GetType()); + } + return null; + } + } + + /// + /// This function is used in many original tensorflow unit tests to evaluate tensors + /// in a test session with special settings (for instance constant folding off) + /// + /// + public T evaluate(Tensor tensor) + { + object result = null; + // if context.executing_eagerly(): + // return self._eval_helper(tensors) + // else: + { + using (var sess = tf.Session()) + { + var ndarray=tensor.eval(sess); + if (typeof(T) == typeof(double)) + { + double x = ndarray; + result=x; + } + else if (typeof(T) == typeof(int)) + { + int x = ndarray; + result = x; + } + else + { + result = ndarray; + } + } + + return (T)result; + } + } + + + public Session cached_session() + { + throw new NotImplementedException(); + } + + //Returns a TensorFlow Session for use in executing tests. + public Session session(Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) + { + //Note that this will set this session and the graph as global defaults. + + //Use the `use_gpu` and `force_gpu` options to control where ops are run.If + //`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if + //`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as + //possible.If both `force_gpu and `use_gpu` are False, all ops are pinned to + //the CPU. 
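// A hypothetical C# analogue of the Python example that follows in this
// docstring, using the session() helper and the eval pattern used by the
// test cases in this patch:
using (var sess = self.session())
{
    var x = tf.constant(2);
    var y = tf.constant(3);
    int result = tf.add(x, y).eval(sess);
    assertEquals(result, 5);
}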
+ + //Example: + //```python + //class MyOperatorTest(test_util.TensorFlowTestCase): + // def testMyOperator(self): + // with self.session(use_gpu= True): + // valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] + // result = MyOperator(valid_input).eval() + // self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] + // invalid_input = [-1.0, 2.0, 7.0] + // with self.assertRaisesOpError("negative input not supported"): + // MyOperator(invalid_input).eval() + //``` + + //Args: + // graph: Optional graph to use during the returned session. + // config: An optional config_pb2.ConfigProto to use to configure the + // session. + // use_gpu: If True, attempt to run as many ops as possible on GPU. + // force_gpu: If True, pin all ops to `/device:GPU:0`. + + //Yields: + // A Session object that should be used as a context manager to surround + // the graph building and execution code in a test case. + + Session s = null; + //if (context.executing_eagerly()) + // yield None + //else + //{ + s = self._create_session(graph, config, force_gpu); + self._constrain_devices_and_set_default(s, use_gpu, force_gpu); + //} + return s.as_default(); + } + + private IObjectLife _constrain_devices_and_set_default(Session sess, bool useGpu, bool forceGpu) + { + //def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu): + //"""Set the session and its graph to global default and constrain devices.""" + //if context.executing_eagerly(): + // yield None + //else: + // with sess.graph.as_default(), sess.as_default(): + // if force_gpu: + // # Use the name of an actual device if one is detected, or + // # '/device:GPU:0' otherwise + // gpu_name = gpu_device_name() + // if not gpu_name: + // gpu_name = "/device:GPU:0" + // with sess.graph.device(gpu_name): + // yield sess + // elif use_gpu: + // yield sess + // else: + // with sess.graph.device("/device:CPU:0"): + // yield sess + return sess; + } + + // See session() for details. + private Session _create_session(Graph graph, object cfg, bool forceGpu) + { + var prepare_config = new Func((config) => + { + // """Returns a config for sessions. + // Args: + // config: An optional config_pb2.ConfigProto to use to configure the + // session. + // Returns: + // A config_pb2.ConfigProto object. + + //TODO: config + + // # use_gpu=False. Currently many tests rely on the fact that any device + // # will be used even when a specific device is supposed to be used. + // allow_soft_placement = not force_gpu + // if config is None: + // config = config_pb2.ConfigProto() + // config.allow_soft_placement = allow_soft_placement + // config.gpu_options.per_process_gpu_memory_fraction = 0.3 + // elif not allow_soft_placement and config.allow_soft_placement: + // config_copy = config_pb2.ConfigProto() + // config_copy.CopyFrom(config) + // config = config_copy + // config.allow_soft_placement = False + // # Don't perform optimizations for tests so we don't inadvertently run + // # gpu ops on cpu + // config.graph_options.optimizer_options.opt_level = -1 + // # Disable Grappler constant folding since some tests & benchmarks + // # use constant input and become meaningless after constant folding. + // # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE + // # GRAPPLER TEAM. 
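// A sketch of the ConfigProto the comments above describe, using the
// generated protobuf types; property and enum names are assumed from the
// standard C# codegen of tensorflow/core/protobuf/config.proto:
var config = new ConfigProto { AllowSoftPlacement = true };
config.GpuOptions = new GPUOptions { PerProcessGpuMemoryFraction = 0.3 };
config.GraphOptions = new GraphOptions
{
    RewriteOptions = new RewriterConfig
    {
        ConstantFolding = RewriterConfig.Types.Toggle.Off,
        PinToHostOptimization = RewriterConfig.Types.Toggle.Off
    }
};
// _create_session could then pass `config` to the Session constructor once a
// config-accepting overload is wired up, as the TODO below suggests.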
+ // config.graph_options.rewrite_options.constant_folding = ( + // rewriter_config_pb2.RewriterConfig.OFF) + // config.graph_options.rewrite_options.pin_to_host_optimization = ( + // rewriter_config_pb2.RewriterConfig.OFF) + return config; + }); + //TODO: use this instead of normal session + //return new ErrorLoggingSession(graph = graph, config = prepare_config(config)) + return new Session(graph);//, config = prepare_config(config)) + } + + #endregion + + + } +} diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs index ea7c48f2..2017e87d 100644 --- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs +++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/CondTestCases.cs @@ -1,86 +1,86 @@ -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Tensorflow; -using static Tensorflow.Binding; - -namespace TensorFlowNET.UnitTest.control_flow_ops_test -{ - /// - /// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py - /// - [TestClass] - public class CondTestCases : PythonTest - { - [TestMethod] - public void testCondTrue_ConstOnly() - { - var graph = tf.Graph().as_default(); - - using (var sess = tf.Session(graph)) - { - var x = tf.constant(2, name: "x"); - var y = tf.constant(5, name: "y"); - - var z = control_flow_ops.cond(tf.less(x, y), - () => tf.constant(22, name: "t22"), - () => tf.constant(55, name: "f55")); - - int result = z.eval(sess); - assertEquals(result, 22); - } - } - - [TestMethod] - public void testCondFalse_ConstOnly() - { - var graph = tf.Graph().as_default(); - - using (var sess = tf.Session(graph)) - { - var x = tf.constant(2, name: "x"); - var y = tf.constant(1, name: "y"); - - var z = control_flow_ops.cond(tf.less(x, y), - () => tf.constant(22, name: "t22"), - () => tf.constant(11, name: "f11")); - - int result = z.eval(sess); - assertEquals(result, 11); - } - } - - [TestMethod] - public void testCondTrue() - { - tf.Graph().as_default(); - - var x = tf.constant(2, name: "x"); - var y = tf.constant(5, name: "y"); - - var z = control_flow_ops.cond(tf.less(x, y), - () => tf.multiply(x, 17), - () => tf.add(y, 23)); - - var result = evaluate(z); - assertEquals(result, 34); - } - - [TestMethod] - public void testCondFalse() - { - tf.Graph().as_default(); - - var x = tf.constant(2); - var y = tf.constant(1); - - var z = control_flow_ops.cond(tf.less(x, y), - () => tf.multiply(x, 17), - () => tf.add(y, 23)); - - var result = evaluate(z); - assertEquals(result, 24); - } - - // NOTE: all other python test cases of this class are either not needed due to strong typing or test a deprecated api - - } -} +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest.control_flow_ops_test +{ + /// + /// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py + /// + [TestClass] + public class CondTestCases : PythonTest + { + [TestMethod] + public void testCondTrue_ConstOnly() + { + var graph = tf.Graph().as_default(); + + using (var sess = tf.Session(graph)) + { + var x = tf.constant(2, name: "x"); + var y = tf.constant(5, name: "y"); + + var z = control_flow_ops.cond(tf.less(x, y), + () => tf.constant(22, name: "t22"), + () => tf.constant(55, name: "f55")); + + int result = z.eval(sess); + assertEquals(result, 22); + } + } + + [TestMethod] + public void testCondFalse_ConstOnly() + { + var graph = tf.Graph().as_default(); + + using (var sess = tf.Session(graph)) 
+ { + var x = tf.constant(2, name: "x"); + var y = tf.constant(1, name: "y"); + + var z = control_flow_ops.cond(tf.less(x, y), + () => tf.constant(22, name: "t22"), + () => tf.constant(11, name: "f11")); + + int result = z.eval(sess); + assertEquals(result, 11); + } + } + + [TestMethod] + public void testCondTrue() + { + tf.Graph().as_default(); + + var x = tf.constant(2, name: "x"); + var y = tf.constant(5, name: "y"); + + var z = control_flow_ops.cond(tf.less(x, y), + () => tf.multiply(x, 17), + () => tf.add(y, 23)); + + var result = evaluate(z); + assertEquals(result, 34); + } + + [TestMethod] + public void testCondFalse() + { + tf.Graph().as_default(); + + var x = tf.constant(2); + var y = tf.constant(1); + + var z = control_flow_ops.cond(tf.less(x, y), + () => tf.multiply(x, 17), + () => tf.add(y, 23)); + + var result = evaluate(z); + assertEquals(result, 24); + } + + // NOTE: all other python test cases of this class are either not needed due to strong typing or test a deprecated api + + } +} diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/ShapeTestCase.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/ShapeTestCase.cs index 59bdd87c..a7e7b0bd 100644 --- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/ShapeTestCase.cs +++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/ShapeTestCase.cs @@ -1,23 +1,23 @@ -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Tensorflow; - -namespace TensorFlowNET.UnitTest.control_flow_ops_test -{ - /// - /// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py - /// - [TestClass] - public class ShapeTestCase : PythonTest - { - - [TestMethod] - public void testShape() - { - var tensor = constant_op.constant(new[]{1.0, 2.0}); - self.assertEquals(new int[] {2}, tensor.shape); - self.assertEquals(new int[] {2}, - control_flow_ops.with_dependencies(new[] {constant_op.constant(1.0).op}, tensor).shape); - } - - } -} +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow; + +namespace TensorFlowNET.UnitTest.control_flow_ops_test +{ + /// + /// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py + /// + [TestClass] + public class ShapeTestCase : PythonTest + { + + [TestMethod] + public void testShape() + { + var tensor = constant_op.constant(new[]{1.0, 2.0}); + self.assertEquals(new int[] {2}, tensor.shape); + self.assertEquals(new int[] {2}, + control_flow_ops.with_dependencies(new[] {constant_op.constant(1.0).op}, tensor).shape); + } + + } +} diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/SwitchTestCase.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/SwitchTestCase.cs index 5d5c1b6e..74780fdb 100644 --- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/SwitchTestCase.cs +++ b/test/TensorFlowNET.UnitTest/control_flow_ops_test/SwitchTestCase.cs @@ -1,173 +1,173 @@ -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Tensorflow; - -namespace TensorFlowNET.UnitTest.control_flow_ops_test -{ - /// - /// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py - /// - [TestClass] - public class SwitchTestCase : PythonTest - { - - [Ignore("TODO")] - [TestMethod] - public void testResourceReadInLoop() - { - - //var embedding_matrix = variable_scope.get_variable( - //"embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true); - - /* - Tensor cond(Tensor it, Tensor _) - { - return it < 5; - } - */ - - // TODO: below code doesn't compile - //(Tensor, Tensor) body(Tensor it, Tensor cost) - //{ - // var embedding = 
embedding_ops.embedding_lookup(embedding_matrix, new int[]{0}); - // cost += math_ops.reduce_sum(embedding); - // return (it + 1, cost); - //} - //var (_, cost1) = control_flow_ops.while_loop( - // cond, body, new[] - // { - // constant_op.constant(0), - // constant_op.constant(0.0) - // }); - //with(this.cached_session(), sess => - //{ - // self.evaluate(variables.global_variables_initializer()); - // self.assertAllEqual(10.0, self.evaluate(cost1)); - //}); - } - - - [Ignore("TODO")] - [TestMethod] - public void testIndexedSlicesGradientInCondInWhileLoop() - { - doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: false); - } - - [Ignore("TODO")] - [TestMethod] - public void testIndexedSlicesGradientInCondInWhileLoopResource() - { - doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: true); - } - - private void doTestIndexedSlicesGradientInCondInWhileLoop(bool use_resource = false) - { - //def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False): - // embedding_matrix = variable_scope.get_variable( - // "embedding_matrix", [5, 5], - // initializer=init_ops.random_normal_initializer(), - // use_resource=use_resource) - - // def cond(it, _): - // return it < 5 - - // def body(it, cost): - // embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) - // cost = control_flow_ops.cond( - // math_ops.equal(it, 3), lambda: math_ops.square(cost), - // (lambda: cost + math_ops.reduce_sum(embedding))) - // return it + 1, cost - - // _, cost = control_flow_ops.while_loop( - // cond, body, [constant_op.constant(0), - // constant_op.constant(0.0)]) - - // dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0] - // dynamic_grads = math_ops.segment_sum(dynamic_grads.values, - // dynamic_grads.indices) - - // embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) - // static = math_ops.square( - // math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) + - // math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding) - // static_grads = gradients_impl.gradients(static, [embedding_matrix])[0] - // static_grads = math_ops.segment_sum(static_grads.values, - // static_grads.indices) - - // with self.cached_session(): - // self.evaluate(variables.global_variables_initializer()) - // self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads])) - } - - [Ignore("TODO")] - [TestMethod] - public void testIndexedSlicesWithShapeGradientInWhileLoop() - { - //@test_util.run_v1_only("b/120545219") - //def testIndexedSlicesWithShapeGradientInWhileLoop(self): - // for dtype in [dtypes.float32, dtypes.float64]: - // with self.cached_session() as sess: - // num_steps = 9 - - // inputs = array_ops.placeholder(dtype=dtype, shape=[num_steps]) - // initial_outputs = tensor_array_ops.TensorArray( - // dtype=dtype, size=num_steps) - // initial_i = constant_op.constant(0, dtype=dtypes.int32) - - // def cond(i, _): - // return i < num_steps # pylint: disable=cell-var-from-loop - - // def body(i, outputs): - // x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop - // outputs = outputs.write(i, x) - // return i + 1, outputs - - // _, outputs = control_flow_ops.while_loop(cond, body, - // [initial_i, initial_outputs]) - - // outputs = math_ops.reduce_sum(outputs.stack()) - // r = gradients_impl.gradients([outputs], [inputs])[0] - // grad_wr_inputs = ops.convert_to_tensor(r) - // o, grad = sess.run([outputs, grad_wr_inputs], - // feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]}) - // self.assertEquals(o, 20) - // 
self.assertAllEqual(grad, [1] * num_steps) - - } - - [Ignore("TODO")] - [TestMethod] - public void testIndexedSlicesWithDynamicShapeGradientInWhileLoop() - { - //@test_util.run_v1_only("b/120545219") - //def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self): - // for dtype in [dtypes.float32, dtypes.float64]: - // with self.cached_session() as sess: - // inputs = array_ops.placeholder(dtype=dtype) - // initial_outputs = tensor_array_ops.TensorArray( - // dtype=dtype, dynamic_size=True, size=1) - // initial_i = constant_op.constant(0, dtype=dtypes.int32) - - // def cond(i, _): - // return i < array_ops.size(inputs) # pylint: disable=cell-var-from-loop - - // def body(i, outputs): - // x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop - // outputs = outputs.write(i, x) - // return i + 1, outputs - - // _, outputs = control_flow_ops.while_loop(cond, body, - // [initial_i, initial_outputs]) - - // outputs = math_ops.reduce_sum(outputs.stack()) - // r = gradients_impl.gradients([outputs], [inputs])[0] - // grad_wr_inputs = ops.convert_to_tensor(r) - // o, grad = sess.run([outputs, grad_wr_inputs], - // feed_dict={inputs: [1, 3, 2]}) - // self.assertEquals(o, 6) - // self.assertAllEqual(grad, [1] * 3) - - } - - } -} +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow; + +namespace TensorFlowNET.UnitTest.control_flow_ops_test +{ + /// + /// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py + /// + [TestClass] + public class SwitchTestCase : PythonTest + { + + [Ignore("TODO")] + [TestMethod] + public void testResourceReadInLoop() + { + + //var embedding_matrix = variable_scope.get_variable( + //"embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true); + + /* + Tensor cond(Tensor it, Tensor _) + { + return it < 5; + } + */ + + // TODO: below code doesn't compile + //(Tensor, Tensor) body(Tensor it, Tensor cost) + //{ + // var embedding = embedding_ops.embedding_lookup(embedding_matrix, new int[]{0}); + // cost += math_ops.reduce_sum(embedding); + // return (it + 1, cost); + //} + //var (_, cost1) = control_flow_ops.while_loop( + // cond, body, new[] + // { + // constant_op.constant(0), + // constant_op.constant(0.0) + // }); + //with(this.cached_session(), sess => + //{ + // self.evaluate(variables.global_variables_initializer()); + // self.assertAllEqual(10.0, self.evaluate(cost1)); + //}); + } + + + [Ignore("TODO")] + [TestMethod] + public void testIndexedSlicesGradientInCondInWhileLoop() + { + doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: false); + } + + [Ignore("TODO")] + [TestMethod] + public void testIndexedSlicesGradientInCondInWhileLoopResource() + { + doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: true); + } + + private void doTestIndexedSlicesGradientInCondInWhileLoop(bool use_resource = false) + { + //def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False): + // embedding_matrix = variable_scope.get_variable( + // "embedding_matrix", [5, 5], + // initializer=init_ops.random_normal_initializer(), + // use_resource=use_resource) + + // def cond(it, _): + // return it < 5 + + // def body(it, cost): + // embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) + // cost = control_flow_ops.cond( + // math_ops.equal(it, 3), lambda: math_ops.square(cost), + // (lambda: cost + math_ops.reduce_sum(embedding))) + // return it + 1, cost + + // _, cost = control_flow_ops.while_loop( + // cond, body, [constant_op.constant(0), + // 
constant_op.constant(0.0)]) + + // dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0] + // dynamic_grads = math_ops.segment_sum(dynamic_grads.values, + // dynamic_grads.indices) + + // embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) + // static = math_ops.square( + // math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) + + // math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding) + // static_grads = gradients_impl.gradients(static, [embedding_matrix])[0] + // static_grads = math_ops.segment_sum(static_grads.values, + // static_grads.indices) + + // with self.cached_session(): + // self.evaluate(variables.global_variables_initializer()) + // self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads])) + } + + [Ignore("TODO")] + [TestMethod] + public void testIndexedSlicesWithShapeGradientInWhileLoop() + { + //@test_util.run_v1_only("b/120545219") + //def testIndexedSlicesWithShapeGradientInWhileLoop(self): + // for dtype in [dtypes.float32, dtypes.float64]: + // with self.cached_session() as sess: + // num_steps = 9 + + // inputs = array_ops.placeholder(dtype=dtype, shape=[num_steps]) + // initial_outputs = tensor_array_ops.TensorArray( + // dtype=dtype, size=num_steps) + // initial_i = constant_op.constant(0, dtype=dtypes.int32) + + // def cond(i, _): + // return i < num_steps # pylint: disable=cell-var-from-loop + + // def body(i, outputs): + // x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop + // outputs = outputs.write(i, x) + // return i + 1, outputs + + // _, outputs = control_flow_ops.while_loop(cond, body, + // [initial_i, initial_outputs]) + + // outputs = math_ops.reduce_sum(outputs.stack()) + // r = gradients_impl.gradients([outputs], [inputs])[0] + // grad_wr_inputs = ops.convert_to_tensor(r) + // o, grad = sess.run([outputs, grad_wr_inputs], + // feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]}) + // self.assertEquals(o, 20) + // self.assertAllEqual(grad, [1] * num_steps) + + } + + [Ignore("TODO")] + [TestMethod] + public void testIndexedSlicesWithDynamicShapeGradientInWhileLoop() + { + //@test_util.run_v1_only("b/120545219") + //def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self): + // for dtype in [dtypes.float32, dtypes.float64]: + // with self.cached_session() as sess: + // inputs = array_ops.placeholder(dtype=dtype) + // initial_outputs = tensor_array_ops.TensorArray( + // dtype=dtype, dynamic_size=True, size=1) + // initial_i = constant_op.constant(0, dtype=dtypes.int32) + + // def cond(i, _): + // return i < array_ops.size(inputs) # pylint: disable=cell-var-from-loop + + // def body(i, outputs): + // x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop + // outputs = outputs.write(i, x) + // return i + 1, outputs + + // _, outputs = control_flow_ops.while_loop(cond, body, + // [initial_i, initial_outputs]) + + // outputs = math_ops.reduce_sum(outputs.stack()) + // r = gradients_impl.gradients([outputs], [inputs])[0] + // grad_wr_inputs = ops.convert_to_tensor(r) + // o, grad = sess.run([outputs, grad_wr_inputs], + // feed_dict={inputs: [1, 3, 2]}) + // self.assertEquals(o, 6) + // self.assertAllEqual(grad, [1] * 3) + + } + + } +} diff --git a/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs b/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs index 4ffc5342..9527e689 100644 --- a/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs +++ 
b/test/TensorFlowNET.UnitTest/control_flow_ops_test/WhileContextTestCase.cs
@@ -1,52 +1,52 @@
-using System;
-using Microsoft.VisualStudio.TestTools.UnitTesting;
-using Tensorflow;
-using static Tensorflow.Binding;
-
-namespace TensorFlowNET.UnitTest.control_flow_ops_test
-{
-    [TestClass]
-    public class WhileContextTestCase : PythonTest
-    {
-        /// <summary>
-        /// https://www.tensorflow.org/api_docs/python/tf/while_loop
-        /// </summary>
-        [Ignore]
-        [TestMethod]
-        public void SimpleWhileLoop()
-        {
-            var i = constant_op.constant(0, name: "i");
-            var c = new Func<Tensor, Tensor>(x => tf.less(x, 10, name: "c"));
-            var b = new Func<Tensor, Tensor>(x => tf.add(x, 1, name: "c"));
-            //var r = control_flow_ops.while_loop(c, b, i);
-        }
-
-        private void _testWhileContextHelper(int maximum_iterations)
-        {
-            // TODO: implement missing code dependencies
-            using (var sess = this.cached_session())
-            {
-                var i = constant_op.constant(0, name: "i");
-                var c = new Func<Tensor, Tensor>(x => gen_math_ops.less(x, 10, name: "c"));
-                var b = new Func<Tensor, Tensor>(x => gen_math_ops.add(x, 1, name: "c"));
-                //control_flow_ops.while_loop(
-                //    c, b, i, maximum_iterations: tf.constant(maximum_iterations));
-                foreach (Operation op in sess.graph.get_operations())
-                {
-                    var control_flow_context = op._get_control_flow_context();
-                    /*if (control_flow_context != null)
-                        self.assertProtoEquals(control_flow_context.to_proto(),
-                            WhileContext.from_proto(
-                                control_flow_context.to_proto()).to_proto(), "");*/
-                }
-            }
-        }
-
-        [Ignore("TODO")]
-        [TestMethod]
-        public void testWhileContextWithMaximumIterations()
-        {
-            _testWhileContextHelper(maximum_iterations: 10);
-        }
-    }
-}
+using System;
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using Tensorflow;
+using static Tensorflow.Binding;
+
+namespace TensorFlowNET.UnitTest.control_flow_ops_test
+{
+    [TestClass]
+    public class WhileContextTestCase : PythonTest
+    {
+        /// <summary>
+        /// https://www.tensorflow.org/api_docs/python/tf/while_loop
+        /// </summary>
+        [Ignore]
+        [TestMethod]
+        public void SimpleWhileLoop()
+        {
+            var i = constant_op.constant(0, name: "i");
+            var c = new Func<Tensor, Tensor>(x => tf.less(x, 10, name: "c"));
+            var b = new Func<Tensor, Tensor>(x => tf.add(x, 1, name: "c"));
+            //var r = control_flow_ops.while_loop(c, b, i);
+        }
+
+        private void _testWhileContextHelper(int maximum_iterations)
+        {
+            // TODO: implement missing code dependencies
+            using (var sess = this.cached_session())
+            {
+                var i = constant_op.constant(0, name: "i");
+                var c = new Func<Tensor, Tensor>(x => gen_math_ops.less(x, 10, name: "c"));
+                var b = new Func<Tensor, Tensor>(x => gen_math_ops.add(x, 1, name: "c"));
+                //control_flow_ops.while_loop(
+                //    c, b, i, maximum_iterations: tf.constant(maximum_iterations));
+                foreach (Operation op in sess.graph.get_operations())
+                {
+                    var control_flow_context = op._get_control_flow_context();
+                    /*if (control_flow_context != null)
+                        self.assertProtoEquals(control_flow_context.to_proto(),
+                            WhileContext.from_proto(
+                                control_flow_context.to_proto()).to_proto(), "");*/
+                }
+            }
+        }
+
+        [Ignore("TODO")]
+        [TestMethod]
+        public void testWhileContextWithMaximumIterations()
+        {
+            _testWhileContextHelper(maximum_iterations: 10);
+        }
+    }
+}
diff --git a/test/TensorFlowNET.UnitTest/nest_test/NestTest.cs b/test/TensorFlowNET.UnitTest/nest_test/NestTest.cs
index 4e2e5871..5d14920d 100644
--- a/test/TensorFlowNET.UnitTest/nest_test/NestTest.cs
+++ b/test/TensorFlowNET.UnitTest/nest_test/NestTest.cs
@@ -1,873 +1,873 @@
-using System;
-using System.Collections;
-using System.Collections.Generic;
-using Microsoft.VisualStudio.TestTools.UnitTesting;
-using Newtonsoft.Json.Linq;
-using NumSharp;
-using Tensorflow;
-using
Tensorflow.Util; -using static Tensorflow.Binding; - -namespace TensorFlowNET.UnitTest.nest_test -{ - /// - /// excerpt of tensorflow/python/framework/util/nest_test.py - /// - [TestClass] - public class NestTest : PythonTest - { - [TestInitialize] - public void TestInitialize() - { - tf.Graph().as_default(); - } - - //public class PointXY - //{ - // public double x; - // public double y; - //} - - // if attr: - // class BadAttr(object): - // """Class that has a non-iterable __attrs_attrs__.""" - // __attrs_attrs__ = None - - // @attr.s - // class SampleAttr(object): - // field1 = attr.ib() - // field2 = attr.ib() - - // @test_util.assert_no_new_pyobjects_executing_eagerly - // def testAttrsFlattenAndPack(self) : - // if attr is None: - // self.skipTest("attr module is unavailable.") - - // field_values = [1, 2] - // sample_attr = NestTest.SampleAttr(* field_values) - // self.assertFalse(nest._is_attrs(field_values)) - // self.assertTrue(nest._is_attrs(sample_attr)) - // flat = nest.flatten(sample_attr) - // self.assertEqual(field_values, flat) - // restructured_from_flat = nest.pack_sequence_as(sample_attr, flat) - // self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr) - // self.assertEqual(restructured_from_flat, sample_attr) - - //# Check that flatten fails if attributes are not iterable - // with self.assertRaisesRegexp(TypeError, "object is not iterable"): - // flat = nest.flatten(NestTest.BadAttr()) - [Ignore] - [TestMethod] - public void testFlattenAndPack() - { - object structure = new object[] { new object[] { 3, 4 }, 5, new object[] { 6, 7, new object[] { 9, 10 }, 8 } }; - var flat = new List { "a", "b", "c", "d", "e", "f", "g", "h" }; - - self.assertEqual(nest.flatten(structure), new[] { 3, 4, 5, 6, 7, 9, 10, 8 }); - self.assertEqual(JArray.FromObject(nest.pack_sequence_as(structure, flat)).ToString(), - JArray.FromObject(new object[] { new object[] { "a", "b" }, "c", new object[] { "d", "e", new object[] { "f", "g" }, "h" } }).ToString()); - structure = new object[] { new Hashtable { ["x"] = 4, ["y"] = 2 }, new object[] { new object[] { new Hashtable { ["x"] = 1, ["y"] = 0 }, }, } }; - flat = new List { 4, 2, 1, 0 }; - self.assertEqual(nest.flatten(structure), flat); - var restructured_from_flat = nest.pack_sequence_as(structure, flat) as object[]; - //Console.WriteLine(JArray.FromObject(restructured_from_flat)); - self.assertEqual(restructured_from_flat, structure); - self.assertEqual((restructured_from_flat[0] as Hashtable)["x"], 4); - self.assertEqual((restructured_from_flat[0] as Hashtable)["y"], 2); - self.assertEqual((((restructured_from_flat[1] as object[])[0] as object[])[0] as Hashtable)["x"], 1); - self.assertEqual((((restructured_from_flat[1] as object[])[0] as object[])[0] as Hashtable)["y"], 0); - - self.assertEqual(new List { 5 }, nest.flatten(5)); - var flat1 = nest.flatten(np.array(new[] { 5 })); - self.assertEqual(new object[] { np.array(new int[] { 5 }) }, flat1); - - self.assertEqual("a", nest.pack_sequence_as(5, new List { "a" })); - self.assertEqual(np.array(new[] { 5 }), - nest.pack_sequence_as("scalar", new List { np.array(new[] { 5 }) })); - - Assert.ThrowsException(() => nest.pack_sequence_as("scalar", new List() { 4, 5 })); - - Assert.ThrowsException(() => - nest.pack_sequence_as(new object[] { 5, 6, new object[] { 7, 8 } }, new List { "a", "b", "c" })); - } - - // @parameterized.parameters({"mapping_type": collections.OrderedDict - // }, - // {"mapping_type": _CustomMapping - //}) - // 
@test_util.assert_no_new_pyobjects_executing_eagerly - // def testFlattenDictOrder(self, mapping_type) : - // """`flatten` orders dicts by key, including OrderedDicts.""" - // ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]) - // plain = {"d": 3, "b": 1, "a": 0, "c": 2} - // ordered_flat = nest.flatten(ordered) - // plain_flat = nest.flatten(plain) - // self.assertEqual([0, 1, 2, 3], ordered_flat) - // self.assertEqual([0, 1, 2, 3], plain_flat) - - // @parameterized.parameters({"mapping_type": collections.OrderedDict}, - // {"mapping_type": _CustomMapping}) - // def testPackDictOrder(self, mapping_type): - // """Packing orders dicts by key, including OrderedDicts.""" - // custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)]) - // plain = {"d": 0, "b": 0, "a": 0, "c": 0} - // seq = [0, 1, 2, 3] - //custom_reconstruction = nest.pack_sequence_as(custom, seq) - //plain_reconstruction = nest.pack_sequence_as(plain, seq) - // self.assertIsInstance(custom_reconstruction, mapping_type) - // self.assertIsInstance(plain_reconstruction, dict) - // self.assertEqual( - // mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]), - // custom_reconstruction) - // self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction) - - // Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name - - // @test_util.assert_no_new_pyobjects_executing_eagerly - // def testFlattenAndPack_withDicts(self) : - // # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s. - // mess = [ - // "z", - // NestTest.Abc(3, 4), { - // "d": _CustomMapping({ - // 41: 4 - // }), - // "c": [ - // 1, - // collections.OrderedDict([ - // ("b", 3), - // ("a", 2), - // ]), - // ], - // "b": 5 - // }, 17 - // ] - - // flattened = nest.flatten(mess) - // self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17]) - - // structure_of_mess = [ - // 14, - // NestTest.Abc("a", True), - // { - // "d": _CustomMapping({ - // 41: 42 - // }), - // "c": [ - // 0, - // collections.OrderedDict([ - // ("b", 9), - // ("a", 8), - // ]), - // ], - // "b": 3 - // }, - // "hi everybody", - // ] - - // unflattened = nest.pack_sequence_as(structure_of_mess, flattened) - // self.assertEqual(unflattened, mess) - - // # Check also that the OrderedDict was created, with the correct key order. 
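The commented checks above and below pin down the flatten/pack round-trip for dictionary structures. A minimal sketch of that round-trip against the Tensorflow.Util.nest API the active tests in this file already call (assuming, as those tests do, that Hashtable leaves come back in key-sorted order):

    // Round-trip sketch: flatten yields leaves in a deterministic (key-sorted) order,
    // and pack_sequence_as rebuilds the original nesting from that flat list.
    var structure = new object[] { new Hashtable { ["b"] = 1, ["a"] = 0 }, new object[] { 2, 3 } };
    var flat = nest.flatten(structure);                        // leaves: 0, 1, 2, 3
    var repacked = nest.pack_sequence_as(structure, flat);     // same shape as structure
    self.assertEqual(nest.flatten(repacked), flat);            // round-trip is lossless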
- //unflattened_ordered_dict = unflattened[2]["c"][1] - // self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict) - // self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"]) - - // unflattened_custom_mapping = unflattened[2]["d"] - // self.assertIsInstance(unflattened_custom_mapping, _CustomMapping) - // self.assertEqual(list(unflattened_custom_mapping.keys()), [41]) - - [TestMethod] - public void testFlatten_numpyIsNotFlattened() - { - var structure = np.array(1, 2, 3); - var flattened = nest.flatten(structure); - self.assertEqual(len(flattened), 1); - } - - [TestMethod] - public void testFlatten_stringIsNotFlattened() - { - var structure = "lots of letters"; - var flattened = nest.flatten(structure); - self.assertEqual(len(flattened), 1); - var unflattened = nest.pack_sequence_as("goodbye", flattened); - self.assertEqual(structure, unflattened); - } - - // def testPackSequenceAs_notIterableError(self) : - // with self.assertRaisesRegexp(TypeError, - // "flat_sequence must be a sequence"): - // nest.pack_sequence_as("hi", "bye") - - [TestMethod] - public void testPackSequenceAs_wrongLengthsError() - { - Assert.ThrowsException(() => - { - // with self.assertRaisesRegexp( - // ValueError, - // "Structure had 2 elements, but flat_sequence had 3 elements."): - nest.pack_sequence_as(new object[] { "hello", "world" }, new object[] { "and", "goodbye", "again" }); - }); - } - - [TestMethod] - public void testIsSequence() - { - self.assertFalse(nest.is_sequence("1234")); - self.assertTrue(nest.is_sequence(new object[] { 1, 3, new object[] { 4, 5 } })); - // TODO: ValueTuple - //self.assertTrue(nest.is_sequence(((7, 8), (5, 6)))); - self.assertTrue(nest.is_sequence(new object[] { })); - self.assertTrue(nest.is_sequence(new Hashtable { ["a"] = 1, ["b"] = 2 })); - self.assertFalse(nest.is_sequence(new HashSet { 1, 2 })); - var ones = array_ops.ones(new int[] { 2, 3 }); - self.assertFalse(nest.is_sequence(ones)); - self.assertFalse(nest.is_sequence(gen_math_ops.tanh(ones))); - self.assertFalse(nest.is_sequence(np.ones(new int[] { 4, 5 }))); - } - - // @parameterized.parameters({"mapping_type": _CustomMapping}, - // {"mapping_type": dict}) - // def testFlattenDictItems(self, mapping_type): - // dictionary = mapping_type({ (4, 5, (6, 8)): ("a", "b", ("c", "d"))}) - // flat = {4: "a", 5: "b", 6: "c", 8: "d"} - // self.assertEqual(nest.flatten_dict_items(dictionary), flat) - - // with self.assertRaises(TypeError): - // nest.flatten_dict_items(4) - - // bad_dictionary = mapping_type({ (4, 5, (4, 8)): ("a", "b", ("c", "d"))}) - // with self.assertRaisesRegexp(ValueError, "not unique"): - // nest.flatten_dict_items(bad_dictionary) - - // another_bad_dictionary = mapping_type({ - // (4, 5, (6, 8)): ("a", "b", ("c", ("d", "e"))) - // }) - // with self.assertRaisesRegexp( - // ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"): - // nest.flatten_dict_items(another_bad_dictionary) - - //# pylint does not correctly recognize these as class names and - //# suggests to use variable style under_score naming. 
- //# pylint: disable=invalid-name - // Named0ab = collections.namedtuple("named_0", ("a", "b")) - // Named1ab = collections.namedtuple("named_1", ("a", "b")) - // SameNameab = collections.namedtuple("same_name", ("a", "b")) - // SameNameab2 = collections.namedtuple("same_name", ("a", "b")) - // SameNamexy = collections.namedtuple("same_name", ("x", "y")) - // SameName1xy = collections.namedtuple("same_name_1", ("x", "y")) - // SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y")) - // NotSameName = collections.namedtuple("not_same_name", ("a", "b")) - // # pylint: enable=invalid-name - - // class SameNamedType1(SameNameab): - // pass - - // @test_util.assert_no_new_pyobjects_executing_eagerly - // def testAssertSameStructure(self): - // structure1 = (((1, 2), 3), 4, (5, 6)) - // structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) - // structure_different_num_elements = ("spam", "eggs") - // structure_different_nesting = (((1, 2), 3), 4, 5, (6,)) - // nest.assert_same_structure(structure1, structure2) - // nest.assert_same_structure("abc", 1.0) - // nest.assert_same_structure("abc", np.array([0, 1])) - // nest.assert_same_structure("abc", constant_op.constant([0, 1])) - - // with self.assertRaisesRegexp( - // ValueError, - // ("The two structures don't have the same nested structure\\.\n\n" - // "First structure:.*?\n\n" - // "Second structure:.*\n\n" - // "More specifically: Substructure " - // r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while ' - // 'substructure "type=str str=spam" is not\n' - // "Entire first structure:\n" - // r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n" - // "Entire second structure:\n" - // r"\(\., \.\)")): - // nest.assert_same_structure(structure1, structure_different_num_elements) - - // with self.assertRaisesRegexp( - // ValueError, - // ("The two structures don't have the same nested structure\\.\n\n" - // "First structure:.*?\n\n" - // "Second structure:.*\n\n" - // r'More specifically: Substructure "type=list str=\[0, 1\]" ' - // r'is a sequence, while substructure "type=ndarray str=\[0 1\]" ' - // "is not")): - // nest.assert_same_structure([0, 1], np.array([0, 1])) - - // with self.assertRaisesRegexp( - // ValueError, - // ("The two structures don't have the same nested structure\\.\n\n" - // "First structure:.*?\n\n" - // "Second structure:.*\n\n" - // r'More specifically: Substructure "type=list str=\[0, 1\]" ' - // 'is a sequence, while substructure "type=int str=0" ' - // "is not")): - // nest.assert_same_structure(0, [0, 1]) - - // self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1]) - - // with self.assertRaisesRegexp( - // ValueError, - // ("don't have the same nested structure\\.\n\n" - // "First structure: .*?\n\nSecond structure: ")): - // nest.assert_same_structure(structure1, structure_different_nesting) - - // self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), - // NestTest.Named0ab("a", "b")) - - // nest.assert_same_structure(NestTest.Named0ab(3, 4), - // NestTest.Named0ab("a", "b")) - - // self.assertRaises(TypeError, nest.assert_same_structure, - // NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4)) - - // with self.assertRaisesRegexp( - // ValueError, - // ("don't have the same nested structure\\.\n\n" - // "First structure: .*?\n\nSecond structure: ")): - // nest.assert_same_structure(NestTest.Named0ab(3, 4), - // NestTest.Named0ab([3], 4)) - - // with self.assertRaisesRegexp( - // ValueError, - // ("don't have the same nested structure\\.\n\n" - // "First structure: 
.*?\n\nSecond structure: ")): - // nest.assert_same_structure([[3], 4], [3, [4]]) - - // structure1_list = [[[1, 2], 3], 4, [5, 6]] - // with self.assertRaisesRegexp(TypeError, - // "don't have the same sequence type"): - // nest.assert_same_structure(structure1, structure1_list) - // nest.assert_same_structure(structure1, structure2, check_types= False) - // nest.assert_same_structure(structure1, structure1_list, check_types=False) - - // with self.assertRaisesRegexp(ValueError, - // "don't have the same set of keys"): - // nest.assert_same_structure({"a": 1}, {"b": 1}) - - // nest.assert_same_structure(NestTest.SameNameab(0, 1), - // NestTest.SameNameab2(2, 3)) - - // # This assertion is expected to pass: two namedtuples with the same - // # name and field names are considered to be identical. - // nest.assert_same_structure( - // NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2), - // NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4)) - - // expected_message = "The two structures don't have the same.*" - // with self.assertRaisesRegexp(ValueError, expected_message): - // nest.assert_same_structure( - // NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)), - // NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2)) - - // self.assertRaises(TypeError, nest.assert_same_structure, - // NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3)) - - // self.assertRaises(TypeError, nest.assert_same_structure, - // NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3)) - - // self.assertRaises(TypeError, nest.assert_same_structure, - // NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3)) - - // EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name - - // def testHeterogeneousComparison(self): - // nest.assert_same_structure({"a": 4}, _CustomMapping(a= 3)) - // nest.assert_same_structure(_CustomMapping(b=3), {"b": 4}) - [Ignore] - [TestMethod] - public void testMapStructure() - { - var structure1 = new object[] { new object[] { new object[] { 1, 2 }, 3 }, 4, new object[] { 5, 6 } }; - var structure2 = new object[] { new object[] { new object[] { 7, 8 }, 9 }, 10, new object[] { 11, 12 } }; - var structure1_plus1 = nest.map_structure(x => (int)x + 1, structure1); - var structure1_strings = nest.map_structure(x => $"{x}", structure1); - var s = JArray.FromObject(structure1_plus1).ToString(); - Console.WriteLine(s); - // nest.assert_same_structure(structure1, structure1_plus1) - self.assertAllEqual( nest.flatten(structure1_plus1), new object[] { 2, 3, 4, 5, 6, 7 }); - self.assertAllEqual(nest.flatten(structure1_strings), new object[] { "1", "2", "3", "4", "5", "6" }); - var structure1_plus_structure2 = nest.map_structure(x => (int)(x[0]) + (int)(x[1]), structure1, structure2); - self.assertEqual( - new object[] { new object[] { new object[] { 1 + 7, 2 + 8}, 3 + 9}, 4 + 10, new object[] { 5 + 11, 6 + 12}}, - structure1_plus_structure2); - - // self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4)) - - // self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4)) - - // # Empty structures - // self.assertEqual((), nest.map_structure(lambda x: x + 1, ())) - // self.assertEqual([], nest.map_structure(lambda x: x + 1, [])) - // self.assertEqual({}, nest.map_structure(lambda x: x + 1, {})) - // self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1, - // NestTest.EmptyNT())) - - // # This is checking actual equality of types, empty list != empty tuple - // self.assertNotEqual((), nest.map_structure(lambda x: x + 1, [])) - - // with 
self.assertRaisesRegexp(TypeError, "callable"): - // nest.map_structure("bad", structure1_plus1) - - // with self.assertRaisesRegexp(ValueError, "at least one structure"): - // nest.map_structure(lambda x: x) - - // with self.assertRaisesRegexp(ValueError, "same number of elements"): - // nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5)) - - // with self.assertRaisesRegexp(ValueError, "same nested structure"): - // nest.map_structure(lambda x, y: None, 3, (3,)) - - // with self.assertRaisesRegexp(TypeError, "same sequence type"): - // nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5]) - - // with self.assertRaisesRegexp(ValueError, "same nested structure"): - // nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5))) - - // structure1_list = [[[1, 2], 3], 4, [5, 6]] - // with self.assertRaisesRegexp(TypeError, "same sequence type"): - // nest.map_structure(lambda x, y: None, structure1, structure1_list) - - // nest.map_structure(lambda x, y: None, structure1, structure1_list, - // check_types=False) - - // with self.assertRaisesRegexp(ValueError, "same nested structure"): - // nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)), - // check_types=False) - - // with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"): - // nest.map_structure(lambda x: None, structure1, foo="a") - - // with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"): - // nest.map_structure(lambda x: None, structure1, check_types=False, foo="a") - - // ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name - } - - // @test_util.assert_no_new_pyobjects_executing_eagerly - // def testMapStructureWithStrings(self) : - // inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz")) - // inp_b = NestTest.ABTuple(a=2, b=(1, 3)) - // out = nest.map_structure(lambda string, repeats: string* repeats, - // inp_a, - // inp_b) - // self.assertEqual("foofoo", out.a) - // self.assertEqual("bar", out.b[0]) - // self.assertEqual("bazbazbaz", out.b[1]) - - // nt = NestTest.ABTuple(a=("something", "something_else"), - // b="yet another thing") - // rev_nt = nest.map_structure(lambda x: x[::- 1], nt) - // # Check the output is the correct structure, and all strings are reversed. 
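A sketch of this string/repeats case in C#, using the same multi-structure map_structure overload the active testMapStructure above exercises (its callback receives an indexable pair of leaves); string.Concat with Enumerable.Repeat stands in for Python's string * int, and a System.Linq import is assumed:

    // Sketch: combine two parallel structures leaf-by-leaf, mirroring string * repeats.
    var strings = new object[] { "foo", new object[] { "bar", "baz" } };
    var repeats = new object[] { 2, new object[] { 1, 3 } };
    var outp = nest.map_structure(
        leaves => string.Concat(Enumerable.Repeat((string)leaves[0], (int)leaves[1])),
        strings, repeats);
    // flattened leaves of outp: "foofoo", "bar", "bazbazbaz"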
- // nest.assert_same_structure(nt, rev_nt) - // self.assertEqual(nt.a[0][::- 1], rev_nt.a[0]) - // self.assertEqual(nt.a[1][::- 1], rev_nt.a[1]) - // self.assertEqual(nt.b[::- 1], rev_nt.b) - - // @test_util.run_deprecated_v1 - // def testMapStructureOverPlaceholders(self) : - // inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]), - // array_ops.placeholder(dtypes.float32, shape=[3, 7])) - // inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]), - // array_ops.placeholder(dtypes.float32, shape=[3, 7])) - - // output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b) - - // nest.assert_same_structure(output, inp_a) - // self.assertShapeEqual(np.zeros((3, 4)), output[0]) - // self.assertShapeEqual(np.zeros((3, 7)), output[1]) - - // feed_dict = { - // inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)), - // inp_b: (np.random.randn(3, 4), np.random.randn(3, 7)) - // } - - // with self.cached_session() as sess: - // output_np = sess.run(output, feed_dict=feed_dict) - // self.assertAllClose(output_np[0], - // feed_dict[inp_a][0] + feed_dict[inp_b][0]) - // self.assertAllClose(output_np[1], - // feed_dict[inp_a][1] + feed_dict[inp_b][1]) - - // def testAssertShallowStructure(self): - // inp_ab = ["a", "b"] - //inp_abc = ["a", "b", "c"] - //expected_message = ( - // "The two structures don't have the same sequence length. Input " - // "structure has length 2, while shallow structure has length 3.") - // with self.assertRaisesRegexp(ValueError, expected_message): - // nest.assert_shallow_structure(inp_abc, inp_ab) - - // inp_ab1 = [(1, 1), (2, 2)] - // inp_ab2 = [[1, 1], [2, 2]] - // expected_message = ( - // "The two structures don't have the same sequence type. Input structure " - // "has type <(type|class) 'tuple'>, while shallow structure has type " - // "<(type|class) 'list'>.") - // with self.assertRaisesRegexp(TypeError, expected_message): - // nest.assert_shallow_structure(inp_ab2, inp_ab1) - // nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types= False) - - // inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}} - // inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}} - // expected_message = ( - // r"The two structures don't have the same keys. Input " - // r"structure has keys \['c'\], while shallow structure has " - // r"keys \['d'\].") - - // with self.assertRaisesRegexp(ValueError, expected_message): - // nest.assert_shallow_structure(inp_ab2, inp_ab1) - - // inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))]) - // inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)]) - // nest.assert_shallow_structure(inp_ab, inp_ba) - - // # This assertion is expected to pass: two namedtuples with the same - //# name and field names are considered to be identical. - //inp_shallow = NestTest.SameNameab(1, 2) - // inp_deep = NestTest.SameNameab2(1, [1, 2, 3]) - // nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False) - // nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True) - - // def testFlattenUpTo(self): - // # Shallow tree ends at scalar. - // input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] - // shallow_tree = [[True, True], [False, True]] - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]]) - // self.assertEqual(flattened_shallow_tree, [True, True, False, True]) - - //# Shallow tree ends at string. 
- // input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]] - // shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]] - // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, - // input_tree) - // input_tree_flattened = nest.flatten(input_tree) - // self.assertEqual(input_tree_flattened_as_shallow_tree, - // [("a", 1), ("b", 2), ("c", 3), ("d", 4)]) - // self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4]) - - // # Make sure dicts are correctly flattened, yielding values, not keys. - //input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]} - // shallow_tree = {"a": 0, "b": 0, "d": [0, 0]} - // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, - // input_tree) - // self.assertEqual(input_tree_flattened_as_shallow_tree, - // [1, { "c": 2}, 3, (4, 5)]) - - // # Namedtuples. - // ab_tuple = NestTest.ABTuple - // input_tree = ab_tuple(a =[0, 1], b = 2) - // shallow_tree = ab_tuple(a= 0, b= 1) - // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, - // input_tree) - // self.assertEqual(input_tree_flattened_as_shallow_tree, - // [[0, 1], 2]) - - // # Nested dicts, OrderedDicts and namedtuples. - // input_tree = collections.OrderedDict( - // [("a", ab_tuple(a =[0, {"b": 1}], b=2)), - // ("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})]) - // shallow_tree = input_tree - // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, - // input_tree) - // self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4]) - // shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})]) - // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, - // input_tree) - // self.assertEqual(input_tree_flattened_as_shallow_tree, - // [ab_tuple(a =[0, { "b": 1}], b=2), - // 3, - // collections.OrderedDict([("f", 4)])]) - // shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)]) - // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, - // input_tree) - // self.assertEqual(input_tree_flattened_as_shallow_tree, - // [ab_tuple(a =[0, {"b": 1}], b=2), - // {"d": 3, "e": collections.OrderedDict([("f", 4)])}]) - - // ## Shallow non-list edge-case. - // # Using iterable elements. - // input_tree = ["input_tree"] - //shallow_tree = "shallow_tree" - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_input_tree, [input_tree]) - // self.assertEqual(flattened_shallow_tree, [shallow_tree]) - - // input_tree = ["input_tree_0", "input_tree_1"] - //shallow_tree = "shallow_tree" - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_input_tree, [input_tree]) - // self.assertEqual(flattened_shallow_tree, [shallow_tree]) - - // # Using non-iterable elements. 
- //input_tree = [0] - //shallow_tree = 9 - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_input_tree, [input_tree]) - // self.assertEqual(flattened_shallow_tree, [shallow_tree]) - - // input_tree = [0, 1] - //shallow_tree = 9 - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_input_tree, [input_tree]) - // self.assertEqual(flattened_shallow_tree, [shallow_tree]) - - // ## Both non-list edge-case. - //# Using iterable elements. - //input_tree = "input_tree" - // shallow_tree = "shallow_tree" - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_input_tree, [input_tree]) - // self.assertEqual(flattened_shallow_tree, [shallow_tree]) - - // # Using non-iterable elements. - //input_tree = 0 - // shallow_tree = 0 - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_input_tree, [input_tree]) - // self.assertEqual(flattened_shallow_tree, [shallow_tree]) - - // ## Input non-list edge-case. - //# Using iterable elements. - //input_tree = "input_tree" - // shallow_tree = ["shallow_tree"] - //expected_message = ("If shallow structure is a sequence, input must also " - // "be a sequence. Input has type: <(type|class) 'str'>.") - // with self.assertRaisesRegexp(TypeError, expected_message): - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_shallow_tree, shallow_tree) - - // input_tree = "input_tree" - // shallow_tree = ["shallow_tree_9", "shallow_tree_8"] - //with self.assertRaisesRegexp(TypeError, expected_message): - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_shallow_tree, shallow_tree) - - //# Using non-iterable elements. - // input_tree = 0 - // shallow_tree = [9] - //expected_message = ("If shallow structure is a sequence, input must also " - // "be a sequence. Input has type: <(type|class) 'int'>.") - // with self.assertRaisesRegexp(TypeError, expected_message): - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_shallow_tree, shallow_tree) - - // input_tree = 0 - // shallow_tree = [9, 8] - //with self.assertRaisesRegexp(TypeError, expected_message): - // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) - // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) - // self.assertEqual(flattened_shallow_tree, shallow_tree) - - // def testMapStructureUpTo(self) : - // # Named tuples. 
- // ab_tuple = collections.namedtuple("ab_tuple", "a, b") - // op_tuple = collections.namedtuple("op_tuple", "add, mul") - // inp_val = ab_tuple(a= 2, b= 3) - // inp_ops = ab_tuple(a= op_tuple(add = 1, mul = 2), b= op_tuple(add = 2, mul = 3)) - // out = nest.map_structure_up_to( - // inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops) - // self.assertEqual(out.a, 6) - // self.assertEqual(out.b, 15) - - // # Lists. - // data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] - // name_list = ["evens", ["odds", "primes"]] - // out = nest.map_structure_up_to( - // name_list, lambda name, sec: "first_{}_{}".format(len(sec), name), - // name_list, data_list) - // self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]]) - - // # Dicts. - // inp_val = dict(a= 2, b= 3) - // inp_ops = dict(a= dict(add = 1, mul = 2), b= dict(add = 2, mul = 3)) - // out = nest.map_structure_up_to( - // inp_val, - // lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops) - // self.assertEqual(out["a"], 6) - // self.assertEqual(out["b"], 15) - - // # Non-equal dicts. - // inp_val = dict(a= 2, b= 3) - // inp_ops = dict(a= dict(add = 1, mul = 2), c= dict(add = 2, mul = 3)) - // with self.assertRaisesRegexp(ValueError, "same keys"): - // nest.map_structure_up_to( - // inp_val, - // lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops) - - // # Dict+custom mapping. - // inp_val = dict(a= 2, b= 3) - // inp_ops = _CustomMapping(a= dict(add = 1, mul = 2), b= dict(add = 2, mul = 3)) - // out = nest.map_structure_up_to( - // inp_val, - // lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops) - // self.assertEqual(out["a"], 6) - // self.assertEqual(out["b"], 15) - - // # Non-equal dict/mapping. - // inp_val = dict(a= 2, b= 3) - // inp_ops = _CustomMapping(a= dict(add = 1, mul = 2), c= dict(add = 2, mul = 3)) - // with self.assertRaisesRegexp(ValueError, "same keys"): - // nest.map_structure_up_to( - // inp_val, - // lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops) - - // def testGetTraverseShallowStructure(self): - // scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []] - // scalar_traverse_r = nest.get_traverse_shallow_structure( - // lambda s: not isinstance(s, tuple), - // scalar_traverse_input) - // self.assertEqual(scalar_traverse_r, - // [True, True, False, [True, True], {"a": False}, []]) - // nest.assert_shallow_structure(scalar_traverse_r, - // scalar_traverse_input) - - // structure_traverse_input = [(1, [2]), ([1], 2)] - // structure_traverse_r = nest.get_traverse_shallow_structure( - // lambda s: (True, False) if isinstance(s, tuple) else True, - // structure_traverse_input) - // self.assertEqual(structure_traverse_r, - // [(True, False), ([True], False)]) - // nest.assert_shallow_structure(structure_traverse_r, - // structure_traverse_input) - - // with self.assertRaisesRegexp(TypeError, "returned structure"): - // nest.get_traverse_shallow_structure(lambda _: [True], 0) - - // with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"): - // nest.get_traverse_shallow_structure(lambda _: 1, [1]) - - // with self.assertRaisesRegexp( - // TypeError, "didn't return a depth=1 structure of bools"): - // nest.get_traverse_shallow_structure(lambda _: [1], [1]) - - // def testYieldFlatStringPaths(self): - // for inputs_expected in ({"inputs": [], "expected": []}, - // {"inputs": 3, "expected": [()]}, - // {"inputs": [3], "expected": [(0,)]}, - // {"inputs": {"a": 3}, "expected": [("a",)]}, - // 
{"inputs": {"a": {"b": 4}}, - // "expected": [("a", "b")]}, - // {"inputs": [{"a": 2}], "expected": [(0, "a")]}, - // {"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]}, - // {"inputs": [{"a": [(23, 42)]}], - // "expected": [(0, "a", 0, 0), (0, "a", 0, 1)]}, - // {"inputs": [{"a": ([23], 42)}], - // "expected": [(0, "a", 0, 0), (0, "a", 1)]}, - // {"inputs": {"a": {"a": 2}, "c": [[[4]]]}, - // "expected": [("a", "a"), ("c", 0, 0, 0)]}, - // {"inputs": {"0": [{"1": 23}]}, - // "expected": [("0", 0, "1")]}): - // inputs = inputs_expected["inputs"] - // expected = inputs_expected["expected"] - // self.assertEqual(list(nest.yield_flat_paths(inputs)), expected) - - // def testFlattenWithStringPaths(self): - // for inputs_expected in ( - // {"inputs": [], "expected": []}, - // {"inputs": [23, "42"], "expected": [("0", 23), ("1", "42")]}, - // {"inputs": [[[[108]]]], "expected": [("0/0/0/0", 108)]}): - // inputs = inputs_expected["inputs"] - // expected = inputs_expected["expected"] - // self.assertEqual( - // nest.flatten_with_joined_string_paths(inputs, separator="/"), - // expected) - - // # Need a separate test for namedtuple as we can't declare tuple definitions - // # in the @parameterized arguments. - // def testFlattenNamedTuple(self): - // # pylint: disable=invalid-name - // Foo = collections.namedtuple("Foo", ["a", "b"]) - // Bar = collections.namedtuple("Bar", ["c", "d"]) - // # pylint: enable=invalid-name - // test_cases = [ - // (Foo(a = 3, b = Bar(c = 23, d = 42)), - // [("a", 3), ("b/c", 23), ("b/d", 42)]), - // (Foo(a = Bar(c = 23, d = 42), b = Bar(c = 0, d = "something")), - // [("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "something")]), - // (Bar(c = 42, d = 43), - // [("c", 42), ("d", 43)]), - // (Bar(c =[42], d = 43), - // [("c/0", 42), ("d", 43)]), - // ] - // for inputs, expected in test_cases: - // self.assertEqual( - // list(nest.flatten_with_joined_string_paths(inputs)), expected) - - // @parameterized.named_parameters( - // ("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))), - // ("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True, - // {"a": ("a", 4), "b": ("b", 6)}), - // ("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))), - // ("nested", - // {"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True, - // {"a": [("a/0", 10), ("a/1", 12)], - // "b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]})) - // def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected): - // def format_sum(path, * values): - // return (path, sum(values)) - // result = nest.map_structure_with_paths(format_sum, s1, s2, - // check_types=check_types) - // self.assertEqual(expected, result) - - // @parameterized.named_parameters( - // ("tuples", (1, 2), (3, 4, 5), ValueError), - // ("dicts", {"a": 1}, {"b": 2}, ValueError), - // ("mixed", (1, 2), [3, 4], TypeError), - // ("nested", - // {"a": [2, 3], "b": [1, 3]}, - // {"b": [5, 6, 7], "a": [8, 9]}, - // ValueError - // )) - // def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type): - // with self.assertRaises(error_type): - // nest.map_structure_with_paths(lambda path, * s: 0, s1, s2) - - - //class NestBenchmark(test.Benchmark): - - // def run_and_report(self, s1, s2, name): - // burn_iter, test_iter = 100, 30000 - - // for _ in xrange(burn_iter) : - // nest.assert_same_structure(s1, s2) - - // t0 = time.time() - // for _ in xrange(test_iter) : - // nest.assert_same_structure(s1, s2) - // t1 = time.time() - - // self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter, - // name=name) - - // 
def benchmark_assert_structure(self): - // s1 = (((1, 2), 3), 4, (5, 6)) - // s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) - // self.run_and_report(s1, s2, "assert_same_structure_6_elem") - - // s1 = (((1, 2), 3), 4, (5, 6)) * 10 - // s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10 - // self.run_and_report(s1, s2, "assert_same_structure_60_elem") - - - //if __name__ == "__main__": - // test.main() - } -} +using System; +using System.Collections; +using System.Collections.Generic; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Newtonsoft.Json.Linq; +using NumSharp; +using Tensorflow; +using Tensorflow.Util; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest.nest_test +{ + /// + /// excerpt of tensorflow/python/framework/util/nest_test.py + /// + [TestClass] + public class NestTest : PythonTest + { + [TestInitialize] + public void TestInitialize() + { + tf.Graph().as_default(); + } + + //public class PointXY + //{ + // public double x; + // public double y; + //} + + // if attr: + // class BadAttr(object): + // """Class that has a non-iterable __attrs_attrs__.""" + // __attrs_attrs__ = None + + // @attr.s + // class SampleAttr(object): + // field1 = attr.ib() + // field2 = attr.ib() + + // @test_util.assert_no_new_pyobjects_executing_eagerly + // def testAttrsFlattenAndPack(self) : + // if attr is None: + // self.skipTest("attr module is unavailable.") + + // field_values = [1, 2] + // sample_attr = NestTest.SampleAttr(* field_values) + // self.assertFalse(nest._is_attrs(field_values)) + // self.assertTrue(nest._is_attrs(sample_attr)) + // flat = nest.flatten(sample_attr) + // self.assertEqual(field_values, flat) + // restructured_from_flat = nest.pack_sequence_as(sample_attr, flat) + // self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr) + // self.assertEqual(restructured_from_flat, sample_attr) + + //# Check that flatten fails if attributes are not iterable + // with self.assertRaisesRegexp(TypeError, "object is not iterable"): + // flat = nest.flatten(NestTest.BadAttr()) + [Ignore] + [TestMethod] + public void testFlattenAndPack() + { + object structure = new object[] { new object[] { 3, 4 }, 5, new object[] { 6, 7, new object[] { 9, 10 }, 8 } }; + var flat = new List { "a", "b", "c", "d", "e", "f", "g", "h" }; + + self.assertEqual(nest.flatten(structure), new[] { 3, 4, 5, 6, 7, 9, 10, 8 }); + self.assertEqual(JArray.FromObject(nest.pack_sequence_as(structure, flat)).ToString(), + JArray.FromObject(new object[] { new object[] { "a", "b" }, "c", new object[] { "d", "e", new object[] { "f", "g" }, "h" } }).ToString()); + structure = new object[] { new Hashtable { ["x"] = 4, ["y"] = 2 }, new object[] { new object[] { new Hashtable { ["x"] = 1, ["y"] = 0 }, }, } }; + flat = new List { 4, 2, 1, 0 }; + self.assertEqual(nest.flatten(structure), flat); + var restructured_from_flat = nest.pack_sequence_as(structure, flat) as object[]; + //Console.WriteLine(JArray.FromObject(restructured_from_flat)); + self.assertEqual(restructured_from_flat, structure); + self.assertEqual((restructured_from_flat[0] as Hashtable)["x"], 4); + self.assertEqual((restructured_from_flat[0] as Hashtable)["y"], 2); + self.assertEqual((((restructured_from_flat[1] as object[])[0] as object[])[0] as Hashtable)["x"], 1); + self.assertEqual((((restructured_from_flat[1] as object[])[0] as object[])[0] as Hashtable)["y"], 0); + + self.assertEqual(new List { 5 }, nest.flatten(5)); + var flat1 = nest.flatten(np.array(new[] { 5 })); + 
self.assertEqual(new object[] { np.array(new int[] { 5 }) }, flat1); + + self.assertEqual("a", nest.pack_sequence_as(5, new List { "a" })); + self.assertEqual(np.array(new[] { 5 }), + nest.pack_sequence_as("scalar", new List { np.array(new[] { 5 }) })); + + Assert.ThrowsException(() => nest.pack_sequence_as("scalar", new List() { 4, 5 })); + + Assert.ThrowsException(() => + nest.pack_sequence_as(new object[] { 5, 6, new object[] { 7, 8 } }, new List { "a", "b", "c" })); + } + + // @parameterized.parameters({"mapping_type": collections.OrderedDict + // }, + // {"mapping_type": _CustomMapping + //}) + // @test_util.assert_no_new_pyobjects_executing_eagerly + // def testFlattenDictOrder(self, mapping_type) : + // """`flatten` orders dicts by key, including OrderedDicts.""" + // ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]) + // plain = {"d": 3, "b": 1, "a": 0, "c": 2} + // ordered_flat = nest.flatten(ordered) + // plain_flat = nest.flatten(plain) + // self.assertEqual([0, 1, 2, 3], ordered_flat) + // self.assertEqual([0, 1, 2, 3], plain_flat) + + // @parameterized.parameters({"mapping_type": collections.OrderedDict}, + // {"mapping_type": _CustomMapping}) + // def testPackDictOrder(self, mapping_type): + // """Packing orders dicts by key, including OrderedDicts.""" + // custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)]) + // plain = {"d": 0, "b": 0, "a": 0, "c": 0} + // seq = [0, 1, 2, 3] + //custom_reconstruction = nest.pack_sequence_as(custom, seq) + //plain_reconstruction = nest.pack_sequence_as(plain, seq) + // self.assertIsInstance(custom_reconstruction, mapping_type) + // self.assertIsInstance(plain_reconstruction, dict) + // self.assertEqual( + // mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]), + // custom_reconstruction) + // self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction) + + // Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name + + // @test_util.assert_no_new_pyobjects_executing_eagerly + // def testFlattenAndPack_withDicts(self) : + // # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s. + // mess = [ + // "z", + // NestTest.Abc(3, 4), { + // "d": _CustomMapping({ + // 41: 4 + // }), + // "c": [ + // 1, + // collections.OrderedDict([ + // ("b", 3), + // ("a", 2), + // ]), + // ], + // "b": 5 + // }, 17 + // ] + + // flattened = nest.flatten(mess) + // self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17]) + + // structure_of_mess = [ + // 14, + // NestTest.Abc("a", True), + // { + // "d": _CustomMapping({ + // 41: 42 + // }), + // "c": [ + // 0, + // collections.OrderedDict([ + // ("b", 9), + // ("a", 8), + // ]), + // ], + // "b": 3 + // }, + // "hi everybody", + // ] + + // unflattened = nest.pack_sequence_as(structure_of_mess, flattened) + // self.assertEqual(unflattened, mess) + + // # Check also that the OrderedDict was created, with the correct key order. 
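Two leaf rules worth keeping in mind here: nest.flatten treats an NDArray and a string each as a single leaf rather than descending into them, which is exactly what the active testFlatten_numpyIsNotFlattened and testFlatten_stringIsNotFlattened below assert. A minimal sketch in the same style:

    // Leaf semantics sketch: containers are traversed; scalars, arrays and strings are not.
    self.assertEqual(len(nest.flatten(np.array(1, 2, 3))), 1);     // one NDArray leaf
    self.assertEqual(len(nest.flatten("lots of letters")), 1);     // one string leaf
    self.assertEqual(len(nest.flatten(new object[] { 1, "ab", np.array(2, 3) })), 3);  // container traversed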
+ //unflattened_ordered_dict = unflattened[2]["c"][1] + // self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict) + // self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"]) + + // unflattened_custom_mapping = unflattened[2]["d"] + // self.assertIsInstance(unflattened_custom_mapping, _CustomMapping) + // self.assertEqual(list(unflattened_custom_mapping.keys()), [41]) + + [TestMethod] + public void testFlatten_numpyIsNotFlattened() + { + var structure = np.array(1, 2, 3); + var flattened = nest.flatten(structure); + self.assertEqual(len(flattened), 1); + } + + [TestMethod] + public void testFlatten_stringIsNotFlattened() + { + var structure = "lots of letters"; + var flattened = nest.flatten(structure); + self.assertEqual(len(flattened), 1); + var unflattened = nest.pack_sequence_as("goodbye", flattened); + self.assertEqual(structure, unflattened); + } + + // def testPackSequenceAs_notIterableError(self) : + // with self.assertRaisesRegexp(TypeError, + // "flat_sequence must be a sequence"): + // nest.pack_sequence_as("hi", "bye") + + [TestMethod] + public void testPackSequenceAs_wrongLengthsError() + { + Assert.ThrowsException(() => + { + // with self.assertRaisesRegexp( + // ValueError, + // "Structure had 2 elements, but flat_sequence had 3 elements."): + nest.pack_sequence_as(new object[] { "hello", "world" }, new object[] { "and", "goodbye", "again" }); + }); + } + + [TestMethod] + public void testIsSequence() + { + self.assertFalse(nest.is_sequence("1234")); + self.assertTrue(nest.is_sequence(new object[] { 1, 3, new object[] { 4, 5 } })); + // TODO: ValueTuple + //self.assertTrue(nest.is_sequence(((7, 8), (5, 6)))); + self.assertTrue(nest.is_sequence(new object[] { })); + self.assertTrue(nest.is_sequence(new Hashtable { ["a"] = 1, ["b"] = 2 })); + self.assertFalse(nest.is_sequence(new HashSet { 1, 2 })); + var ones = array_ops.ones(new int[] { 2, 3 }); + self.assertFalse(nest.is_sequence(ones)); + self.assertFalse(nest.is_sequence(gen_math_ops.tanh(ones))); + self.assertFalse(nest.is_sequence(np.ones(new int[] { 4, 5 }))); + } + + // @parameterized.parameters({"mapping_type": _CustomMapping}, + // {"mapping_type": dict}) + // def testFlattenDictItems(self, mapping_type): + // dictionary = mapping_type({ (4, 5, (6, 8)): ("a", "b", ("c", "d"))}) + // flat = {4: "a", 5: "b", 6: "c", 8: "d"} + // self.assertEqual(nest.flatten_dict_items(dictionary), flat) + + // with self.assertRaises(TypeError): + // nest.flatten_dict_items(4) + + // bad_dictionary = mapping_type({ (4, 5, (4, 8)): ("a", "b", ("c", "d"))}) + // with self.assertRaisesRegexp(ValueError, "not unique"): + // nest.flatten_dict_items(bad_dictionary) + + // another_bad_dictionary = mapping_type({ + // (4, 5, (6, 8)): ("a", "b", ("c", ("d", "e"))) + // }) + // with self.assertRaisesRegexp( + // ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"): + // nest.flatten_dict_items(another_bad_dictionary) + + //# pylint does not correctly recognize these as class names and + //# suggests to use variable style under_score naming. 
+ //# pylint: disable=invalid-name + // Named0ab = collections.namedtuple("named_0", ("a", "b")) + // Named1ab = collections.namedtuple("named_1", ("a", "b")) + // SameNameab = collections.namedtuple("same_name", ("a", "b")) + // SameNameab2 = collections.namedtuple("same_name", ("a", "b")) + // SameNamexy = collections.namedtuple("same_name", ("x", "y")) + // SameName1xy = collections.namedtuple("same_name_1", ("x", "y")) + // SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y")) + // NotSameName = collections.namedtuple("not_same_name", ("a", "b")) + // # pylint: enable=invalid-name + + // class SameNamedType1(SameNameab): + // pass + + // @test_util.assert_no_new_pyobjects_executing_eagerly + // def testAssertSameStructure(self): + // structure1 = (((1, 2), 3), 4, (5, 6)) + // structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) + // structure_different_num_elements = ("spam", "eggs") + // structure_different_nesting = (((1, 2), 3), 4, 5, (6,)) + // nest.assert_same_structure(structure1, structure2) + // nest.assert_same_structure("abc", 1.0) + // nest.assert_same_structure("abc", np.array([0, 1])) + // nest.assert_same_structure("abc", constant_op.constant([0, 1])) + + // with self.assertRaisesRegexp( + // ValueError, + // ("The two structures don't have the same nested structure\\.\n\n" + // "First structure:.*?\n\n" + // "Second structure:.*\n\n" + // "More specifically: Substructure " + // r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while ' + // 'substructure "type=str str=spam" is not\n' + // "Entire first structure:\n" + // r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n" + // "Entire second structure:\n" + // r"\(\., \.\)")): + // nest.assert_same_structure(structure1, structure_different_num_elements) + + // with self.assertRaisesRegexp( + // ValueError, + // ("The two structures don't have the same nested structure\\.\n\n" + // "First structure:.*?\n\n" + // "Second structure:.*\n\n" + // r'More specifically: Substructure "type=list str=\[0, 1\]" ' + // r'is a sequence, while substructure "type=ndarray str=\[0 1\]" ' + // "is not")): + // nest.assert_same_structure([0, 1], np.array([0, 1])) + + // with self.assertRaisesRegexp( + // ValueError, + // ("The two structures don't have the same nested structure\\.\n\n" + // "First structure:.*?\n\n" + // "Second structure:.*\n\n" + // r'More specifically: Substructure "type=list str=\[0, 1\]" ' + // 'is a sequence, while substructure "type=int str=0" ' + // "is not")): + // nest.assert_same_structure(0, [0, 1]) + + // self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1]) + + // with self.assertRaisesRegexp( + // ValueError, + // ("don't have the same nested structure\\.\n\n" + // "First structure: .*?\n\nSecond structure: ")): + // nest.assert_same_structure(structure1, structure_different_nesting) + + // self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), + // NestTest.Named0ab("a", "b")) + + // nest.assert_same_structure(NestTest.Named0ab(3, 4), + // NestTest.Named0ab("a", "b")) + + // self.assertRaises(TypeError, nest.assert_same_structure, + // NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4)) + + // with self.assertRaisesRegexp( + // ValueError, + // ("don't have the same nested structure\\.\n\n" + // "First structure: .*?\n\nSecond structure: ")): + // nest.assert_same_structure(NestTest.Named0ab(3, 4), + // NestTest.Named0ab([3], 4)) + + // with self.assertRaisesRegexp( + // ValueError, + // ("don't have the same nested structure\\.\n\n" + // "First structure: 
.*?\n\nSecond structure: ")): + // nest.assert_same_structure([[3], 4], [3, [4]]) + + // structure1_list = [[[1, 2], 3], 4, [5, 6]] + // with self.assertRaisesRegexp(TypeError, + // "don't have the same sequence type"): + // nest.assert_same_structure(structure1, structure1_list) + // nest.assert_same_structure(structure1, structure2, check_types= False) + // nest.assert_same_structure(structure1, structure1_list, check_types=False) + + // with self.assertRaisesRegexp(ValueError, + // "don't have the same set of keys"): + // nest.assert_same_structure({"a": 1}, {"b": 1}) + + // nest.assert_same_structure(NestTest.SameNameab(0, 1), + // NestTest.SameNameab2(2, 3)) + + // # This assertion is expected to pass: two namedtuples with the same + // # name and field names are considered to be identical. + // nest.assert_same_structure( + // NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2), + // NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4)) + + // expected_message = "The two structures don't have the same.*" + // with self.assertRaisesRegexp(ValueError, expected_message): + // nest.assert_same_structure( + // NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)), + // NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2)) + + // self.assertRaises(TypeError, nest.assert_same_structure, + // NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3)) + + // self.assertRaises(TypeError, nest.assert_same_structure, + // NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3)) + + // self.assertRaises(TypeError, nest.assert_same_structure, + // NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3)) + + // EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name + + // def testHeterogeneousComparison(self): + // nest.assert_same_structure({"a": 4}, _CustomMapping(a= 3)) + // nest.assert_same_structure(_CustomMapping(b=3), {"b": 4}) + [Ignore] + [TestMethod] + public void testMapStructure() + { + var structure1 = new object[] { new object[] { new object[] { 1, 2 }, 3 }, 4, new object[] { 5, 6 } }; + var structure2 = new object[] { new object[] { new object[] { 7, 8 }, 9 }, 10, new object[] { 11, 12 } }; + var structure1_plus1 = nest.map_structure(x => (int)x + 1, structure1); + var structure1_strings = nest.map_structure(x => $"{x}", structure1); + var s = JArray.FromObject(structure1_plus1).ToString(); + Console.WriteLine(s); + // nest.assert_same_structure(structure1, structure1_plus1) + self.assertAllEqual( nest.flatten(structure1_plus1), new object[] { 2, 3, 4, 5, 6, 7 }); + self.assertAllEqual(nest.flatten(structure1_strings), new object[] { "1", "2", "3", "4", "5", "6" }); + var structure1_plus_structure2 = nest.map_structure(x => (int)(x[0]) + (int)(x[1]), structure1, structure2); + self.assertEqual( + new object[] { new object[] { new object[] { 1 + 7, 2 + 8}, 3 + 9}, 4 + 10, new object[] { 5 + 11, 6 + 12}}, + structure1_plus_structure2); + + // self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4)) + + // self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4)) + + // # Empty structures + // self.assertEqual((), nest.map_structure(lambda x: x + 1, ())) + // self.assertEqual([], nest.map_structure(lambda x: x + 1, [])) + // self.assertEqual({}, nest.map_structure(lambda x: x + 1, {})) + // self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1, + // NestTest.EmptyNT())) + + // # This is checking actual equality of types, empty list != empty tuple + // self.assertNotEqual((), nest.map_structure(lambda x: x + 1, [])) + + // with 
self.assertRaisesRegexp(TypeError, "callable"): + // nest.map_structure("bad", structure1_plus1) + + // with self.assertRaisesRegexp(ValueError, "at least one structure"): + // nest.map_structure(lambda x: x) + + // with self.assertRaisesRegexp(ValueError, "same number of elements"): + // nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5)) + + // with self.assertRaisesRegexp(ValueError, "same nested structure"): + // nest.map_structure(lambda x, y: None, 3, (3,)) + + // with self.assertRaisesRegexp(TypeError, "same sequence type"): + // nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5]) + + // with self.assertRaisesRegexp(ValueError, "same nested structure"): + // nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5))) + + // structure1_list = [[[1, 2], 3], 4, [5, 6]] + // with self.assertRaisesRegexp(TypeError, "same sequence type"): + // nest.map_structure(lambda x, y: None, structure1, structure1_list) + + // nest.map_structure(lambda x, y: None, structure1, structure1_list, + // check_types=False) + + // with self.assertRaisesRegexp(ValueError, "same nested structure"): + // nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)), + // check_types=False) + + // with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"): + // nest.map_structure(lambda x: None, structure1, foo="a") + + // with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"): + // nest.map_structure(lambda x: None, structure1, check_types=False, foo="a") + + // ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name + } + + // @test_util.assert_no_new_pyobjects_executing_eagerly + // def testMapStructureWithStrings(self) : + // inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz")) + // inp_b = NestTest.ABTuple(a=2, b=(1, 3)) + // out = nest.map_structure(lambda string, repeats: string* repeats, + // inp_a, + // inp_b) + // self.assertEqual("foofoo", out.a) + // self.assertEqual("bar", out.b[0]) + // self.assertEqual("bazbazbaz", out.b[1]) + + // nt = NestTest.ABTuple(a=("something", "something_else"), + // b="yet another thing") + // rev_nt = nest.map_structure(lambda x: x[::- 1], nt) + // # Check the output is the correct structure, and all strings are reversed. 
+ // nest.assert_same_structure(nt, rev_nt) + // self.assertEqual(nt.a[0][::- 1], rev_nt.a[0]) + // self.assertEqual(nt.a[1][::- 1], rev_nt.a[1]) + // self.assertEqual(nt.b[::- 1], rev_nt.b) + + // @test_util.run_deprecated_v1 + // def testMapStructureOverPlaceholders(self) : + // inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]), + // array_ops.placeholder(dtypes.float32, shape=[3, 7])) + // inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]), + // array_ops.placeholder(dtypes.float32, shape=[3, 7])) + + // output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b) + + // nest.assert_same_structure(output, inp_a) + // self.assertShapeEqual(np.zeros((3, 4)), output[0]) + // self.assertShapeEqual(np.zeros((3, 7)), output[1]) + + // feed_dict = { + // inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)), + // inp_b: (np.random.randn(3, 4), np.random.randn(3, 7)) + // } + + // with self.cached_session() as sess: + // output_np = sess.run(output, feed_dict=feed_dict) + // self.assertAllClose(output_np[0], + // feed_dict[inp_a][0] + feed_dict[inp_b][0]) + // self.assertAllClose(output_np[1], + // feed_dict[inp_a][1] + feed_dict[inp_b][1]) + + // def testAssertShallowStructure(self): + // inp_ab = ["a", "b"] + //inp_abc = ["a", "b", "c"] + //expected_message = ( + // "The two structures don't have the same sequence length. Input " + // "structure has length 2, while shallow structure has length 3.") + // with self.assertRaisesRegexp(ValueError, expected_message): + // nest.assert_shallow_structure(inp_abc, inp_ab) + + // inp_ab1 = [(1, 1), (2, 2)] + // inp_ab2 = [[1, 1], [2, 2]] + // expected_message = ( + // "The two structures don't have the same sequence type. Input structure " + // "has type <(type|class) 'tuple'>, while shallow structure has type " + // "<(type|class) 'list'>.") + // with self.assertRaisesRegexp(TypeError, expected_message): + // nest.assert_shallow_structure(inp_ab2, inp_ab1) + // nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types= False) + + // inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}} + // inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}} + // expected_message = ( + // r"The two structures don't have the same keys. Input " + // r"structure has keys \['c'\], while shallow structure has " + // r"keys \['d'\].") + + // with self.assertRaisesRegexp(ValueError, expected_message): + // nest.assert_shallow_structure(inp_ab2, inp_ab1) + + // inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))]) + // inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)]) + // nest.assert_shallow_structure(inp_ab, inp_ba) + + // # This assertion is expected to pass: two namedtuples with the same + //# name and field names are considered to be identical. + //inp_shallow = NestTest.SameNameab(1, 2) + // inp_deep = NestTest.SameNameab2(1, [1, 2, 3]) + // nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False) + // nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True) + + // def testFlattenUpTo(self): + // # Shallow tree ends at scalar. + // input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] + // shallow_tree = [[True, True], [False, True]] + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]]) + // self.assertEqual(flattened_shallow_tree, [True, True, False, True]) + + //# Shallow tree ends at string. 
+ // input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]] + // shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]] + // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, + // input_tree) + // input_tree_flattened = nest.flatten(input_tree) + // self.assertEqual(input_tree_flattened_as_shallow_tree, + // [("a", 1), ("b", 2), ("c", 3), ("d", 4)]) + // self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4]) + + // # Make sure dicts are correctly flattened, yielding values, not keys. + //input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]} + // shallow_tree = {"a": 0, "b": 0, "d": [0, 0]} + // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, + // input_tree) + // self.assertEqual(input_tree_flattened_as_shallow_tree, + // [1, { "c": 2}, 3, (4, 5)]) + + // # Namedtuples. + // ab_tuple = NestTest.ABTuple + // input_tree = ab_tuple(a =[0, 1], b = 2) + // shallow_tree = ab_tuple(a= 0, b= 1) + // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, + // input_tree) + // self.assertEqual(input_tree_flattened_as_shallow_tree, + // [[0, 1], 2]) + + // # Nested dicts, OrderedDicts and namedtuples. + // input_tree = collections.OrderedDict( + // [("a", ab_tuple(a =[0, {"b": 1}], b=2)), + // ("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})]) + // shallow_tree = input_tree + // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, + // input_tree) + // self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4]) + // shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})]) + // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, + // input_tree) + // self.assertEqual(input_tree_flattened_as_shallow_tree, + // [ab_tuple(a =[0, { "b": 1}], b=2), + // 3, + // collections.OrderedDict([("f", 4)])]) + // shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)]) + // input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, + // input_tree) + // self.assertEqual(input_tree_flattened_as_shallow_tree, + // [ab_tuple(a =[0, {"b": 1}], b=2), + // {"d": 3, "e": collections.OrderedDict([("f", 4)])}]) + + // ## Shallow non-list edge-case. + // # Using iterable elements. + // input_tree = ["input_tree"] + //shallow_tree = "shallow_tree" + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_input_tree, [input_tree]) + // self.assertEqual(flattened_shallow_tree, [shallow_tree]) + + // input_tree = ["input_tree_0", "input_tree_1"] + //shallow_tree = "shallow_tree" + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_input_tree, [input_tree]) + // self.assertEqual(flattened_shallow_tree, [shallow_tree]) + + // # Using non-iterable elements. 
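 + // (When the shallow structure is not iterable it is treated as a single
 + // leaf, so flatten_up_to wraps the whole input in a one-element list — see
 + // the asserts below. A hypothetical C# port of this case, assuming a
 + // nest.flatten_up_to overload exists here, might read:
 + //   var flat = nest.flatten_up_to(9, new object[] { 0 });
 + //   self.assertEqual(flat.Count, 1);  // the single element is the whole input [0]
 + // )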
+ //input_tree = [0] + //shallow_tree = 9 + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_input_tree, [input_tree]) + // self.assertEqual(flattened_shallow_tree, [shallow_tree]) + + // input_tree = [0, 1] + //shallow_tree = 9 + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_input_tree, [input_tree]) + // self.assertEqual(flattened_shallow_tree, [shallow_tree]) + + // ## Both non-list edge-case. + //# Using iterable elements. + //input_tree = "input_tree" + // shallow_tree = "shallow_tree" + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_input_tree, [input_tree]) + // self.assertEqual(flattened_shallow_tree, [shallow_tree]) + + // # Using non-iterable elements. + //input_tree = 0 + // shallow_tree = 0 + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_input_tree, [input_tree]) + // self.assertEqual(flattened_shallow_tree, [shallow_tree]) + + // ## Input non-list edge-case. + //# Using iterable elements. + //input_tree = "input_tree" + // shallow_tree = ["shallow_tree"] + //expected_message = ("If shallow structure is a sequence, input must also " + // "be a sequence. Input has type: <(type|class) 'str'>.") + // with self.assertRaisesRegexp(TypeError, expected_message): + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_shallow_tree, shallow_tree) + + // input_tree = "input_tree" + // shallow_tree = ["shallow_tree_9", "shallow_tree_8"] + //with self.assertRaisesRegexp(TypeError, expected_message): + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_shallow_tree, shallow_tree) + + //# Using non-iterable elements. + // input_tree = 0 + // shallow_tree = [9] + //expected_message = ("If shallow structure is a sequence, input must also " + // "be a sequence. Input has type: <(type|class) 'int'>.") + // with self.assertRaisesRegexp(TypeError, expected_message): + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_shallow_tree, shallow_tree) + + // input_tree = 0 + // shallow_tree = [9, 8] + //with self.assertRaisesRegexp(TypeError, expected_message): + // flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) + // flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) + // self.assertEqual(flattened_shallow_tree, shallow_tree) + + // def testMapStructureUpTo(self) : + // # Named tuples. 
+ // ab_tuple = collections.namedtuple("ab_tuple", "a, b") + // op_tuple = collections.namedtuple("op_tuple", "add, mul") + // inp_val = ab_tuple(a= 2, b= 3) + // inp_ops = ab_tuple(a= op_tuple(add = 1, mul = 2), b= op_tuple(add = 2, mul = 3)) + // out = nest.map_structure_up_to( + // inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops) + // self.assertEqual(out.a, 6) + // self.assertEqual(out.b, 15) + + // # Lists. + // data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] + // name_list = ["evens", ["odds", "primes"]] + // out = nest.map_structure_up_to( + // name_list, lambda name, sec: "first_{}_{}".format(len(sec), name), + // name_list, data_list) + // self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]]) + + // # Dicts. + // inp_val = dict(a= 2, b= 3) + // inp_ops = dict(a= dict(add = 1, mul = 2), b= dict(add = 2, mul = 3)) + // out = nest.map_structure_up_to( + // inp_val, + // lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops) + // self.assertEqual(out["a"], 6) + // self.assertEqual(out["b"], 15) + + // # Non-equal dicts. + // inp_val = dict(a= 2, b= 3) + // inp_ops = dict(a= dict(add = 1, mul = 2), c= dict(add = 2, mul = 3)) + // with self.assertRaisesRegexp(ValueError, "same keys"): + // nest.map_structure_up_to( + // inp_val, + // lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops) + + // # Dict+custom mapping. + // inp_val = dict(a= 2, b= 3) + // inp_ops = _CustomMapping(a= dict(add = 1, mul = 2), b= dict(add = 2, mul = 3)) + // out = nest.map_structure_up_to( + // inp_val, + // lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops) + // self.assertEqual(out["a"], 6) + // self.assertEqual(out["b"], 15) + + // # Non-equal dict/mapping. + // inp_val = dict(a= 2, b= 3) + // inp_ops = _CustomMapping(a= dict(add = 1, mul = 2), c= dict(add = 2, mul = 3)) + // with self.assertRaisesRegexp(ValueError, "same keys"): + // nest.map_structure_up_to( + // inp_val, + // lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops) + + // def testGetTraverseShallowStructure(self): + // scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []] + // scalar_traverse_r = nest.get_traverse_shallow_structure( + // lambda s: not isinstance(s, tuple), + // scalar_traverse_input) + // self.assertEqual(scalar_traverse_r, + // [True, True, False, [True, True], {"a": False}, []]) + // nest.assert_shallow_structure(scalar_traverse_r, + // scalar_traverse_input) + + // structure_traverse_input = [(1, [2]), ([1], 2)] + // structure_traverse_r = nest.get_traverse_shallow_structure( + // lambda s: (True, False) if isinstance(s, tuple) else True, + // structure_traverse_input) + // self.assertEqual(structure_traverse_r, + // [(True, False), ([True], False)]) + // nest.assert_shallow_structure(structure_traverse_r, + // structure_traverse_input) + + // with self.assertRaisesRegexp(TypeError, "returned structure"): + // nest.get_traverse_shallow_structure(lambda _: [True], 0) + + // with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"): + // nest.get_traverse_shallow_structure(lambda _: 1, [1]) + + // with self.assertRaisesRegexp( + // TypeError, "didn't return a depth=1 structure of bools"): + // nest.get_traverse_shallow_structure(lambda _: [1], [1]) + + // def testYieldFlatStringPaths(self): + // for inputs_expected in ({"inputs": [], "expected": []}, + // {"inputs": 3, "expected": [()]}, + // {"inputs": [3], "expected": [(0,)]}, + // {"inputs": {"a": 3}, "expected": [("a",)]}, + // 
{"inputs": {"a": {"b": 4}}, + // "expected": [("a", "b")]}, + // {"inputs": [{"a": 2}], "expected": [(0, "a")]}, + // {"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]}, + // {"inputs": [{"a": [(23, 42)]}], + // "expected": [(0, "a", 0, 0), (0, "a", 0, 1)]}, + // {"inputs": [{"a": ([23], 42)}], + // "expected": [(0, "a", 0, 0), (0, "a", 1)]}, + // {"inputs": {"a": {"a": 2}, "c": [[[4]]]}, + // "expected": [("a", "a"), ("c", 0, 0, 0)]}, + // {"inputs": {"0": [{"1": 23}]}, + // "expected": [("0", 0, "1")]}): + // inputs = inputs_expected["inputs"] + // expected = inputs_expected["expected"] + // self.assertEqual(list(nest.yield_flat_paths(inputs)), expected) + + // def testFlattenWithStringPaths(self): + // for inputs_expected in ( + // {"inputs": [], "expected": []}, + // {"inputs": [23, "42"], "expected": [("0", 23), ("1", "42")]}, + // {"inputs": [[[[108]]]], "expected": [("0/0/0/0", 108)]}): + // inputs = inputs_expected["inputs"] + // expected = inputs_expected["expected"] + // self.assertEqual( + // nest.flatten_with_joined_string_paths(inputs, separator="/"), + // expected) + + // # Need a separate test for namedtuple as we can't declare tuple definitions + // # in the @parameterized arguments. + // def testFlattenNamedTuple(self): + // # pylint: disable=invalid-name + // Foo = collections.namedtuple("Foo", ["a", "b"]) + // Bar = collections.namedtuple("Bar", ["c", "d"]) + // # pylint: enable=invalid-name + // test_cases = [ + // (Foo(a = 3, b = Bar(c = 23, d = 42)), + // [("a", 3), ("b/c", 23), ("b/d", 42)]), + // (Foo(a = Bar(c = 23, d = 42), b = Bar(c = 0, d = "something")), + // [("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "something")]), + // (Bar(c = 42, d = 43), + // [("c", 42), ("d", 43)]), + // (Bar(c =[42], d = 43), + // [("c/0", 42), ("d", 43)]), + // ] + // for inputs, expected in test_cases: + // self.assertEqual( + // list(nest.flatten_with_joined_string_paths(inputs)), expected) + + // @parameterized.named_parameters( + // ("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))), + // ("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True, + // {"a": ("a", 4), "b": ("b", 6)}), + // ("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))), + // ("nested", + // {"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True, + // {"a": [("a/0", 10), ("a/1", 12)], + // "b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]})) + // def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected): + // def format_sum(path, * values): + // return (path, sum(values)) + // result = nest.map_structure_with_paths(format_sum, s1, s2, + // check_types=check_types) + // self.assertEqual(expected, result) + + // @parameterized.named_parameters( + // ("tuples", (1, 2), (3, 4, 5), ValueError), + // ("dicts", {"a": 1}, {"b": 2}, ValueError), + // ("mixed", (1, 2), [3, 4], TypeError), + // ("nested", + // {"a": [2, 3], "b": [1, 3]}, + // {"b": [5, 6, 7], "a": [8, 9]}, + // ValueError + // )) + // def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type): + // with self.assertRaises(error_type): + // nest.map_structure_with_paths(lambda path, * s: 0, s1, s2) + + + //class NestBenchmark(test.Benchmark): + + // def run_and_report(self, s1, s2, name): + // burn_iter, test_iter = 100, 30000 + + // for _ in xrange(burn_iter) : + // nest.assert_same_structure(s1, s2) + + // t0 = time.time() + // for _ in xrange(test_iter) : + // nest.assert_same_structure(s1, s2) + // t1 = time.time() + + // self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter, + // name=name) + + // 
def benchmark_assert_structure(self): + // s1 = (((1, 2), 3), 4, (5, 6)) + // s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) + // self.run_and_report(s1, s2, "assert_same_structure_6_elem") + + // s1 = (((1, 2), 3), 4, (5, 6)) * 10 + // s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10 + // self.run_and_report(s1, s2, "assert_same_structure_60_elem") + + + //if __name__ == "__main__": + // test.main() + } +} diff --git a/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs b/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs index 3a5515d9..8097070b 100644 --- a/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs +++ b/test/TensorFlowNET.UnitTest/nn_test/ZeroFractionTest.cs @@ -1,87 +1,87 @@ -using System; -using System.Linq; -using Microsoft.VisualStudio.TestTools.UnitTesting; -using NumSharp; -using Tensorflow; -using static Tensorflow.Binding; - -namespace TensorFlowNET.UnitTest.nn_test -{ - [TestClass] - public class ZeroFractionTest : PythonTest - { - - protected double _ZeroFraction(NDArray x) - { - assert(x.shape); - int total_elements = np.prod(x.shape); - - var eps = 1e-8; - var nonzeros = x.Data().Count(d=>Math.Abs(d)> eps); - return 1.0 - nonzeros / (double)total_elements; - } - - [Ignore("TODO implement nn_impl.zero_fraction")] - [TestMethod] - public void testZeroFraction() - { - var x_shape = new Shape(5, 17); - var x_np = np.random.randint(0, 2, x_shape); - //x_np.astype(np.float32); - var y_np = this._ZeroFraction(x_np); - - var x_tf = constant_op.constant(x_np); - x_tf.set_shape(x_shape); - var y_tf = nn_impl.zero_fraction(x_tf); - var y_tf_np = self.evaluate(y_tf); - - var eps = 1e-8; - self.assertAllClose(y_tf_np, y_np, eps); - } - - [Ignore("TODO implement nn_impl.zero_fraction")] - [TestMethod] - public void testZeroFractionEmpty() - { - - var x = np.zeros(0); - var y = self.evaluate(nn_impl.zero_fraction(new Tensor(x))); - self.assertTrue(np.isnan(y)); - } - - [Ignore("TODO implement nn_impl.zero_fraction")] - [TestMethod] - public void testZeroFraction2_27Zeros() - { - var sparsity = nn_impl.zero_fraction( - array_ops.zeros(new Shape((int) Math.Pow(2, 27 * 1.01)), dtypes.int8)); - self.assertAllClose(1.0, self.evaluate(sparsity)); - } - - [Ignore("TODO implement nn_impl.zero_fraction")] - [TestMethod] - public void testZeroFraction2_27Ones() - { - var sparsity = nn_impl.zero_fraction( - array_ops.ones(new TensorShape((int)Math.Pow(2, 27 * 1.01)), dtypes.int8)); - self.assertAllClose(0.0, self.evaluate(sparsity)); - } - - [Ignore("TODO implement nn_impl.zero_fraction")] - [TestMethod] - public void testUnknownSize() - { - var value = array_ops.placeholder(dtype: dtypes.float32); - var sparsity = nn_impl.zero_fraction(value); - using (var sess = self.cached_session()) - { - // TODO: make this compile - //self.assertAllClose( - // 0.25, - // sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})); - } - } - - - } -} +using System; +using System.Linq; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using NumSharp; +using Tensorflow; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest.nn_test +{ + [TestClass] + public class ZeroFractionTest : PythonTest + { + + protected double _ZeroFraction(NDArray x) + { + assert(x.shape); + int total_elements = np.prod(x.shape); + + var eps = 1e-8; + var nonzeros = x.Data().Count(d=>Math.Abs(d)> eps); + return 1.0 - nonzeros / (double)total_elements; + } + + [Ignore("TODO implement nn_impl.zero_fraction")] + [TestMethod] + public void testZeroFraction() + { + var x_shape = new 
Shape(5, 17); + var x_np = np.random.randint(0, 2, x_shape); + //x_np.astype(np.float32); + var y_np = this._ZeroFraction(x_np); + + var x_tf = constant_op.constant(x_np); + x_tf.set_shape(x_shape); + var y_tf = nn_impl.zero_fraction(x_tf); + var y_tf_np = self.evaluate(y_tf); + + var eps = 1e-8; + self.assertAllClose(y_tf_np, y_np, eps); + } + + [Ignore("TODO implement nn_impl.zero_fraction")] + [TestMethod] + public void testZeroFractionEmpty() + { + + var x = np.zeros(0); + var y = self.evaluate(nn_impl.zero_fraction(new Tensor(x))); + self.assertTrue(np.isnan(y)); + } + + [Ignore("TODO implement nn_impl.zero_fraction")] + [TestMethod] + public void testZeroFraction2_27Zeros() + { + var sparsity = nn_impl.zero_fraction( + array_ops.zeros(new Shape((int) Math.Pow(2, 27 * 1.01)), dtypes.int8)); + self.assertAllClose(1.0, self.evaluate(sparsity)); + } + + [Ignore("TODO implement nn_impl.zero_fraction")] + [TestMethod] + public void testZeroFraction2_27Ones() + { + var sparsity = nn_impl.zero_fraction( + array_ops.ones(new TensorShape((int)Math.Pow(2, 27 * 1.01)), dtypes.int8)); + self.assertAllClose(0.0, self.evaluate(sparsity)); + } + + [Ignore("TODO implement nn_impl.zero_fraction")] + [TestMethod] + public void testUnknownSize() + { + var value = array_ops.placeholder(dtype: dtypes.float32); + var sparsity = nn_impl.zero_fraction(value); + using (var sess = self.cached_session()) + { + // TODO: make this compile + //self.assertAllClose( + // 0.25, + // sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})); + } + } + + + } +} diff --git a/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs b/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs index f39a71b2..8c64a61b 100644 --- a/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs +++ b/test/TensorFlowNET.UnitTest/ops_test/ControlDependenciesTest.cs @@ -1,316 +1,316 @@ -using System; -using System.Linq; -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Tensorflow; -using Tensorflow.Eager; -using static Tensorflow.Binding; - -namespace TensorFlowNET.UnitTest.ops_test -{ - /// - /// excerpt of tensorflow/python/framework/ops_test.py - /// - [TestClass] - public class ControlDependenciesTest : PythonTest - { - [TestMethod] - public void TestBasic() - { - var g = tf.Graph().as_default(); - Tensor a = null, b = null, c = null, d = null, e = null; - - a = constant_op.constant(1.0); - b = constant_op.constant(1.0); - tf_with(g.control_dependencies(new[] { a }), x => - { - c = constant_op.constant(1.0); - d = array_ops.identity(b); - e = array_ops.identity(c); - }); - - Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op })); - Assert.IsTrue(Enumerable.SequenceEqual(d.op.control_inputs, new[] { a.op })); - // e should be dominated by c. - Assert.AreEqual(0, e.op.control_inputs.Length); - } - - [Ignore("Future is not supported yet")] - [TestMethod] - public void TestEager() - { - Tensor a = null, c = null; - object b = null; - var calls = 0; - Func future = () => - { - calls += 1; - return constant_op.constant(2.0); - }; - using (var opts = new ContextOptions()) - using (var status = new Status()) - using (var context = new Context(opts, status)) - { - if (context.executing_eagerly()) - { - // TODO: make this compile (see original Python code below) - a = constant_op.constant(1.0); - b = future; // <--- {henon} obviously, this doesn't compile, looks like control_dependencies needs to be able to take callables as well. 
- tf_with(ops.control_dependencies(new object[] { a, b }), ctrl => - { - return c = constant_op.constant(3.0); - }); - Assert.AreEqual(calls, 1); - } - else - { - var g = tf.Graph().as_default(); - a = constant_op.constant(1.0); - var b1 = future(); - tf_with(g.control_dependencies(new[] { a, b }), ctrl => - { - c = constant_op.constant(3.0); - }); - Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); - Assert.AreEqual(1, calls); - } - } - /* - def testEager(self): - def future(): - future.calls += 1 - return constant_op.constant(2.0) - future.calls = 0 - - if context.executing_eagerly(): - a = constant_op.constant(1.0) - b = future - with ops.control_dependencies([a, b]): - c = constant_op.constant(3.0) - self.assertEqual(future.calls, 1) - else: - g = ops.Graph() - with g.as_default(): - a = constant_op.constant(1.0) - b = future() - with g.control_dependencies([a, b]): - c = constant_op.constant(3.0) - self.assertEqual(c.op.control_inputs, [a.op, b.op]) - self.assertEqual(future.calls, 1) - */ - } - - - [Ignore("How to port the ConvertibleObj?")] - [TestMethod] - public void TestBasicWithConversion() - { - var g = tf.Graph().as_default(); - // Note: _apply_op can be replaced by g.create_op - var a = g.create_op("FloatOutput", new Tensor[] { }, new[] { TF_DataType.TF_FLOAT }); - // TODO: ConvertibleObj, see original source below - /* - def testBasicWithConversion(self): - g = ops.Graph() - a = _apply_op(g, "FloatOutput", [], [dtypes.float32]) - - class ConvertibleObj(object): - - def _as_graph_element(self): - return a - - with g.control_dependencies([ConvertibleObj()]): - c = _apply_op(g, "FloatOutput", [], [dtypes.float32]) - - self.assertEqual(c.op.control_inputs, [a.op]) - */ - } - - [TestMethod] - public void TestNested() - { - var g = tf.Graph().as_default(); - var a_1 = constant_op.constant(1.0); - var a_2 = constant_op.constant(3.0); - var a_3 = constant_op.constant(4.0); - var a_4 = constant_op.constant(5.0); - Tensor b_1 = null, b_2 = null; - tf_with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => - { - b_1 = constant_op.constant(6.0); - }); - tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => - { - tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => - { - tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => - { - tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => - { - b_2 = constant_op.constant(7.0); - }); - }); - }); - }); - //var z=tf.add(a_1, tf.multiply(b_2, b_1)); - //with(g.control_dependencies(new[] {z}), ctrl => - //{ - // var z1 = tf.add(a_3, tf.multiply(a_4, a_2)); - //}); - //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); - assertItemsEqual(b_1.op.control_inputs, new[] { a_1.op, a_2.op, a_3.op, a_4.op }); - assertItemsEqual(b_2.op.control_inputs, b_1.op.control_inputs); - } - - [TestMethod] - public void TestClear() - { - var g = tf.Graph().as_default(); - var a_1 = constant_op.constant(1.0); - var a_2 = constant_op.constant(3.0); - var a_3 = constant_op.constant(4.0); - var a_4 = constant_op.constant(5.0); - Operation b_3_4 = null, b_3 = null, b_none = null, b_1 = null, b_1_2 = null, b_none2 = null; - tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => - { - tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => - { - tf_with(g.control_dependencies(null), ctrl3 => - { - tf_with(g.control_dependencies(new[] { a_3 }), ctrl4 => - { - tf_with(g.control_dependencies(new[] { a_4 }), ctrl5 => - { - // deps [a_3, a_4] - b_3_4 = constant_op.constant(7.0); - }); - // 
deps = [a_3] - b_3 = constant_op.constant(8.0); - }); - // deps back to None - b_none = constant_op.constant(9.0); - }); - // deps back to [a_1, a_2] - b_1_2 = constant_op.constant(10.0); - }); - // deps back to [a_1] - b_1 = constant_op.constant(11.0); - tf_with(g.control_dependencies(null), ctrl6 => - { - // deps are None again - b_none2 = constant_op.constant(12.0); - }); - }); - // Note assertItemsEqual(given, expected), expected and given parameters should be swapped below - assertItemsEqual(new[] { a_3.op, a_4.op }, b_3_4.op.control_inputs); - assertItemsEqual(new[] { a_3.op }, b_3.op.control_inputs); - assertItemsEqual(new object[0], b_none.op.control_inputs); - assertItemsEqual(new[] { a_1.op, a_2.op }, b_1_2.op.control_inputs); - assertItemsEqual(new[] { a_1.op }, b_1.op.control_inputs); - assertItemsEqual(new object[0], b_none2.op.control_inputs); - } - - [TestMethod] - public void TestComplex() - { - var g = tf.Graph().as_default(); - // Usage pattern: - // * Nodes a_i are constants defined at the outermost scope, and are used - // as control inputs for the ith nested scope. - // * Nodes b_i are defined as Mul(a_3, a_4) at each scope. - // * Nodes c_i are defined as Mul(a_1, b_1) at each scope. - // * Nodes d_i are defined as Mul(b_i, c_i) at each scope. - // * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1. - var a_1 = constant_op.constant(1.0); - var a_2 = constant_op.constant(2.0); - var a_3 = constant_op.constant(3.0); - var a_4 = constant_op.constant(4.0); - Operation b_1 = null, b_2 = null, b_3 = null, b_4 = null; - Operation c_1 = null, c_2 = null, c_3 = null, c_4 = null; - Operation d_1 = null, d_2 = null, d_3 = null, d_4 = null; - Operation e_1 = null, e_2 = null, e_3 = null, e_4 = null; - tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => - { - b_1 = tf.multiply(a_3, a_4); - c_1 = tf.multiply(a_1, b_1.output); - d_1 = tf.multiply(b_1.output, c_1.output); - e_1 = constant_op.constant(5.0); - tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => - { - b_2 = tf.multiply(a_3, a_4); - c_2 = tf.multiply(a_1, b_1.output); - d_2 = tf.multiply(b_2.output, c_2.output); - e_2 = tf.multiply(e_1.output, e_1.output); - tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => - { - b_3 = tf.multiply(a_3, a_4); - c_3 = tf.multiply(a_1, b_1.output); - d_3 = tf.multiply(b_3.output, c_3.output); - e_3 = tf.multiply(e_2.output, e_2.output); - tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => - { - b_4 = tf.multiply(a_3, a_4); - c_4 = tf.multiply(a_1, b_1.output); - d_4 = tf.multiply(b_4.output, c_4.output); - e_4 = tf.multiply(e_3.output, e_3.output); - }); - }); - }); - }); - - // Note assertItemsEqual(given, expected), expected and given parameters should be swapped below - assertItemsEqual(new[] {a_1.op}, b_1.op.control_inputs); - assertItemsEqual(new[] {a_1.op, a_2.op}, b_2.op.control_inputs); - assertItemsEqual(new[] { a_1.op, a_2.op}, b_3.op.control_inputs); - assertItemsEqual(new[] {a_1.op, a_2.op}, b_4.op.control_inputs); - - assertItemsEqual(new object[0], c_1.op.control_inputs); - assertItemsEqual(new[] {a_2.op}, c_2.op.control_inputs); - assertItemsEqual(new[] {a_2.op, a_3.op}, c_3.op.control_inputs); - assertItemsEqual(new[] {a_2.op, a_3.op, a_4.op}, c_4.op.control_inputs); - - assertItemsEqual(new object[0], d_1.op.control_inputs); - assertItemsEqual(new object[0], d_2.op.control_inputs); - assertItemsEqual(new object[0], d_3.op.control_inputs); - assertItemsEqual(new object[0], d_4.op.control_inputs); - - assertItemsEqual(new[] {a_1.op}, 
e_1.op.control_inputs); - assertItemsEqual(new[] {a_2.op}, e_2.op.control_inputs); - assertItemsEqual(new[] {a_3.op}, e_3.op.control_inputs); - assertItemsEqual(new[] {a_4.op}, e_4.op.control_inputs); - } - - [Ignore("Don't know how to create an operation with two outputs")] - [TestMethod] - public void TestRepeatedDependency() - { - /* - def testRepeatedDependency(self): - g = ops.Graph() - a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32]) - a_0, a_1 = a.outputs - with g.control_dependencies([a_0]): - b = _apply_op(g, "FloatOutput", [], [dtypes.float32]) - with g.control_dependencies([a_1]): - c = _apply_op(g, "FloatOutput", [], [dtypes.float32]) - - self.assertEqual(b.op.control_inputs, [a]) - self.assertEqual(c.op.control_inputs, [a]) - - */ - } - - [TestMethod] - public void TestNoControlDependencyWithDataDependency() - { - var g = tf.Graph().as_default(); - Operation b = null; - var a = constant_op.constant(100.0); - tf_with(g.control_dependencies(new[] { a }), ctrl1 => - { - b = array_ops.identity(a); - }); - Assert.AreEqual(0, b.op.control_inputs.Length); - } - - } -} +using System; +using System.Linq; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow; +using Tensorflow.Eager; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest.ops_test +{ + /// + /// excerpt of tensorflow/python/framework/ops_test.py + /// + [TestClass] + public class ControlDependenciesTest : PythonTest + { + [TestMethod] + public void TestBasic() + { + var g = tf.Graph().as_default(); + Tensor a = null, b = null, c = null, d = null, e = null; + + a = constant_op.constant(1.0); + b = constant_op.constant(1.0); + tf_with(g.control_dependencies(new[] { a }), x => + { + c = constant_op.constant(1.0); + d = array_ops.identity(b); + e = array_ops.identity(c); + }); + + Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op })); + Assert.IsTrue(Enumerable.SequenceEqual(d.op.control_inputs, new[] { a.op })); + // e should be dominated by c. + Assert.AreEqual(0, e.op.control_inputs.Length); + } + + [Ignore("Future is not supported yet")] + [TestMethod] + public void TestEager() + { + Tensor a = null, c = null; + object b = null; + var calls = 0; + Func future = () => + { + calls += 1; + return constant_op.constant(2.0); + }; + using (var opts = new ContextOptions()) + using (var status = new Status()) + using (var context = new Context(opts, status)) + { + if (context.executing_eagerly()) + { + // TODO: make this compile (see original Python code below) + a = constant_op.constant(1.0); + b = future; // <--- {henon} obviously, this doesn't compile, looks like control_dependencies needs to be able to take callables as well. 
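 + // A hypothetical sketch of such an overload (not the current TF.NET API):
 + // accept object[] entries, invoke any Func<Tensor> to materialize it before
 + // recording the dependency, then defer to the graph-level overload, e.g.
 + //
 + //   public static _ControlDependenciesController control_dependencies(object[] control_inputs)
 + //   {
 + //       var materialized = control_inputs
 + //           .Select(ci => ci is Func<Tensor> fn ? (object)fn() : ci)
 + //           .ToArray();
 + //       return get_default_graph().control_dependencies(materialized);
 + //   }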
+ tf_with(ops.control_dependencies(new object[] { a, b }), ctrl => + { + return c = constant_op.constant(3.0); + }); + Assert.AreEqual(calls, 1); + } + else + { + var g = tf.Graph().as_default(); + a = constant_op.constant(1.0); + var b1 = future(); + tf_with(g.control_dependencies(new[] { a, b }), ctrl => + { + c = constant_op.constant(3.0); + }); + Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); + Assert.AreEqual(1, calls); + } + } + /* + def testEager(self): + def future(): + future.calls += 1 + return constant_op.constant(2.0) + future.calls = 0 + + if context.executing_eagerly(): + a = constant_op.constant(1.0) + b = future + with ops.control_dependencies([a, b]): + c = constant_op.constant(3.0) + self.assertEqual(future.calls, 1) + else: + g = ops.Graph() + with g.as_default(): + a = constant_op.constant(1.0) + b = future() + with g.control_dependencies([a, b]): + c = constant_op.constant(3.0) + self.assertEqual(c.op.control_inputs, [a.op, b.op]) + self.assertEqual(future.calls, 1) + */ + } + + + [Ignore("How to port the ConvertibleObj?")] + [TestMethod] + public void TestBasicWithConversion() + { + var g = tf.Graph().as_default(); + // Note: _apply_op can be replaced by g.create_op + var a = g.create_op("FloatOutput", new Tensor[] { }, new[] { TF_DataType.TF_FLOAT }); + // TODO: ConvertibleObj, see original source below + /* + def testBasicWithConversion(self): + g = ops.Graph() + a = _apply_op(g, "FloatOutput", [], [dtypes.float32]) + + class ConvertibleObj(object): + + def _as_graph_element(self): + return a + + with g.control_dependencies([ConvertibleObj()]): + c = _apply_op(g, "FloatOutput", [], [dtypes.float32]) + + self.assertEqual(c.op.control_inputs, [a.op]) + */ + } + + [TestMethod] + public void TestNested() + { + var g = tf.Graph().as_default(); + var a_1 = constant_op.constant(1.0); + var a_2 = constant_op.constant(3.0); + var a_3 = constant_op.constant(4.0); + var a_4 = constant_op.constant(5.0); + Tensor b_1 = null, b_2 = null; + tf_with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => + { + b_1 = constant_op.constant(6.0); + }); + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => + { + tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => + { + tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => + { + tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => + { + b_2 = constant_op.constant(7.0); + }); + }); + }); + }); + //var z=tf.add(a_1, tf.multiply(b_2, b_1)); + //with(g.control_dependencies(new[] {z}), ctrl => + //{ + // var z1 = tf.add(a_3, tf.multiply(a_4, a_2)); + //}); + //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); + assertItemsEqual(b_1.op.control_inputs, new[] { a_1.op, a_2.op, a_3.op, a_4.op }); + assertItemsEqual(b_2.op.control_inputs, b_1.op.control_inputs); + } + + [TestMethod] + public void TestClear() + { + var g = tf.Graph().as_default(); + var a_1 = constant_op.constant(1.0); + var a_2 = constant_op.constant(3.0); + var a_3 = constant_op.constant(4.0); + var a_4 = constant_op.constant(5.0); + Operation b_3_4 = null, b_3 = null, b_none = null, b_1 = null, b_1_2 = null, b_none2 = null; + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => + { + tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => + { + tf_with(g.control_dependencies(null), ctrl3 => + { + tf_with(g.control_dependencies(new[] { a_3 }), ctrl4 => + { + tf_with(g.control_dependencies(new[] { a_4 }), ctrl5 => + { + // deps [a_3, a_4] + b_3_4 = constant_op.constant(7.0); + }); + // 
deps = [a_3] + b_3 = constant_op.constant(8.0); + }); + // deps back to None + b_none = constant_op.constant(9.0); + }); + // deps back to [a_1, a_2] + b_1_2 = constant_op.constant(10.0); + }); + // deps back to [a_1] + b_1 = constant_op.constant(11.0); + tf_with(g.control_dependencies(null), ctrl6 => + { + // deps are None again + b_none2 = constant_op.constant(12.0); + }); + }); + // Note assertItemsEqual(given, expected), expected and given parameters should be swapped below + assertItemsEqual(new[] { a_3.op, a_4.op }, b_3_4.op.control_inputs); + assertItemsEqual(new[] { a_3.op }, b_3.op.control_inputs); + assertItemsEqual(new object[0], b_none.op.control_inputs); + assertItemsEqual(new[] { a_1.op, a_2.op }, b_1_2.op.control_inputs); + assertItemsEqual(new[] { a_1.op }, b_1.op.control_inputs); + assertItemsEqual(new object[0], b_none2.op.control_inputs); + } + + [TestMethod] + public void TestComplex() + { + var g = tf.Graph().as_default(); + // Usage pattern: + // * Nodes a_i are constants defined at the outermost scope, and are used + // as control inputs for the ith nested scope. + // * Nodes b_i are defined as Mul(a_3, a_4) at each scope. + // * Nodes c_i are defined as Mul(a_1, b_1) at each scope. + // * Nodes d_i are defined as Mul(b_i, c_i) at each scope. + // * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1. + var a_1 = constant_op.constant(1.0); + var a_2 = constant_op.constant(2.0); + var a_3 = constant_op.constant(3.0); + var a_4 = constant_op.constant(4.0); + Operation b_1 = null, b_2 = null, b_3 = null, b_4 = null; + Operation c_1 = null, c_2 = null, c_3 = null, c_4 = null; + Operation d_1 = null, d_2 = null, d_3 = null, d_4 = null; + Operation e_1 = null, e_2 = null, e_3 = null, e_4 = null; + tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => + { + b_1 = tf.multiply(a_3, a_4); + c_1 = tf.multiply(a_1, b_1.output); + d_1 = tf.multiply(b_1.output, c_1.output); + e_1 = constant_op.constant(5.0); + tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => + { + b_2 = tf.multiply(a_3, a_4); + c_2 = tf.multiply(a_1, b_1.output); + d_2 = tf.multiply(b_2.output, c_2.output); + e_2 = tf.multiply(e_1.output, e_1.output); + tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => + { + b_3 = tf.multiply(a_3, a_4); + c_3 = tf.multiply(a_1, b_1.output); + d_3 = tf.multiply(b_3.output, c_3.output); + e_3 = tf.multiply(e_2.output, e_2.output); + tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => + { + b_4 = tf.multiply(a_3, a_4); + c_4 = tf.multiply(a_1, b_1.output); + d_4 = tf.multiply(b_4.output, c_4.output); + e_4 = tf.multiply(e_3.output, e_3.output); + }); + }); + }); + }); + + // Note assertItemsEqual(given, expected), expected and given parameters should be swapped below + assertItemsEqual(new[] {a_1.op}, b_1.op.control_inputs); + assertItemsEqual(new[] {a_1.op, a_2.op}, b_2.op.control_inputs); + assertItemsEqual(new[] { a_1.op, a_2.op}, b_3.op.control_inputs); + assertItemsEqual(new[] {a_1.op, a_2.op}, b_4.op.control_inputs); + + assertItemsEqual(new object[0], c_1.op.control_inputs); + assertItemsEqual(new[] {a_2.op}, c_2.op.control_inputs); + assertItemsEqual(new[] {a_2.op, a_3.op}, c_3.op.control_inputs); + assertItemsEqual(new[] {a_2.op, a_3.op, a_4.op}, c_4.op.control_inputs); + + assertItemsEqual(new object[0], d_1.op.control_inputs); + assertItemsEqual(new object[0], d_2.op.control_inputs); + assertItemsEqual(new object[0], d_3.op.control_inputs); + assertItemsEqual(new object[0], d_4.op.control_inputs); + + assertItemsEqual(new[] {a_1.op}, 
e_1.op.control_inputs); + assertItemsEqual(new[] {a_2.op}, e_2.op.control_inputs); + assertItemsEqual(new[] {a_3.op}, e_3.op.control_inputs); + assertItemsEqual(new[] {a_4.op}, e_4.op.control_inputs); + } + + [Ignore("Don't know how to create an operation with two outputs")] + [TestMethod] + public void TestRepeatedDependency() + { + /* + def testRepeatedDependency(self): + g = ops.Graph() + a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32]) + a_0, a_1 = a.outputs + with g.control_dependencies([a_0]): + b = _apply_op(g, "FloatOutput", [], [dtypes.float32]) + with g.control_dependencies([a_1]): + c = _apply_op(g, "FloatOutput", [], [dtypes.float32]) + + self.assertEqual(b.op.control_inputs, [a]) + self.assertEqual(c.op.control_inputs, [a]) + + */ + } + + [TestMethod] + public void TestNoControlDependencyWithDataDependency() + { + var g = tf.Graph().as_default(); + Operation b = null; + var a = constant_op.constant(100.0); + tf_with(g.control_dependencies(new[] { a }), ctrl1 => + { + b = array_ops.identity(a); + }); + Assert.AreEqual(0, b.op.control_inputs.Length); + } + + } +} diff --git a/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs b/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs index 08c8da2a..fddf5aa9 100644 --- a/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs +++ b/test/TensorFlowNET.UnitTest/ops_test/CreateOpFromTfOperationTest.cs @@ -1,220 +1,220 @@ -using System; -using System.Linq; -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Tensorflow; -using Tensorflow.Operations; -using static Tensorflow.Binding; - -namespace TensorFlowNET.UnitTest.ops_test -{ - /// - /// excerpt of tensorflow/python/framework/ops_test.py - /// # These cases test the private Graph._create_op_from_tf_operation - /// # method. Arguably we should only test the public APIs that depend on this - /// # method. However, this logic is complex and tricky, and it can be difficult to - /// # ascertain if we have adequate coverage (e.g. a graph may run successfully if - /// # the control flow context isn't set properly, but a more complicated use case - /// # that might not be obvious to test will fail). Thus we instead explicitly test - /// # the low-level behavior. - /// - [TestClass] - public class CreateOpFromTfOperationTest : PythonTest - { - - [TestMethod] - public void TestShape() - { - using (var g = tf.Graph().as_default()) - { - var x = constant_op.constant(new[,] {{1, 2, 3}, {4, 5, 6}}); - var c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] {x}, new Operation[0]); - var op = g._create_op_from_tf_operation(c_op); - - Assert.AreEqual("myop", op.name); - Assert.AreEqual("Identity", op.type); - Assert.AreEqual(1, len(op.outputs)); - assertItemsEqual(new[] {2, 3}, op.outputs[0].shape); - } - } - - [TestMethod] - public void TestUniqueName() - { - var graph = tf.Graph().as_default(); - //var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]); - //var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new Tensor[0], new Operation[0]); - //var op = g._create_op_from_tf_operation(c_op); - //var op2 = g._create_op_from_tf_operation(c_op2); - var op = constant_op.constant(0, name: "myop").op; - var op2 = constant_op.constant(0, name: "myop_1").op; - - // Create ops with same names as op1 and op2. We expect the new names to be - // uniquified. 
- var op3 = constant_op.constant(0, name: "myop").op; - var op4 = constant_op.constant(0, name: "myop_1").op; - - self.assertEqual(op.name, "myop"); - self.assertEqual(op2.name, "myop_1"); - self.assertEqual(op3.name, "myop_2"); - self.assertEqual(op4.name, "myop_1_1"); - } - - [Ignore("need tesnroflow expose UpdateEdge API")] - [TestMethod] - public void TestCond() - { - var g = tf.Graph().as_default(); - var x = constant_op.constant(10); - - var true_fn = new Func(() => - { - var c_op = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]); - var new_ops = g._add_new_tf_operations(); - self.assertEqual(len(new_ops), 1); - return x; - }); - - control_flow_ops.cond(x < 10, true_fn, () => x); - - var op = g.get_operation_by_name("cond/myop"); - - //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true); - //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); - - self.assertIsNotNone(op); - self.assertEqual(op.name, "cond/myop"); - self.assertEqual(op.type, "Identity"); - //self.assertEqual(op.outputs, new object[0]); - var op_input = op.inputs[0].op; - self.assertEqual(op_input.type, "Switch"); - self.assertEqual(op_input.inputs[0].name, x.name); - self.assertEqual(op.graph, g); - self.assertIsNotNone(op._get_control_flow_context()); - var cond_text = op._get_control_flow_context() as ControlFlowContext; - self.assertEqual(cond_text.name, "cond/cond_text"); - } - - [Ignore("Todo: Port")] - [TestMethod] - public void TestWhileLoop() - { - var graph = tf.Graph().as_default(); - Operation x=null; - x = constant_op.constant(42); - var body = new Func(i => - { - ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x}, - new Operation[0]); - var new_ops = graph._add_new_tf_operations(); - self.assertEqual(len(new_ops), 1); - return i; - }); - // TODO: port control_flow_ops.while_loop - //control_flow_ops.while_loop( i => i < 10, body, new int[]{0}, name = "myloop"); - var op = graph.get_operation_by_name("myloop/myop"); - self.assertIsNotNone(op); - self.assertEqual(op.name, "myloop/myop"); - self.assertEqual(op.type, "Identity"); - self.assertEqual(op.outputs.Length, 0); - var op_input = op.inputs[0].op; - self.assertEqual(op_input.type, "Enter"); - self.assertItemsEqual(op_input.inputs.OfType().ToArray(), new[] {x}); - self.assertEqual(op.graph, graph); - self.assertIsNotNone(op._get_control_flow_context()); - self.assertEqual(((ControlFlowContext)op._get_control_flow_context()).name, "myloop/while_context"); - /* - @test_util.run_v1_only("b/120545219") - def testWhileLoop(self): - g = ops.Graph() - with g.as_default(): - x = test_ops.int_output() - - def body(i): - ops._create_c_op(ops.get_default_graph(), - ops._NodeDef("IntInput", "myloop/myop"), [x], []) - new_ops = g._add_new_tf_operations() - self.assertEqual(len(new_ops), 1) - return i - - control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") - - op = g.get_operation_by_name("myloop/myop") - self.assertIsNotNone(op) - self.assertEqual(op.name, "myloop/myop") - self.assertEqual(op.type, "IntInput") - self.assertEqual(op.outputs, []) - op_input = op.inputs[0].op - self.assertEqual(op_input.type, "Enter") - self.assertEqual(list(op_input.inputs), [x]) - self.assertEqual(op.graph, g) - # pylint: disable=protected-access - self.assertIsNotNone(op._get_control_flow_context()) - self.assertEqual(op._get_control_flow_context().name, - "myloop/while_context") - # pylint: enable=protected-access 
- */ - } - - [Ignore("Todo: Port")] - [TestMethod] - public void TestWhileLoopWithInternalControlDep() - { - /* -@test_util.run_v1_only("b/120545219") - def testWhileLoopWithInternalControlDep(self): - g = ops.Graph() - with g.as_default(): - x = test_ops.int_output() - - def body(i): - c = constant_op.constant(1.0, name="c") - ops._create_c_op(ops.get_default_graph(), - ops._NodeDef("IntInput", "myloop/myop"), [x], []) - with ops.control_dependencies([c]): - new_ops = g._add_new_tf_operations() - self.assertEqual(len(new_ops), 1) - return i - - control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") - - op = g.get_operation_by_name("myloop/myop") - self.assertIsNotNone(op) - c = g.get_operation_by_name("myloop/c") - self.assertIsNotNone(c) - # Internal control dep is preserved - self.assertEqual(op.control_inputs, [c]) - */ - } - - [Ignore("Todo: Port")] - [TestMethod] - public void TestWhileLoopWithExternalControlDep() - { - /* - @test_util.run_v1_only("b/120545219") - def testWhileLoopWithExternalControlDep(self): - g = ops.Graph() - with g.as_default(): - x = test_ops.int_output() - c = constant_op.constant(1.0) - - def body(i): - ops._create_c_op(ops.get_default_graph(), - ops._NodeDef("IntInput", "myloop/myop"), [x], []) - with ops.control_dependencies([c]): - new_ops = g._add_new_tf_operations() - self.assertEqual(len(new_ops), 1) - return i - - control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") - - op = g.get_operation_by_name("myloop/myop") - self.assertIsNotNone(op) - # External control dep is removed and replaced with internal control dep - self.assertNotEqual(op.control_inputs[0], c.op) - self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context()) - */ - } - - } -} +using System; +using System.Linq; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow; +using Tensorflow.Operations; +using static Tensorflow.Binding; + +namespace TensorFlowNET.UnitTest.ops_test +{ + /// + /// excerpt of tensorflow/python/framework/ops_test.py + /// # These cases test the private Graph._create_op_from_tf_operation + /// # method. Arguably we should only test the public APIs that depend on this + /// # method. However, this logic is complex and tricky, and it can be difficult to + /// # ascertain if we have adequate coverage (e.g. a graph may run successfully if + /// # the control flow context isn't set properly, but a more complicated use case + /// # that might not be obvious to test will fail). Thus we instead explicitly test + /// # the low-level behavior. 
 + ///
 + [TestClass]
 + public class CreateOpFromTfOperationTest : PythonTest
 + {
 +
 + [TestMethod]
 + public void TestShape()
 + {
 + using (var g = tf.Graph().as_default())
 + {
 + var x = constant_op.constant(new[,] {{1, 2, 3}, {4, 5, 6}});
 + var c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] {x}, new Operation[0]);
 + var op = g._create_op_from_tf_operation(c_op);
 +
 + Assert.AreEqual("myop", op.name);
 + Assert.AreEqual("Identity", op.type);
 + Assert.AreEqual(1, len(op.outputs));
 + assertItemsEqual(new[] {2, 3}, op.outputs[0].shape);
 + }
 + }
 +
 + [TestMethod]
 + public void TestUniqueName()
 + {
 + var graph = tf.Graph().as_default();
 + //var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]);
 + //var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new Tensor[0], new Operation[0]);
 + //var op = g._create_op_from_tf_operation(c_op);
 + //var op2 = g._create_op_from_tf_operation(c_op2);
 + var op = constant_op.constant(0, name: "myop").op;
 + var op2 = constant_op.constant(0, name: "myop_1").op;
 +
 + // Create ops with same names as op1 and op2. We expect the new names to be
 + // uniquified.
 + var op3 = constant_op.constant(0, name: "myop").op;
 + var op4 = constant_op.constant(0, name: "myop_1").op;
 +
 + self.assertEqual(op.name, "myop");
 + self.assertEqual(op2.name, "myop_1");
 + self.assertEqual(op3.name, "myop_2");
 + self.assertEqual(op4.name, "myop_1_1");
 + }
 +
 + [Ignore("need tensorflow to expose UpdateEdge API")]
 + [TestMethod]
 + public void TestCond()
 + {
 + var g = tf.Graph().as_default();
 + var x = constant_op.constant(10);
 +
 + var true_fn = new Func<Tensor>(() =>
 + {
 + var c_op = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]);
 + var new_ops = g._add_new_tf_operations();
 + self.assertEqual(len(new_ops), 1);
 + return x;
 + });
 +
 + control_flow_ops.cond(x < 10, true_fn, () => x);
 +
 + var op = g.get_operation_by_name("cond/myop");
 +
 + //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true);
 + //tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false);
 +
 + self.assertIsNotNone(op);
 + self.assertEqual(op.name, "cond/myop");
 + self.assertEqual(op.type, "Identity");
 + //self.assertEqual(op.outputs, new object[0]);
 + var op_input = op.inputs[0].op;
 + self.assertEqual(op_input.type, "Switch");
 + self.assertEqual(op_input.inputs[0].name, x.name);
 + self.assertEqual(op.graph, g);
 + self.assertIsNotNone(op._get_control_flow_context());
 + var cond_text = op._get_control_flow_context() as ControlFlowContext;
 + self.assertEqual(cond_text.name, "cond/cond_text");
 + }
 +
 + [Ignore("Todo: Port")]
 + [TestMethod]
 + public void TestWhileLoop()
 + {
 + var graph = tf.Graph().as_default();
 + Operation x=null;
 + x = constant_op.constant(42);
 + var body = new Func<Operation, Operation>(i =>
 + {
 + ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x},
 + new Operation[0]);
 + var new_ops = graph._add_new_tf_operations();
 + self.assertEqual(len(new_ops), 1);
 + return i;
 + });
 + // TODO: port control_flow_ops.while_loop
 + //control_flow_ops.while_loop( i => i < 10, body, new int[]{0}, name = "myloop");
 + var op = graph.get_operation_by_name("myloop/myop");
 + self.assertIsNotNone(op);
 + self.assertEqual(op.name, "myloop/myop");
 + self.assertEqual(op.type, "Identity");
 + self.assertEqual(op.outputs.Length, 0);
 + var op_input = op.inputs[0].op;
 + self.assertEqual(op_input.type, "Enter");
self.assertItemsEqual(op_input.inputs.OfType().ToArray(), new[] {x}); + self.assertEqual(op.graph, graph); + self.assertIsNotNone(op._get_control_flow_context()); + self.assertEqual(((ControlFlowContext)op._get_control_flow_context()).name, "myloop/while_context"); + /* + @test_util.run_v1_only("b/120545219") + def testWhileLoop(self): + g = ops.Graph() + with g.as_default(): + x = test_ops.int_output() + + def body(i): + ops._create_c_op(ops.get_default_graph(), + ops._NodeDef("IntInput", "myloop/myop"), [x], []) + new_ops = g._add_new_tf_operations() + self.assertEqual(len(new_ops), 1) + return i + + control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") + + op = g.get_operation_by_name("myloop/myop") + self.assertIsNotNone(op) + self.assertEqual(op.name, "myloop/myop") + self.assertEqual(op.type, "IntInput") + self.assertEqual(op.outputs, []) + op_input = op.inputs[0].op + self.assertEqual(op_input.type, "Enter") + self.assertEqual(list(op_input.inputs), [x]) + self.assertEqual(op.graph, g) + # pylint: disable=protected-access + self.assertIsNotNone(op._get_control_flow_context()) + self.assertEqual(op._get_control_flow_context().name, + "myloop/while_context") + # pylint: enable=protected-access + */ + } + + [Ignore("Todo: Port")] + [TestMethod] + public void TestWhileLoopWithInternalControlDep() + { + /* +@test_util.run_v1_only("b/120545219") + def testWhileLoopWithInternalControlDep(self): + g = ops.Graph() + with g.as_default(): + x = test_ops.int_output() + + def body(i): + c = constant_op.constant(1.0, name="c") + ops._create_c_op(ops.get_default_graph(), + ops._NodeDef("IntInput", "myloop/myop"), [x], []) + with ops.control_dependencies([c]): + new_ops = g._add_new_tf_operations() + self.assertEqual(len(new_ops), 1) + return i + + control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") + + op = g.get_operation_by_name("myloop/myop") + self.assertIsNotNone(op) + c = g.get_operation_by_name("myloop/c") + self.assertIsNotNone(c) + # Internal control dep is preserved + self.assertEqual(op.control_inputs, [c]) + */ + } + + [Ignore("Todo: Port")] + [TestMethod] + public void TestWhileLoopWithExternalControlDep() + { + /* + @test_util.run_v1_only("b/120545219") + def testWhileLoopWithExternalControlDep(self): + g = ops.Graph() + with g.as_default(): + x = test_ops.int_output() + c = constant_op.constant(1.0) + + def body(i): + ops._create_c_op(ops.get_default_graph(), + ops._NodeDef("IntInput", "myloop/myop"), [x], []) + with ops.control_dependencies([c]): + new_ops = g._add_new_tf_operations() + self.assertEqual(len(new_ops), 1) + return i + + control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") + + op = g.get_operation_by_name("myloop/myop") + self.assertIsNotNone(op) + # External control dep is removed and replaced with internal control dep + self.assertNotEqual(op.control_inputs[0], c.op) + self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context()) + */ + } + + } +} diff --git a/test/TensorFlowNET.UnitTest/ops_test/GraphTest.cs b/test/TensorFlowNET.UnitTest/ops_test/GraphTest.cs index 87a34b8a..14566738 100644 --- a/test/TensorFlowNET.UnitTest/ops_test/GraphTest.cs +++ b/test/TensorFlowNET.UnitTest/ops_test/GraphTest.cs @@ -1,195 +1,195 @@ -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Tensorflow; - -namespace TensorFlowNET.UnitTest.ops_test -{ - /// - /// excerpt of tensorflow/python/framework/ops_test.py - /// - [TestClass] - public class GraphTest : PythonTest - { - - [TestInitialize] - public void 
SetUp() - { - ops.reset_default_graph(); - } - - [TestCleanup] - public void TearDown() - { - ops.reset_default_graph(); - } - - private void _AssertDefault(Graph expected) { - Assert.AreSame(ops.get_default_graph(), expected); - } - - - [Ignore("Todo: Port")] - [TestMethod] - public void testResetDefaultGraphNesting() - { -/* - def testResetDefaultGraphNesting(self): - g0 = ops.Graph() - with self.assertRaises(AssertionError): - with g0.as_default(): - ops.reset_default_graph() -*/ - } - - [Ignore("Todo: Port")] - [TestMethod] - public void testGraphContextManagerCancelsEager() - { - /* - def testGraphContextManagerCancelsEager(self): - with context.eager_mode(): - with ops.Graph().as_default(): - self.assertFalse(context.executing_eagerly()) - */ - } - - - [Ignore("Todo: Port")] - [TestMethod] - public void testGraphContextManager() - { - /* - def testGraphContextManager(self): - g0 = ops.Graph() - with g0.as_default() as g1: - self.assertIs(g0, g1) - */ - } - - [Ignore("Todo: Port")] - [TestMethod] - public void testDefaultGraph() - { - /* - def testDefaultGraph(self): - orig = ops.get_default_graph() - self._AssertDefault(orig) - g0 = ops.Graph() - self._AssertDefault(orig) - context_manager_0 = g0.as_default() - self._AssertDefault(orig) - with context_manager_0 as g0: - self._AssertDefault(g0) - with ops.Graph().as_default() as g1: - self._AssertDefault(g1) - self._AssertDefault(g0) - self._AssertDefault(orig) - */ - } - - [Ignore("Todo: Port")] - [TestMethod] - public void testPreventFeeding() - { - /* - def testPreventFeeding(self): - g = ops.Graph() - a = constant_op.constant(2.0) - self.assertTrue(g.is_feedable(a)) - g.prevent_feeding(a) - self.assertFalse(g.is_feedable(a)) - */ - } - - - [Ignore("Todo: Port")] - [TestMethod] - public void testAsGraphElementConversions() - { - /* - def testAsGraphElementConversions(self): - - class ConvertibleObj(object): - - def _as_graph_element(self): - return "FloatOutput:0" - - class NonConvertibleObj(object): - - pass - - g = ops.Graph() - a = _apply_op(g, "FloatOutput", [], [dtypes.float32]) - self.assertEqual(a, g.as_graph_element(ConvertibleObj())) - with self.assertRaises(TypeError): - g.as_graph_element(NonConvertibleObj()) - */ - } - - [Ignore("Todo: Port")] - [TestMethod] - public void testGarbageCollected() - { - /* - # Regression test against creating custom __del__ functions in classes - # involved in cyclic references, e.g. Graph and Operation. (Python won't gc - # cycles that require calling a __del__ method, because the __del__ method can - # theoretically increase the object's refcount to "save" it from gc, and any - # already-deleted objects in the cycle would have be to restored.) 
- def testGarbageCollected(self): - # Create a graph we can delete and a weak reference to monitor if it's gc'd - g = ops.Graph() - g_ref = weakref.ref(g) - # Create some ops - with g.as_default(): - a = constant_op.constant(2.0) - b = constant_op.constant(3.0) - c = math_ops.add(a, b) - # Create a session we can delete - with session.Session(graph=g) as sess: - self.evaluate(c) - # Delete all references and trigger gc - del g - del a - del b - del c - del sess - gc.collect() - self.assertIsNone(g_ref()) - */ - } - - [Ignore("Todo: Port")] - [TestMethod] - public void testRunnableAfterInvalidShape() - { - /* - def testRunnableAfterInvalidShape(self): - with ops.Graph().as_default(): - with self.assertRaises(ValueError): - math_ops.add([1, 2], [1, 2, 3]) - a = constant_op.constant(1) - with session.Session() as sess: - self.evaluate(a) - */ - } - - [Ignore("Todo: Port")] - [TestMethod] - public void testRunnableAfterInvalidShapeWithKernelLabelMap() - { - /* - def testRunnableAfterInvalidShapeWithKernelLabelMap(self): - g = ops.Graph() - with g.as_default(): - with g._kernel_label_map({"KernelLabelRequired": "overload_1"}): - with self.assertRaises(ValueError): - test_ops.kernel_label_required(1) - a = constant_op.constant(1) - with session.Session() as sess: - self.evaluate(a) - */ - } - - - } -} +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Tensorflow; + +namespace TensorFlowNET.UnitTest.ops_test +{ + /// + /// excerpt of tensorflow/python/framework/ops_test.py + /// + [TestClass] + public class GraphTest : PythonTest + { + + [TestInitialize] + public void SetUp() + { + ops.reset_default_graph(); + } + + [TestCleanup] + public void TearDown() + { + ops.reset_default_graph(); + } + + private void _AssertDefault(Graph expected) { + Assert.AreSame(ops.get_default_graph(), expected); + } + + + [Ignore("Todo: Port")] + [TestMethod] + public void testResetDefaultGraphNesting() + { +/* + def testResetDefaultGraphNesting(self): + g0 = ops.Graph() + with self.assertRaises(AssertionError): + with g0.as_default(): + ops.reset_default_graph() +*/ + } + + [Ignore("Todo: Port")] + [TestMethod] + public void testGraphContextManagerCancelsEager() + { + /* + def testGraphContextManagerCancelsEager(self): + with context.eager_mode(): + with ops.Graph().as_default(): + self.assertFalse(context.executing_eagerly()) + */ + } + + + [Ignore("Todo: Port")] + [TestMethod] + public void testGraphContextManager() + { + /* + def testGraphContextManager(self): + g0 = ops.Graph() + with g0.as_default() as g1: + self.assertIs(g0, g1) + */ + } + + [Ignore("Todo: Port")] + [TestMethod] + public void testDefaultGraph() + { + /* + def testDefaultGraph(self): + orig = ops.get_default_graph() + self._AssertDefault(orig) + g0 = ops.Graph() + self._AssertDefault(orig) + context_manager_0 = g0.as_default() + self._AssertDefault(orig) + with context_manager_0 as g0: + self._AssertDefault(g0) + with ops.Graph().as_default() as g1: + self._AssertDefault(g1) + self._AssertDefault(g0) + self._AssertDefault(orig) + */ + } + + [Ignore("Todo: Port")] + [TestMethod] + public void testPreventFeeding() + { + /* + def testPreventFeeding(self): + g = ops.Graph() + a = constant_op.constant(2.0) + self.assertTrue(g.is_feedable(a)) + g.prevent_feeding(a) + self.assertFalse(g.is_feedable(a)) + */ + } + + + [Ignore("Todo: Port")] + [TestMethod] + public void testAsGraphElementConversions() + { + /* + def testAsGraphElementConversions(self): + + class ConvertibleObj(object): + + def _as_graph_element(self): + return "FloatOutput:0" + 
+ class NonConvertibleObj(object): + + pass + + g = ops.Graph() + a = _apply_op(g, "FloatOutput", [], [dtypes.float32]) + self.assertEqual(a, g.as_graph_element(ConvertibleObj())) + with self.assertRaises(TypeError): + g.as_graph_element(NonConvertibleObj()) + */ + } + + [Ignore("Todo: Port")] + [TestMethod] + public void testGarbageCollected() + { + /* + # Regression test against creating custom __del__ functions in classes + # involved in cyclic references, e.g. Graph and Operation. (Python won't gc + # cycles that require calling a __del__ method, because the __del__ method can + # theoretically increase the object's refcount to "save" it from gc, and any + # already-deleted objects in the cycle would have be to restored.) + def testGarbageCollected(self): + # Create a graph we can delete and a weak reference to monitor if it's gc'd + g = ops.Graph() + g_ref = weakref.ref(g) + # Create some ops + with g.as_default(): + a = constant_op.constant(2.0) + b = constant_op.constant(3.0) + c = math_ops.add(a, b) + # Create a session we can delete + with session.Session(graph=g) as sess: + self.evaluate(c) + # Delete all references and trigger gc + del g + del a + del b + del c + del sess + gc.collect() + self.assertIsNone(g_ref()) + */ + } + + [Ignore("Todo: Port")] + [TestMethod] + public void testRunnableAfterInvalidShape() + { + /* + def testRunnableAfterInvalidShape(self): + with ops.Graph().as_default(): + with self.assertRaises(ValueError): + math_ops.add([1, 2], [1, 2, 3]) + a = constant_op.constant(1) + with session.Session() as sess: + self.evaluate(a) + */ + } + + [Ignore("Todo: Port")] + [TestMethod] + public void testRunnableAfterInvalidShapeWithKernelLabelMap() + { + /* + def testRunnableAfterInvalidShapeWithKernelLabelMap(self): + g = ops.Graph() + with g.as_default(): + with g._kernel_label_map({"KernelLabelRequired": "overload_1"}): + with self.assertRaises(ValueError): + test_ops.kernel_label_required(1) + a = constant_op.constant(1) + with session.Session() as sess: + self.evaluate(a) + */ + } + + + } +} From e8cab09f1638389a6e95004c7a93604686cd19cf Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Sat, 15 Feb 2020 12:46:49 -0600 Subject: [PATCH 04/11] global_step int64 #499 --- src/TensorFlowNET.Core/Training/Optimizer.cs | 4 +++- src/TensorFlowNET.Hub/Tensorflow.Hub.csproj | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/TensorFlowNET.Core/Training/Optimizer.cs b/src/TensorFlowNET.Core/Training/Optimizer.cs index 524a0e34..04ec949c 100644 --- a/src/TensorFlowNET.Core/Training/Optimizer.cs +++ b/src/TensorFlowNET.Core/Training/Optimizer.cs @@ -205,7 +205,9 @@ namespace Tensorflow //} //else { - apply_updates = state_ops.assign_add(global_step, tf.constant(1), name: name); + apply_updates = state_ops.assign_add(global_step, + ops.convert_to_tensor(1, dtype: global_step.dtype), + name: name); } }); } diff --git a/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj b/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj index 76965c1e..64ca76f6 100644 --- a/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj +++ b/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj @@ -2,7 +2,7 @@ Tensorflow.Hub netstandard2.0 - 0.0.6 + 0.1.0 Kerry Jiang, Haiping Chen SciSharp STACK Apache 2.0 From 11017d4f14eff9d3c969202524241ca074e9f1d5 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Wed, 19 Feb 2020 05:52:43 -0600 Subject: [PATCH 05/11] add SmartBroadcastGradientArgs --- src/TensorFlowNET.Core/Gradients/math_grad.cs | 67 +++++++++++++------ 1 file changed, 45 insertions(+), 22 deletions(-) diff --git 
a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index 1f24afb0..94e434cb 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -36,7 +36,7 @@ namespace Tensorflow.Gradients return new Tensor[] { gen_ops.mul(grad, gen_math_ops.sign(x)) }; } - [RegisterGradient("Add")] + [RegisterGradient("AddV2")] public static Tensor[] _AddGrad(Operation op, Tensor[] grads) { var x = op.inputs[0]; @@ -107,7 +107,9 @@ namespace Tensorflow.Gradients var y = op.outputs[0]; // y = e^x return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { y = math_ops.conj(y); - return new Tensor[] { math_ops.mul_no_nan(y, grad) }; + // forward_compatible(2019, 9, 14) + // return new Tensor[] { math_ops.mul_no_nan(y, grad) }; + return new Tensor[] { grad * y }; }); } @@ -167,8 +169,7 @@ namespace Tensorflow.Gradients new TF_DataType[] { tf.int32, tf.float32 }.Contains(grad.dtype)) return new Tensor[] { gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x) }; - var sx = array_ops.shape(x); - var sy = array_ops.shape(y); + var (sx, sy) = SmartBroadcastGradientArgs(x, y); var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); x = math_ops.conj(x); @@ -355,8 +356,8 @@ namespace Tensorflow.Gradients : gen_math_ops.less_equal(x, y); var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); var xgrad = array_ops.where(xmask, grad, zeros); - var ygrad = array_ops.where(xmask, zeros, grad); var gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx); + var ygrad = array_ops.where(xmask, zeros, grad); var gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy); return new Tensor[] { gx, gy }; } @@ -397,14 +398,13 @@ namespace Tensorflow.Gradients _ShapesFullySpecifiedAndEqual(x, y, grad)) return new Tensor[] { grad, -grad }; - var sx = array_ops.shape(x); - var sy = array_ops.shape(y); + var (sx, sy) = SmartBroadcastGradientArgs(x, y); var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); - var r1 = gen_array_ops.reshape(math_ops.reduce_sum(grad, rx), sx); - var r2 = gen_array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy); + var gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx); + var gy = array_ops.reshape(math_ops.reduce_sum(-grad, ry), sy); - return new Tensor[] { r1, r2 }; + return new Tensor[] { gx, gy }; } public static bool _ShapesFullySpecifiedAndEqual(Tensor x, Tensor y, Tensor grad) @@ -468,15 +468,16 @@ namespace Tensorflow.Gradients x = math_ops.conj(x); y = math_ops.conj(y); - var realdiv1 = gen_math_ops.real_div(-x, y); - var realdiv2 = gen_math_ops.real_div(realdiv1, y); - var reduce_sum1 = math_ops.reduce_sum(grad * realdiv2, ry); - var reshape1 = gen_array_ops.reshape(reduce_sum1, sy); - var realdiv3 = gen_math_ops.real_div(grad, y); - var reduce_sum2 = math_ops.reduce_sum(realdiv3, rx); - var reshape2 = gen_array_ops.reshape(reduce_sum2, sx); + var reshape1 = array_ops.reshape( + math_ops.reduce_sum( + math_ops.realdiv(grad, y), rx), + sx); + var reshape2 = array_ops.reshape( + math_ops.reduce_sum( + grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry), + sy); - return new Tensor[] { reshape2, reshape1 }; + return new Tensor[] { reshape1, reshape2 }; } [RegisterGradient("Sigmoid")] @@ -602,14 +603,12 @@ namespace Tensorflow.Gradients var y = op.inputs[1]; var z = op.outputs[0]; - var sx = array_ops.shape(x); - var sy = array_ops.shape(y); + var (sx, sy) = SmartBroadcastGradientArgs(x, y); var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); x = 
math_ops.conj(x); y = math_ops.conj(y); z = math_ops.conj(z); - var pow = gen_math_ops.pow(x, y - 1.0f); - var mul = grad * y * pow; + var mul = grad * y * math_ops.pow(x, y - 1.0f); var reduce_sum = math_ops.reduce_sum(mul, rx); var gx = gen_array_ops.reshape(reduce_sum, sx); @@ -630,5 +629,29 @@ namespace Tensorflow.Gradients return new Tensor[] { gx, gy }; } + + /// + /// Optimized version of `broadcast_gradient_args` that caches results. + /// + /// + /// + /// + private static (Tensor, Tensor) SmartBroadcastGradientArgs(Tensor x, Tensor y) + { + Tensor sx, sy; + if (x.TensorShape.is_fully_defined() && + y.TensorShape.is_fully_defined()) + { + sx = array_ops.shape(x); + sy = array_ops.shape(y); + } + else + { + sx = array_ops.shape_internal(x, optimize: false); + sy = array_ops.shape_internal(y, optimize: false); + } + + return (sx, sy); + } } } From 95c7db76f9ce0cd994d454c52c26443648683277 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Wed, 19 Feb 2020 05:53:43 -0600 Subject: [PATCH 06/11] fused_batch_norm_grad_v3 --- .../Operations/NnOps/gen_nn_ops.cs | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs index cee1ffd4..3761cdfe 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs @@ -187,6 +187,23 @@ namespace Tensorflow.Operations return op.outputs; } + public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params) + { + var op = _op_def_lib._apply_op_helper("FusedBatchNormGradV3", name: @params.Name, args: new + { + y_backprop = @params.YBackprop, + x = @params.X, + scale = @params.Scale, + reserve_space_1 = @params.ReserveSpace1, + reserve_space_2 = @params.ReserveSpace2, + reserve_space_3 = @params.ReserveSpace3, + epsilon = @params.Epsilon, + data_format = @params.DataFormat, + is_training = @params.IsTraining + }); + return op.outputs; + } + public static Tensor[] fused_batch_norm(Tensor x, Tensor scale, Tensor offset, @@ -212,6 +229,31 @@ namespace Tensorflow.Operations return _op.outputs; } + public static Tensor[] fused_batch_norm_v3(Tensor x, + Tensor scale, + Tensor offset, + Tensor mean, + Tensor variance, + float epsilon = 0.0001f, + string data_format = "NHWC", + bool is_training = true, + string name = null) + { + var _op = _op_def_lib._apply_op_helper("FusedBatchNormV3", name: name, args: new + { + x, + scale, + offset, + mean, + variance, + epsilon, + data_format, + is_training + }); + + return _op.outputs; + } + /// /// Local Response Normalization. 
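        /// Each element is divided by (bias + alpha * the sum of squared values inside a
        /// depth_radius window along the last, depth, dimension) raised to the power beta.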
/// From 8b5ef9c9a16cebeeb3ae66ad82478d77c12b6ca4 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Wed, 19 Feb 2020 05:55:11 -0600 Subject: [PATCH 07/11] AddV2 --- src/TensorFlowNET.Core/Gradients/nn_grad.cs | 16 +++++++++++++--- .../Operations/OpDefLibrary.cs | 4 ++++ .../Operations/Operation.Implicit.cs | 2 +- .../Operations/Operation.Input.cs | 3 +++ .../Operations/Operation.Output.cs | 3 +++ src/TensorFlowNET.Core/Operations/Operation.cs | 7 +++++++ src/TensorFlowNET.Core/Operations/array_ops.cs | 2 +- .../Operations/gen_math_ops.cs | 3 ++- src/TensorFlowNET.Core/Operations/math_ops.cs | 6 ++++++ src/TensorFlowNET.Core/Operations/nn_impl.py.cs | 2 +- src/TensorFlowNET.Core/TensorFlow.Binding.csproj | 3 ++- src/TensorFlowNET.Core/Tensors/Tensor.cs | 3 +++ src/TensorFlowNET.Core/ops.name_scope.cs | 4 ++-- .../Hub/MnistModelLoaderTest.cs | 2 +- 14 files changed, 49 insertions(+), 11 deletions(-) diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs index 967b3c21..e4502ad8 100644 --- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs @@ -170,6 +170,14 @@ namespace Tensorflow.Gradients public static Tensor[] _FusedBatchNormGrad(Operation op, Tensor[] grads) => _BaseFusedBatchNormGrad(op, 0, grads); + [RegisterGradient("FusedBatchNormV2")] + public static Tensor[] _FusedBatchNormV2Grad(Operation op, Tensor[] grads) + => _BaseFusedBatchNormGrad(op, 1, grads); + + [RegisterGradient("FusedBatchNormV3")] + public static Tensor[] _FusedBatchNormV3Grad(Operation op, Tensor[] grads) + => _BaseFusedBatchNormGrad(op, 2, grads); + /// /// Return the gradients for the 3 inputs of BatchNorm. /// @@ -190,8 +198,10 @@ namespace Tensorflow.Gradients switch (version) { case 2: - throw new NotImplementedException(""); + grad_fun = gen_nn_ops.fused_batch_norm_grad_v3; + break; case 1: + // grad_fun = gen_nn_ops.fused_batch_norm_grad_v2; throw new NotImplementedException(""); default: grad_fun = gen_nn_ops.fused_batch_norm_grad; @@ -225,8 +235,8 @@ namespace Tensorflow.Gradients YBackprop = grad_y, X = x, Scale = scale, - ReserveSpace1 = op.outputs[3], - ReserveSpace2 = op.outputs[4], + ReserveSpace1 = pop_mean, + ReserveSpace2 = pop_var, ReserveSpace3 = version == 2 ? op.outputs[5] : null, Epsilon = epsilon, DataFormat = data_format, diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index 5700ccdd..e842fcb4 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -65,6 +65,10 @@ namespace Tensorflow var base_types = new List(); var types = new List(); +#if DEBUG + if (op_type_name == "FusedBatchNormGradV3") + ; +#endif // Perform input type inference foreach (var input_arg in op_def.InputArg) { diff --git a/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs b/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs index 8de412c8..9cadac0c 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs @@ -32,7 +32,7 @@ namespace Tensorflow public override string ToString() { - return _handle == IntPtr.Zero ? "tf.Operation Undefined" : $"tf.Operation '{name}' type={OpType}"; + return _handle == IntPtr.Zero ? 
"tf.Operation Undefined" : $""; } public override bool Equals(object obj) diff --git a/src/TensorFlowNET.Core/Operations/Operation.Input.cs b/src/TensorFlowNET.Core/Operations/Operation.Input.cs index fdf92504..bbd11b0e 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Input.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Input.cs @@ -80,6 +80,9 @@ namespace Tensorflow /// reasons, or to ensure that the side effects of an op are observed /// in the correct order. /// +#if SERIALIZABLE + [JsonIgnore] +#endif public Operation[] control_inputs { get diff --git a/src/TensorFlowNET.Core/Operations/Operation.Output.cs b/src/TensorFlowNET.Core/Operations/Operation.Output.cs index abe8e9c1..3a70c107 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Output.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Output.cs @@ -45,6 +45,9 @@ namespace Tensorflow } private Tensor[] _outputs; +#if SERIALIZABLE + [JsonIgnore] +#endif public Tensor[] outputs => _outputs; #if SERIALIZABLE [JsonIgnore] diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index 831e6ca5..a0acf4bb 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -74,6 +74,9 @@ namespace Tensorflow public TF_DataType dtype => TF_DataType.DtInvalid; public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle)); public string OpType => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationOpType(_handle)); +#if SERIALIZABLE + [JsonIgnore] +#endif public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle)); #if SERIALIZABLE [JsonIgnore] @@ -152,6 +155,10 @@ namespace Tensorflow { _graph = g; +#if DEBUG + if (node_def.Name == "define_second_stage_train/gradients/define_loss/conv_lobj_branch/batch_normalization/cond/FusedBatchNormV3_1_grad/FusedBatchNormGradV3") + ; +#endif // Build the list of control inputs. 
            var control_input_ops = new List<Operation>();
            if (control_inputs != null)
diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs
index f9f2f58f..5374d72b 100644
--- a/src/TensorFlowNET.Core/Operations/array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/array_ops.cs
@@ -427,7 +427,7 @@ namespace Tensorflow
             if (!tf.context.executing_eagerly())
             {
                 var input_tensor = ops.convert_to_tensor(input);
-                var input_shape = tensor_util.to_shape(input_tensor.shape);
+                var input_shape = input_tensor.TensorShape;
                 if (optimize && input_tensor.NDims > -1 && input_shape.is_fully_defined())
                 {
                     var nd = np.array(input_tensor.shape).astype(out_type.as_numpy_dtype());
diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
index 5cf240e8..7621d1b2 100644
--- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
@@ -141,7 +141,8 @@ namespace Tensorflow
         public static Tensor add<Tx, Ty>(Tx x, Ty y, string name = null)
         {
-            var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y });
+            // forward_compatible(2019, 6, 25):
+            var _op = _op_def_lib._apply_op_helper("AddV2", name, args: new { x, y });
 
             return _op.output;
         }
diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs
index bb8d7134..bc904028 100644
--- a/src/TensorFlowNET.Core/Operations/math_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/math_ops.cs
@@ -355,6 +355,9 @@ namespace Tensorflow
             return _may_reduce_to_scalar(keepdims, axis, all);
         }
 
+        public static Tensor realdiv(Tensor x, Tensor y, string name = null)
+            => gen_math_ops.real_div(x, y, name: name);
+
         /// 
         /// Computes log(sum(exp(elements across dimensions of a tensor))).
         /// Reduces `input_tensor` along the dimensions given in `axis`.
@@ -561,6 +564,9 @@ namespace Tensorflow
         public static Tensor rsqrt(Tensor x, string name = null)
             => gen_math_ops.rsqrt(x, name: name);
 
+        public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null)
+            => gen_math_ops.pow(x, y, name: name);
+
         public static Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range")
         {
             if(limit == null)
diff --git a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs
index 42103b00..a6c9e221 100644
--- a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs
+++ b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs
@@ -117,7 +117,7 @@ namespace Tensorflow
             var min_epsilon = 1.001e-5f;
             epsilon = epsilon > min_epsilon ? epsilon : min_epsilon;
 
-            var results = gen_nn_ops.fused_batch_norm(x,
+            var results = gen_nn_ops.fused_batch_norm_v3(x,
                 scale_tensor,
                 offset_tensor,
                 mean,
diff --git a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj
index 09e2674a..369c6c81 100644
--- a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj
+++ b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj
@@ -33,7 +33,7 @@ https://tensorflownet.readthedocs.io
     true
-    TRACE;DEBUG;SERIALIZABLE_
+    TRACE;DEBUG;SERIALIZABLE
 
 
@@ -62,6 +62,7 @@ https://tensorflownet.readthedocs.io
 
+
 
diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.cs b/src/TensorFlowNET.Core/Tensors/Tensor.cs
index efac802d..2ec02232 100644
--- a/src/TensorFlowNET.Core/Tensors/Tensor.cs
+++ b/src/TensorFlowNET.Core/Tensors/Tensor.cs
@@ -115,6 +115,9 @@ namespace Tensorflow
         /// 
         /// The name of the device on which this tensor will be produced, or null.
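        /// Device names follow TensorFlow's convention, e.g. "/job:localhost/replica:0/task:0/device:GPU:0".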
/// +#if SERIALIZABLE + [JsonIgnore] +#endif public string Device => op.Device; #if SERIALIZABLE [JsonIgnore] diff --git a/src/TensorFlowNET.Core/ops.name_scope.cs b/src/TensorFlowNET.Core/ops.name_scope.cs index 80397667..55e9cf61 100644 --- a/src/TensorFlowNET.Core/ops.name_scope.cs +++ b/src/TensorFlowNET.Core/ops.name_scope.cs @@ -68,7 +68,7 @@ namespace Tensorflow var g = get_default_graph(); g._name_stack = old_stack; } - + public void __exit__() { } @@ -82,7 +82,7 @@ namespace Tensorflow { } - + /// /// __enter__() /// diff --git a/test/TensorFlowNET.UnitTest/Hub/MnistModelLoaderTest.cs b/test/TensorFlowNET.UnitTest/Hub/MnistModelLoaderTest.cs index 26dfd3b6..b1c90b32 100644 --- a/test/TensorFlowNET.UnitTest/Hub/MnistModelLoaderTest.cs +++ b/test/TensorFlowNET.UnitTest/Hub/MnistModelLoaderTest.cs @@ -2,7 +2,7 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System.Threading.Tasks; using Tensorflow.Hub; -namespace UnitTest +namespace TensorFlowNET.UnitTest { [TestClass] public class MnistModelLoaderTest From 232ba8dad2dc72287926c2ad041f002b45ab1aee Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Sat, 22 Feb 2020 14:37:12 -0600 Subject: [PATCH 08/11] fix sign --- src/TensorFlowNET.Core/APIs/tf.math.cs | 4 +- src/TensorFlowNET.Core/Gradients/math_grad.cs | 6 ++- .../Operations/OpDefLibrary.cs | 4 -- .../Operations/Operation.Input.cs | 12 +++-- .../Operations/Operation.Output.cs | 3 -- .../Operations/Operation.cs | 5 -- .../Operations/gen_math_ops.cs | 12 ++++- src/TensorFlowNET.Core/Operations/math_ops.cs | 54 ++++++++++++------- .../Tensors/Tensor.Operators.cs | 2 +- 9 files changed, 59 insertions(+), 43 deletions(-) diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index dec4c470..a28ec9d3 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -364,8 +364,8 @@ namespace Tensorflow public Tensor divide(Tensor x, T[] y, string name = null) where T : struct => x / ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y"); - public Tensor pow(T1 x, T2 y) - => gen_math_ops.pow(x, y); + public Tensor pow(T1 x, T2 y, string name = "pow") + => gen_math_ops.pow(x, y, name: name); /// /// Divides `x / y` elementwise, rounding toward the most negative integer. 
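        /// For example, floordiv(7, -2) yields -4, whereas truncating integer division would yield -3.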
diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index 94e434cb..d8ca512a 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -33,10 +33,14 @@ namespace Tensorflow.Gradients var x = op.inputs[0]; var grad = grads[0]; - return new Tensor[] { gen_ops.mul(grad, gen_math_ops.sign(x)) }; + return new Tensor[] { grad * math_ops.sign(x) }; } [RegisterGradient("AddV2")] + public static Tensor[] _AddV2Grad(Operation op, Tensor[] grads) + => _AddGrad(op, grads); + + [RegisterGradient("Add")] public static Tensor[] _AddGrad(Operation op, Tensor[] grads) { var x = op.inputs[0]; diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index e842fcb4..5700ccdd 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -65,10 +65,6 @@ namespace Tensorflow var base_types = new List(); var types = new List(); -#if DEBUG - if (op_type_name == "FusedBatchNormGradV3") - ; -#endif // Perform input type inference foreach (var input_arg in op_def.InputArg) { diff --git a/src/TensorFlowNET.Core/Operations/Operation.Input.cs b/src/TensorFlowNET.Core/Operations/Operation.Input.cs index bbd11b0e..5c4106ae 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Input.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Input.cs @@ -40,13 +40,11 @@ namespace Tensorflow } return num; } -#if SERIALIZABLE - [JsonIgnore] -#endif public int NumInputs => c_api.TF_OperationNumInputs(_handle); private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray(); private InputList _inputs_val; + public InputList inputs { get @@ -69,8 +67,10 @@ namespace Tensorflow } } - public int NumControlInputs => c_api.TF_OperationNumControlInputs(_handle); + public int NumControlInputs + => _handle == IntPtr.Zero ? 0 : c_api.TF_OperationNumControlInputs(_handle); + Operation[] _control_inputs; /// /// The `Operation` objects on which this op has a control dependency. 
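        /// Every op in this list is guaranteed to have finished executing before this op begins to run.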
/// @@ -87,7 +87,9 @@ namespace Tensorflow { get { - return GetControlInputs(); + if (_control_inputs == null || _control_inputs.Length == 0) + _control_inputs = GetControlInputs(); + return _control_inputs; } } diff --git a/src/TensorFlowNET.Core/Operations/Operation.Output.cs b/src/TensorFlowNET.Core/Operations/Operation.Output.cs index 3a70c107..bbf0b13f 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Output.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Output.cs @@ -26,9 +26,6 @@ namespace Tensorflow { public partial class Operation { -#if SERIALIZABLE - [JsonIgnore] -#endif public int NumOutputs => c_api.TF_OperationNumOutputs(_handle); public TF_DataType OutputType(int index) => c_api.TF_OperationOutputType(_tf_output(index)); diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index a0acf4bb..029f8d69 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -154,11 +154,6 @@ namespace Tensorflow public Operation(NodeDef node_def, Graph g, Tensor[] inputs = null, TF_DataType[] output_types = null, ITensorOrOperation[] control_inputs = null, TF_DataType[] input_types = null, string original_op = "", OpDef op_def = null) { _graph = g; - -#if DEBUG - if (node_def.Name == "define_second_stage_train/gradients/define_loss/conv_lobj_branch/batch_normalization/cond/FusedBatchNormV3_1_grad/FusedBatchNormGradV3") - ; -#endif // Build the list of control inputs. var control_input_ops = new List(); if (control_inputs != null) diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index 7621d1b2..02bc1ada 100644 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -140,6 +140,14 @@ namespace Tensorflow } public static Tensor add(Tx x, Ty y, string name = null) + { + // forward_compatible(2019, 6, 25): + var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); + + return _op.output; + } + + public static Tensor add_v2(Tx x, Ty y, string name = null) { // forward_compatible(2019, 6, 25): var _op = _op_def_lib._apply_op_helper("AddV2", name, args: new { x, y }); @@ -213,7 +221,7 @@ namespace Tensorflow return op.outputs[0]; } - public static Tensor sign(Tensor x, string name = "Sign") + public static Tensor sign(T x, string name = "Sign") { var op = _op_def_lib._apply_op_helper("Sign", name: name, args: new {x}); @@ -448,7 +456,7 @@ namespace Tensorflow return _op.outputs[0]; } - public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate= false, string name= "") + public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate= false, string name= null) { var _op = _op_def_lib._apply_op_helper("Cast", name, args: new { x, DstT, Truncate }); diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index bc904028..34ab587f 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -35,14 +35,17 @@ namespace Tensorflow x = ops.convert_to_tensor(x, name: "x"); if (x.dtype.is_complex()) throw new NotImplementedException("math_ops.abs for dtype.is_complex"); - //return gen_math_ops.complex_abs(x, Tout: x.dtype.real_dtype, name: name); + //return gen_math_ops.complex_abs(x, Tout: x.dtype.real_dtype, name: name); return gen_math_ops._abs(x, name: name); }); } - public static Tensor add(Tx x, Ty y, string name = null) + public 
static Tensor add(Tx x, Ty y, string name = null) => gen_math_ops.add(x, y, name); + public static Tensor add_v2(Tx x, Ty y, string name = null) + => gen_math_ops.add_v2(x, y, name); + /// /// Adds all input tensors element-wise. /// @@ -53,21 +56,38 @@ namespace Tensorflow { inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs); - if(inputs.Length == 1) + if (inputs.Length == 1) { var values = inputs[0]; if (name != null) return array_ops.identity(values, name: name); return values; } - + return gen_math_ops.add_n(inputs, name: name); } + public static Tensor cast(RefVariable x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) + { + var base_type = dtype.as_base_dtype(); + if (base_type == x.dtype) + return x; + + return tf_with(ops.name_scope(name, "Cast", new { x }), scope => + { + name = scope; + var t_x = ops.convert_to_tensor(x, name: "x"); + if (t_x.dtype.as_base_dtype() != base_type) + t_x = gen_math_ops.cast(t_x, base_type, name: name); + + return x; + }); + } + public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) { var base_type = dtype.as_base_dtype(); - if(base_type == x.dtype) + if (base_type == x.dtype) return x; return tf_with(ops.name_scope(name, "Cast", new { x }), scope => @@ -98,13 +118,13 @@ namespace Tensorflow public static Tensor cumsum(Tensor x, T axis = default, bool exclusive = false, bool reverse = false, string name = null) { - return tf_with(ops.name_scope(name, "Cumsum", new {x}), scope => - { - name = scope; - x = ops.convert_to_tensor(x, name: "x"); + return tf_with(ops.name_scope(name, "Cumsum", new { x }), scope => + { + name = scope; + x = ops.convert_to_tensor(x, name: "x"); - return gen_math_ops.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse, name: name); - }); + return gen_math_ops.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse, name: name); + }); } /// @@ -221,7 +241,7 @@ namespace Tensorflow public static Tensor reduce_mean(Tensor[] input_tensors, int? axis = null, bool keepdims = false, string name = null) { - if(axis == null) + if (axis == null) { var r = _ReductionDims(input_tensors, axis); var m = gen_math_ops.mean(input_tensors, r, keepdims, name); @@ -263,14 +283,8 @@ namespace Tensorflow return gen_math_ops.sigmoid(x_tensor, name: name); } - public static Tensor sign(Tensor x, string name = null) - { - return tf_with(ops.name_scope(name, "Sign", new {x}), scope => - { - x = ops.convert_to_tensor(x, name: "x"); - return gen_math_ops.sign(x); - }); - } + public static Tensor sign(T x, string name = null) + => gen_math_ops.sign(x, name: name); /// /// Returns (x - y)(x - y) element-wise. 
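        /// Computed by the single SquaredDifference kernel rather than by separate subtract and square ops.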
diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs index b205674d..616a375f 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs @@ -328,7 +328,7 @@ namespace Tensorflow switch (name.ToLowerInvariant()) { case "add": - result = math_ops.add(x1, y1, name: scope); + result = math_ops.add_v2(x1, y1, name: scope); break; case "div": result = math_ops.div(x1, y1, name: scope); From 08224a9a93cdbb97bbc6b604ca8edff5facaa56c Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Sat, 22 Feb 2020 21:24:11 -0600 Subject: [PATCH 09/11] Add implicitation for Operation to RefVariable --- .../Operations/Operation.Implicit.cs | 8 ++++++-- src/TensorFlowNET.Core/Operations/Operation.cs | 2 +- src/TensorFlowNET.Core/TensorFlow.Binding.csproj | 13 +++++++------ src/TensorFlowNET.Core/Variables/RefVariable.cs | 13 ++++++++++++- 4 files changed, 26 insertions(+), 10 deletions(-) diff --git a/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs b/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs index 9cadac0c..289c69ad 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.Implicit.cs @@ -27,8 +27,12 @@ namespace Tensorflow public static implicit operator Operation(IntPtr handle) => new Operation(handle); - public static implicit operator IntPtr(Operation op) => op._handle; - public static implicit operator Tensor(Operation op) => op.output; + public static implicit operator IntPtr(Operation op) + => op._handle; + public static implicit operator Tensor(Operation op) + => op.output; + public static implicit operator RefVariable(Operation op) + => new RefVariable(op); public override string ToString() { diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index 029f8d69..65097d5b 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -106,7 +106,7 @@ namespace Tensorflow _outputs = new Tensor[NumOutputs]; for (int i = 0; i < NumOutputs; i++) _outputs[i] = new Tensor(this, i, OutputType(i)); - + // Dict mapping op name to file and line information for op colocation // context managers. _control_flow_context = _graph._get_control_flow_context(); diff --git a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj index 369c6c81..0e1c7622 100644 --- a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj +++ b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj @@ -5,7 +5,7 @@ TensorFlow.NET Tensorflow 1.14.1 - 0.14.2.0 + 0.15.0 Haiping Chen, Meinrad Recheis, Eli Belash SciSharp STACK true @@ -18,13 +18,15 @@ Google's TensorFlow full binding in .NET Standard. Building, training and infering deep learning models. https://tensorflownet.readthedocs.io - 0.14.2.0 + 0.15.0.0 Changes since v0.14.0: 1: Add TransformGraphWithStringInputs. 
2: tf.trainer.load_graph, tf.trainer.freeze_graph -3: Import Protobuf.Text +3: Import Protobuf.Text +4: Support YOLOv3 object detection +5: Add implicitation for Operation to RefVariable 7.3 - 0.14.2.0 + 0.15.0.0 LICENSE true true @@ -33,7 +35,7 @@ https://tensorflownet.readthedocs.io true - TRACE;DEBUG;SERIALIZABLE + TRACE;DEBUG;SERIALIZABLE_ @@ -62,7 +64,6 @@ https://tensorflownet.readthedocs.io - diff --git a/src/TensorFlowNET.Core/Variables/RefVariable.cs b/src/TensorFlowNET.Core/Variables/RefVariable.cs index c79c5b7f..cff14ec1 100644 --- a/src/TensorFlowNET.Core/Variables/RefVariable.cs +++ b/src/TensorFlowNET.Core/Variables/RefVariable.cs @@ -61,7 +61,11 @@ namespace Tensorflow { _in_graph_mode = true; - if (variable_def != null) + if(initial_value is Operation op) + { + _init_from_op(op); + } + else if (variable_def != null) { if (initial_value != null) throw new ValueError("variable_def and initial_value are mutually exclusive."); @@ -73,6 +77,13 @@ namespace Tensorflow } } + private void _init_from_op(Operation op) + { + var g = ops.get_default_graph(); + _initializer_op = op; + _variable = op.output; + } + private void _init_from_proto(VariableDef variable_def, string import_scope = "") { var g = ops.get_default_graph(); From d028a2841a7f90b84f4e39043d27b40eba14df68 Mon Sep 17 00:00:00 2001 From: Esther2013 Date: Sat, 29 Feb 2020 19:39:32 -0600 Subject: [PATCH 10/11] update installation readme --- tensorflowlib/README.md | 55 ++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/tensorflowlib/README.md b/tensorflowlib/README.md index 1450dc44..320f5619 100644 --- a/tensorflowlib/README.md +++ b/tensorflowlib/README.md @@ -1,27 +1,12 @@ -TensorFlow.NET pack all required libraries in architecture-specific assemblies folders per NuGet standard [Deprecated] . - -We changed to use `Microsoft.ML.TensorFlow.Redist` to maintain the TensorFlow library. - - - -### Download manually - -Here are some pre-built TensorFlow binaries you can use for each platform: - -- Linux - - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-1.14.0.tar.gz - - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-1.14.0.tar.gz -- Mac: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-1.14.0.tar.gz -- Windows - - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip - - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip - +TensorFlow.NET pack all required libraries in architecture-specific assemblies folders per NuGet standard. +```powershell +PM> Install-Package TensorFlow.NET +PM> Install-Package SciSharp.TensorFlow.Redist +``` ### Run in Linux -`Install-Package TensorFlow.NET` - Download Linux pre-built library and unzip `libtensorflow.so` and `libtensorflow_framework.so` into current running directory. To run image recognition in Linux, please ensure some prerequisite libraries is install. @@ -33,20 +18,34 @@ sudo apt install libgdiplus More information about [System.Drawing on Linux](). +### Run TensorFlow in GPU +Before running verify you installed CUDA and cuDNN (TensorFlow v1.15 is compatible with CUDA v10.0 and cuDNN v7.4), and make sure the corresponding cuda version is compatible. +#### Run in Mac OS +There is no GPU support for macOS. 
-### Run in Mac OS - - - -### Tensorflow GPU for Windows - -Before running verify you installed CUDA and cuDNN (TensorFlow v1.14 is compatible with CUDA v10.0 and cuDNN v7.4), and make sure the corresponding cuda version is compatible. +#### Tensorflow GPU for Windows ```powershell PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU ``` +#### Tensorflow GPU for Linux +```powershell +PM> Install-Package SciSharp.TensorFlow.Redist-Linux-GPU +``` + +### Download prebuild binary manually + +Here are some pre-built TensorFlow binaries you can use for each platform: + +- Linux + - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-1.15.0.tar.gz + - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-1.15.0.tar.gz +- Mac: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-1.15.0.tar.gz +- Windows + - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.15.0.zip + - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.15.0.zip ### Build from source for Windows @@ -69,7 +68,7 @@ https://www.tensorflow.org/install/source_windows 4. Install from local wheel file. -`pip install C:/tmp/tensorflow_pkg/tensorflow-1.14.0-cp36-cp36m-win_amd64.whl` +`pip install C:/tmp/tensorflow_pkg/tensorflow-1.15.0-cp36-cp36m-win_amd64.whl` ### Export more APIs From 7c6497bf0f49a61c0e5a131f02a14352dfed3541 Mon Sep 17 00:00:00 2001 From: Oceania2018 Date: Sat, 29 Feb 2020 23:12:36 -0600 Subject: [PATCH 11/11] Release v0.15.0 --- src/TensorFlowNET.Core/TensorFlow.Binding.csproj | 4 ++-- src/TensorFlowNET.Hub/Tensorflow.Hub.csproj | 6 +++--- src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj | 2 +- test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj index 0e1c7622..a84d7f75 100644 --- a/src/TensorFlowNET.Core/TensorFlow.Binding.csproj +++ b/src/TensorFlowNET.Core/TensorFlow.Binding.csproj @@ -63,8 +63,8 @@ https://tensorflownet.readthedocs.io - - + + diff --git a/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj b/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj index 64ca76f6..38e5f907 100644 --- a/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj +++ b/src/TensorFlowNET.Hub/Tensorflow.Hub.csproj @@ -2,7 +2,7 @@ Tensorflow.Hub netstandard2.0 - 0.1.0 + 0.1.1 Kerry Jiang, Haiping Chen SciSharp STACK Apache 2.0 @@ -14,7 +14,7 @@ SciSharp.TensorFlowHub true Fix GetNextBatch() bug. -Change to NumSharp compact version. +Upgrade NumSharp.Lite 0.1.4. https://avatars3.githubusercontent.com/u/44989469?s=200&v=4 TensorFlow.Hub @@ -22,6 +22,6 @@ Change to NumSharp compact version. DEBUG;TRACE - + \ No newline at end of file diff --git a/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj b/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj index 0152683b..1c2981e2 100644 --- a/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj +++ b/src/TensorFlowNet.Benchmarks/Tensorflow.Benchmark.csproj @@ -19,7 +19,7 @@ - + diff --git a/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj b/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj index e7ebc23e..efbf401c 100644 --- a/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj +++ b/test/TensorFlowNET.UnitTest/Tensorflow.UnitTest.csproj @@ -32,7 +32,7 @@ - +
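
A minimal end-to-end sketch of the pattern these patches converge on: incrementing an int64 `global_step` with a dtype-matched tensor inside a graph/session, as the Optimizer.cs diff in PATCH 04 does. The `ops.convert_to_tensor` and `state_ops.assign_add` calls mirror that diff; `tf.Variable`, `tf.Session`, and `tf.global_variables_initializer` are assumed from the TensorFlow.NET 0.15-era API surface, so treat this as an illustrative sketch rather than part of the patch series.

```csharp
using Tensorflow;
using static Tensorflow.Binding;

class GlobalStepSketch
{
    static void Main()
    {
        var graph = tf.Graph().as_default();

        // An int64 step counter. Before PATCH 04 the optimizer incremented it with
        // tf.constant(1) (an int32), which failed with a dtype mismatch (issue #499).
        var global_step = tf.Variable(0L, name: "global_step", trainable: false);

        // The patched pattern: build the increment in the variable's own dtype.
        var one = ops.convert_to_tensor(1, dtype: global_step.dtype);
        var apply_updates = state_ops.assign_add(global_step, one, name: "increment_step");

        using (var sess = tf.Session(graph))
        {
            sess.run(tf.global_variables_initializer());
            sess.run(apply_updates);
            print(sess.run(global_step)); // expected: 1
        }
    }
}
```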