@@ -0,0 +1 @@ | |||
* text=auto |
@@ -1,133 +1,133 @@ | |||
| |||
Microsoft Visual Studio Solution File, Format Version 12.00 | |||
# Visual Studio Version 16 | |||
VisualStudioVersion = 16.0.29102.190 | |||
MinimumVisualStudioVersion = 10.0.40219.1 | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{6268B461-486A-460B-9B3C-86493CBBAAF7}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorFlowNET.Hub\Tensorflow.Hub.csproj", "{95B077C1-E21B-486F-8BDD-1C902FE687AB}" | |||
EndProject | |||
Global | |||
GlobalSection(SolutionConfigurationPlatforms) = preSolution | |||
Debug|Any CPU = Debug|Any CPU | |||
Debug|x64 = Debug|x64 | |||
Debug-Minimal|Any CPU = Debug-Minimal|Any CPU | |||
Debug-Minimal|x64 = Debug-Minimal|x64 | |||
Publish|Any CPU = Publish|Any CPU | |||
Publish|x64 = Publish|x64 | |||
Release|Any CPU = Release|Any CPU | |||
Release|x64 = Release|x64 | |||
EndGlobalSection | |||
GlobalSection(ProjectConfigurationPlatforms) = postSolution | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|Any CPU | |||
{3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|Any CPU | |||
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|Any CPU | |||
{6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|Any CPU | |||
{EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.ActiveCfg = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug|x64.Build.0 = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.ActiveCfg = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|Any CPU.Build.0 = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.ActiveCfg = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Publish|x64.Build.0 = Debug|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.ActiveCfg = Release|Any CPU | |||
{95B077C1-E21B-486F-8BDD-1C902FE687AB}.Release|x64.Build.0 = Release|Any CPU | |||
EndGlobalSection | |||
GlobalSection(SolutionProperties) = preSolution | |||
HideSolutionNode = FALSE | |||
EndGlobalSection | |||
GlobalSection(ExtensibilityGlobals) = postSolution | |||
SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} | |||
EndGlobalSection | |||
EndGlobal
@@ -1,3 +1,3 @@ | |||
TensorFlow.NET logo (c) 2019 by Meinrad Recheis.
The logo is based on the original Tensorflow logo which is copyrighted by the respective creator. |
@@ -364,8 +364,8 @@ namespace Tensorflow | |||
public Tensor divide<T>(Tensor x, T[] y, string name = null) where T : struct | |||
=> x / ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y"); | |||
public Tensor pow<T1, T2>(T1 x, T2 y) | |||
=> gen_math_ops.pow(x, y); | |||
public Tensor pow<T1, T2>(T1 x, T2 y, string name = "pow") | |||
=> gen_math_ops.pow(x, y, name: name); | |||
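// A minimal usage sketch for the new optional name parameter (assuming this
// overload is what tf.pow dispatches to; the variable names are illustrative):
//   var x = tf.constant(2.0f);
//   var z = tf.pow(x, 3.0f, name: "cube");  // the Pow node is labeled "cube"
// Callers that omit the argument get the default node name "pow".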
/// <summary> | |||
/// Divides `x / y` elementwise, rounding toward the most negative integer. | |||
@@ -33,9 +33,13 @@ namespace Tensorflow.Gradients | |||
var x = op.inputs[0]; | |||
var grad = grads[0]; | |||
return new Tensor[] { gen_ops.mul(grad, gen_math_ops.sign(x)) }; | |||
return new Tensor[] { grad * math_ops.sign(x) }; | |||
} | |||
[RegisterGradient("AddV2")] | |||
public static Tensor[] _AddV2Grad(Operation op, Tensor[] grads) | |||
=> _AddGrad(op, grads); | |||
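// AddV2 is the newer element-wise addition kernel (same math as Add, minus
// Add's legacy string handling), so its gradient simply forwards to _AddGrad.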
[RegisterGradient("Add")] | |||
public static Tensor[] _AddGrad(Operation op, Tensor[] grads) | |||
{ | |||
@@ -107,7 +111,9 @@ namespace Tensorflow.Gradients | |||
var y = op.outputs[0]; // y = e^x | |||
return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { | |||
y = math_ops.conj(y); | |||
return new Tensor[] { math_ops.mul_no_nan(y, grad) }; | |||
// forward_compatible(2019, 9, 14) | |||
// return new Tensor[] { math_ops.mul_no_nan(y, grad) }; | |||
return new Tensor[] { grad * y }; | |||
}); | |||
} | |||
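// Why grad * y is valid here: with y = exp(x), dL/dx = dL/dy * dy/dx
// = grad * exp(x) = grad * y, so the saved forward output is reused instead
// of recomputing exp(x). The mul_no_nan form is kept commented out above
// because upstream TensorFlow only enables it behind the
// forward_compatible(2019, 9, 14) window.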
@@ -167,8 +173,7 @@ namespace Tensorflow.Gradients | |||
new TF_DataType[] { tf.int32, tf.float32 }.Contains(grad.dtype)) | |||
return new Tensor[] { gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x) }; | |||
var sx = array_ops.shape(x); | |||
var sy = array_ops.shape(y); | |||
var (sx, sy) = SmartBroadcastGradientArgs(x, y); | |||
var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); | |||
x = math_ops.conj(x); | |||
@@ -355,8 +360,8 @@ namespace Tensorflow.Gradients | |||
: gen_math_ops.less_equal(x, y); | |||
var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); | |||
var xgrad = array_ops.where(xmask, grad, zeros); | |||
var ygrad = array_ops.where(xmask, zeros, grad); | |||
var gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx); | |||
var ygrad = array_ops.where(xmask, zeros, grad); | |||
var gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy); | |||
return new Tensor[] { gx, gy }; | |||
} | |||
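// xmask marks where x "won" the comparison (greater_equal for Maximum,
// less_equal for Minimum); grad is routed to x at those positions and to y
// elsewhere, then each half is summed over its broadcast axes (rx / ry) and
// reshaped back to the operand shape (sx / sy).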
@@ -397,14 +402,13 @@ namespace Tensorflow.Gradients | |||
_ShapesFullySpecifiedAndEqual(x, y, grad)) | |||
return new Tensor[] { grad, -grad }; | |||
var sx = array_ops.shape(x); | |||
var sy = array_ops.shape(y); | |||
var (sx, sy) = SmartBroadcastGradientArgs(x, y); | |||
var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); | |||
var r1 = gen_array_ops.reshape(math_ops.reduce_sum(grad, rx), sx); | |||
var r2 = gen_array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy); | |||
var gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx); | |||
var gy = array_ops.reshape(math_ops.reduce_sum(-grad, ry), sy); | |||
return new Tensor[] { r1, r2 }; | |||
return new Tensor[] { gx, gy }; | |||
} | |||
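// The sign flip lands on the y side because d(x - y)/dy = -1; both halves
// then follow the usual broadcast-reduce pattern: sum grad over the
// broadcast axes and reshape back to the operand shape. For example, with
// x: [2,3] and y: [1,3], gy = reshape(reduce_sum(-grad, axis 0), [1,3]).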
public static bool _ShapesFullySpecifiedAndEqual(Tensor x, Tensor y, Tensor grad) | |||
@@ -468,15 +472,16 @@ namespace Tensorflow.Gradients | |||
x = math_ops.conj(x); | |||
y = math_ops.conj(y); | |||
var realdiv1 = gen_math_ops.real_div(-x, y); | |||
var realdiv2 = gen_math_ops.real_div(realdiv1, y); | |||
var reduce_sum1 = math_ops.reduce_sum(grad * realdiv2, ry); | |||
var reshape1 = gen_array_ops.reshape(reduce_sum1, sy); | |||
var realdiv3 = gen_math_ops.real_div(grad, y); | |||
var reduce_sum2 = math_ops.reduce_sum(realdiv3, rx); | |||
var reshape2 = gen_array_ops.reshape(reduce_sum2, sx); | |||
var reshape1 = array_ops.reshape( | |||
math_ops.reduce_sum( | |||
math_ops.realdiv(grad, y), rx), | |||
sx); | |||
var reshape2 = array_ops.reshape( | |||
math_ops.reduce_sum( | |||
grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry), | |||
sy); | |||
return new Tensor[] { reshape2, reshape1 }; | |||
return new Tensor[] { reshape1, reshape2 }; | |||
} | |||
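// The two reshapes implement the quotient-rule partials:
//   d(x/y)/dx = 1/y      -> reduce grad / y over rx, reshape to sx (reshape1)
//   d(x/y)/dy = -x / y^2 -> reduce grad * (-x/y)/y over ry, reshape to sy (reshape2)
// Only the intermediate naming changed; the returned order { x-grad, y-grad }
// is the same as before.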
[RegisterGradient("Sigmoid")] | |||
@@ -602,14 +607,12 @@ namespace Tensorflow.Gradients | |||
var y = op.inputs[1]; | |||
var z = op.outputs[0]; | |||
var sx = array_ops.shape(x); | |||
var sy = array_ops.shape(y); | |||
var (sx, sy) = SmartBroadcastGradientArgs(x, y); | |||
var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); | |||
x = math_ops.conj(x); | |||
y = math_ops.conj(y); | |||
z = math_ops.conj(z); | |||
var pow = gen_math_ops.pow(x, y - 1.0f); | |||
var mul = grad * y * pow; | |||
var mul = grad * y * math_ops.pow(x, y - 1.0f); | |||
var reduce_sum = math_ops.reduce_sum(mul, rx); | |||
var gx = gen_array_ops.reshape(reduce_sum, sx); | |||
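// gx follows d(x^y)/dx = y * x^(y-1): the pointwise product grad * y * x^(y-1)
// is summed over the broadcast axes rx and reshaped back to sx. Swapping
// gen_math_ops.pow for math_ops.pow keeps the math unchanged and routes the
// call through the higher-level wrapper.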
@@ -630,5 +633,29 @@ namespace Tensorflow.Gradients | |||
return new Tensor[] { gx, gy }; | |||
} | |||
/// <summary> | |||
/// Picks the shape tensors for `broadcast_gradient_args`, avoiding shape constant-folding when either input's static shape is not fully defined.
/// </summary> | |||
/// <param name="x"></param> | |||
/// <param name="y"></param> | |||
/// <returns></returns> | |||
private static (Tensor, Tensor) SmartBroadcastGradientArgs(Tensor x, Tensor y) | |||
{ | |||
Tensor sx, sy; | |||
if (x.TensorShape.is_fully_defined() && | |||
y.TensorShape.is_fully_defined()) | |||
{ | |||
sx = array_ops.shape(x); | |||
sy = array_ops.shape(y); | |||
} | |||
else | |||
{ | |||
sx = array_ops.shape_internal(x, optimize: false); | |||
sy = array_ops.shape_internal(y, optimize: false); | |||
} | |||
return (sx, sy); | |||
} | |||
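// The call sites above replace the unconditional array_ops.shape(x) /
// array_ops.shape(y) pair with this helper, so fully defined static shapes
// keep constant-foldable shape ops while unknown shapes fall back to
// shape_internal(optimize: false).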
} | |||
} |
@@ -170,6 +170,14 @@ namespace Tensorflow.Gradients | |||
public static Tensor[] _FusedBatchNormGrad(Operation op, Tensor[] grads) | |||
=> _BaseFusedBatchNormGrad(op, 0, grads); | |||
[RegisterGradient("FusedBatchNormV2")] | |||
public static Tensor[] _FusedBatchNormV2Grad(Operation op, Tensor[] grads) | |||
=> _BaseFusedBatchNormGrad(op, 1, grads); | |||
[RegisterGradient("FusedBatchNormV3")] | |||
public static Tensor[] _FusedBatchNormV3Grad(Operation op, Tensor[] grads) | |||
=> _BaseFusedBatchNormGrad(op, 2, grads); | |||
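// Version mapping consumed by _BaseFusedBatchNormGrad below:
//   0 -> FusedBatchNorm, 1 -> FusedBatchNormV2, 2 -> FusedBatchNormV3.
// V3 adds a third reserve-space output, which is why the grad parameters
// later carry ReserveSpace3 only when version == 2.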
/// <summary> | |||
/// Return the gradients for the 3 inputs of BatchNorm. | |||
/// </summary> | |||
@@ -190,8 +198,10 @@ namespace Tensorflow.Gradients | |||
switch (version) | |||
{ | |||
case 2: | |||
throw new NotImplementedException(""); | |||
grad_fun = gen_nn_ops.fused_batch_norm_grad_v3; | |||
break; | |||
case 1: | |||
// grad_fun = gen_nn_ops.fused_batch_norm_grad_v2; | |||
throw new NotImplementedException("fused_batch_norm_grad_v2");
default: | |||
grad_fun = gen_nn_ops.fused_batch_norm_grad; | |||
@@ -225,8 +235,8 @@ namespace Tensorflow.Gradients | |||
YBackprop = grad_y, | |||
X = x, | |||
Scale = scale, | |||
ReserveSpace1 = op.outputs[3], | |||
ReserveSpace2 = op.outputs[4], | |||
ReserveSpace1 = pop_mean, | |||
ReserveSpace2 = pop_var, | |||
ReserveSpace3 = version == 2 ? op.outputs[5] : null, | |||
Epsilon = epsilon, | |||
DataFormat = data_format, | |||
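// pop_mean / pop_var are the population statistics fed into the forward op;
// using them instead of op.outputs[3] / op.outputs[4] mirrors the
// inference-mode (is_training: false) branch of the upstream Python
// gradient, where the batch-statistics outputs are not what the grad
// kernel expects.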
@@ -1,17 +1,17 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System.Collections.Generic; | |||
@@ -77,8 +77,8 @@ namespace Tensorflow | |||
/// | |||
/// Use with the `with` keyword to specify that all operations constructed | |||
/// within the context should have control dependencies on | |||
/// `control_inputs`. | |||
/// </summary> | |||
public _ControlDependenciesController control_dependencies(object[] control_inputs) | |||
{ | |||
if (control_inputs == null) | |||
@@ -92,20 +92,20 @@ namespace Tensorflow | |||
// TODO: implement IndexedSlices | |||
//case IndexedSlices islice: | |||
// control_ops.Add(islice.op); | |||
// break; | |||
case Tensor t: | |||
control_ops.Add(t.op); | |||
break; | |||
case Operation op: | |||
control_ops.Add(op); | |||
break; | |||
default: | |||
var t1 = _as_graph_element(c); | |||
if (t1 == null) | |||
throw new TypeError($"Control input must be Operation or Tensor:{c}"); | |||
control_ops.Add(t1.op); | |||
break; | |||
} | |||
} | |||
return new _ControlDependenciesController(this, control_ops); | |||
} | |||
@@ -138,9 +138,9 @@ namespace Tensorflow | |||
_control_dependencies_stack.RemoveAt(_control_dependencies_stack.Count-1); | |||
} | |||
/// <summary> | |||
/// Record that the given op depends on all registered control dependencies. | |||
/// </summary> | |||
public void _record_op_seen_by_control_dependencies(Operation op) | |||
{ | |||
foreach (var controller in _control_dependencies_stack) | |||
@@ -1,17 +1,17 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
@@ -38,8 +38,8 @@ namespace Tensorflow | |||
public OperationDescription NewOperation(string opType, string opName) | |||
{ | |||
return c_api.TF_NewOperation(_handle, opType, opName); | |||
} | |||
public Operation[] ReturnOperations(IntPtr results) | |||
{ | |||
TF_Operation return_oper_handle = new TF_Operation(); | |||
@@ -89,14 +89,14 @@ namespace Tensorflow | |||
public ITensorOrOperation[] get_operations() | |||
{ | |||
return _nodes_by_name.Values.ToArray(); | |||
} | |||
/// <summary> | |||
/// Returns the `Operation` with the given `name`. | |||
/// | |||
/// This method may be called concurrently from multiple threads. | |||
/// </summary> | |||
/// <param name="name">The name of the `Operation` to return.</param> | |||
public Operation get_operation_by_name(string name) | |||
=> as_graph_element(name, allow_tensor: false, allow_operation: true) as Operation; | |||
@@ -109,8 +109,8 @@ namespace Tensorflow | |||
{ | |||
var op_name = Marshal.PtrToStringAnsi(c_api.TF_OperationName(tf_oper)); | |||
return _get_operation_by_name_unsafe(op_name); | |||
} | |||
/// <summary> | |||
/// Creates an `Operation` in this graph from the supplied TF_Operation. | |||
/// | |||
@@ -125,7 +125,7 @@ namespace Tensorflow | |||
/// </summary> | |||
/// <param name="c_op">a wrapped TF_Operation</param> | |||
/// <param name="compute_device">(Optional.) If True, device functions will be executed | |||
/// to compute the device property of the Operation.</param> | |||
/// <returns>An `Operation` object.</returns> | |||
public Operation _create_op_from_tf_operation(IntPtr c_op, bool compute_device = true) | |||
{ | |||
@@ -1,21 +1,21 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Collections; | |||
using System.Collections.Generic; | |||
using System.Linq; | |||
using System.Runtime.InteropServices; | |||
@@ -75,7 +75,7 @@ namespace Tensorflow | |||
/// then create a TensorFlow session to run parts of the graph across a set of local and remote devices. | |||
/// </summary> | |||
/// <remarks>https://www.tensorflow.org/guide/graphs <br></br>https://www.tensorflow.org/api_docs/python/tf/Graph</remarks> | |||
public partial class Graph : DisposableObject | |||
#if !SERIALIZABLE | |||
, IEnumerable<Operation> | |||
#endif | |||
@@ -105,18 +105,18 @@ namespace Tensorflow | |||
/// </summary> | |||
private Dictionary<string, object> _collections = new Dictionary<string, object>(); | |||
public bool building_function; | |||
int _seed; | |||
public int seed | |||
{ | |||
get => _seed; | |||
set | |||
{ | |||
_seed = value; | |||
} | |||
} | |||
public Graph() | |||
{ | |||
_handle = c_api.TF_NewGraph(); | |||
@@ -133,20 +133,20 @@ namespace Tensorflow | |||
_nodes_by_name = new Dictionary<string, ITensorOrOperation>(); | |||
_names_in_use = new Dictionary<string, int>(); | |||
_graph_key = $"grap-key-{ops.uid()}/"; | |||
} | |||
public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true) | |||
{ | |||
return _as_graph_element_locked(obj, allow_tensor, allow_operation); | |||
} | |||
/// <summary> | |||
/// Returns a context manager that makes this `Graph` the default graph. | |||
/// </summary> | |||
/// <returns></returns> | |||
public Graph as_default() | |||
{ | |||
return ops.set_default_graph(this); | |||
} | |||
private Tensor _as_graph_element(object obj) | |||
@@ -155,8 +155,8 @@ namespace Tensorflow | |||
return var._as_graph_element(); | |||
return null; | |||
} | |||
private ITensorOrOperation _as_graph_element_locked(object obj, bool allow_tensor = true, bool allow_operation = true) | |||
{ | |||
string types_str = ""; | |||
@@ -259,8 +259,8 @@ namespace Tensorflow | |||
throw new RuntimeError("Graph is finalized and cannot be modified."); | |||
} | |||
public Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes, | |||
TF_DataType[] input_types = null, string name = null, | |||
Dictionary<string, AttrValue> attrs = null, OpDef op_def = null) | |||
{ | |||
if (inputs == null) | |||
@@ -272,12 +272,12 @@ namespace Tensorflow | |||
// If a names ends with a '/' it is a "name scope" and we use it as-is, | |||
// after removing the trailing '/'. | |||
name = name.EndsWith("/") ? ops.name_from_scope_name(name) : unique_name(name); | |||
var node_def = ops._NodeDef(op_type, name, device: "", attrs: attrs);
var input_ops = inputs.Select(x => x.op).ToArray();
var control_inputs = _control_dependencies_for_inputs(input_ops);
var op = new Operation(node_def,
this, | |||
inputs: inputs, | |||
output_types: dtypes, | |||
@@ -297,9 +297,9 @@ namespace Tensorflow | |||
return op; | |||
} | |||
public void device(string device_name) | |||
{ | |||
throw new NotImplementedException(""); | |||
} | |||
private void _create_op_helper(Operation op, bool compute_device = true) | |||
@@ -353,8 +353,8 @@ namespace Tensorflow | |||
_name_stack = new_stack; | |||
return String.IsNullOrEmpty(new_stack) ? "" : new_stack + "/"; | |||
} | |||
/// <summary> | |||
/// Return a unique operation name for `name`. | |||
/// | |||
@@ -379,10 +379,10 @@ namespace Tensorflow | |||
/// <returns>A string to be passed to `create_op()` that will be used | |||
/// to name the operation being created.</returns> | |||
public string unique_name(string name, bool mark_as_used = true) | |||
{ | |||
if (name.EndsWith("basic_r_n_n_cell")) | |||
{ | |||
} | |||
if (!String.IsNullOrEmpty(_name_stack)) | |||
name = _name_stack + "/" + name; | |||
@@ -411,7 +411,7 @@ namespace Tensorflow | |||
// Return the new name with the original capitalization of the given name. | |||
name = $"{name}_{i - 1}"; | |||
} | |||
return name; | |||
} | |||
@@ -424,7 +424,7 @@ namespace Tensorflow | |||
unsafe | |||
{ | |||
var tf_output_ptr = (TF_Output*)return_output_handle; | |||
for (int i = 0; i < num_return_outputs; i++) | |||
return_outputs[i] = *(tf_output_ptr + i); | |||
return return_outputs; | |||
} | |||
@@ -444,25 +444,25 @@ namespace Tensorflow | |||
{ | |||
List<T> t = default; | |||
var collection = _collections.ContainsKey(name) ? _collections[name] : new List<T>(); | |||
switch (collection) | |||
{ | |||
case List<VariableV1> list: | |||
t = list.Select(x => (T)(object)x).ToList(); | |||
break; | |||
case List<ResourceVariable> list: | |||
t = list.Select(x => (T)(object)x).ToList(); | |||
break; | |||
case List<RefVariable> list: | |||
t = list.Select(x => (T)(object)x).ToList(); | |||
break; | |||
case List<Tensor> list: | |||
t = list.Select(x => (T)(object)x).ToList(); | |||
break; | |||
case List<Operation> list: | |||
t = list.Select(x => (T)(object)x).ToList(); | |||
break; | |||
default: | |||
throw new NotImplementedException($"get_collection<{typeof(T).FullName}>"); | |||
} | |||
return t; | |||
} | |||
@@ -482,22 +482,22 @@ namespace Tensorflow | |||
public void prevent_fetching(Operation op) | |||
{ | |||
_unfetchable_ops.Add(op); | |||
} | |||
protected override void DisposeManagedResources() | |||
{ | |||
ops.default_graph_stack.remove(this); | |||
} | |||
protected override void DisposeUnmanagedResources(IntPtr handle) | |||
{ | |||
c_api.TF_DeleteGraph(handle); | |||
} | |||
public Tensor get_tensor_by_tf_output(TF_Output tf_output) | |||
{ | |||
var op = _get_operation_by_tf_operation(tf_output.oper); | |||
return op.outputs[tf_output.index]; | |||
} | |||
/// <summary> | |||
@@ -510,48 +510,48 @@ namespace Tensorflow | |||
public Tensor get_tensor_by_name(string name) | |||
{ | |||
return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false); | |||
} | |||
public TensorShape GetTensorShape(TF_Output output) | |||
{ | |||
var status = new Status(); | |||
var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status); | |||
status.Check(); | |||
if (ndim == -1) | |||
return new TensorShape(); | |||
var dims = new long[ndim]; | |||
c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status); | |||
status.Check(); | |||
return new TensorShape(dims.Select(x => (int)x).ToArray()); | |||
} | |||
string debugString = string.Empty; | |||
public override string ToString() | |||
{ | |||
return $"{graph_key}, ({_handle})"; | |||
/*if (string.IsNullOrEmpty(debugString)) | |||
{ | |||
int len = 0; | |||
debugString = c_api.TF_GraphDebugString(_handle, out len); | |||
} | |||
return debugString;*/ | |||
} | |||
#if !SERIALIZABLE | |||
private IEnumerable<Operation> GetEnumerable() | |||
=> c_api_util.tf_operations(this); | |||
IEnumerator<Operation> IEnumerable<Operation>.GetEnumerator() | |||
=> GetEnumerable().GetEnumerator(); | |||
IEnumerator IEnumerable.GetEnumerator() | |||
=> throw new NotImplementedException(); | |||
#endif | |||
public static implicit operator IntPtr(Graph graph) | |||
{ | |||
return graph._handle; | |||
@@ -1,17 +1,17 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System.Collections.Generic; | |||
@@ -32,8 +32,8 @@ namespace Tensorflow | |||
private bool _new_stack; | |||
private ControlFlowContext _old_control_flow_context; | |||
public ITensorOrOperation[] control_inputs => _control_inputs_val.ToArray(); | |||
/// <summary> | |||
/// Create a new `_ControlDependenciesController`. | |||
/// | |||
@@ -69,7 +69,7 @@ namespace Tensorflow | |||
_new_stack = false; | |||
} | |||
_seen_nodes = new List<ITensorOrOperation>(); | |||
_old_stack = null; | |||
_old_control_flow_context = null; | |||
} | |||
@@ -113,16 +113,16 @@ namespace Tensorflow | |||
public void Dispose() | |||
{ | |||
} | |||
public void __init__() | |||
{ | |||
} | |||
public void __del__() | |||
{ | |||
} | |||
} | |||
} |
@@ -1,324 +1,324 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Linq; | |||
using System.Collections.Generic; | |||
using util = Tensorflow.control_flow_util; | |||
using static Tensorflow.Binding; | |||
namespace Tensorflow.Operations.ControlFlows | |||
{ | |||
/// <summary> | |||
/// Maintain the mapping from the loops to their grad states. | |||
/// </summary> | |||
public class ControlFlowState | |||
{ | |||
Dictionary<ControlFlowContext, GradLoopState> _map; | |||
//class ControlFlowState(object): | |||
// """Maintain the mapping from the loops to their grad states.""" | |||
// def __init__(self): | |||
// self._map = {} # maps forward loop context to GradLoopState | |||
// def GetGradState(self, op, before): | |||
// """Return the grad state for this op if it's in a forward loop context.""" | |||
// if before and util.IsLoopExit(op): | |||
// forward_ctxt = op._get_control_flow_context() | |||
// forward_ctxt = forward_ctxt.outer_context | |||
// if forward_ctxt: | |||
// forward_ctxt = forward_ctxt.GetWhileContext() | |||
// else: | |||
// forward_ctxt = _GetWhileContext(op) | |||
// if forward_ctxt: | |||
// return self._map.get(forward_ctxt) | |||
// return None | |||
public ControlFlowState() | |||
{ | |||
_map = new Dictionary<ControlFlowContext, GradLoopState>(); | |||
} | |||
/// <summary> | |||
/// Return the grad state for this op if it's in a forward loop context. | |||
/// </summary> | |||
/// <param name="op"></param> | |||
/// <param name="before"></param> | |||
/// <returns></returns> | |||
public GradLoopState GetGradState(Operation op, bool before) | |||
{ | |||
ControlFlowContext forward_ctxt = null; | |||
if (before && util.IsLoopExit(op)) | |||
{ | |||
forward_ctxt = op._get_control_flow_context(); | |||
forward_ctxt = forward_ctxt.outer_context; | |||
if (forward_ctxt != null) | |||
forward_ctxt = forward_ctxt.GetWhileContext(); | |||
} | |||
else | |||
forward_ctxt = util.GetWhileContext(op); | |||
if (forward_ctxt != null) | |||
return _map.get(forward_ctxt); | |||
return null; | |||
} | |||
public Tensor[] ProcessUnusedLoopExits(Dictionary<string, int> pending_count, List<Operation> to_ops_set) | |||
{ | |||
var loop_exits = new List<Tensor>(); | |||
foreach(var grad_state in _map.Values) | |||
{ | |||
foreach(var y in grad_state.forward_loop_exits) | |||
{ | |||
if(!pending_count.ContainsKey(y.op.name)) | |||
{ | |||
grad_state.pending_exits_count -= 1; | |||
if (!to_ops_set.Contains(y.op)) | |||
grad_state.unused_exits.append(y); | |||
if (grad_state.pending_exits_count == 0) | |||
loop_exits.extend(grad_state.unused_exits); | |||
} | |||
} | |||
foreach(var y in grad_state.forward_context.loop_enters) | |||
{ | |||
if (!pending_count.ContainsKey(y.op.name)) | |||
pending_count[y.op.name] = 1; | |||
} | |||
} | |||
return loop_exits.ToArray(); | |||
} | |||
public void EnterGradWhileContext(Operation op, bool before) | |||
{ | |||
var grad_state = GetGradState(op, before); | |||
if (grad_state != null) | |||
grad_state.grad_context.Enter(); | |||
} | |||
public void ExitGradWhileContext(Operation op, bool before) | |||
{ | |||
var grad_state = GetGradState(op, before); | |||
if (grad_state != null) | |||
grad_state.grad_context.Exit(); | |||
} | |||
// def AddWhileContext(self, op, between_op_list, between_ops): | |||
// """Add the grad state for the while loop that op belongs to. | |||
// Note that op is an Exit, and this method must be called in | |||
// the control flow context where gradients() is called. | |||
// Note that this method modifies `between_op_list` and `between_ops`. | |||
// """ | |||
// forward_ctxt = _GetWhileContext(op) | |||
// grad_state = self._map.get(forward_ctxt) | |||
// if grad_state is None: | |||
// # This is a new while loop so create a grad state for it. | |||
// outer_forward_ctxt = forward_ctxt.outer_context | |||
// if outer_forward_ctxt: | |||
// outer_forward_ctxt = outer_forward_ctxt.GetWhileContext() | |||
// outer_grad_state = None | |||
// if outer_forward_ctxt: | |||
// outer_grad_state = self._map.get(outer_forward_ctxt) | |||
// grad_state = GradLoopState(forward_ctxt, outer_grad_state) | |||
// self._map[forward_ctxt] = grad_state | |||
// # We need to include all exits of a loop for backprop. | |||
// for loop_exit in grad_state.forward_loop_exits: | |||
// if loop_exit.op not in between_ops: | |||
// between_ops.add(loop_exit.op) | |||
// between_op_list.append(loop_exit.op) | |||
public void AddWhileContext(Operation op, List<Operation> between_op_list, List<Operation> between_ops) | |||
{ | |||
var forward_ctxt = op.GetWhileContext(); | |||
var grad_state = _map.ContainsKey(forward_ctxt) ? _map[forward_ctxt] : null; | |||
if(grad_state == null) | |||
{ | |||
GradLoopState outer_grad_state = null; | |||
var outer_forward_ctxt = forward_ctxt.outer_context; | |||
if (outer_forward_ctxt != null) | |||
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext(); | |||
if (outer_forward_ctxt != null) | |||
outer_grad_state = _map[outer_forward_ctxt]; | |||
grad_state = new GradLoopState(forward_ctxt, outer_grad_state); | |||
_map[forward_ctxt] = grad_state; | |||
// We need to include all exits of a loop for backprop. | |||
foreach (var loop_exit in grad_state.forward_loop_exits) | |||
{ | |||
if(!between_ops.Contains(loop_exit.op)) | |||
{ | |||
between_ops.add(loop_exit.op); | |||
between_op_list.append(loop_exit.op); | |||
} | |||
} | |||
} | |||
} | |||
// def ZerosLikeForExit(self, val): | |||
// """Create zeros_like gradient for a loop exit. | |||
// If the result of a loop variable is not used but is involved in | |||
// computing the result of some needed loop variable, we create a | |||
// zero-valued tensor that is fed as gradient for the Exit node of that | |||
// loop variable. Note that val.op is an Exit, and this method must be | |||
// called in the control flow context where gradients() is called. | |||
// Args: | |||
// val: The output tensor of an Exit op. | |||
// Returns: | |||
// A zero tensor of the same shape of val. | |||
// """ | |||
// val_shape = val.get_shape() | |||
// forward_ctxt = val.op._get_control_flow_context() | |||
// outer_forward_ctxt = forward_ctxt.outer_context | |||
// if outer_forward_ctxt: | |||
// outer_forward_ctxt = outer_forward_ctxt.GetWhileContext() | |||
// outer_grad_state = None | |||
// if outer_forward_ctxt: | |||
// outer_grad_state = self._map.get(outer_forward_ctxt) | |||
// if outer_grad_state: | |||
// # This is a nested loop. | |||
// if val_shape.is_fully_defined(): | |||
// # If the shape is known statically, just create a zero tensor | |||
// # with the right shape in the right context. | |||
// outer_grad_state.grad_context.Enter() | |||
// result = array_ops.zeros(val_shape.dims, val.dtype) | |||
// outer_grad_state.grad_context.Exit() | |||
// else: | |||
// # Only the shape of value is needed for backprop. | |||
// forward_ctxt.outer_context.Enter() | |||
// shape = array_ops.shape_internal(val, optimize=False) | |||
// forward_ctxt.outer_context.Exit() | |||
// # Save the shape to a stack. | |||
// history_shape = outer_grad_state.AddForwardAccumulator(shape) | |||
// # Get the shape back from the stack. | |||
// outer_grad_ctxt = outer_grad_state.grad_context | |||
// outer_grad_ctxt.Enter() | |||
// real_shape = outer_grad_state.AddBackpropAccumulatedValue( | |||
// history_shape, shape) | |||
// result = array_ops.zeros(real_shape, val.dtype) | |||
// outer_grad_ctxt.Exit() | |||
// else: | |||
// # This is not a nested loop. | |||
// if val_shape.is_fully_defined(): | |||
// # If the shape is known statically, just create a zero tensor | |||
// # with the right shape. | |||
// result = array_ops.zeros(val_shape.dims, val.dtype) | |||
// else: | |||
// result = array_ops.zeros_like(val, optimize=False) | |||
// return result | |||
public Tensor ZerosLike(Operation op, int index) | |||
{ | |||
if (util.IsLoopSwitch(op)) | |||
return null; | |||
if (op.graph.building_function) | |||
return array_ops.zeros_like(op.outputs[index]); | |||
var dead_branch = util.IsSwitch(op); | |||
var forward_ctxt = util.GetWhileContext(op); | |||
var grad_state = _map.get(forward_ctxt); | |||
// op is not in a while loop that is part of gradients(). | |||
if (grad_state == null) | |||
return ZerosLikeOutsideLoop(op, index); | |||
throw new NotImplementedException("ZerosLike"); | |||
} | |||
public Tensor ZerosLikeOutsideLoop(Operation op, int index) | |||
{ | |||
var val = op.outputs[index]; | |||
if (!util.IsSwitch(op)) | |||
{ | |||
if (val.dtype == dtypes.resource) | |||
throw new NotImplementedException("ZerosLikeOutsideLoop"); | |||
/*return array_ops.zeros( | |||
gen_resource_variable_ops.variable_shape(val), | |||
dtype: default_gradient.get_zeros_dtype(val));*/ | |||
return array_ops.zeros_like(val, optimize: false); | |||
} | |||
else | |||
throw new NotImplementedException("ZerosLikeOutsideLoop"); | |||
} | |||
/// <summary> | |||
/// Create zeros_like gradient for a loop exit. | |||
/// </summary> | |||
/// <param name="val"></param> | |||
/// <returns></returns> | |||
public Tensor ZerosLikeForExit(Tensor val) | |||
{ | |||
Tensor result = null; | |||
var val_shape = val.TensorShape; | |||
var forward_ctxt = val.op._get_control_flow_context(); | |||
var outer_forward_ctxt = forward_ctxt.outer_context; | |||
if (outer_forward_ctxt != null) | |||
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext(); | |||
GradLoopState outer_grad_state = null; | |||
if (outer_forward_ctxt != null) | |||
outer_grad_state = _map.get(outer_forward_ctxt); | |||
// This is a nested loop. | |||
if (outer_grad_state != null) | |||
{ | |||
throw new NotImplementedException("ZerosLikeForExit"); | |||
} | |||
else | |||
{ | |||
// If the shape is known statically, just create a zero tensor | |||
// with the right shape. | |||
if (val_shape.is_fully_defined()) | |||
result = array_ops.zeros(val_shape.dims, val.dtype); | |||
else | |||
result = array_ops.zeros_like(val, optimize: false); | |||
} | |||
return result; | |||
} | |||
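// Usage sketch (illustrative only; `state` and `exit_val` are hypothetical
// names): when the Exit output `exit_val` of a while loop is unused but still
// needs a gradient, the gradient builder can request
//     var zero_grad = state.ZerosLikeForExit(exit_val);
// which builds array_ops.zeros(shape, dtype) when the shape is static and
// falls back to array_ops.zeros_like(exit_val, optimize: false) otherwise.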
public void PostProcessing() | |||
{ | |||
foreach(var grad_state in _map.Values) | |||
{ | |||
foreach(var b_merge in grad_state.switch_map.Values) | |||
{ | |||
if(b_merge.op.inputs[0] == b_merge.op.inputs[1]) | |||
{ | |||
Tensor next_grad_val = null; | |||
// The value of this loop variable at iteration i+1 doesn't | |||
// depend on its value at iteration i. So use zeros as the | |||
// gradients for all iterations > 0. | |||
var dtype = b_merge.op.inputs[0].dtype; | |||
var shape = b_merge.op.inputs[0].TensorShape; | |||
if (shape.is_fully_defined()) | |||
{ | |||
grad_state.grad_context.Enter(); | |||
// Create a zeros and use it for iterations > 0. | |||
var grad_val = constant_op.constant(0, dtype: dtype, shape: shape); | |||
next_grad_val = control_flow_ops._NextIteration(grad_val); | |||
grad_state.grad_context.Exit(); | |||
} | |||
else | |||
{ | |||
throw new NotImplementedException("PostProcessing shape is not fully defined."); | |||
} | |||
b_merge.op._update_input(1, next_grad_val); | |||
} | |||
} | |||
} | |||
} | |||
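// Worked example for the Merge check above (hypothetical loop variable): if
// the loop body assigns `v = tf.constant(1)` every iteration, the backprop
// Merge for v carries the same tensor on both inputs, so PostProcessing
// rewires input 1 to a zero constant routed through _NextIteration for all
// iterations > 0.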
} | |||
} | |||
@@ -1,335 +1,335 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Collections; | |||
using System.Collections.Generic; | |||
using System.Linq; | |||
using static Tensorflow.Binding; | |||
using util = Tensorflow.control_flow_util; | |||
namespace Tensorflow.Operations.ControlFlows | |||
{ | |||
/// <summary> | |||
/// The state used for constructing the gradient graph for a while loop. | |||
/// </summary> | |||
public class GradLoopState | |||
{ | |||
private WhileContext _grad_context = null; | |||
public WhileContext grad_context => _grad_context; | |||
// # The loop counter added by AddBackpropLoopCounter. It is the value | |||
// # of the loop counter for the current iteration. | |||
// self._grad_index = None | |||
// # A sync op for backprop. | |||
// self._grad_sync = None | |||
// # Information needed by backprop. | |||
private Hashtable _history_map = new Hashtable(); | |||
public Hashtable history_map => _history_map; | |||
Dictionary<Operation, Tensor> _switch_map = new Dictionary<Operation, Tensor>(); | |||
public Dictionary<Operation, Tensor> switch_map => _switch_map; | |||
/// <summary> | |||
/// The while loop context for forward. | |||
/// </summary> | |||
WhileContext _forward_context; | |||
public WhileContext forward_context => _forward_context; | |||
/// <summary> | |||
/// The grad loop state for the outer while loop. | |||
/// </summary> | |||
GradLoopState _outer_grad_state; | |||
public GradLoopState outer_grad_state => _outer_grad_state; | |||
Tensor _forward_index; | |||
public Tensor forward_index => _forward_index; | |||
Tensor _grad_index; | |||
Tensor[] _forward_loop_exits; | |||
/// <summary> | |||
/// The list of exits of the forward loop. | |||
/// </summary> | |||
public Tensor[] forward_loop_exits => _forward_loop_exits; | |||
List<Tensor> _deferred_exits; | |||
public List<Tensor> deferred_exits => _deferred_exits; | |||
List<Tensor> _unused_exits; | |||
public List<Tensor> unused_exits => _unused_exits; | |||
/// <summary> | |||
/// The number of exits we expect to see but haven't. | |||
/// </summary> | |||
public int pending_exits_count { get; set; } | |||
Operation _grad_sync; | |||
public Operation grad_sync | |||
{ | |||
get | |||
{ | |||
if(_grad_sync == null) | |||
{ | |||
tf_with(ops.control_dependencies(null), delegate | |||
{ | |||
_grad_sync = gen_control_flow_ops.control_trigger(name: "b_sync"); | |||
}); | |||
_grad_sync._set_control_flow_context(_grad_context); | |||
_grad_index.op._add_control_input(_grad_sync); | |||
if (_grad_context.outer_context != null) | |||
_grad_context.outer_context.AddInnerOp(_grad_sync); | |||
} | |||
return _grad_sync; | |||
} | |||
} | |||
public GradLoopState(WhileContext forward_ctxt, GradLoopState outer_grad_state_) | |||
{ | |||
// Information needed by backprop. | |||
_unused_exits = new List<Tensor>(); | |||
_deferred_exits = new List<Tensor>(); | |||
_forward_loop_exits = list(forward_ctxt.loop_exits); | |||
pending_exits_count = len(forward_ctxt.loop_exits); | |||
_outer_grad_state = outer_grad_state_; | |||
ControlFlowContext outer_forward_ctxt = null; | |||
if (outer_grad_state_ != null) | |||
outer_forward_ctxt = outer_grad_state_.forward_context; | |||
// Add the forward loop counter. | |||
// with forward_ctxt._graph.as_default(): | |||
Tensor cnt, forward_index; | |||
{ | |||
if (outer_forward_ctxt != null) | |||
outer_forward_ctxt.Enter(); | |||
(cnt, forward_index) = forward_ctxt.AddForwardLoopCounter(outer_grad_state); | |||
if (outer_forward_ctxt != null) | |||
outer_forward_ctxt.Exit(); | |||
} | |||
_forward_context = forward_ctxt; | |||
_forward_index = forward_index; | |||
// Add the backprop WhileContext, and the backprop loop counter. | |||
if (outer_grad_state != null) | |||
{ | |||
// This is a nested loop. Remember the iteration counts for each | |||
// execution of this inner loop. | |||
throw new NotImplementedException("GradLoopState"); | |||
} | |||
else | |||
{ | |||
if (outer_forward_ctxt != null) | |||
outer_forward_ctxt.Enter(); | |||
_grad_context = new WhileContext( | |||
maximum_iterations: forward_ctxt.maximum_iterations, | |||
parallel_iterations: forward_ctxt.parallel_iterations, | |||
back_prop: forward_ctxt.back_prop, | |||
swap_memory: forward_ctxt.swap_memory, | |||
name: forward_ctxt.name, | |||
grad_state: this); | |||
_grad_index = _grad_context.AddBackpropLoopCounter(cnt, outer_grad_state); | |||
if (outer_forward_ctxt != null) | |||
outer_forward_ctxt.Exit(); | |||
} | |||
} | |||
/// <summary> | |||
/// Add an accumulator for each forward tensor that is needed in backprop. | |||
/// | |||
/// This is added to the forward loop at the first time when a tensor | |||
/// in the forward loop is used by backprop gradient computation loop. | |||
/// We create an accumulator that accumulates the value of tensor at each | |||
/// iteration. Called in the control flow context where gradients() is called. | |||
/// | |||
/// The pseudocode is: | |||
/// ``` | |||
/// acc = stack(); | |||
/// while (_pivot) { | |||
/// acc = stack_push(acc, value); | |||
/// } | |||
/// ``` | |||
/// | |||
/// We make sure that the stack push op in one iteration is executed before | |||
/// next iteration. This is achieved by adding a control edge from | |||
/// `forward_index.op.inputs[0].op` to the push op, and another control | |||
/// edge from the push op to either `forward_index.op` or `forward_sync`. | |||
/// </summary> | |||
/// <param name="value"> The source tensor in forward that is to be accumulated.</param> | |||
/// <param name="dead_branch"> True iff the tensor is on a dead branch of a cond.</param> | |||
/// <returns>The stack that contains the accumulated history of the tensor.</returns> | |||
public Tensor AddForwardAccumulator(Tensor value, bool dead_branch = false) | |||
{ | |||
_forward_index.graph.as_default(); | |||
{ | |||
var curr_ctxt = ops.get_default_graph()._get_control_flow_context(); | |||
return tf_with(ops.control_dependencies(null), delegate | |||
{ | |||
Tensor acc = null; | |||
Tensor push = null; | |||
if (curr_ctxt != null) | |||
curr_ctxt.Enter(); | |||
ops.colocate_with(value); | |||
{ | |||
// We only need to pass maximum_iterations to the stack if | |||
// we're inside an XLA context. | |||
var max_size = constant_op.constant(-1, dtypes.int32); | |||
acc = gen_data_flow_ops.stack_v2( | |||
max_size: max_size, elem_type: value.dtype.as_base_dtype(), name: "f_acc"); | |||
} | |||
if (curr_ctxt != null) | |||
curr_ctxt.Exit(); | |||
// Make acc available in the forward context. | |||
var enter_acc = forward_context.AddValue(acc); | |||
// Add the stack_push op in the context of value.op. | |||
var swap_enabled = forward_context.swap_memory; | |||
var value_ctxt = util.GetOutputContext(value.op); | |||
if(value_ctxt == forward_context) | |||
{ | |||
// value is not nested in the forward context. | |||
forward_context.Enter(); | |||
push = gen_data_flow_ops.stack_push_v2(enter_acc, value, swap_memory: swap_enabled); | |||
forward_context.Exit(); | |||
// Protect stack push and order it before forward_index. | |||
forward_index.op._add_control_input(push.op); | |||
} | |||
else | |||
{ | |||
throw new NotImplementedException("AddForwardAccumulator"); | |||
} | |||
// Order stack push after the successor of forward_index | |||
var add_op = forward_index.op.inputs[0].op; | |||
push.op._add_control_input(add_op); | |||
return acc; | |||
}); | |||
} | |||
} | |||
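// Control-edge sketch of what the method above wires up:
//
//   forward_index.op.inputs[0].op --ctrl--> push.op --ctrl--> forward_index.op
//
// so the stack push of iteration i must finish before the loop counter
// advances, and therefore before iteration i + 1 starts.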
// """Add the getter for an accumulated value in the grad context. | |||
// | |||
// This is added to the backprop loop. Called in the grad context to | |||
// get the value of an accumulated value. The stack pop op must be guarded | |||
// by the pred of the controlling cond. | |||
// | |||
// Args: | |||
// history_value: The history (a stack) of a value. | |||
// value: The value that is pushed onto the stack. | |||
// dead_branch: True iff the tensor is on a dead branch of a cond. | |||
// | |||
// Returns: | |||
// The current value (the top of the stack). | |||
// """ | |||
public Tensor AddBackpropAccumulatedValue(Tensor history_value, Tensor value, bool dead_branch= false) | |||
{ | |||
var history_ctxt = history_value.op._get_control_flow_context(); | |||
// Find the cond context that controls history_value if any. | |||
CondContext cond_ctxt = null; | |||
Tensor pop = null; | |||
var value_ctxt = value.op._get_control_flow_context(); | |||
while(value_ctxt != null && value_ctxt != history_ctxt) | |||
{ | |||
if (value_ctxt is CondContext cc) | |||
cond_ctxt = cc; | |||
value_ctxt = value_ctxt.outer_context; | |||
} | |||
tf_with(ops.control_dependencies(null), delegate | |||
{ | |||
grad_context.Enter(); | |||
if(cond_ctxt != null) | |||
{ | |||
throw new NotImplementedException("AddBackpropAccumulatedValue"); | |||
} | |||
pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.as_base_dtype()); | |||
pop.set_shape(value.TensorShape); | |||
grad_context.Exit(); | |||
}); | |||
var parallel_iterations = grad_context.parallel_iterations; | |||
if (parallel_iterations > 1) | |||
// All pops are ordered after pivot_for_body and before grad_sync. | |||
grad_sync._add_control_input(pop.op); | |||
return pop; | |||
} | |||
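// Typical pairing (see GetRealValue below): the forward loop records a value
// with AddForwardAccumulator and the backprop loop pops it back here, e.g.
//     var history_value = grad_state.AddForwardAccumulator(cur_value);
//     var real_value = grad_state.AddBackpropAccumulatedValue(history_value, cur_value);
// (`grad_state` and `cur_value` are illustrative names.)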
/// <summary> | |||
/// Get the real value of `value`. | |||
/// </summary> | |||
/// <param name="value">A tensor to be captured.</param> | |||
/// <returns>The same tensor obtained from the saved history.</returns> | |||
public Tensor GetRealValue(Tensor value) | |||
{ | |||
// Look up the cache first (mirrors self._history_map.get(value.name) in the
// Python original); Hashtable returns null for a missing key.
Tensor real_value = _history_map[value.name] as Tensor;
if (real_value == null)
{
var cur_value = value; | |||
var cur_grad_state = this; | |||
Tensor history_value = null; | |||
while (true) | |||
{ | |||
var enter_op = util.GetLoopConstantEnter(cur_value); | |||
if(enter_op != null) | |||
{ | |||
// Special case: cur_value comes from a constant Enter node. | |||
cur_value = enter_op.inputs[0]; | |||
cur_grad_state = cur_grad_state.outer_grad_state; | |||
if(cur_grad_state == null) | |||
{ | |||
// We are now outside all nested loops for this gradient(), | |||
// so `value` is a loop invariant and there is no need to | |||
// save the history of value. Just make cur_value enter
// the right control flow context. | |||
real_value = _grad_context.AddValue(cur_value); | |||
break; | |||
} | |||
} | |||
else if (constant_op.is_constant(cur_value)) | |||
{ | |||
// We are now outside all nested loops for this gradient(), | |||
// so `value` is a loop invariant and there is no need to | |||
// save the history of value. Just make cur_value enter
// the right control flow context. | |||
real_value = constant_op.constant( | |||
tensor_util.constant_value(cur_value), dtype: cur_value.dtype); | |||
break; | |||
} | |||
else | |||
{ | |||
// Record the history of this value in forward_ctxt. | |||
_grad_context.Exit(); | |||
history_value = cur_grad_state.AddForwardAccumulator(cur_value); | |||
_grad_context.Enter(); | |||
break; | |||
} | |||
} | |||
if(real_value == null) | |||
{ | |||
// Add the stack pop op in the grad context. | |||
real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value, cur_value); | |||
if (cur_grad_state != this) | |||
real_value = _grad_context.AddValue(real_value); | |||
} | |||
_history_map[value.name] = real_value; | |||
} | |||
return real_value; | |||
} | |||
} | |||
} | |||
@@ -1,17 +1,17 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using static Tensorflow.Binding; | |||
@@ -22,23 +22,23 @@ namespace Tensorflow.Operations | |||
{ | |||
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
/// <summary> | |||
/// Computes a 2-D convolution given 4-D `input` and `filter` tensors. | |||
/// | |||
/// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` | |||
/// and a filter / kernel tensor of shape | |||
/// `[filter_height, filter_width, in_channels, out_channels]`, this op | |||
/// performs the following: | |||
/// | |||
/// 1. Flattens the filter to a 2-D matrix with shape | |||
/// `[filter_height * filter_width * in_channels, output_channels]`. | |||
/// 2. Extracts image patches from the input tensor to form a *virtual* | |||
/// tensor of shape `[batch, out_height, out_width, | |||
/// filter_height * filter_width * in_channels]`. | |||
/// 3. For each patch, right-multiplies the filter matrix and the image patch | |||
/// vector. | |||
/// </summary> | |||
/// <param name="parameters"></param> | |||
/// <returns></returns> | |||
public static Tensor conv2d(Conv2dParams parameters) | |||
{ | |||
@@ -55,15 +55,15 @@ namespace Tensorflow.Operations | |||
}); | |||
return _op.outputs[0]; | |||
} | |||
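// Worked shape example for the conv2d summary above (illustrative numbers):
// input [1, 28, 28, 3] with filter [5, 5, 3, 8], stride 1, VALID padding:
//   1. the filter flattens to a [5*5*3, 8] = [75, 8] matrix;
//   2. patches form a virtual [1, 24, 24, 75] tensor (24 = 28 - 5 + 1);
//   3. each 75-element patch times the [75, 8] matrix yields the
//      [1, 24, 24, 8] output.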
/// <summary> | |||
/// Computes the gradients of convolution with respect to the filter. | |||
/// </summary> | |||
/// <param name="parameters"></param> | |||
/// <returns></returns> | |||
public static Tensor conv2d_backprop_filter(Conv2dParams parameters) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Conv2DBackpropFilter", name: parameters.Name, args: new | |||
{ | |||
input = parameters.Input, | |||
@@ -77,16 +77,16 @@ namespace Tensorflow.Operations | |||
dilations = parameters.Dilations | |||
}); | |||
return _op.outputs[0]; | |||
} | |||
/// <summary>
/// Computes the gradients of convolution with respect to the input.
/// </summary>
/// <param name="parameters"></param>
/// <returns></returns>
public static Tensor conv2d_backprop_input(Conv2dParams parameters)
{
var _op = _op_def_lib._apply_op_helper("Conv2DBackpropInput", name: parameters.Name, args: new | |||
{ | |||
input_sizes = parameters.InputSizes, | |||
@@ -100,7 +100,7 @@ namespace Tensorflow.Operations | |||
dilations = parameters.Dilations | |||
}); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor bias_add(Tensor value, | |||
@@ -135,56 +135,73 @@ namespace Tensorflow.Operations | |||
}); | |||
return _op.outputs[0]; | |||
} | |||
/// <summary> | |||
/// Computes exponential linear: <c>exp(features) - 1</c> if &lt; 0, <c>features</c> otherwise. | |||
/// </summary> | |||
/// <param name="features"> | |||
/// </param> | |||
/// <param name="name"> | |||
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'. | |||
/// </param> | |||
/// <returns> | |||
/// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. | |||
/// </returns> | |||
/// <remarks> | |||
/// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) | |||
/// ](http://arxiv.org/abs/1511.07289) | |||
/// </remarks> | |||
public static Tensor elu(Tensor features, string name = "Elu") | |||
{ | |||
var op = _op_def_lib._apply_op_helper("Elu", name: name, args: new { features }); | |||
return op.output; | |||
} | |||
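// Quick numeric check of the formula above: elu(2.0f) == 2.0f (positive
// inputs pass through) and elu(-1.0f) == exp(-1) - 1, roughly -0.632f.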
/// <summary> | |||
/// Gradient for batch normalization. | |||
/// </summary> | |||
/// <param name="y_backprop"></param> | |||
/// <param name="x"></param> | |||
/// <param name="scale"></param> | |||
/// <param name="reserve_space_1"></param> | |||
/// <param name="reserve_space_2"></param> | |||
/// <param name="epsilon"></param> | |||
/// <param name="data_format"></param> | |||
/// <param name="is_training"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params) | |||
{ | |||
var op = _op_def_lib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new | |||
{ | |||
y_backprop = @params.YBackprop, | |||
x = @params.X, | |||
scale = @params.Scale, | |||
reserve_space_1 = @params.ReserveSpace1, | |||
reserve_space_2 = @params.ReserveSpace2, | |||
epsilon = @params.Epsilon, | |||
data_format = @params.DataFormat, | |||
is_training = @params.IsTraining | |||
}); | |||
return op.outputs; | |||
} | |||
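/// <summary>
/// Gradient for batch normalization, V3 variant; identical to the above but
/// also threads reserve_space_3 through to the kernel.
/// </summary>
/// <param name="params"></param>
/// <returns></returns>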
public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params) | |||
{ | |||
var op = _op_def_lib._apply_op_helper("FusedBatchNormGradV3", name: @params.Name, args: new | |||
{ | |||
y_backprop = @params.YBackprop, | |||
x = @params.X, | |||
scale = @params.Scale, | |||
reserve_space_1 = @params.ReserveSpace1, | |||
reserve_space_2 = @params.ReserveSpace2, | |||
reserve_space_3 = @params.ReserveSpace3, | |||
epsilon = @params.Epsilon, | |||
data_format = @params.DataFormat, | |||
is_training = @params.IsTraining | |||
}); | |||
return op.outputs; | |||
} | |||
public static Tensor[] fused_batch_norm(Tensor x, | |||
@@ -212,19 +229,44 @@ namespace Tensorflow.Operations | |||
return _op.outputs; | |||
} | |||
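/// <summary>
/// Batch normalization, V3 variant of fused_batch_norm above; returns the
/// op's outputs.
/// </summary>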
public static Tensor[] fused_batch_norm_v3(Tensor x, | |||
Tensor scale, | |||
Tensor offset, | |||
Tensor mean, | |||
Tensor variance, | |||
float epsilon = 0.0001f, | |||
string data_format = "NHWC", | |||
bool is_training = true, | |||
string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("FusedBatchNormV3", name: name, args: new | |||
{ | |||
x, | |||
scale, | |||
offset, | |||
mean, | |||
variance, | |||
epsilon, | |||
data_format, | |||
is_training | |||
}); | |||
return _op.outputs; | |||
} | |||
/// <summary> | |||
/// Local Response Normalization. | |||
/// </summary> | |||
/// <param name="input"></param> | |||
/// <param name="depth_radius"></param> | |||
/// <param name="bias"></param> | |||
/// <param name="alpha"></param> | |||
/// <param name="beta"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1, | |||
int alpha = 1, float beta = 0.5f, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("LRN", name: name, args: new | |||
{ | |||
input, | |||
@@ -234,7 +276,7 @@ namespace Tensorflow.Operations | |||
beta | |||
}); | |||
return _op.output; | |||
} | |||
public static Tensor log_softmax(Tensor logits, string name = null) | |||
@@ -245,16 +287,16 @@ namespace Tensorflow.Operations | |||
}); | |||
return _op.output; | |||
} | |||
/// <summary> | |||
/// Says whether the targets are in the top `K` predictions. | |||
/// </summary> | |||
/// <param name="predictions"></param> | |||
/// <param name="targets"></param> | |||
/// <param name="k"></param> | |||
/// <param name="name"></param> | |||
/// <returns>A `Tensor` of type `bool`.</returns> | |||
public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("InTopKV2", name: name, args: new | |||
@@ -265,8 +307,8 @@ namespace Tensorflow.Operations | |||
}); | |||
return _op.output; | |||
} | |||
public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("LeakyRelu", name: name, args: new | |||
@@ -297,9 +339,9 @@ namespace Tensorflow.Operations | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, | |||
string data_format= "NHWC", string name= null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("MaxPoolGrad", name: name, args: new | |||
{ | |||
orig_input, | |||
@@ -311,7 +353,7 @@ namespace Tensorflow.Operations | |||
data_format | |||
}); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor[] top_kv2(Tensor input, int k, bool sorted = true, string name = null) | |||
@@ -335,8 +377,8 @@ namespace Tensorflow.Operations | |||
}); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("LeakyReluGrad", name: name, args: new | |||
@@ -377,81 +419,81 @@ namespace Tensorflow.Operations | |||
return (_op.outputs[0], _op.outputs[1]); | |||
} | |||
} | |||
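// Illustrative usage (not part of the generated wrapper; `logits` and `labels`
// stand for a [batch_size, num_classes] float tensor and a [batch_size] int32
// tensor, per the XML docs above):
//
//   var (loss, backprop) = sparse_softmax_cross_entropy_with_logits(logits, labels);
//   // loss:     per-example cross entropy, shape [batch_size]
//   // backprop: gradient w.r.t. the logits, shape [batch_size, num_classes]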
/// <summary> | |||
/// Computes rectified linear: `max(features, 0)`. | |||
/// </summary> | |||
/// <param name="features">A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`.</param> | |||
/// <param name="name">A name for the operation (optional).</param> | |||
/// <returns>A `Tensor`. Has the same type as `features`.</returns> | |||
public static Tensor relu(Tensor features, string name = null) | |||
{ | |||
//_ctx = _context._context | |||
//if _ctx is not None and _ctx._eager_context.is_eager: | |||
// try: | |||
// _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( | |||
// _ctx._context_handle, _ctx._eager_context.device_name, "Relu", name, | |||
// _ctx._post_execution_callbacks, features) | |||
// return _result | |||
// except _core._FallbackException: | |||
// try: | |||
// return relu_eager_fallback( | |||
// features, name=name, ctx=_ctx) | |||
// except _core._SymbolicException: | |||
// pass # Add nodes to the TensorFlow graph. | |||
// except (TypeError, ValueError): | |||
// result = _dispatch.dispatch( | |||
// relu, features=features, name=name) | |||
// if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: | |||
// return result | |||
// raise | |||
// except _core._NotOkStatusException as e: | |||
// if name is not None: | |||
// message = e.message + " name: " + name | |||
// else: | |||
// message = e.message | |||
// _six.raise_from(_core._status_to_exception(e.code, message), None) | |||
//# Add nodes to the TensorFlow graph. | |||
//try: | |||
OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
var _op = _op_def_lib._apply_op_helper("Relu", name: name, args: new { features }); | |||
return _op.outputs[0]; | |||
//except (TypeError, ValueError): | |||
// result = _dispatch.dispatch( | |||
// relu, features=features, name=name) | |||
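// Illustrative behavior (example values assumed): relu clamps negatives to zero
// element-wise, so an input of [-2f, 0f, 3f] yields [0f, 0f, 3f]:
//
//   var y = relu(x);   // same shape and dtype as x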
@@ -1,68 +1,68 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using Tensorflow.Operations; | |||
using static Tensorflow.Binding; | |||
namespace Tensorflow | |||
{ | |||
public partial class Operation | |||
{ | |||
private ControlFlowContext _control_flow_context; | |||
/// <summary> | |||
/// Add this op to its control flow context. | |||
/// | |||
/// This may add new ops and change this op's inputs. The op's inputs must be
/// available before calling this method.
/// </summary> | |||
public void _control_flow_post_processing() | |||
{ | |||
foreach(Tensor input_tensor in inputs) | |||
control_flow_util.CheckInputFromValidContext(this, input_tensor.op); | |||
if (_control_flow_context != null) | |||
_control_flow_context.AddOp(this); | |||
} | |||
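// The call order implied above: the graph's current control flow context is
// captured into _control_flow_context when the op is constructed; this method
// then validates each input against that context
// (control_flow_util.CheckInputFromValidContext) and hands the op to the
// context via AddOp.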
public void _add_control_input(Operation op) | |||
{ | |||
//c_api.TF_AddControlInput(_operDesc, op); | |||
c_api.AddControlInput(graph, _handle, op); | |||
} | |||
public void _add_control_inputs(Operation[] ops) | |||
{ | |||
foreach (var op in ops) | |||
_add_control_input(op); | |||
} | |||
public void _set_control_flow_context(ControlFlowContext ctx) | |||
{ | |||
_control_flow_context = ctx; | |||
} | |||
public ControlFlowContext _get_control_flow_context() | |||
{ | |||
return _control_flow_context; | |||
} | |||
public WhileContext GetWhileContext() | |||
{ | |||
return _control_flow_context as WhileContext; | |||
} | |||
} | |||
} | |||
@@ -27,12 +27,16 @@ namespace Tensorflow | |||
public static implicit operator Operation(IntPtr handle) | |||
=> new Operation(handle); | |||
public static implicit operator IntPtr(Operation op) | |||
=> op._handle; | |||
public static implicit operator Tensor(Operation op) | |||
=> op.output; | |||
public static implicit operator RefVariable(Operation op) | |||
=> new RefVariable(op); | |||
public override string ToString() | |||
{ | |||
return _handle == IntPtr.Zero ? "tf.Operation Undefined" : $"<tf.Operation '{name}' type={OpType}>"; | |||
} | |||
public override bool Equals(object obj) | |||
@@ -1,109 +1,114 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Linq; | |||
using System.Runtime.InteropServices; | |||
#if SERIALIZABLE | |||
using Newtonsoft.Json; | |||
#endif | |||
namespace Tensorflow | |||
{ | |||
// from ops.py | |||
public partial class Operation | |||
{ | |||
public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index)); | |||
public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index)); | |||
public int InputListLength(string name) | |||
{ | |||
int num = 0; | |||
using(var status = new Status()) | |||
{ | |||
num = c_api.TF_OperationInputListLength(_handle, name, status); | |||
status.Check(true); | |||
} | |||
return num; | |||
} | |||
public int NumInputs => c_api.TF_OperationNumInputs(_handle); | |||
private TF_DataType[] _input_types => _inputs_val._inputs.Select(x => x.dtype).ToArray(); | |||
private InputList _inputs_val; | |||
public InputList inputs | |||
{ | |||
get | |||
{ | |||
if (_inputs_val == null) | |||
{ | |||
var retval = new Tensor[NumInputs]; | |||
for (int i = 0; i < NumInputs; i++) | |||
{ | |||
var tf_output = Input(i); | |||
var op = GetOperation(tf_output.oper); | |||
retval[i] = op.outputs[tf_output.index]; | |||
} | |||
_inputs_val = new InputList(retval); | |||
} | |||
return _inputs_val; | |||
} | |||
} | |||
public int NumControlInputs | |||
=> _handle == IntPtr.Zero ? 0 : c_api.TF_OperationNumControlInputs(_handle); | |||
Operation[] _control_inputs; | |||
/// <summary> | |||
/// The `Operation` objects on which this op has a control dependency. | |||
/// | |||
/// Before this op is executed, TensorFlow will ensure that the | |||
/// operations in `control_inputs` have finished executing. This
/// mechanism can be used to run ops sequentially for performance | |||
/// reasons, or to ensure that the side effects of an op are observed | |||
/// in the correct order. | |||
/// </summary> | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
public Operation[] control_inputs | |||
{ | |||
get | |||
{ | |||
if (_control_inputs == null || _control_inputs.Length == 0) | |||
_control_inputs = GetControlInputs(); | |||
return _control_inputs; | |||
} | |||
} | |||
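// Illustrative consequence of the caching above (names hypothetical): after
// b._add_control_input(a), b.control_inputs contains a, and TensorFlow will not
// start b until a has finished executing. Note the cached _control_inputs array
// is only refreshed while it is null or empty, so control inputs added after the
// first non-empty read are not reflected by this property.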
public unsafe Operation[] GetControlInputs() | |||
{ | |||
var control_inputs = new Operation[NumControlInputs]; | |||
if (NumControlInputs > 0) | |||
{ | |||
IntPtr control_input_handle = Marshal.AllocHGlobal(Marshal.SizeOf<IntPtr>() * NumControlInputs);
try
{
    c_api.TF_OperationGetControlInputs(_handle, control_input_handle, NumControlInputs);
    for (int i = 0; i < NumControlInputs; i++)
    {
        var handle = control_input_handle + Marshal.SizeOf<IntPtr>() * i;
        control_inputs[i] = new Operation(*(IntPtr*)handle);
    }
}
finally
{
    // Release the unmanaged buffer so repeated calls do not leak native memory.
    Marshal.FreeHGlobal(control_input_handle);
}
}
return control_inputs; | |||
} | |||
} | |||
} |
@@ -26,9 +26,6 @@ namespace Tensorflow | |||
{ | |||
public partial class Operation | |||
{ | |||
public int NumOutputs => c_api.TF_OperationNumOutputs(_handle); | |||
public TF_DataType OutputType(int index) => c_api.TF_OperationOutputType(_tf_output(index)); | |||
@@ -45,6 +42,9 @@ namespace Tensorflow | |||
} | |||
private Tensor[] _outputs; | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
public Tensor[] outputs => _outputs; | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
@@ -1,27 +1,27 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using Google.Protobuf.Collections; | |||
#if SERIALIZABLE | |||
using Newtonsoft.Json; | |||
#endif | |||
using System; | |||
using System.Collections.Generic; | |||
using System.IO; | |||
using System.Linq; | |||
using Tensorflow.Util; | |||
using static Tensorflow.Binding; | |||
@@ -47,26 +47,26 @@ namespace Tensorflow | |||
/// </summary> | |||
public partial class Operation : ITensorOrOperation | |||
{ | |||
private readonly IntPtr _handle; // _c_op in python | |||
private readonly Graph _graph; | |||
private NodeDef _node_def; | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
public string type => OpType; | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
public Graph graph => _graph; | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
public int _id => _id_value; | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
public int _id_value { get; set; } | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
@@ -74,11 +74,14 @@ namespace Tensorflow | |||
public TF_DataType dtype => TF_DataType.DtInvalid; | |||
public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle)); | |||
public string OpType => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationOpType(_handle)); | |||
public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle)); | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
bool _is_stateful; | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
@@ -103,7 +106,7 @@ namespace Tensorflow | |||
_outputs = new Tensor[NumOutputs]; | |||
for (int i = 0; i < NumOutputs; i++) | |||
_outputs[i] = new Tensor(this, i, OutputType(i)); | |||
// Dict mapping op name to file and line information for op colocation | |||
// context managers. | |||
_control_flow_context = _graph._get_control_flow_context(); | |||
@@ -151,7 +154,6 @@ namespace Tensorflow | |||
public Operation(NodeDef node_def, Graph g, Tensor[] inputs = null, TF_DataType[] output_types = null, ITensorOrOperation[] control_inputs = null, TF_DataType[] input_types = null, string original_op = "", OpDef op_def = null) | |||
{ | |||
_graph = g; | |||
// Build the list of control inputs. | |||
var control_input_ops = new List<Operation>(); | |||
if (control_inputs != null) | |||
@@ -176,17 +178,17 @@ namespace Tensorflow | |||
} | |||
} | |||
_id_value = _graph._next_id(); | |||
// Dict mapping op name to file and line information for op colocation | |||
// context managers. | |||
_control_flow_context = graph._get_control_flow_context(); | |||
// This will be set by self.inputs. | |||
if (op_def == null) | |||
op_def = g.GetOpDef(node_def.Op); | |||
var grouped_inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.Attr); | |||
_handle = ops._create_c_op(g, node_def, grouped_inputs, control_input_ops.ToArray()); | |||
_is_stateful = op_def.IsStateful; | |||
@@ -427,7 +427,7 @@ namespace Tensorflow | |||
if (!tf.context.executing_eagerly()) | |||
{ | |||
var input_tensor = ops.convert_to_tensor(input); | |||
var input_shape = input_tensor.TensorShape; | |||
if (optimize && input_tensor.NDims > -1 && input_shape.is_fully_defined()) | |||
{ | |||
var nd = np.array(input_tensor.shape).astype(out_type.as_numpy_dtype()); | |||
@@ -1,21 +1,21 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Linq; | |||
using Tensorflow.Operations; | |||
using static Tensorflow.Binding; | |||
@@ -31,26 +31,26 @@ namespace Tensorflow | |||
public static bool IsLoopExit(Operation op) | |||
{ | |||
return op.type == "Exit" || op.type == "RefExit"; | |||
} | |||
/// <summary> | |||
/// Returns true if `op` is an Enter. | |||
/// </summary> | |||
/// <param name="op"></param> | |||
/// <returns></returns> | |||
public static bool IsLoopEnter(Operation op) | |||
{ | |||
return op.type == "Enter" || op.type == "RefEnter"; | |||
} | |||
/// <summary> | |||
/// Return true iff op is a loop invariant. | |||
/// </summary> | |||
/// <param name="op"></param> | |||
/// <returns></returns> | |||
public static bool IsLoopConstantEnter(Operation op) | |||
{ | |||
return IsLoopEnter(op) && op.get_attr<bool>("is_constant"); | |||
} | |||
/// <summary> | |||
@@ -61,141 +61,141 @@ namespace Tensorflow | |||
public static bool IsSwitch(Operation op) | |||
{ | |||
return op.type == "Switch" || op.type == "RefSwitch"; | |||
} | |||
public static WhileContext GetWhileContext(Operation op) | |||
=> op.GetWhileContext(); | |||
public static bool IsCondSwitch(Operation op) | |||
{ | |||
if (!IsSwitch(op)) | |||
return false; | |||
if (op.outputs == null || op.outputs.Length == 0) | |||
return false; | |||
// Switch nodes are not part of the cond control flow context that they | |||
// represent, so consider the consumers of its outputs to determine if it is | |||
// cond switch or not. A switch is a cond switch iff all its consumers are in | |||
// cond contexts. | |||
var is_cond_switch = true; | |||
foreach(var o in op.outputs) | |||
{ | |||
foreach(var c in o.consumers()) | |||
{ | |||
var ctxt = c._get_control_flow_context(); | |||
if (IsLoopEnter(c)) | |||
ctxt = ctxt.outer_context; | |||
is_cond_switch = is_cond_switch && (ctxt != null && ctxt.IsCondContext());
} | |||
} | |||
return is_cond_switch; | |||
} | |||
public static bool IsLoopSwitch(Operation op) | |||
{ | |||
if (IsSwitch(op)) | |||
{ | |||
var ctxt = op._get_control_flow_context(); | |||
return ctxt != null && ctxt.IsWhileContext() && !IsCondSwitch(op); | |||
} | |||
return false; | |||
} | |||
/// <summary> | |||
/// Return the control flow context for the output of an op. | |||
/// </summary> | |||
public static ControlFlowContext GetOutputContext(Operation op) | |||
{ | |||
var ctxt = op._get_control_flow_context(); | |||
// Exit nodes usually have a control flow context, except in the case where the | |||
// exit node was imported via import_graph_def (in which case no nodes have | |||
// control flow contexts). | |||
if (ctxt != null && IsLoopExit(op)) | |||
ctxt = ctxt.outer_context; | |||
return ctxt; | |||
} | |||
public static void CheckInputFromValidContext(Operation op, Operation input_op) | |||
{ | |||
var op_ctxt = op._get_control_flow_context(); | |||
var input_ctxt = GetOutputContext(input_op); | |||
var valid = false; | |||
if (input_ctxt == null) | |||
valid = true; | |||
else if (op_ctxt == input_ctxt) | |||
valid = true; | |||
else | |||
{ | |||
var while_ctxt = GetContainingWhileContext(op_ctxt); | |||
var input_while_ctxt = GetContainingWhileContext(input_ctxt); | |||
if (while_ctxt == null) | |||
{ | |||
// Neither op nor input_op is in a while loop, but one or both are in | |||
// conds. We allow this, although execution will fail if the branch | |||
// corresponding to input_op's cond context isn't taken. | |||
if (input_while_ctxt == null) | |||
valid = true; | |||
// Invalid if op isn't in a while loop and input_op is. Unless... | |||
if (IsLoopEnter(op)) | |||
// WhileContext._BuildLoop clears context for Enter nodes. | |||
valid = true; | |||
if (IsSwitch(op)) | |||
// CondContext.AddValue clears context for Switch nodes. | |||
valid = true; | |||
} | |||
else if (IsContainingContext(while_ctxt, input_while_ctxt)) | |||
{ | |||
// input_op is in a while loop which contains op's while loop (or not in a | |||
// while loop at all). | |||
valid = true; | |||
} | |||
else if (while_ctxt.grad_state != null && | |||
IsContainingContext(while_ctxt.grad_state.forward_context, | |||
input_while_ctxt)) | |||
{ | |||
valid = true; | |||
} | |||
else | |||
throw new NotImplementedException("CheckInputFromValidContext"); | |||
} | |||
if (!valid) | |||
{ | |||
throw new NotImplementedException("CheckInputFromValidContext"); | |||
} | |||
} | |||
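// Summary of the rules above: an input is valid when its context is null, equal
// to the op's own context, or a while context that contains the op's while
// context; gradient ops may additionally read from their forward while context.
// Enter and Switch ops are exempted because WhileContext._BuildLoop and
// CondContext.AddValue clear their contexts.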
public static Operation GetLoopConstantEnter(Tensor value) | |||
{ | |||
var id_ops = new string[] { "Switch", "RefSwitch", "Identity", "RefIdentity" }; | |||
var op = value.op; | |||
while (id_ops.Contains(op.type)) | |||
op = op.inputs[0].op; | |||
return IsLoopConstantEnter(op) ? op : null; | |||
}
public static bool IsContainingContext(WhileContext ctxt, WhileContext maybe_containing_ctxt) | |||
{ | |||
while(ctxt != maybe_containing_ctxt) | |||
{ | |||
if (ctxt == null) | |||
return false; | |||
ctxt = ctxt.outer_context as WhileContext; | |||
} | |||
return true; | |||
}
public static WhileContext GetContainingWhileContext(ControlFlowContext ctxt, ControlFlowContext stop_ctxt = null) | |||
{ | |||
while (ctxt != null) | |||
{ | |||
if (ctxt.IsWhileContext() || ctxt == stop_ctxt) | |||
return ctxt as WhileContext; | |||
ctxt = ctxt.outer_context; | |||
} | |||
return null; | |||
}
} | |||
} |
@@ -35,14 +35,17 @@ namespace Tensorflow | |||
x = ops.convert_to_tensor(x, name: "x"); | |||
if (x.dtype.is_complex()) | |||
throw new NotImplementedException("math_ops.abs for dtype.is_complex"); | |||
//return gen_math_ops.complex_abs(x, Tout: x.dtype.real_dtype, name: name); | |||
return gen_math_ops._abs(x, name: name); | |||
}); | |||
} | |||
public static Tensor add<Tx, Ty>(Tx x, Ty y, string name = null) | |||
=> gen_math_ops.add(x, y, name); | |||
public static Tensor add_v2<Tx, Ty>(Tx x, Ty y, string name = null) | |||
=> gen_math_ops.add_v2(x, y, name); | |||
/// <summary> | |||
/// Adds all input tensors element-wise. | |||
/// </summary> | |||
@@ -53,21 +56,38 @@ namespace Tensorflow | |||
{ | |||
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs); | |||
if (inputs.Length == 1) | |||
{ | |||
var values = inputs[0]; | |||
if (name != null) | |||
return array_ops.identity(values, name: name); | |||
return values; | |||
} | |||
return gen_math_ops.add_n(inputs, name: name); | |||
} | |||
public static Tensor cast(RefVariable x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
{ | |||
var base_type = dtype.as_base_dtype(); | |||
if (base_type == x.dtype) | |||
return x; | |||
return tf_with(ops.name_scope(name, "Cast", new { x }), scope => | |||
{ | |||
name = scope; | |||
var t_x = ops.convert_to_tensor(x, name: "x"); | |||
if (t_x.dtype.as_base_dtype() != base_type) | |||
t_x = gen_math_ops.cast(t_x, base_type, name: name); | |||
return t_x;
}); | |||
} | |||
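// Illustrative usage (variable name hypothetical): casting a float32 variable
// reads it as a tensor and wraps it in a Cast op; the variable itself is not
// modified.
//
//   var as_int = math_ops.cast(my_var, TF_DataType.TF_INT32);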
public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
{ | |||
var base_type = dtype.as_base_dtype(); | |||
if(base_type == x.dtype) | |||
if (base_type == x.dtype) | |||
return x; | |||
return tf_with(ops.name_scope(name, "Cast", new { x }), scope => | |||
@@ -98,13 +118,13 @@ namespace Tensorflow | |||
public static Tensor cumsum<T>(Tensor x, T axis = default, bool exclusive = false, bool reverse = false, string name = null) | |||
{ | |||
return tf_with(ops.name_scope(name, "Cumsum", new {x}), scope => | |||
{ | |||
name = scope; | |||
x = ops.convert_to_tensor(x, name: "x"); | |||
return tf_with(ops.name_scope(name, "Cumsum", new { x }), scope => | |||
{ | |||
name = scope; | |||
x = ops.convert_to_tensor(x, name: "x"); | |||
return gen_math_ops.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse, name: name); | |||
}); | |||
} | |||
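// Worked example of the flags above, for input [a, b, c]:
//   cumsum([a, b, c])                  -> [a, a + b, a + b + c]
//   cumsum([a, b, c], exclusive: true) -> [0, a, a + b]
//   cumsum([a, b, c], reverse: true)   -> [a + b + c, b + c, c]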
/// <summary> | |||
@@ -221,7 +241,7 @@ namespace Tensorflow | |||
public static Tensor reduce_mean(Tensor[] input_tensors, int? axis = null, bool keepdims = false, string name = null) | |||
{ | |||
if (axis == null) | |||
{ | |||
var r = _ReductionDims(input_tensors, axis); | |||
var m = gen_math_ops.mean(input_tensors, r, keepdims, name); | |||
@@ -263,14 +283,8 @@ namespace Tensorflow | |||
return gen_math_ops.sigmoid(x_tensor, name: name); | |||
} | |||
public static Tensor sign<T>(T x, string name = null) | |||
=> gen_math_ops.sign(x, name: name); | |||
/// <summary> | |||
/// Returns (x - y)(x - y) element-wise. | |||
@@ -355,6 +369,9 @@ namespace Tensorflow | |||
return _may_reduce_to_scalar(keepdims, axis, all); | |||
} | |||
public static Tensor realdiv(Tensor x, Tensor y, string name = null) | |||
=> gen_math_ops.real_div(x, y, name: name); | |||
/// <summary> | |||
/// Computes log(sum(exp(elements across dimensions of a tensor))). | |||
/// Reduces `input_tensor` along the dimensions given in `axis`. | |||
@@ -561,6 +578,9 @@ namespace Tensorflow | |||
public static Tensor rsqrt(Tensor x, string name = null) | |||
=> gen_math_ops.rsqrt(x, name: name); | |||
public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null) | |||
=> gen_math_ops.pow(x, y, name: name); | |||
public static Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range") | |||
{ | |||
if(limit == null) | |||
@@ -117,7 +117,7 @@ namespace Tensorflow | |||
var min_epsilon = 1.001e-5f; | |||
epsilon = epsilon > min_epsilon ? epsilon : min_epsilon; | |||
var results = gen_nn_ops.fused_batch_norm_v3(x, | |||
scale_tensor, | |||
offset_tensor, | |||
mean, | |||
@@ -1,8 +1,8 @@ | |||
using System.Collections; | |||
namespace Tensorflow.Sessions | |||
{ | |||
public class FeedDict : Hashtable | |||
{ | |||
} | |||
} | |||
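// Note: FeedDict is currently a bare, untyped Hashtable; presumably it exists so
// that session.run feed maps can be passed under a domain-specific type name.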
@@ -5,7 +5,7 @@ | |||
<AssemblyName>TensorFlow.NET</AssemblyName> | |||
<RootNamespace>Tensorflow</RootNamespace> | |||
<TargetTensorFlow>1.14.1</TargetTensorFlow> | |||
<Version>0.15.0</Version> | |||
<Authors>Haiping Chen, Meinrad Recheis, Eli Belash</Authors> | |||
<Company>SciSharp STACK</Company> | |||
<GeneratePackageOnBuild>true</GeneratePackageOnBuild> | |||
@@ -18,13 +18,15 @@ | |||
<Description>Google's TensorFlow full binding in .NET Standard. | |||
Building, training and inferring deep learning models.
https://tensorflownet.readthedocs.io</Description> | |||
<AssemblyVersion>0.15.0.0</AssemblyVersion> | |||
<PackageReleaseNotes>Changes since v0.14.0: | |||
1: Add TransformGraphWithStringInputs. | |||
2: tf.trainer.load_graph, tf.trainer.freeze_graph | |||
3: Import Protobuf.Text | |||
4: Support YOLOv3 object detection | |||
5: Add implicit conversion from Operation to RefVariable</PackageReleaseNotes>
<LangVersion>7.3</LangVersion> | |||
<FileVersion>0.15.0.0</FileVersion> | |||
<PackageLicenseFile>LICENSE</PackageLicenseFile> | |||
<PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance> | |||
<SignAssembly>true</SignAssembly> | |||
@@ -61,8 +63,8 @@ https://tensorflownet.readthedocs.io</Description> | |||
</ItemGroup> | |||
<ItemGroup> | |||
<PackageReference Include="Google.Protobuf" Version="3.11.3" /> | |||
<PackageReference Include="NumSharp" Version="0.30.0-alpha" /> | |||
<PackageReference Include="Google.Protobuf" Version="3.11.4" /> | |||
<PackageReference Include="NumSharp.Lite" Version="0.1.4" /> | |||
<PackageReference Include="Protobuf.Text" Version="0.4.0" /> | |||
</ItemGroup> | |||
@@ -328,7 +328,7 @@ namespace Tensorflow | |||
switch (name.ToLowerInvariant()) | |||
{ | |||
case "add": | |||
result = math_ops.add_v2(x1, y1, name: scope); | |||
break; | |||
case "div": | |||
result = math_ops.div(x1, y1, name: scope); | |||
@@ -115,6 +115,9 @@ namespace Tensorflow | |||
/// <summary> | |||
/// The name of the device on which this tensor will be produced, or null. | |||
/// </summary> | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
#endif | |||
public string Device => op.Device; | |||
#if SERIALIZABLE | |||
[JsonIgnore] | |||
@@ -205,7 +205,9 @@ namespace Tensorflow | |||
//} | |||
//else | |||
{ | |||
apply_updates = state_ops.assign_add(global_step, | |||
ops.convert_to_tensor(1, dtype: global_step.dtype), | |||
name: name); | |||
} | |||
}); | |||
} | |||
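// Building the increment with ops.convert_to_tensor(1, dtype: global_step.dtype)
// instead of tf.constant(1) keeps the AssignAdd operands type-compatible when
// global_step is not the default int32 (e.g. an int64 step counter).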
@@ -61,7 +61,11 @@ namespace Tensorflow | |||
{ | |||
_in_graph_mode = true; | |||
if (initial_value is Operation op)
{ | |||
_init_from_op(op); | |||
} | |||
else if (variable_def != null) | |||
{ | |||
if (initial_value != null) | |||
throw new ValueError("variable_def and initial_value are mutually exclusive."); | |||
@@ -73,6 +77,13 @@ namespace Tensorflow | |||
} | |||
} | |||
private void _init_from_op(Operation op) | |||
{ | |||
var g = ops.get_default_graph(); | |||
_initializer_op = op; | |||
_variable = op.output; | |||
} | |||
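// Together with the implicit Operation -> RefVariable conversion added in
// Operation.Implicit (release note 5), this lets an Operation serve directly as
// a variable's initial value: the op becomes the initializer and its first
// output backs the variable.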
private void _init_from_proto(VariableDef variable_def, string import_scope = "") | |||
{ | |||
var g = ops.get_default_graph(); | |||
@@ -1,156 +1,156 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Collections.Generic; | |||
using Tensorflow.Eager; | |||
namespace Tensorflow | |||
{ | |||
public class gen_state_ops | |||
{ | |||
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
public static Execute _execute = new Execute(); | |||
/// <summary> | |||
/// Holds state in the form of a tensor that persists across steps. | |||
/// Outputs a ref to the tensor state so it may be read or modified. | |||
/// </summary> | |||
/// <param name="shape">The shape of the variable tensor.</param> | |||
/// <param name="dtype">The type of elements in the variable tensor.</param> | |||
/// <param name="name"></param> | |||
/// <param name="container"></param> | |||
/// <param name="shared_name"></param> | |||
/// <returns></returns> | |||
public static Tensor variable_v2(int[] shape, TF_DataType dtype, string name = null, string container = "", string shared_name = "") | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("VariableV2", name: name, args: new { dtype, shape, container, shared_name }); | |||
var _result = _op.outputs; | |||
var _inputs_flat = _op.inputs; | |||
var _attrs = new Dictionary<string, object>(); | |||
_attrs["dtype"] = _op.get_attr("dtype"); | |||
_attrs["shape"] = _op.get_attr("shape"); | |||
_attrs["container"] = _op.get_attr("container"); | |||
_attrs["shared_name"] = _op.get_attr("shared_name"); | |||
_execute.record_gradient("VariableV2", _inputs_flat, _attrs, _result, name); | |||
return _result[0]; | |||
} | |||
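// Illustrative usage (names hypothetical): create the raw variable tensor and
// write an initial value into it with the Assign op defined below.
//
//   var w = gen_state_ops.variable_v2(new[] { 2, 2 }, TF_DataType.TF_FLOAT, name: "w");
//   var init = gen_state_ops.assign(w, initial_value_tensor);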
/// <summary> | |||
/// Update 'ref' by assigning 'value' to it | |||
/// </summary> | |||
/// <param name="REF"></param> | |||
/// <param name="value"></param> | |||
/// <param name="validate_shape"></param> | |||
/// <param name="use_locking"></param> | |||
/// <param name="name"></param> | |||
public static Tensor assign(Tensor @ref, object value, | |||
bool validate_shape = true, | |||
bool use_locking = true, | |||
string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||
var _result = _op.outputs; | |||
var _inputs_flat = _op.inputs; | |||
var _attrs = new Dictionary<string, object>(); | |||
_attrs["T"] = _op.get_attr("T"); | |||
_attrs["validate_shape"] = _op.get_attr("validate_shape"); | |||
_attrs["use_locking"] = _op.get_attr("use_locking"); | |||
_execute.record_gradient("Assign", _inputs_flat, _attrs, _result, name); | |||
return _result[0]; | |||
} | |||
public static Tensor assign(RefVariable @ref, object value, | |||
bool validate_shape = true, | |||
bool use_locking = true, | |||
string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||
var _result = _op.outputs; | |||
var _inputs_flat = _op.inputs; | |||
var _attrs = new Dictionary<string, object>(); | |||
_attrs["T"] = _op.get_attr("T"); | |||
_attrs["validate_shape"] = _op.get_attr("validate_shape"); | |||
_attrs["use_locking"] = _op.get_attr("use_locking"); | |||
_execute.record_gradient("Assign", _inputs_flat, _attrs, _result, name); | |||
return _result[0]; | |||
} | |||
public static Tensor assign_sub(RefVariable @ref, | |||
Tensor value, | |||
bool use_locking = false, | |||
string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("AssignSub", name: name, args: new { @ref, value, use_locking }); | |||
return _op.outputs[0]; | |||
} | |||
// Update 'ref' by adding 'value' to it. | |||
// This operation outputs "ref" after the update is done. | |||
// This makes it easier to chain operations that need to use the reset value. | |||
// Args: | |||
// ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. | |||
// Should be from a `Variable` node. | |||
// value: A `Tensor`. Must have the same type as `ref`. | |||
// The value to be added to the variable. | |||
// use_locking: An optional `bool`. Defaults to `False`. | |||
// If True, the addition will be protected by a lock; | |||
// otherwise the behavior is undefined, but may exhibit less contention. | |||
// name: A name for the operation(optional). | |||
// Returns: | |||
// A mutable `Tensor`. Has the same type as `ref`. | |||
public static Tensor assign_add<T>(RefVariable @ref, T value, bool use_locking = false, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("AssignAdd", name: name, args: new { @ref, value, use_locking }); | |||
return _op.outputs[0]; | |||
} | |||
/// <summary> | |||
/// Adds sparse updates to a variable reference. | |||
/// </summary> | |||
/// <param name="ref"></param> | |||
/// <param name="indices"></param> | |||
/// <param name="updates"></param> | |||
/// <param name="use_locking"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("ScatterAdd", name: name, args: new { @ref, indices, updates, use_locking }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor is_variable_initialized(RefVariable @ref, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("IsVariableInitialized", name: name, args: new { @ref }); | |||
return _op.output; | |||
} | |||
} | |||
} | |||
@@ -1,123 +1,123 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
namespace Tensorflow | |||
{ | |||
public class state_ops | |||
{ | |||
/// <summary> | |||
/// Create a variable Operation. | |||
/// </summary> | |||
/// <param name="shape"></param> | |||
/// <param name="dtype"></param> | |||
/// <param name="name"></param> | |||
/// <param name="container"></param> | |||
/// <param name="shared_name"></param> | |||
/// <returns></returns> | |||
public static Tensor variable_op_v2(int[] shape, | |||
TF_DataType dtype, | |||
string name = "Variable", | |||
string container = "", | |||
string shared_name = "") => gen_state_ops.variable_v2(shape, | |||
dtype, | |||
name: name, | |||
container: container, | |||
shared_name: shared_name); | |||
public static Tensor assign(Tensor @ref, object value, | |||
bool validate_shape = true, | |||
bool use_locking = true, | |||
string name = null) | |||
{ | |||
if (@ref.dtype.is_ref_dtype()) | |||
return gen_state_ops.assign(@ref, | |||
value, | |||
validate_shape: validate_shape, | |||
use_locking: use_locking, | |||
name: name); | |||
return @ref.assign((Tensor)value, name: name); | |||
} | |||
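// Dispatch note: graph-mode variables expose a *_ref dtype, so they take the
// gen_state_ops.assign path above; resource-backed tensors fall through to
// the Tensor.assign overload instead.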
public static Tensor assign(RefVariable @ref, object value, | |||
bool validate_shape = true, | |||
bool use_locking = true, | |||
string name = null) | |||
{ | |||
return gen_state_ops.assign(@ref, | |||
value, | |||
validate_shape: validate_shape, | |||
use_locking: use_locking, | |||
name: name); | |||
} | |||
public static Tensor assign_sub(RefVariable @ref, | |||
Tensor value, | |||
bool use_locking = false, | |||
string name = null) => gen_state_ops.assign_sub(@ref, | |||
value, | |||
use_locking: use_locking, | |||
name: name); | |||
//"""Update 'ref' by adding 'value' to it. | |||
// | |||
// This operation outputs "ref" after the update is done. | |||
// This makes it easier to chain operations that need to use the reset value. | |||
// | |||
// Args: | |||
// ref: A mutable `Tensor`. Must be one of the following types: | |||
// `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, | |||
// `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. | |||
// Should be from a `Variable` node. | |||
// value: A `Tensor`. Must have the same type as `ref`. | |||
// The value to be added to the variable. | |||
// use_locking: An optional `bool`. Defaults to `False`. | |||
// If True, the addition will be protected by a lock; | |||
// otherwise the behavior is undefined, but may exhibit less contention. | |||
// name: A name for the operation (optional). | |||
// | |||
// Returns: | |||
// Same as "ref". Returned as a convenience for operations that want | |||
// to use the new value after the variable has been updated. | |||
public static Tensor assign_add<T>(RefVariable @ref, | |||
T value, | |||
bool use_locking = false, | |||
string name = null) | |||
{ | |||
if (@ref.dtype.is_ref_dtype()) | |||
return gen_state_ops.assign_add(@ref, value, use_locking: use_locking, name: name); | |||
throw new NotImplementedException("assign_add"); | |||
} | |||
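// Chaining sketch (names illustrative): because the op returns the updated
// `ref`, later ops can observe the increment through a data dependency alone:
//   var plus_one = state_ops.assign_add(counter, 1);  // `counter`: a RefVariable
//   // `plus_one` aliases the updated variable; anything computed from it
//   // is guaranteed to see the increment.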
public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null) | |||
{ | |||
if (@ref.dtype.is_ref_dtype()) | |||
return gen_state_ops.scatter_add(@ref, indices, updates, use_locking: use_locking, name: name); | |||
throw new NotImplementedException("scatter_add"); | |||
} | |||
public static Tensor is_variable_initialized(RefVariable @ref, string name = null) | |||
{ | |||
if (@ref.dtype.is_ref_dtype()) | |||
return gen_state_ops.is_variable_initialized(@ref: @ref, name: name); | |||
throw new NotImplementedException("is_variable_initialized");
//return @ref.is_initialized(name: name); | |||
} | |||
} | |||
} | |||
@@ -68,7 +68,7 @@ namespace Tensorflow | |||
var g = get_default_graph(); | |||
g._name_stack = old_stack; | |||
} | |||
public void __exit__() | |||
{ | |||
} | |||
@@ -82,7 +82,7 @@ namespace Tensorflow | |||
{ | |||
} | |||
/// <summary> | |||
/// __enter__() | |||
/// </summary> | |||
@@ -2,7 +2,7 @@ | |||
<PropertyGroup> | |||
<RootNamespace>Tensorflow.Hub</RootNamespace> | |||
<TargetFramework>netstandard2.0</TargetFramework> | |||
-<Version>0.0.6</Version>
+<Version>0.1.1</Version>
<Authors>Kerry Jiang, Haiping Chen</Authors> | |||
<Company>SciSharp STACK</Company> | |||
<Copyright>Apache 2.0</Copyright> | |||
@@ -14,7 +14,7 @@ | |||
<PackageId>SciSharp.TensorFlowHub</PackageId> | |||
<GeneratePackageOnBuild>true</GeneratePackageOnBuild> | |||
<PackageReleaseNotes>Fix GetNextBatch() bug. | |||
-Change to NumSharp compact version.</PackageReleaseNotes>
+Upgrade NumSharp.Lite 0.1.4.</PackageReleaseNotes>
<PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&amp;v=4</PackageIconUrl>
<AssemblyName>TensorFlow.Hub</AssemblyName> | |||
</PropertyGroup> | |||
@@ -22,6 +22,6 @@ Change to NumSharp compact version.</PackageReleaseNotes> | |||
<DefineConstants>DEBUG;TRACE</DefineConstants> | |||
</PropertyGroup> | |||
<ItemGroup> | |||
<PackageReference Include="NumSharp" Version="0.30.0-alpha" /> | |||
<PackageReference Include="NumSharp.Lite" Version="0.1.4" /> | |||
</ItemGroup> | |||
</Project> |
@@ -19,8 +19,8 @@ | |||
<ItemGroup> | |||
<PackageReference Include="BenchmarkDotNet" Version="0.12.0" /> | |||
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.14.1" /> | |||
<PackageReference Include="TensorFlow.NET" Version="0.14.0" /> | |||
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.15.1" /> | |||
<PackageReference Include="TensorFlow.NET" Version="0.14.2" /> | |||
</ItemGroup> | |||
</Project> |
@@ -1,27 +1,12 @@ | |||
-TensorFlow.NET pack all required libraries in architecture-specific assemblies folders per NuGet standard [Deprecated].
-We changed to use `Microsoft.ML.TensorFlow.Redist` to maintain the TensorFlow library.
-### Download manually
-Here are some pre-built TensorFlow binaries you can use for each platform:
-- Linux
-- CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-1.14.0.tar.gz
-- GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-1.14.0.tar.gz
-- Mac: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-1.14.0.tar.gz
-- Windows
-- CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip
-- GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip
+TensorFlow.NET packs all required libraries in architecture-specific assemblies folders per NuGet standard.
```powershell | |||
PM> Install-Package TensorFlow.NET | |||
PM> Install-Package SciSharp.TensorFlow.Redist | |||
``` | |||
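If the packages restored correctly, a minimal graph-mode smoke test prints `42`. This is only a sketch: the `SmokeTest` class name is arbitrary, and it assumes the `Tensorflow.Binding` API surface used by this repository's unit tests.
```csharp
using System;
using Tensorflow;
using static Tensorflow.Binding;

class SmokeTest
{
    static void Main()
    {
        // Building and running a trivial graph exercises both the managed
        // binding and the native redist in one shot.
        var graph = tf.Graph().as_default();
        using (var sess = tf.Session(graph))
        {
            var x = tf.constant(2, name: "x");
            var y = tf.constant(40, name: "y");
            var z = tf.add(x, y);
            int result = z.eval(sess);
            Console.WriteLine(result); // 42 if the native library loaded
        }
    }
}
```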
### Run in Linux | |||
`Install-Package TensorFlow.NET` | |||
Download the Linux pre-built library and unzip `libtensorflow.so` and `libtensorflow_framework.so` into the current running directory.
To run image recognition on Linux, please ensure the prerequisite libraries are installed.
@@ -33,20 +18,34 @@ sudo apt install libgdiplus | |||
More information about [System.Drawing on Linux](<https://www.hanselman.com/blog/HowDoYouUseSystemDrawingInNETCore.aspx>). | |||
+### Run TensorFlow on GPU
+Before running, verify you installed CUDA and cuDNN (TensorFlow v1.15 is compatible with CUDA v10.0 and cuDNN v7.4), and make sure the corresponding CUDA version is compatible.
+#### Run in Mac OS
+There is no GPU support for macOS.
-### Run in Mac OS
-### Tensorflow GPU for Windows
-Before running verify you installed CUDA and cuDNN (TensorFlow v1.14 is compatible with CUDA v10.0 and cuDNN v7.4), and make sure the corresponding cuda version is compatible.
+#### Tensorflow GPU for Windows
```powershell | |||
PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU | |||
``` | |||
#### Tensorflow GPU for Linux | |||
```powershell | |||
PM> Install-Package SciSharp.TensorFlow.Redist-Linux-GPU | |||
``` | |||
### Download pre-built binaries manually
Here are some pre-built TensorFlow binaries you can use for each platform: | |||
- Linux | |||
- CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-1.15.0.tar.gz | |||
- GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-1.15.0.tar.gz | |||
- Mac: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-1.15.0.tar.gz | |||
- Windows | |||
- CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.15.0.zip | |||
- GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.15.0.zip | |||
### Build from source for Windows | |||
@@ -69,7 +68,7 @@ https://www.tensorflow.org/install/source_windows | |||
4. Install from local wheel file. | |||
-`pip install C:/tmp/tensorflow_pkg/tensorflow-1.14.0-cp36-cp36m-win_amd64.whl`
+`pip install C:/tmp/tensorflow_pkg/tensorflow-1.15.0-cp36-cp36m-win_amd64.whl`
### Export more APIs | |||
@@ -2,7 +2,7 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using System.Threading.Tasks; | |||
using Tensorflow.Hub; | |||
-namespace UnitTest
+namespace TensorFlowNET.UnitTest
{ | |||
[TestClass] | |||
public class MnistModelLoaderTest | |||
@@ -1,334 +1,334 @@ | |||
using System; | |||
using System.Collections; | |||
using System.Linq; | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using Newtonsoft.Json.Linq; | |||
using NumSharp; | |||
using Tensorflow; | |||
using static Tensorflow.Binding; | |||
namespace TensorFlowNET.UnitTest | |||
{ | |||
/// <summary> | |||
/// Use as base class for test classes to get additional assertions | |||
/// </summary> | |||
public class PythonTest | |||
{ | |||
#region python compatibility layer | |||
protected PythonTest self { get => this; } | |||
protected object None | |||
{ | |||
get { return null; } | |||
} | |||
#endregion | |||
#region pytest assertions | |||
public void assertItemsEqual(ICollection given, ICollection expected) | |||
{ | |||
if (given is Hashtable && expected is Hashtable) | |||
{ | |||
Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); | |||
return; | |||
} | |||
Assert.IsNotNull(expected); | |||
Assert.IsNotNull(given); | |||
var e = expected.OfType<object>().ToArray(); | |||
var g = given.OfType<object>().ToArray(); | |||
Assert.AreEqual(e.Length, g.Length, $"The collections differ in length: expected {e.Length} but got {g.Length}");
for (int i = 0; i < e.Length; i++) | |||
{ | |||
/*if (g[i] is NDArray && e[i] is NDArray) | |||
assertItemsEqual((g[i] as NDArray).GetData<object>(), (e[i] as NDArray).GetData<object>()); | |||
else*/ if (e[i] is ICollection && g[i] is ICollection) | |||
assertEqual(g[i], e[i]); | |||
else | |||
Assert.AreEqual(e[i], g[i], $"Items differ at index {i}, expected {e[i]} but got {g[i]}"); | |||
} | |||
} | |||
public void assertAllEqual(ICollection given, ICollection expected) | |||
{ | |||
assertItemsEqual(given, expected); | |||
} | |||
public void assertFloat32Equal(float expected, float actual, string msg) | |||
{ | |||
float eps = 1e-6f; | |||
Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0f, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}"); | |||
} | |||
public void assertFloat64Equal(double expected, double actual, string msg) | |||
{ | |||
double eps = 1e-16;
Assert.IsTrue(Math.Abs(expected - actual) < eps * Math.Max(1.0, Math.Abs(expected)), $"{msg}: expected {expected} vs actual {actual}");
} | |||
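// Example: assertFloat32Equal(1e6f, 1e6f + 0.05f, "close") passes because the
// tolerance scales with Math.Max(1, |expected|); near zero it degenerates to
// an absolute epsilon of 1e-6.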
public void assertEqual(object given, object expected) | |||
{ | |||
/*if (given is NDArray && expected is NDArray) | |||
{ | |||
assertItemsEqual((given as NDArray).GetData<object>(), (expected as NDArray).GetData<object>()); | |||
return; | |||
}*/ | |||
if (given is Hashtable && expected is Hashtable) | |||
{ | |||
Assert.AreEqual(JObject.FromObject(expected).ToString(), JObject.FromObject(given).ToString()); | |||
return; | |||
} | |||
if (given is ICollection && expected is ICollection) | |||
{ | |||
assertItemsEqual(given as ICollection, expected as ICollection); | |||
return; | |||
} | |||
if (given is float && expected is float) | |||
{ | |||
assertFloat32Equal((float)expected, (float)given, ""); | |||
return; | |||
} | |||
if (given is double && expected is double) | |||
{ | |||
assertFloat64Equal((double)expected, (double)given, ""); | |||
return; | |||
} | |||
Assert.AreEqual(expected, given); | |||
} | |||
public void assertEquals(object given, object expected) | |||
{ | |||
assertEqual(given, expected); | |||
} | |||
public void assert(object given)
{
if (given is bool b)
{
Assert.IsTrue(b);
return;
}
Assert.IsNotNull(given);
}
public void assertIsNotNone(object given) | |||
{ | |||
Assert.IsNotNull(given); | |||
} | |||
public void assertFalse(bool cond) | |||
{ | |||
Assert.IsFalse(cond); | |||
} | |||
public void assertTrue(bool cond) | |||
{ | |||
Assert.IsTrue(cond); | |||
} | |||
public void assertAllClose(NDArray array1, NDArray array2, double eps = 1e-5) | |||
{ | |||
Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); | |||
} | |||
public void assertAllClose(double value, NDArray array2, double eps = 1e-5) | |||
{ | |||
var array1 = np.ones_like(array2) * value; | |||
Assert.IsTrue(np.allclose(array1, array2, rtol: eps)); | |||
} | |||
public void assertProtoEquals(object toProto, object o) | |||
{ | |||
throw new NotImplementedException(); | |||
} | |||
#endregion | |||
#region tensor evaluation and test session | |||
//protected object _eval_helper(Tensor[] tensors) | |||
//{ | |||
// if (tensors == null) | |||
// return null; | |||
// return nest.map_structure(self._eval_tensor, tensors); | |||
//} | |||
protected object _eval_tensor(object tensor) | |||
{ | |||
if (tensor == None) | |||
return None; | |||
//else if (callable(tensor)) | |||
// return self._eval_helper(tensor()) | |||
else | |||
{ | |||
try | |||
{ | |||
//TODO: | |||
// if sparse_tensor.is_sparse(tensor): | |||
// return sparse_tensor.SparseTensorValue(tensor.indices, tensor.values, | |||
// tensor.dense_shape) | |||
//return (tensor as Tensor).numpy(); | |||
} | |||
catch (Exception) | |||
{ | |||
throw new ValueError("Unsupported type: " + tensor.GetType()); | |||
} | |||
return null; | |||
} | |||
} | |||
/// <summary> | |||
/// This function is used in many original tensorflow unit tests to evaluate tensors | |||
/// in a test session with special settings (for instance constant folding off) | |||
/// | |||
/// </summary> | |||
public T evaluate<T>(Tensor tensor) | |||
{ | |||
object result = null; | |||
// if context.executing_eagerly(): | |||
// return self._eval_helper(tensors) | |||
// else: | |||
{ | |||
using (var sess = tf.Session()) | |||
{ | |||
var ndarray = tensor.eval(sess);
if (typeof(T) == typeof(double)) | |||
{ | |||
double x = ndarray; | |||
result = x;
} | |||
else if (typeof(T) == typeof(int)) | |||
{ | |||
int x = ndarray; | |||
result = x; | |||
} | |||
else | |||
{ | |||
result = ndarray; | |||
} | |||
} | |||
return (T)result; | |||
} | |||
} | |||
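// Typical use inside a test (mirrors CondTestCases below):
//   var z = tf.add(tf.constant(2), tf.constant(40));
//   assertEquals(evaluate<int>(z), 42);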
public Session cached_session() | |||
{ | |||
throw new NotImplementedException(); | |||
} | |||
//Returns a TensorFlow Session for use in executing tests. | |||
public Session session(Graph graph = null, object config = null, bool use_gpu = false, bool force_gpu = false) | |||
{ | |||
//Note that this will set this session and the graph as global defaults. | |||
//Use the `use_gpu` and `force_gpu` options to control where ops are run. If
//`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if | |||
//`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as | |||
//possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
//the CPU. | |||
//Example: | |||
//```python | |||
//class MyOperatorTest(test_util.TensorFlowTestCase): | |||
// def testMyOperator(self): | |||
// with self.session(use_gpu= True): | |||
// valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] | |||
// result = MyOperator(valid_input).eval() | |||
// self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] | |||
// invalid_input = [-1.0, 2.0, 7.0] | |||
// with self.assertRaisesOpError("negative input not supported"): | |||
// MyOperator(invalid_input).eval() | |||
//``` | |||
//Args: | |||
// graph: Optional graph to use during the returned session. | |||
// config: An optional config_pb2.ConfigProto to use to configure the | |||
// session. | |||
// use_gpu: If True, attempt to run as many ops as possible on GPU. | |||
// force_gpu: If True, pin all ops to `/device:GPU:0`. | |||
//Yields: | |||
// A Session object that should be used as a context manager to surround | |||
// the graph building and execution code in a test case. | |||
Session s = null; | |||
//if (context.executing_eagerly()) | |||
// yield None | |||
//else | |||
//{ | |||
s = self._create_session(graph, config, force_gpu); | |||
self._constrain_devices_and_set_default(s, use_gpu, force_gpu); | |||
//} | |||
return s.as_default(); | |||
} | |||
private IObjectLife _constrain_devices_and_set_default(Session sess, bool useGpu, bool forceGpu) | |||
{ | |||
//def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu): | |||
//"""Set the session and its graph to global default and constrain devices.""" | |||
//if context.executing_eagerly(): | |||
// yield None | |||
//else: | |||
// with sess.graph.as_default(), sess.as_default(): | |||
// if force_gpu: | |||
// # Use the name of an actual device if one is detected, or | |||
// # '/device:GPU:0' otherwise | |||
// gpu_name = gpu_device_name() | |||
// if not gpu_name: | |||
// gpu_name = "/device:GPU:0" | |||
// with sess.graph.device(gpu_name): | |||
// yield sess | |||
// elif use_gpu: | |||
// yield sess | |||
// else: | |||
// with sess.graph.device("/device:CPU:0"): | |||
// yield sess | |||
return sess; | |||
} | |||
// See session() for details. | |||
private Session _create_session(Graph graph, object cfg, bool forceGpu) | |||
{ | |||
var prepare_config = new Func<object, object>((config) => | |||
{ | |||
// """Returns a config for sessions. | |||
// Args: | |||
// config: An optional config_pb2.ConfigProto to use to configure the | |||
// session. | |||
// Returns: | |||
// A config_pb2.ConfigProto object. | |||
//TODO: config | |||
// # use_gpu=False. Currently many tests rely on the fact that any device | |||
// # will be used even when a specific device is supposed to be used. | |||
// allow_soft_placement = not force_gpu | |||
// if config is None: | |||
// config = config_pb2.ConfigProto() | |||
// config.allow_soft_placement = allow_soft_placement | |||
// config.gpu_options.per_process_gpu_memory_fraction = 0.3 | |||
// elif not allow_soft_placement and config.allow_soft_placement: | |||
// config_copy = config_pb2.ConfigProto() | |||
// config_copy.CopyFrom(config) | |||
// config = config_copy | |||
// config.allow_soft_placement = False | |||
// # Don't perform optimizations for tests so we don't inadvertently run | |||
// # gpu ops on cpu | |||
// config.graph_options.optimizer_options.opt_level = -1 | |||
// # Disable Grappler constant folding since some tests & benchmarks | |||
// # use constant input and become meaningless after constant folding. | |||
// # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE | |||
// # GRAPPLER TEAM. | |||
// config.graph_options.rewrite_options.constant_folding = ( | |||
// rewriter_config_pb2.RewriterConfig.OFF) | |||
// config.graph_options.rewrite_options.pin_to_host_optimization = ( | |||
// rewriter_config_pb2.RewriterConfig.OFF) | |||
return config; | |||
}); | |||
//TODO: use this instead of normal session | |||
//return new ErrorLoggingSession(graph = graph, config = prepare_config(config)) | |||
return new Session(graph);//, config = prepare_config(config)) | |||
} | |||
#endregion | |||
} | |||
} | |||
@@ -28,11 +28,11 @@ | |||
</ItemGroup> | |||
<ItemGroup> | |||
<PackageReference Include="FluentAssertions" Version="5.10.0" /> | |||
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.4.0" /> | |||
<PackageReference Include="MSTest.TestAdapter" Version="2.0.0" /> | |||
<PackageReference Include="MSTest.TestFramework" Version="2.0.0" /> | |||
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.14.1" /> | |||
<PackageReference Include="FluentAssertions" Version="5.10.2" /> | |||
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.5.0" /> | |||
<PackageReference Include="MSTest.TestAdapter" Version="2.1.0" /> | |||
<PackageReference Include="MSTest.TestFramework" Version="2.1.0" /> | |||
<PackageReference Include="SciSharp.TensorFlow.Redist" Version="1.15.1" /> | |||
</ItemGroup> | |||
<ItemGroup> | |||
@@ -1,86 +1,86 @@ | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using Tensorflow; | |||
using static Tensorflow.Binding; | |||
namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
{ | |||
/// <summary> | |||
/// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py | |||
/// </summary> | |||
[TestClass] | |||
public class CondTestCases : PythonTest | |||
{ | |||
[TestMethod] | |||
public void testCondTrue_ConstOnly() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
var x = tf.constant(2, name: "x"); | |||
var y = tf.constant(5, name: "y"); | |||
var z = control_flow_ops.cond(tf.less(x, y), | |||
() => tf.constant(22, name: "t22"), | |||
() => tf.constant(55, name: "f55")); | |||
int result = z.eval(sess); | |||
assertEquals(result, 22); | |||
} | |||
} | |||
[TestMethod] | |||
public void testCondFalse_ConstOnly() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
var x = tf.constant(2, name: "x"); | |||
var y = tf.constant(1, name: "y"); | |||
var z = control_flow_ops.cond(tf.less(x, y), | |||
() => tf.constant(22, name: "t22"), | |||
() => tf.constant(11, name: "f11")); | |||
int result = z.eval(sess); | |||
assertEquals(result, 11); | |||
} | |||
} | |||
[TestMethod] | |||
public void testCondTrue() | |||
{ | |||
tf.Graph().as_default(); | |||
var x = tf.constant(2, name: "x"); | |||
var y = tf.constant(5, name: "y"); | |||
var z = control_flow_ops.cond(tf.less(x, y), | |||
() => tf.multiply(x, 17), | |||
() => tf.add(y, 23)); | |||
var result = evaluate<int>(z); | |||
assertEquals(result, 34); | |||
} | |||
[TestMethod] | |||
public void testCondFalse() | |||
{ | |||
tf.Graph().as_default(); | |||
var x = tf.constant(2); | |||
var y = tf.constant(1); | |||
var z = control_flow_ops.cond(tf.less(x, y), | |||
() => tf.multiply(x, 17), | |||
() => tf.add(y, 23)); | |||
var result = evaluate<int>(z); | |||
assertEquals(result, 24); | |||
} | |||
// NOTE: all other python test cases of this class are either not needed due to strong typing or test a deprecated api | |||
} | |||
} | |||
@@ -1,23 +1,23 @@ | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using Tensorflow; | |||
namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
{ | |||
/// <summary> | |||
/// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py | |||
/// </summary> | |||
[TestClass] | |||
public class ShapeTestCase : PythonTest | |||
{ | |||
[TestMethod] | |||
public void testShape() | |||
{ | |||
var tensor = constant_op.constant(new[]{1.0, 2.0}); | |||
self.assertEquals(new int[] {2}, tensor.shape); | |||
self.assertEquals(new int[] {2}, | |||
control_flow_ops.with_dependencies(new[] {constant_op.constant(1.0).op}, tensor).shape); | |||
} | |||
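// with_dependencies(deps, tensor) only gates execution: it returns a tensor
// produced after `deps` have run but forwards `tensor` unchanged, which is
// why the static shape is expected to survive the wrapping.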
} | |||
} | |||
@@ -1,173 +1,173 @@ | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using Tensorflow; | |||
namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
{ | |||
/// <summary> | |||
/// excerpt of tensorflow/python/framework/ops/control_flow_ops_test.py | |||
/// </summary> | |||
[TestClass] | |||
public class SwitchTestCase : PythonTest | |||
{ | |||
[Ignore("TODO")] | |||
[TestMethod] | |||
public void testResourceReadInLoop() | |||
{ | |||
//var embedding_matrix = variable_scope.get_variable( | |||
//"embedding_matrix", initializer: new double[,] { { 2.0 }, { 3.0 } }, use_resource: true); | |||
/* | |||
Tensor cond(Tensor it, Tensor _) | |||
{ | |||
return it < 5; | |||
} | |||
*/ | |||
// TODO: below code doesn't compile | |||
//(Tensor, Tensor) body(Tensor it, Tensor cost) | |||
//{ | |||
// var embedding = embedding_ops.embedding_lookup(embedding_matrix, new int[]{0}); | |||
// cost += math_ops.reduce_sum(embedding); | |||
// return (it + 1, cost); | |||
//} | |||
//var (_, cost1) = control_flow_ops.while_loop( | |||
// cond, body, new[] | |||
// { | |||
// constant_op.constant(0), | |||
// constant_op.constant(0.0) | |||
// }); | |||
//with<Session>(this.cached_session(), sess => | |||
//{ | |||
// self.evaluate(variables.global_variables_initializer()); | |||
// self.assertAllEqual(10.0, self.evaluate(cost1)); | |||
//}); | |||
} | |||
[Ignore("TODO")] | |||
[TestMethod] | |||
public void testIndexedSlicesGradientInCondInWhileLoop() | |||
{ | |||
doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: false); | |||
} | |||
[Ignore("TODO")] | |||
[TestMethod] | |||
public void testIndexedSlicesGradientInCondInWhileLoopResource() | |||
{ | |||
doTestIndexedSlicesGradientInCondInWhileLoop(use_resource: true); | |||
} | |||
private void doTestIndexedSlicesGradientInCondInWhileLoop(bool use_resource = false) | |||
{ | |||
//def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False): | |||
// embedding_matrix = variable_scope.get_variable( | |||
// "embedding_matrix", [5, 5], | |||
// initializer=init_ops.random_normal_initializer(), | |||
// use_resource=use_resource) | |||
// def cond(it, _): | |||
// return it < 5 | |||
// def body(it, cost): | |||
// embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) | |||
// cost = control_flow_ops.cond( | |||
// math_ops.equal(it, 3), lambda: math_ops.square(cost), | |||
// (lambda: cost + math_ops.reduce_sum(embedding))) | |||
// return it + 1, cost | |||
// _, cost = control_flow_ops.while_loop( | |||
// cond, body, [constant_op.constant(0), | |||
// constant_op.constant(0.0)]) | |||
// dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0] | |||
// dynamic_grads = math_ops.segment_sum(dynamic_grads.values, | |||
// dynamic_grads.indices) | |||
// embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) | |||
// static = math_ops.square( | |||
// math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) + | |||
// math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding) | |||
// static_grads = gradients_impl.gradients(static, [embedding_matrix])[0] | |||
// static_grads = math_ops.segment_sum(static_grads.values, | |||
// static_grads.indices) | |||
// with self.cached_session(): | |||
// self.evaluate(variables.global_variables_initializer()) | |||
// self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads])) | |||
} | |||
[Ignore("TODO")] | |||
[TestMethod] | |||
public void testIndexedSlicesWithShapeGradientInWhileLoop() | |||
{ | |||
//@test_util.run_v1_only("b/120545219") | |||
//def testIndexedSlicesWithShapeGradientInWhileLoop(self): | |||
// for dtype in [dtypes.float32, dtypes.float64]: | |||
// with self.cached_session() as sess: | |||
// num_steps = 9 | |||
// inputs = array_ops.placeholder(dtype=dtype, shape=[num_steps]) | |||
// initial_outputs = tensor_array_ops.TensorArray( | |||
// dtype=dtype, size=num_steps) | |||
// initial_i = constant_op.constant(0, dtype=dtypes.int32) | |||
// def cond(i, _): | |||
// return i < num_steps # pylint: disable=cell-var-from-loop | |||
// def body(i, outputs): | |||
// x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop | |||
// outputs = outputs.write(i, x) | |||
// return i + 1, outputs | |||
// _, outputs = control_flow_ops.while_loop(cond, body, | |||
// [initial_i, initial_outputs]) | |||
// outputs = math_ops.reduce_sum(outputs.stack()) | |||
// r = gradients_impl.gradients([outputs], [inputs])[0] | |||
// grad_wr_inputs = ops.convert_to_tensor(r) | |||
// o, grad = sess.run([outputs, grad_wr_inputs], | |||
// feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]}) | |||
// self.assertEquals(o, 20) | |||
// self.assertAllEqual(grad, [1] * num_steps) | |||
} | |||
[Ignore("TODO")] | |||
[TestMethod] | |||
public void testIndexedSlicesWithDynamicShapeGradientInWhileLoop() | |||
{ | |||
//@test_util.run_v1_only("b/120545219") | |||
//def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self): | |||
// for dtype in [dtypes.float32, dtypes.float64]: | |||
// with self.cached_session() as sess: | |||
// inputs = array_ops.placeholder(dtype=dtype) | |||
// initial_outputs = tensor_array_ops.TensorArray( | |||
// dtype=dtype, dynamic_size=True, size=1) | |||
// initial_i = constant_op.constant(0, dtype=dtypes.int32) | |||
// def cond(i, _): | |||
// return i < array_ops.size(inputs) # pylint: disable=cell-var-from-loop | |||
// def body(i, outputs): | |||
// x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop | |||
// outputs = outputs.write(i, x) | |||
// return i + 1, outputs | |||
// _, outputs = control_flow_ops.while_loop(cond, body, | |||
// [initial_i, initial_outputs]) | |||
// outputs = math_ops.reduce_sum(outputs.stack()) | |||
// r = gradients_impl.gradients([outputs], [inputs])[0] | |||
// grad_wr_inputs = ops.convert_to_tensor(r) | |||
// o, grad = sess.run([outputs, grad_wr_inputs], | |||
// feed_dict={inputs: [1, 3, 2]}) | |||
// self.assertEquals(o, 6) | |||
// self.assertAllEqual(grad, [1] * 3) | |||
} | |||
} | |||
} | |||
@@ -1,52 +1,52 @@ | |||
using System; | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using Tensorflow; | |||
using static Tensorflow.Binding; | |||
namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
{ | |||
[TestClass] | |||
public class WhileContextTestCase : PythonTest | |||
{ | |||
/// <summary> | |||
/// https://www.tensorflow.org/api_docs/python/tf/while_loop | |||
/// </summary> | |||
[Ignore] | |||
[TestMethod] | |||
public void SimpleWhileLoop() | |||
{ | |||
var i = constant_op.constant(0, name: "i"); | |||
var c = new Func<Tensor, Tensor>(x => tf.less(x, 10, name: "c")); | |||
var b = new Func<Tensor, Tensor>(x => tf.add(x, 1, name: "b"));
//var r = control_flow_ops.while_loop(c, b, i); | |||
} | |||
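// If the commented-out while_loop were enabled, `r` would evaluate to 10:
// the body keeps adding 1 for as long as the condition x < 10 holds.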
private void _testWhileContextHelper(int maximum_iterations) | |||
{ | |||
// TODO: implement missing code dependencies | |||
using (var sess = this.cached_session()) | |||
{ | |||
var i = constant_op.constant(0, name: "i"); | |||
var c = new Func<Tensor, Tensor>(x => gen_math_ops.less(x, 10, name: "c")); | |||
var b = new Func<Tensor, Tensor>(x => gen_math_ops.add(x, 1, name: "b"));
//control_flow_ops.while_loop( | |||
// c, b, i , maximum_iterations: tf.constant(maximum_iterations)); | |||
foreach (Operation op in sess.graph.get_operations()) | |||
{ | |||
var control_flow_context = op._get_control_flow_context(); | |||
/*if (control_flow_context != null) | |||
self.assertProtoEquals(control_flow_context.to_proto(), | |||
WhileContext.from_proto( | |||
control_flow_context.to_proto()).to_proto(), "");*/ | |||
} | |||
} | |||
} | |||
[Ignore("TODO")] | |||
[TestMethod] | |||
public void testWhileContextWithMaximumIterations() | |||
{ | |||
_testWhileContextHelper(maximum_iterations: 10); | |||
} | |||
} | |||
} | |||
@@ -1,87 +1,87 @@ | |||
using System; | |||
using System.Linq; | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using NumSharp; | |||
using Tensorflow; | |||
using static Tensorflow.Binding; | |||
namespace TensorFlowNET.UnitTest.nn_test | |||
{ | |||
[TestClass] | |||
public class ZeroFractionTest : PythonTest | |||
{ | |||
protected double _ZeroFraction(NDArray x) | |||
{ | |||
assert(x.shape); | |||
int total_elements = np.prod(x.shape); | |||
var eps = 1e-8; | |||
var nonzeros = x.Data<double>().Count(d => Math.Abs(d) > eps);
return 1.0 - nonzeros / (double)total_elements; | |||
} | |||
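        // A minimal sketch (an assumption, not the shipped API) of what
        // nn_impl.zero_fraction could look like once ported; like the Python
        // version, it takes the mean of cast(equal(value, 0), float), assuming
        // the usual op helpers are available:
        //   public static Tensor zero_fraction(Tensor value, string name = null)
        //   {
        //       var zero = constant_op.constant(0, dtype: value.dtype, name: "zero");
        //       return math_ops.reduce_mean(
        //           math_ops.cast(gen_math_ops.equal(value, zero), TF_DataType.TF_FLOAT),
        //           name: name);
        //   }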
[Ignore("TODO implement nn_impl.zero_fraction")] | |||
[TestMethod] | |||
public void testZeroFraction() | |||
{ | |||
var x_shape = new Shape(5, 17); | |||
var x_np = np.random.randint(0, 2, x_shape); | |||
//x_np.astype(np.float32); | |||
var y_np = this._ZeroFraction(x_np); | |||
var x_tf = constant_op.constant(x_np); | |||
x_tf.set_shape(x_shape); | |||
var y_tf = nn_impl.zero_fraction(x_tf); | |||
var y_tf_np = self.evaluate<NDArray>(y_tf); | |||
var eps = 1e-8; | |||
self.assertAllClose(y_tf_np, y_np, eps); | |||
} | |||
[Ignore("TODO implement nn_impl.zero_fraction")] | |||
[TestMethod] | |||
public void testZeroFractionEmpty() | |||
{ | |||
var x = np.zeros(0); | |||
var y = self.evaluate<NDArray>(nn_impl.zero_fraction(new Tensor(x))); | |||
self.assertTrue(np.isnan(y)); | |||
} | |||
[Ignore("TODO implement nn_impl.zero_fraction")] | |||
[TestMethod] | |||
public void testZeroFraction2_27Zeros() | |||
{ | |||
var sparsity = nn_impl.zero_fraction( | |||
                array_ops.zeros(new Shape((int)Math.Pow(2, 27 * 1.01)), dtypes.int8));
self.assertAllClose(1.0, self.evaluate<NDArray>(sparsity)); | |||
} | |||
[Ignore("TODO implement nn_impl.zero_fraction")] | |||
[TestMethod] | |||
public void testZeroFraction2_27Ones() | |||
{ | |||
var sparsity = nn_impl.zero_fraction( | |||
array_ops.ones(new TensorShape((int)Math.Pow(2, 27 * 1.01)), dtypes.int8)); | |||
self.assertAllClose(0.0, self.evaluate<NDArray>(sparsity)); | |||
} | |||
[Ignore("TODO implement nn_impl.zero_fraction")] | |||
[TestMethod] | |||
public void testUnknownSize() | |||
{ | |||
var value = array_ops.placeholder(dtype: dtypes.float32); | |||
var sparsity = nn_impl.zero_fraction(value); | |||
using (var sess = self.cached_session()) | |||
{ | |||
// TODO: make this compile | |||
//self.assertAllClose( | |||
// 0.25, | |||
// sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})); | |||
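                // A possible compilable form, assuming sess.run accepts FeedItem
                // pairs for placeholder feeds (the values are those from the
                // Python original above):
                //   var result = sess.run(sparsity,
                //       new FeedItem(value, new[,] { { 0f, 1f }, { 0.3f, 2f } }));
                //   self.assertAllClose(0.25, result);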
} | |||
} | |||
} | |||
} | |||
@@ -1,316 +1,316 @@ | |||
using System; | |||
using System.Linq; | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using Tensorflow; | |||
using Tensorflow.Eager; | |||
using static Tensorflow.Binding; | |||
namespace TensorFlowNET.UnitTest.ops_test | |||
{ | |||
/// <summary> | |||
/// excerpt of tensorflow/python/framework/ops_test.py | |||
/// </summary> | |||
[TestClass] | |||
public class ControlDependenciesTest : PythonTest | |||
{ | |||
[TestMethod] | |||
public void TestBasic() | |||
{ | |||
var g = tf.Graph().as_default(); | |||
Tensor a = null, b = null, c = null, d = null, e = null; | |||
a = constant_op.constant(1.0); | |||
b = constant_op.constant(1.0); | |||
tf_with(g.control_dependencies(new[] { a }), x => | |||
{ | |||
c = constant_op.constant(1.0); | |||
d = array_ops.identity(b); | |||
e = array_ops.identity(c); | |||
}); | |||
Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op })); | |||
Assert.IsTrue(Enumerable.SequenceEqual(d.op.control_inputs, new[] { a.op })); | |||
// e should be dominated by c. | |||
Assert.AreEqual(0, e.op.control_inputs.Length); | |||
} | |||
[Ignore("Future is not supported yet")] | |||
[TestMethod] | |||
public void TestEager() | |||
{ | |||
Tensor a = null, c = null; | |||
object b = null; | |||
var calls = 0; | |||
Func<Tensor> future = () => | |||
{ | |||
calls += 1; | |||
return constant_op.constant(2.0); | |||
}; | |||
using (var opts = new ContextOptions()) | |||
using (var status = new Status()) | |||
using (var context = new Context(opts, status)) | |||
{ | |||
if (context.executing_eagerly()) | |||
{ | |||
// TODO: make this compile (see original Python code below) | |||
a = constant_op.constant(1.0); | |||
                    b = future; // <--- {henon} this doesn't compile as-is; control_dependencies needs to accept callables as well.
tf_with(ops.control_dependencies(new object[] { a, b }), ctrl => | |||
{ | |||
return c = constant_op.constant(3.0); | |||
}); | |||
Assert.AreEqual(calls, 1); | |||
} | |||
else | |||
{ | |||
var g = tf.Graph().as_default(); | |||
a = constant_op.constant(1.0); | |||
var b1 = future(); | |||
                    tf_with(g.control_dependencies(new[] { a, b1 }), ctrl =>
{ | |||
c = constant_op.constant(3.0); | |||
}); | |||
Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); | |||
Assert.AreEqual(1, calls); | |||
} | |||
} | |||
/* | |||
def testEager(self): | |||
def future(): | |||
future.calls += 1 | |||
return constant_op.constant(2.0) | |||
future.calls = 0 | |||
if context.executing_eagerly(): | |||
a = constant_op.constant(1.0) | |||
b = future | |||
with ops.control_dependencies([a, b]): | |||
c = constant_op.constant(3.0) | |||
self.assertEqual(future.calls, 1) | |||
else: | |||
g = ops.Graph() | |||
with g.as_default(): | |||
a = constant_op.constant(1.0) | |||
b = future() | |||
with g.control_dependencies([a, b]): | |||
c = constant_op.constant(3.0) | |||
self.assertEqual(c.op.control_inputs, [a.op, b.op]) | |||
self.assertEqual(future.calls, 1) | |||
*/ | |||
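            // One possible porting direction (an assumption, not the current API):
            // let control_dependencies accept object entries and invoke any
            // Func<Tensor> before recording the dependency, e.g.
            //   var resolved = inputs.Select(x => x is Func<Tensor> f ? (object)f() : x);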
} | |||
[Ignore("How to port the ConvertibleObj?")] | |||
[TestMethod] | |||
public void TestBasicWithConversion() | |||
{ | |||
var g = tf.Graph().as_default(); | |||
// Note: _apply_op can be replaced by g.create_op | |||
var a = g.create_op("FloatOutput", new Tensor[] { }, new[] { TF_DataType.TF_FLOAT }); | |||
// TODO: ConvertibleObj, see original source below | |||
/* | |||
def testBasicWithConversion(self): | |||
g = ops.Graph() | |||
a = _apply_op(g, "FloatOutput", [], [dtypes.float32]) | |||
class ConvertibleObj(object): | |||
def _as_graph_element(self): | |||
return a | |||
with g.control_dependencies([ConvertibleObj()]): | |||
c = _apply_op(g, "FloatOutput", [], [dtypes.float32]) | |||
self.assertEqual(c.op.control_inputs, [a.op]) | |||
*/ | |||
} | |||
[TestMethod] | |||
public void TestNested() | |||
{ | |||
var g = tf.Graph().as_default(); | |||
var a_1 = constant_op.constant(1.0); | |||
var a_2 = constant_op.constant(3.0); | |||
var a_3 = constant_op.constant(4.0); | |||
var a_4 = constant_op.constant(5.0); | |||
Tensor b_1 = null, b_2 = null; | |||
tf_with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => | |||
{ | |||
b_1 = constant_op.constant(6.0); | |||
}); | |||
tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
{ | |||
tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
{ | |||
tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => | |||
{ | |||
tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => | |||
{ | |||
b_2 = constant_op.constant(7.0); | |||
}); | |||
}); | |||
}); | |||
}); | |||
//var z=tf.add(a_1, tf.multiply(b_2, b_1)); | |||
//with(g.control_dependencies(new[] {z}), ctrl => | |||
//{ | |||
// var z1 = tf.add(a_3, tf.multiply(a_4, a_2)); | |||
//}); | |||
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); | |||
assertItemsEqual(b_1.op.control_inputs, new[] { a_1.op, a_2.op, a_3.op, a_4.op }); | |||
assertItemsEqual(b_2.op.control_inputs, b_1.op.control_inputs); | |||
} | |||
[TestMethod] | |||
public void TestClear() | |||
{ | |||
var g = tf.Graph().as_default(); | |||
var a_1 = constant_op.constant(1.0); | |||
var a_2 = constant_op.constant(3.0); | |||
var a_3 = constant_op.constant(4.0); | |||
var a_4 = constant_op.constant(5.0); | |||
Operation b_3_4 = null, b_3 = null, b_none = null, b_1 = null, b_1_2 = null, b_none2 = null; | |||
tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
{ | |||
tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
{ | |||
tf_with(g.control_dependencies(null), ctrl3 => | |||
{ | |||
tf_with(g.control_dependencies(new[] { a_3 }), ctrl4 => | |||
{ | |||
tf_with(g.control_dependencies(new[] { a_4 }), ctrl5 => | |||
{ | |||
// deps [a_3, a_4] | |||
b_3_4 = constant_op.constant(7.0); | |||
}); | |||
// deps = [a_3] | |||
b_3 = constant_op.constant(8.0); | |||
}); | |||
// deps back to None | |||
b_none = constant_op.constant(9.0); | |||
}); | |||
// deps back to [a_1, a_2] | |||
b_1_2 = constant_op.constant(10.0); | |||
}); | |||
// deps back to [a_1] | |||
b_1 = constant_op.constant(11.0); | |||
tf_with(g.control_dependencies(null), ctrl6 => | |||
{ | |||
// deps are None again | |||
b_none2 = constant_op.constant(12.0); | |||
}); | |||
}); | |||
            // Note: assertItemsEqual is declared as (given, expected), but the calls below pass (expected, given).
            // Since the check is symmetric this only affects failure messages; the arguments should still be swapped.
assertItemsEqual(new[] { a_3.op, a_4.op }, b_3_4.op.control_inputs); | |||
assertItemsEqual(new[] { a_3.op }, b_3.op.control_inputs); | |||
assertItemsEqual(new object[0], b_none.op.control_inputs); | |||
assertItemsEqual(new[] { a_1.op, a_2.op }, b_1_2.op.control_inputs); | |||
assertItemsEqual(new[] { a_1.op }, b_1.op.control_inputs); | |||
assertItemsEqual(new object[0], b_none2.op.control_inputs); | |||
} | |||
[TestMethod] | |||
public void TestComplex() | |||
{ | |||
var g = tf.Graph().as_default(); | |||
// Usage pattern: | |||
// * Nodes a_i are constants defined at the outermost scope, and are used | |||
// as control inputs for the ith nested scope. | |||
// * Nodes b_i are defined as Mul(a_3, a_4) at each scope. | |||
// * Nodes c_i are defined as Mul(a_1, b_1) at each scope. | |||
// * Nodes d_i are defined as Mul(b_i, c_i) at each scope. | |||
// * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1. | |||
var a_1 = constant_op.constant(1.0); | |||
var a_2 = constant_op.constant(2.0); | |||
var a_3 = constant_op.constant(3.0); | |||
var a_4 = constant_op.constant(4.0); | |||
Operation b_1 = null, b_2 = null, b_3 = null, b_4 = null; | |||
Operation c_1 = null, c_2 = null, c_3 = null, c_4 = null; | |||
Operation d_1 = null, d_2 = null, d_3 = null, d_4 = null; | |||
Operation e_1 = null, e_2 = null, e_3 = null, e_4 = null; | |||
tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
{ | |||
b_1 = tf.multiply(a_3, a_4); | |||
c_1 = tf.multiply(a_1, b_1.output); | |||
d_1 = tf.multiply(b_1.output, c_1.output); | |||
e_1 = constant_op.constant(5.0); | |||
tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
{ | |||
b_2 = tf.multiply(a_3, a_4); | |||
c_2 = tf.multiply(a_1, b_1.output); | |||
d_2 = tf.multiply(b_2.output, c_2.output); | |||
e_2 = tf.multiply(e_1.output, e_1.output); | |||
tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => | |||
{ | |||
b_3 = tf.multiply(a_3, a_4); | |||
c_3 = tf.multiply(a_1, b_1.output); | |||
d_3 = tf.multiply(b_3.output, c_3.output); | |||
e_3 = tf.multiply(e_2.output, e_2.output); | |||
tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => | |||
{ | |||
b_4 = tf.multiply(a_3, a_4); | |||
c_4 = tf.multiply(a_1, b_1.output); | |||
d_4 = tf.multiply(b_4.output, c_4.output); | |||
e_4 = tf.multiply(e_3.output, e_3.output); | |||
}); | |||
}); | |||
}); | |||
}); | |||
            // Note: assertItemsEqual is declared as (given, expected), but the calls below pass (expected, given).
            // Since the check is symmetric this only affects failure messages; the arguments should still be swapped.
assertItemsEqual(new[] {a_1.op}, b_1.op.control_inputs); | |||
assertItemsEqual(new[] {a_1.op, a_2.op}, b_2.op.control_inputs); | |||
assertItemsEqual(new[] { a_1.op, a_2.op}, b_3.op.control_inputs); | |||
assertItemsEqual(new[] {a_1.op, a_2.op}, b_4.op.control_inputs); | |||
assertItemsEqual(new object[0], c_1.op.control_inputs); | |||
assertItemsEqual(new[] {a_2.op}, c_2.op.control_inputs); | |||
assertItemsEqual(new[] {a_2.op, a_3.op}, c_3.op.control_inputs); | |||
assertItemsEqual(new[] {a_2.op, a_3.op, a_4.op}, c_4.op.control_inputs); | |||
assertItemsEqual(new object[0], d_1.op.control_inputs); | |||
assertItemsEqual(new object[0], d_2.op.control_inputs); | |||
assertItemsEqual(new object[0], d_3.op.control_inputs); | |||
assertItemsEqual(new object[0], d_4.op.control_inputs); | |||
assertItemsEqual(new[] {a_1.op}, e_1.op.control_inputs); | |||
assertItemsEqual(new[] {a_2.op}, e_2.op.control_inputs); | |||
assertItemsEqual(new[] {a_3.op}, e_3.op.control_inputs); | |||
assertItemsEqual(new[] {a_4.op}, e_4.op.control_inputs); | |||
} | |||
[Ignore("Don't know how to create an operation with two outputs")] | |||
[TestMethod] | |||
public void TestRepeatedDependency() | |||
{ | |||
/* | |||
def testRepeatedDependency(self): | |||
g = ops.Graph() | |||
a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32]) | |||
a_0, a_1 = a.outputs | |||
with g.control_dependencies([a_0]): | |||
b = _apply_op(g, "FloatOutput", [], [dtypes.float32]) | |||
with g.control_dependencies([a_1]): | |||
c = _apply_op(g, "FloatOutput", [], [dtypes.float32]) | |||
self.assertEqual(b.op.control_inputs, [a]) | |||
self.assertEqual(c.op.control_inputs, [a]) | |||
*/ | |||
} | |||
[TestMethod] | |||
public void TestNoControlDependencyWithDataDependency() | |||
{ | |||
var g = tf.Graph().as_default(); | |||
Operation b = null; | |||
var a = constant_op.constant(100.0); | |||
tf_with(g.control_dependencies(new[] { a }), ctrl1 => | |||
{ | |||
b = array_ops.identity(a); | |||
}); | |||
Assert.AreEqual(0, b.op.control_inputs.Length); | |||
} | |||
} | |||
} | |||
@@ -1,220 +1,220 @@ | |||
using System; | |||
using System.Linq; | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using Tensorflow; | |||
using Tensorflow.Operations; | |||
using static Tensorflow.Binding; | |||
namespace TensorFlowNET.UnitTest.ops_test | |||
{ | |||
/// <summary> | |||
/// excerpt of tensorflow/python/framework/ops_test.py | |||
/// # These cases test the private Graph._create_op_from_tf_operation | |||
/// # method. Arguably we should only test the public APIs that depend on this | |||
/// # method. However, this logic is complex and tricky, and it can be difficult to | |||
/// # ascertain if we have adequate coverage (e.g. a graph may run successfully if | |||
/// # the control flow context isn't set properly, but a more complicated use case | |||
/// # that might not be obvious to test will fail). Thus we instead explicitly test | |||
/// # the low-level behavior. | |||
/// </summary> | |||
[TestClass] | |||
public class CreateOpFromTfOperationTest : PythonTest | |||
{ | |||
[TestMethod] | |||
public void TestShape() | |||
{ | |||
using (var g = tf.Graph().as_default()) | |||
{ | |||
var x = constant_op.constant(new[,] {{1, 2, 3}, {4, 5, 6}}); | |||
var c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] {x}, new Operation[0]); | |||
var op = g._create_op_from_tf_operation(c_op); | |||
Assert.AreEqual("myop", op.name); | |||
Assert.AreEqual("Identity", op.type); | |||
Assert.AreEqual(1, len(op.outputs)); | |||
assertItemsEqual(new[] {2, 3}, op.outputs[0].shape); | |||
} | |||
} | |||
[TestMethod] | |||
public void TestUniqueName() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
//var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]); | |||
//var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new Tensor[0], new Operation[0]); | |||
//var op = g._create_op_from_tf_operation(c_op); | |||
//var op2 = g._create_op_from_tf_operation(c_op2); | |||
var op = constant_op.constant(0, name: "myop").op; | |||
var op2 = constant_op.constant(0, name: "myop_1").op; | |||
// Create ops with same names as op1 and op2. We expect the new names to be | |||
// uniquified. | |||
var op3 = constant_op.constant(0, name: "myop").op; | |||
var op4 = constant_op.constant(0, name: "myop_1").op; | |||
self.assertEqual(op.name, "myop"); | |||
self.assertEqual(op2.name, "myop_1"); | |||
self.assertEqual(op3.name, "myop_2"); | |||
self.assertEqual(op4.name, "myop_1_1"); | |||
} | |||
[Ignore("need tesnroflow expose UpdateEdge API")] | |||
[TestMethod] | |||
public void TestCond() | |||
{ | |||
var g = tf.Graph().as_default(); | |||
var x = constant_op.constant(10); | |||
var true_fn = new Func<Tensor>(() => | |||
{ | |||
var c_op = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]); | |||
var new_ops = g._add_new_tf_operations(); | |||
self.assertEqual(len(new_ops), 1); | |||
return x; | |||
}); | |||
control_flow_ops.cond(x < 10, true_fn, () => x); | |||
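            // In v1 graph-mode control flow, cond rewires each branch input through
            // a Switch op; the assertions below on op.inputs[0].op verify exactly that.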
var op = g.get_operation_by_name("cond/myop"); | |||
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true); | |||
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); | |||
self.assertIsNotNone(op); | |||
self.assertEqual(op.name, "cond/myop"); | |||
self.assertEqual(op.type, "Identity"); | |||
//self.assertEqual(op.outputs, new object[0]); | |||
var op_input = op.inputs[0].op; | |||
self.assertEqual(op_input.type, "Switch"); | |||
self.assertEqual(op_input.inputs[0].name, x.name); | |||
self.assertEqual(op.graph, g); | |||
self.assertIsNotNone(op._get_control_flow_context()); | |||
var cond_text = op._get_control_flow_context() as ControlFlowContext; | |||
self.assertEqual(cond_text.name, "cond/cond_text"); | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void TestWhileLoop() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
            Operation x = null;
x = constant_op.constant(42); | |||
var body = new Func<int, int>(i => | |||
{ | |||
ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x}, | |||
new Operation[0]); | |||
var new_ops = graph._add_new_tf_operations(); | |||
self.assertEqual(len(new_ops), 1); | |||
return i; | |||
}); | |||
            // TODO: port control_flow_ops.while_loop
            //control_flow_ops.while_loop(i => i < 10, body, new[] { 0 }, name: "myloop");
var op = graph.get_operation_by_name("myloop/myop"); | |||
self.assertIsNotNone(op); | |||
self.assertEqual(op.name, "myloop/myop"); | |||
self.assertEqual(op.type, "Identity"); | |||
self.assertEqual(op.outputs.Length, 0); | |||
var op_input = op.inputs[0].op; | |||
self.assertEqual(op_input.type, "Enter"); | |||
self.assertItemsEqual(op_input.inputs.OfType<Operation>().ToArray(), new[] {x}); | |||
self.assertEqual(op.graph, graph); | |||
self.assertIsNotNone(op._get_control_flow_context()); | |||
self.assertEqual(((ControlFlowContext)op._get_control_flow_context()).name, "myloop/while_context"); | |||
/* | |||
@test_util.run_v1_only("b/120545219") | |||
def testWhileLoop(self): | |||
g = ops.Graph() | |||
with g.as_default(): | |||
x = test_ops.int_output() | |||
def body(i): | |||
ops._create_c_op(ops.get_default_graph(), | |||
ops._NodeDef("IntInput", "myloop/myop"), [x], []) | |||
new_ops = g._add_new_tf_operations() | |||
self.assertEqual(len(new_ops), 1) | |||
return i | |||
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") | |||
op = g.get_operation_by_name("myloop/myop") | |||
self.assertIsNotNone(op) | |||
self.assertEqual(op.name, "myloop/myop") | |||
self.assertEqual(op.type, "IntInput") | |||
self.assertEqual(op.outputs, []) | |||
op_input = op.inputs[0].op | |||
self.assertEqual(op_input.type, "Enter") | |||
self.assertEqual(list(op_input.inputs), [x]) | |||
self.assertEqual(op.graph, g) | |||
# pylint: disable=protected-access | |||
self.assertIsNotNone(op._get_control_flow_context()) | |||
self.assertEqual(op._get_control_flow_context().name, | |||
"myloop/while_context") | |||
# pylint: enable=protected-access | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void TestWhileLoopWithInternalControlDep() | |||
{ | |||
/* | |||
@test_util.run_v1_only("b/120545219") | |||
def testWhileLoopWithInternalControlDep(self): | |||
g = ops.Graph() | |||
with g.as_default(): | |||
x = test_ops.int_output() | |||
def body(i): | |||
c = constant_op.constant(1.0, name="c") | |||
ops._create_c_op(ops.get_default_graph(), | |||
ops._NodeDef("IntInput", "myloop/myop"), [x], []) | |||
with ops.control_dependencies([c]): | |||
new_ops = g._add_new_tf_operations() | |||
self.assertEqual(len(new_ops), 1) | |||
return i | |||
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") | |||
op = g.get_operation_by_name("myloop/myop") | |||
self.assertIsNotNone(op) | |||
c = g.get_operation_by_name("myloop/c") | |||
self.assertIsNotNone(c) | |||
# Internal control dep is preserved | |||
self.assertEqual(op.control_inputs, [c]) | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void TestWhileLoopWithExternalControlDep() | |||
{ | |||
/* | |||
@test_util.run_v1_only("b/120545219") | |||
def testWhileLoopWithExternalControlDep(self): | |||
g = ops.Graph() | |||
with g.as_default(): | |||
x = test_ops.int_output() | |||
c = constant_op.constant(1.0) | |||
def body(i): | |||
ops._create_c_op(ops.get_default_graph(), | |||
ops._NodeDef("IntInput", "myloop/myop"), [x], []) | |||
with ops.control_dependencies([c]): | |||
new_ops = g._add_new_tf_operations() | |||
self.assertEqual(len(new_ops), 1) | |||
return i | |||
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop") | |||
op = g.get_operation_by_name("myloop/myop") | |||
self.assertIsNotNone(op) | |||
# External control dep is removed and replaced with internal control dep | |||
self.assertNotEqual(op.control_inputs[0], c.op) | |||
self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context()) | |||
*/ | |||
} | |||
} | |||
} | |||
@@ -1,195 +1,195 @@ | |||
using Microsoft.VisualStudio.TestTools.UnitTesting; | |||
using Tensorflow; | |||
namespace TensorFlowNET.UnitTest.ops_test | |||
{ | |||
/// <summary> | |||
/// excerpt of tensorflow/python/framework/ops_test.py | |||
/// </summary> | |||
[TestClass] | |||
public class GraphTest : PythonTest | |||
{ | |||
[TestInitialize] | |||
public void SetUp() | |||
{ | |||
ops.reset_default_graph(); | |||
} | |||
[TestCleanup] | |||
public void TearDown() | |||
{ | |||
ops.reset_default_graph(); | |||
} | |||
        private void _AssertDefault(Graph expected)
        {
            Assert.AreSame(ops.get_default_graph(), expected);
        }
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testResetDefaultGraphNesting() | |||
{ | |||
/* | |||
def testResetDefaultGraphNesting(self): | |||
g0 = ops.Graph() | |||
with self.assertRaises(AssertionError): | |||
with g0.as_default(): | |||
ops.reset_default_graph() | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testGraphContextManagerCancelsEager() | |||
{ | |||
/* | |||
def testGraphContextManagerCancelsEager(self): | |||
with context.eager_mode(): | |||
with ops.Graph().as_default(): | |||
self.assertFalse(context.executing_eagerly()) | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testGraphContextManager() | |||
{ | |||
/* | |||
def testGraphContextManager(self): | |||
g0 = ops.Graph() | |||
with g0.as_default() as g1: | |||
self.assertIs(g0, g1) | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testDefaultGraph() | |||
{ | |||
/* | |||
def testDefaultGraph(self): | |||
orig = ops.get_default_graph() | |||
self._AssertDefault(orig) | |||
g0 = ops.Graph() | |||
self._AssertDefault(orig) | |||
context_manager_0 = g0.as_default() | |||
self._AssertDefault(orig) | |||
with context_manager_0 as g0: | |||
self._AssertDefault(g0) | |||
with ops.Graph().as_default() as g1: | |||
self._AssertDefault(g1) | |||
self._AssertDefault(g0) | |||
self._AssertDefault(orig) | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testPreventFeeding() | |||
{ | |||
/* | |||
def testPreventFeeding(self): | |||
g = ops.Graph() | |||
a = constant_op.constant(2.0) | |||
self.assertTrue(g.is_feedable(a)) | |||
g.prevent_feeding(a) | |||
self.assertFalse(g.is_feedable(a)) | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testAsGraphElementConversions() | |||
{ | |||
/* | |||
def testAsGraphElementConversions(self): | |||
class ConvertibleObj(object): | |||
def _as_graph_element(self): | |||
return "FloatOutput:0" | |||
class NonConvertibleObj(object): | |||
pass | |||
g = ops.Graph() | |||
a = _apply_op(g, "FloatOutput", [], [dtypes.float32]) | |||
self.assertEqual(a, g.as_graph_element(ConvertibleObj())) | |||
with self.assertRaises(TypeError): | |||
g.as_graph_element(NonConvertibleObj()) | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testGarbageCollected() | |||
{ | |||
/* | |||
# Regression test against creating custom __del__ functions in classes | |||
# involved in cyclic references, e.g. Graph and Operation. (Python won't gc | |||
# cycles that require calling a __del__ method, because the __del__ method can | |||
# theoretically increase the object's refcount to "save" it from gc, and any | |||
# already-deleted objects in the cycle would have be to restored.) | |||
def testGarbageCollected(self): | |||
# Create a graph we can delete and a weak reference to monitor if it's gc'd | |||
g = ops.Graph() | |||
g_ref = weakref.ref(g) | |||
# Create some ops | |||
with g.as_default(): | |||
a = constant_op.constant(2.0) | |||
b = constant_op.constant(3.0) | |||
c = math_ops.add(a, b) | |||
# Create a session we can delete | |||
with session.Session(graph=g) as sess: | |||
self.evaluate(c) | |||
# Delete all references and trigger gc | |||
del g | |||
del a | |||
del b | |||
del c | |||
del sess | |||
gc.collect() | |||
self.assertIsNone(g_ref()) | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testRunnableAfterInvalidShape() | |||
{ | |||
/* | |||
def testRunnableAfterInvalidShape(self): | |||
with ops.Graph().as_default(): | |||
with self.assertRaises(ValueError): | |||
math_ops.add([1, 2], [1, 2, 3]) | |||
a = constant_op.constant(1) | |||
with session.Session() as sess: | |||
self.evaluate(a) | |||
*/ | |||
} | |||
[Ignore("Todo: Port")] | |||
[TestMethod] | |||
public void testRunnableAfterInvalidShapeWithKernelLabelMap() | |||
{ | |||
/* | |||
def testRunnableAfterInvalidShapeWithKernelLabelMap(self): | |||
g = ops.Graph() | |||
with g.as_default(): | |||
with g._kernel_label_map({"KernelLabelRequired": "overload_1"}): | |||
with self.assertRaises(ValueError): | |||
test_ops.kernel_label_required(1) | |||
a = constant_op.constant(1) | |||
with session.Session() as sess: | |||
self.evaluate(a) | |||
*/ | |||
} | |||
} | |||
} | |||
@@ -7,10 +7,13 @@ | |||
</PropertyGroup> | |||
<ItemGroup> | |||
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.2.0" /> | |||
<PackageReference Include="MSTest.TestAdapter" Version="2.0.0" /> | |||
<PackageReference Include="MSTest.TestFramework" Version="2.0.0" /> | |||
<PackageReference Include="coverlet.collector" Version="1.0.1" /> | |||
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.5.0" /> | |||
<PackageReference Include="MSTest.TestAdapter" Version="2.1.0" /> | |||
<PackageReference Include="MSTest.TestFramework" Version="2.1.0" /> | |||
<PackageReference Include="coverlet.collector" Version="1.2.0"> | |||
<PrivateAssets>all</PrivateAssets> | |||
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> | |||
</PackageReference> | |||
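    <!-- PrivateAssets="all" marks coverlet.collector as a development-only
         dependency: the asset types listed in IncludeAssets are consumed by
         this project but are not flowed to projects that reference it. -->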
</ItemGroup> | |||
<ItemGroup> | |||