@@ -0,0 +1,12 @@ | |||
# These are supported funding model platforms | |||
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] | |||
patreon: # Replace with a single Patreon username | |||
open_collective: # Replace with a single Open Collective username | |||
ko_fi: # Replace with a single Ko-fi username | |||
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel | |||
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry | |||
liberapay: # Replace with a single Liberapay username | |||
issuehunt: # Replace with a single IssueHunt username | |||
otechie: # Replace with a single Otechie username | |||
custom: ['https://bit.ly/2op1mu5'] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
@@ -107,34 +107,9 @@ Run this example in [Jupyter Notebook](https://github.com/SciSharp/SciSharpCube) | |||
Read the docs & book [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html). | |||
### More examples: | |||
There are many examples residing at [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).
Run specific example in shell: | |||
```cs | |||
dotnet TensorFlowNET.Examples.dll -ex "MNIST CNN" | |||
``` | |||
Example runner will download all the required files like training data and model pb files. | |||
* [Hello World](test/TensorFlowNET.Examples/HelloWorld.cs) | |||
* [Basic Operations](test/TensorFlowNET.Examples/BasicOperations.cs) | |||
* [Linear Regression](test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs) | |||
* [Logistic Regression](test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs) | |||
* [Nearest Neighbor](test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs) | |||
* [Naive Bayes Classification](test/TensorFlowNET.Examples/BasicModels/NaiveBayesClassifier.cs) | |||
* [Full Connected Neural Network](test/TensorFlowNET.Examples/ImageProcess/DigitRecognitionNN.cs) | |||
* [Image Processing](test/TensorFlowNET.Examples/ImageProcessing) | |||
* [K-means Clustering](test/TensorFlowNET.Examples/BasicModels/KMeansClustering.cs) | |||
* [NN XOR](test/TensorFlowNET.Examples/BasicModels/NeuralNetXor.cs) | |||
* [Object Detection](test/TensorFlowNET.Examples/ImageProcessing/ObjectDetection.cs) | |||
* [Text Classification](test/TensorFlowNET.Examples/TextProcessing/BinaryTextClassification.cs) | |||
* [CNN Text Classification](test/TensorFlowNET.Examples/TextProcessing/cnn_models/VdCnn.cs) | |||
* [MNIST CNN](test/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs) | |||
* [Named Entity Recognition](test/TensorFlowNET.Examples/TextProcessing/NER) | |||
* [Transfer Learning for Image Classification in InceptionV3](test/TensorFlowNET.Examples/ImageProcessing/RetrainImageClassifier.cs) | |||
For troubleshooting of running examples or installation, please refer [here](tensorflowlib/README.md).
### Contribute: | |||
@@ -5,23 +5,15 @@ VisualStudioVersion = 16.0.29102.190 | |||
MinimumVisualStudioVersion = 10.0.40219.1 | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.UnitTest", "test\TensorFlowNET.UnitTest\TensorFlowNET.UnitTest.csproj", "{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples", "test\TensorFlowNET.Examples\TensorFlowNET.Examples.csproj", "{1FE60088-157C-4140-91AB-E96B915E4BAE}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Core", "src\TensorFlowNET.Core\TensorFlowNET.Core.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Keras.Core", "src\KerasNET.Core\Keras.Core.csproj", "{902E188F-A953-43B4-9991-72BAB1697BC3}" | |||
EndProject | |||
Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TensorFlowNET.Examples.FSharp", "test\TensorFlowNET.Examples.FSharp\TensorFlowNET.Examples.FSharp.fsproj", "{62BC3801-F0D3-44A9-A0AC-712F40C8F961}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowBenchmark", "src\TensorFlowNet.Benchmarks\TensorFlowBenchmark.csproj", "{68861442-971A-4196-876E-C9330F0B3C54}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}" | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Models", "src\TensorFlowNET.Models\TensorFlowNET.Models.csproj", "{D03F94CF-B283-4730-B177-21A57641061F}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}" | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Text", "src\TensorFlowNET.Text\TensorFlowNET.Text.csproj", "{904472F8-40E1-4650-AA6F-C7F209B3691B}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowDatasets", "src\TensorFlowDatasets\TensorFlowDatasets.csproj", "{DF151A51-E9FD-41BD-B0F4-08A743755D44}" | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Hub", "src\TensorFlowNET.Hub\TensorFlowNET.Hub.csproj", "{4EAFAE19-C832-47C6-B01E-0F4268C9072C}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples.GPU", "test\TensorFlowNET.Examples\TensorFlowNET.Examples.GPU.csproj", "{6F6B3382-8F87-4CD9-BF87-C81D5405685A}" | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Datasets", "src\TensorFlowNET.Datasets\TensorFlowNET.Datasets.csproj", "{494D6CAD-2C0D-4C0B-90E2-B097DB039383}" | |||
EndProject | |||
Global | |||
GlobalSection(SolutionConfigurationPlatforms) = preSolution | |||
@@ -33,42 +25,26 @@ Global | |||
{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{1FE60088-157C-4140-91AB-E96B915E4BAE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{1FE60088-157C-4140-91AB-E96B915E4BAE}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{1FE60088-157C-4140-91AB-E96B915E4BAE}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{1FE60088-157C-4140-91AB-E96B915E4BAE}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{902E188F-A953-43B4-9991-72BAB1697BC3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{902E188F-A953-43B4-9991-72BAB1697BC3}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{902E188F-A953-43B4-9991-72BAB1697BC3}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{902E188F-A953-43B4-9991-72BAB1697BC3}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{62BC3801-F0D3-44A9-A0AC-712F40C8F961}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{68861442-971A-4196-876E-C9330F0B3C54}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{68861442-971A-4196-876E-C9330F0B3C54}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{68861442-971A-4196-876E-C9330F0B3C54}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{68861442-971A-4196-876E-C9330F0B3C54}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{D03F94CF-B283-4730-B177-21A57641061F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{D03F94CF-B283-4730-B177-21A57641061F}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{D03F94CF-B283-4730-B177-21A57641061F}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{D03F94CF-B283-4730-B177-21A57641061F}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{904472F8-40E1-4650-AA6F-C7F209B3691B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{904472F8-40E1-4650-AA6F-C7F209B3691B}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{904472F8-40E1-4650-AA6F-C7F209B3691B}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{904472F8-40E1-4650-AA6F-C7F209B3691B}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{4EAFAE19-C832-47C6-B01E-0F4268C9072C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{4EAFAE19-C832-47C6-B01E-0F4268C9072C}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{4EAFAE19-C832-47C6-B01E-0F4268C9072C}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{4EAFAE19-C832-47C6-B01E-0F4268C9072C}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{494D6CAD-2C0D-4C0B-90E2-B097DB039383}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{494D6CAD-2C0D-4C0B-90E2-B097DB039383}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{494D6CAD-2C0D-4C0B-90E2-B097DB039383}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{494D6CAD-2C0D-4C0B-90E2-B097DB039383}.Release|Any CPU.Build.0 = Release|Any CPU | |||
EndGlobalSection | |||
GlobalSection(SolutionProperties) = preSolution | |||
HideSolutionNode = FALSE | |||
@@ -0,0 +1,2 @@ | |||
<wpf:ResourceDictionary xml:space="preserve" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:ss="urn:shemas-jetbrains-com:settings-storage-xaml" xmlns:wpf="http://schemas.microsoft.com/winfx/2006/xaml/presentation"> | |||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Tensorflow/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary> |
@@ -0,0 +1,157 @@ | |||
# Chapter. Queue | |||
TensorFlow is capable of handling multiple threads, and queues are a powerful mechanism for asynchronous computation. If we have large datasets this can significantly speed up the training process of our models. This functionality is especially handy when reading, pre-processing and extracting mini-batches of our training data. The secret to being able to do professional and high performance training of our model is understanding TensorFlow queuing operations. TensorFlow has implemented 4 types of Queue: **FIFOQueue**, **PaddingFIFOQueue**, **PriorityQueue** and **RandomShuffleQueue**.
 | |||
Like everything in TensorFlow, a queue is a node in a computation graph. It's a stateful node, like a variable: other nodes can modify its content, In particular, nodes can enqueue new items into the queue, or dequeue existing items from the queue. | |||
To get started with queues, let's consider a simple example. We will create a "first in, first out" queue (FIFOQueue) and fill it with numbers. Then we'll construct a graph that takes an item off the queue, adds one to that item, and puts it back on the end of the queue.
```csharp | |||
[TestMethod] | |||
public void FIFOQueue() | |||
{ | |||
// create a first in first out queue with capacity up to 2 | |||
// and data type set as int32 | |||
var queue = tf.FIFOQueue(2, tf.int32); | |||
// init queue, push 2 elements into queue. | |||
var init = queue.enqueue_many(new[] { 10, 20 }); | |||
// pop out the first element | |||
var x = queue.dequeue(); | |||
// add 1 | |||
var y = x + 1; | |||
// push back into queue | |||
var inc = queue.enqueue(y); | |||
using (var sess = tf.Session()) | |||
{ | |||
// init queue | |||
init.run(); | |||
// pop out first element and push back calculated y | |||
(int dequeued, _) = sess.run((x, inc)); | |||
Assert.AreEqual(10, dequeued); | |||
(dequeued, _) = sess.run((x, inc)); | |||
Assert.AreEqual(20, dequeued); | |||
(dequeued, _) = sess.run((x, inc)); | |||
Assert.AreEqual(11, dequeued); | |||
(dequeued, _) = sess.run((x, inc)); | |||
Assert.AreEqual(21, dequeued); | |||
// thread will hang or block if you run sess.run(x) again | |||
// until queue has more element. | |||
} | |||
} | |||
``` | |||
`Enqueue`, `EnqueueMany` and `Dequeue` are special nodes. They take a pointer to the queue instead of a normal value, allowing them to change it. We first create a FIFOQueue *queue* with a capacity of 2 and enqueue two values into the *queue*. Then we immediately attempt to *dequeue* a value from it and assign it to *y*, where we simply add 1 to the dequeued variable. Next, we start up a *session* and run. After we've run this operation a few times the queue will be empty - if we try to run the operation again, the main thread of the program will hang or block - this is because it will be waiting for another operation to be run to put more values in the queue.
#### FIFOQueue | |||
Creates a queue that dequeues elements in a first-in first-out order. A `FIFOQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `FIFOQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. | |||
#### PaddingFIFOQueue | |||
A FIFOQueue that supports batching variable-sized tensors by padding. A `PaddingFIFOQueue` may contain components with dynamic shape, while also supporting `dequeue_many`. A `PaddingFIFOQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are described by the `shapes` argument. | |||
```csharp
[TestMethod] | |||
public void PaddingFIFOQueue() | |||
{ | |||
var numbers = tf.placeholder(tf.int32); | |||
var queue = tf.PaddingFIFOQueue(10, tf.int32, new TensorShape(-1)); | |||
var enqueue = queue.enqueue(numbers); | |||
var dequeue_many = queue.dequeue_many(n: 3); | |||
using(var sess = tf.Session()) | |||
{ | |||
sess.run(enqueue, (numbers, new[] { 1 })); | |||
sess.run(enqueue, (numbers, new[] { 2, 3 })); | |||
sess.run(enqueue, (numbers, new[] { 3, 4, 5 })); | |||
var result = sess.run(dequeue_many[0]); | |||
Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 1, 0, 0 }, result[0].ToArray<int>())); | |||
Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 2, 3, 0 }, result[1].ToArray<int>())); | |||
Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 3, 4, 5 }, result[2].ToArray<int>())); | |||
} | |||
} | |||
``` | |||
#### PriorityQueue | |||
A queue implementation that dequeues elements in prioritized order. A `PriorityQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `PriorityQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `types`, and whose shapes are optionally described by the `shapes` argument. | |||
```csharp | |||
[TestMethod] | |||
public void PriorityQueue() | |||
{ | |||
var queue = tf.PriorityQueue(3, tf.@string); | |||
var init = queue.enqueue_many(new[] { 2L, 4L, 3L }, new[] { "p1", "p2", "p3" }); | |||
var x = queue.dequeue(); | |||
using (var sess = tf.Session()) | |||
{ | |||
init.run(); | |||
// output will be 2, 3, 4
var result = sess.run(x); | |||
Assert.AreEqual(result[0].GetInt64(), 2L); | |||
result = sess.run(x); | |||
Assert.AreEqual(result[0].GetInt64(), 3L); | |||
result = sess.run(x); | |||
Assert.AreEqual(result[0].GetInt64(), 4L); | |||
} | |||
} | |||
``` | |||
#### RandomShuffleQueue | |||
A queue implementation that dequeues elements in a random order. A `RandomShuffleQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `RandomShuffleQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. | |||
```csharp | |||
[TestMethod] | |||
public void RandomShuffleQueue() | |||
{ | |||
var queue = tf.RandomShuffleQueue(10, min_after_dequeue: 1, dtype: tf.int32); | |||
var init = queue.enqueue_many(new[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }); | |||
var x = queue.dequeue(); | |||
string results = ""; | |||
using (var sess = tf.Session()) | |||
{ | |||
init.run(); | |||
foreach(var i in range(9)) | |||
results += (int)sess.run(x) + "."; | |||
// output in random order | |||
// 1.2.3.4.5.6.7.8.9. | |||
} | |||
} | |||
``` | |||
Queue methods must run on the same device as the queue. `FIFOQueue` and `RandomShuffleQueue` are important TensorFlow objects for computing tensor asynchronously in a graph. For example, a typical input architecture is to use a `RandomShuffleQueue` to prepare inputs for training a model: | |||
* Multiple threads prepare training examples and push them in the queue. | |||
* A training thread executes a training op that dequeues mini-batches from the queue. | |||
This architecture simplifies the construction of input pipelines. | |||
From the above example, once the output gets to the point above you’ll actually have to terminate the program as it is blocked. Now, this isn’t very useful. What we really want to happen is for our little program to reload or enqueue more values whenever our queue is empty or is about to become empty. We could fix this by explicitly running our *enqueue_op* again in the code above to reload our queue with values. However, for large, more realistic programs, this will become unwieldy. Thankfully, TensorFlow has a solution. | |||
TensorFlow provides two classes to help with multi-threading tasks: `tf.Coordinator` and `tf.QueueRunner`. These two classes are designed to be used together. The `Coordinator` class helps multiple threads stop together and report exceptions to a main thread. The `QueueRunner` class is used to create a number of threads cooperating to enqueue tensors in the same queue.
@@ -22,6 +22,7 @@ Welcome to TensorFlow.NET's documentation! | |||
Graph | |||
Session | |||
Operation | |||
Queue | |||
Gradient | |||
Train | |||
EagerMode | |||
@@ -0,0 +1,63 @@ | |||
<Project Sdk="Microsoft.NET.Sdk"> | |||
<PropertyGroup> | |||
<TargetFramework>netstandard2.0</TargetFramework> | |||
<RuntimeIdentifiers>win-x64;linux-x64</RuntimeIdentifiers> | |||
<AssemblyName>SciSharp.Tensorflow-Cpu.Redist</AssemblyName> | |||
<PackageId>SciSharp.Tensorflow-Cpu.Redist</PackageId> | |||
<Version>1.0.0</Version> | |||
<Authors>SciSharp team</Authors> | |||
<Company>SciSharp STACK</Company> | |||
<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl> | |||
<RepositoryType>git</RepositoryType> | |||
<Description> | |||
Meta-package for CPU TensorFlow library runtime distribution.
Libraries can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/ | |||
</Description> | |||
<PackageLicenseExpression>Apache-2.0</PackageLicenseExpression> | |||
<PackageProjectUrl>https://github.com/SciSharp/TensorFlow.NET</PackageProjectUrl> | |||
<PackageTags>native;tensorflow;machine-learning;ML</PackageTags> | |||
<PackageOutputPath>../../packages</PackageOutputPath> | |||
<GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||
<EnableDefaultCompileItems>false</EnableDefaultCompileItems> | |||
<ProduceReferenceAssembly>false</ProduceReferenceAssembly> | |||
<IncludeBuildOutput>false</IncludeBuildOutput> | |||
</PropertyGroup> | |||
<!-- Need to add some dependencies so Meta-Project will pull runtime Project(s) --> | |||
<Target Name="ValidateRuntimePack" BeforeTargets="GenerateNuspec"> | |||
<ItemGroup> | |||
<RuntimeLinux Include="../../packages/runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist.*.nupkg" /> | |||
<RuntimeWin Include="../../packages/runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist.*.nupkg" /> | |||
</ItemGroup> | |||
<Message Importance="high" Text="Package runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist: found" Condition="Exists('@(RuntimeLinux)')" /> | |||
<Message Importance="high" Text="Package runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist: not found" Condition="!Exists('@(RuntimeLinux)')" /> | |||
<Message Importance="high" Text="Package runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist: found" Condition="Exists('@(RuntimeWin)')" /> | |||
<Message Importance="high" Text="Package runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist: not found" Condition="!Exists('@(RuntimeWin)')" /> | |||
<Error Text="You must locally 'dotnet pack' at least one runtime.{rid}.SciSharp.Tensorflow-Cpu.Redist project." Condition="!Exists('@(RuntimeLinux)') AND !Exists('@(RuntimeWin)')" /> | |||
</Target> | |||
<PropertyGroup> | |||
<RestoreSources>../../packages;$(RestoreSources);https://api.nuget.org/v3/index.json</RestoreSources> | |||
</PropertyGroup> | |||
<ItemGroup Condition="Exists('../../packages/runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist.1.0.0.nupkg')"> | |||
<PackageReference Include="runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist" Version="1.0.0" /> | |||
</ItemGroup> | |||
<ItemGroup Condition="Exists('../../packages/runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist.1.0.0.nupkg')"> | |||
<PackageReference Include="runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist" Version="1.0.0" /> | |||
</ItemGroup> | |||
<ItemGroup> | |||
<RuntimeLinux Include="../../packages/runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist.*.nupkg" /> | |||
<RuntimeWin Include="../../packages/runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist.*.nupkg" /> | |||
<Content Include="runtime.json" Condition="Exists('@(RuntimeLinux)') AND Exists('@(RuntimeWin)')"> | |||
<PackagePath>runtime.json</PackagePath> | |||
<Pack>true</Pack> | |||
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> | |||
</Content> | |||
</ItemGroup> | |||
</Project> |
@@ -0,0 +1,14 @@ | |||
{ | |||
"runtimes": { | |||
"linux-x64": { | |||
"SciSharp.Tensorflow-Cpu.Redist": {
"runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist": "1.0.0" | |||
} | |||
}, | |||
"win-x64": { | |||
"SciSharp.Tensorflow-Cpu.Redist": {
"runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist": "1.0.0" | |||
} | |||
} | |||
} | |||
} |
@@ -0,0 +1,81 @@ | |||
<Project Sdk="Microsoft.NET.Sdk"> | |||
<PropertyGroup> | |||
<OutputType>Library</OutputType> | |||
<TargetFramework>netstandard2.0</TargetFramework> | |||
<!-- TensorFlow doesn't support GPUs on macOS. -->
<RuntimeIdentifiers>win-x64;linux-x64</RuntimeIdentifiers> | |||
<AssemblyName>SciSharp.Tensorflow-Gpu.Redist</AssemblyName> | |||
<PackageId>SciSharp.Tensorflow-Gpu.Redist</PackageId> | |||
<Version>1.0.0</Version> | |||
<Authors>SciSharp team</Authors> | |||
<Company>SciSharp STACK</Company> | |||
<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl> | |||
<RepositoryType>git</RepositoryType> | |||
<Description> | |||
Meta-package for GPU TensorFlow library runtime distribution.
Libraries can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/ | |||
</Description> | |||
<PackageLicenseExpression>Apache-2.0</PackageLicenseExpression> | |||
<PackageProjectUrl>https://github.com/SciSharp/TensorFlow.NET</PackageProjectUrl> | |||
<PackageTags>native;tensorflow;machine-learning;ML</PackageTags> | |||
<PackageOutputPath>../../packages</PackageOutputPath> | |||
<GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||
<EnableDefaultCompileItems>false</EnableDefaultCompileItems> | |||
<ProduceReferenceAssembly>false</ProduceReferenceAssembly> | |||
<IncludeBuildOutput>false</IncludeBuildOutput> | |||
</PropertyGroup> | |||
<!-- Need to add some dependencies so Meta-Project will pull runtime Project(s) --> | |||
<Target Name="ValidateRuntimePack" BeforeTargets="GenerateNuspec"> | |||
<ItemGroup> | |||
<RuntimeLinux Include="../../packages/runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist.*.nupkg" /> | |||
<RuntimeWin Include="../../packages/runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist.*.nupkg" /> | |||
</ItemGroup> | |||
<Message | |||
Importance="high" | |||
Text="Package runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist: found" | |||
Condition="Exists('@(RuntimeLinux)')" /> | |||
<Message | |||
Importance="high" | |||
Text="Package runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist: not found" | |||
Condition="!Exists('@(RuntimeLinux)')" /> | |||
<Message | |||
Importance="high" | |||
Text="Package runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist: found" | |||
Condition="Exists('@(RuntimeWin)')" /> | |||
<Message | |||
Importance="high" | |||
Text="Package runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist: not found" | |||
Condition="!Exists('@(RuntimeWin)')" /> | |||
<Error | |||
Text="You must locally 'dotnet pack' at least one runtime.{rid}.SciSharp.Tensorflow-Gpu.Redist project." | |||
Condition="!Exists('@(RuntimeLinux)') AND !Exists('@(RuntimeWin)')" /> | |||
</Target> | |||
<PropertyGroup> | |||
<RestoreSources>../../packages;$(RestoreSources);https://api.nuget.org/v3/index.json</RestoreSources> | |||
</PropertyGroup> | |||
<ItemGroup Condition="Exists('../../packages/runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist.1.0.0.nupkg')"> | |||
<PackageReference Include="runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist" Version="1.0.0" /> | |||
</ItemGroup> | |||
<ItemGroup Condition="Exists('../../packages/runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist.1.0.0.nupkg')"> | |||
<PackageReference Include="runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist" Version="1.0.0" /> | |||
</ItemGroup> | |||
<ItemGroup> | |||
<RuntimeLinux Include="../../packages/runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist.*.nupkg" /> | |||
<RuntimeWin Include="../../packages/runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist.*.nupkg" /> | |||
<Content | |||
Include="runtime.json" | |||
Condition="Exists('@(RuntimeLinux)') AND Exists('@(RuntimeWin)')"> | |||
<PackagePath>runtime.json</PackagePath> | |||
<Pack>true</Pack> | |||
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> | |||
</Content> | |||
</ItemGroup> | |||
</Project> |
@@ -0,0 +1,14 @@ | |||
{ | |||
"runtimes": { | |||
"linux-x64": { | |||
"SciSharp.TensorFlow-Gpu.Redist": { | |||
"runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist": "1.0.0" | |||
} | |||
}, | |||
"win-x64": { | |||
"SciSharp.TensorFlow-Gpu.Redist": { | |||
"runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist": "1.0.0" | |||
} | |||
} | |||
} | |||
} |
@@ -0,0 +1,60 @@ | |||
| |||
Microsoft Visual Studio Solution File, Format Version 12.00 | |||
# Visual Studio Version 16 | |||
VisualStudioVersion = 16.0.29102.190 | |||
MinimumVisualStudioVersion = 10.0.40219.1 | |||
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{1E65784D-C976-4DFF-991A-DD5C57FFC8E2}" | |||
ProjectSection(SolutionItems) = preProject | |||
scripts\Copy-NativeTensorFlowLibs.ps1 = scripts\Copy-NativeTensorFlowLibs.ps1 | |||
EndProjectSection | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist", "runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist\runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist.csproj", "{9834D2B4-01BF-4D18-8DCF-F498AC481FE7}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist", "runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist\runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist.csproj", "{9D853997-3143-4F87-B995-7D7024CF4E1A}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist", "runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist\runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist.csproj", "{878C1EE4-B945-41BF-98DE-C4747C28022A}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist", "runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist\runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist.csproj", "{744A3D51-CEF6-4685-B4C3-718FA61143A0}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SciSharp.TensorFlow-Cpu.Redist", "SciSharp.TensorFlow-Cpu.Redist\SciSharp.TensorFlow-Cpu.Redist.csproj", "{0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}" | |||
EndProject | |||
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SciSharp.TensorFlow-Gpu.Redist", "SciSharp.TensorFlow-Gpu.Redist\SciSharp.TensorFlow-Gpu.Redist.csproj", "{1910BE36-82E3-4465-B3B1-788BFD252DB7}" | |||
EndProject | |||
Global | |||
GlobalSection(SolutionConfigurationPlatforms) = preSolution | |||
Debug|Any CPU = Debug|Any CPU | |||
Release|Any CPU = Release|Any CPU | |||
EndGlobalSection | |||
GlobalSection(ProjectConfigurationPlatforms) = postSolution | |||
{9834D2B4-01BF-4D18-8DCF-F498AC481FE7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{9834D2B4-01BF-4D18-8DCF-F498AC481FE7}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{9834D2B4-01BF-4D18-8DCF-F498AC481FE7}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{9834D2B4-01BF-4D18-8DCF-F498AC481FE7}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{9D853997-3143-4F87-B995-7D7024CF4E1A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{9D853997-3143-4F87-B995-7D7024CF4E1A}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{9D853997-3143-4F87-B995-7D7024CF4E1A}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{9D853997-3143-4F87-B995-7D7024CF4E1A}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{878C1EE4-B945-41BF-98DE-C4747C28022A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{878C1EE4-B945-41BF-98DE-C4747C28022A}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{878C1EE4-B945-41BF-98DE-C4747C28022A}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{878C1EE4-B945-41BF-98DE-C4747C28022A}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{744A3D51-CEF6-4685-B4C3-718FA61143A0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{744A3D51-CEF6-4685-B4C3-718FA61143A0}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{744A3D51-CEF6-4685-B4C3-718FA61143A0}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{744A3D51-CEF6-4685-B4C3-718FA61143A0}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}.Release|Any CPU.Build.0 = Release|Any CPU | |||
{1910BE36-82E3-4465-B3B1-788BFD252DB7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||
{1910BE36-82E3-4465-B3B1-788BFD252DB7}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||
{1910BE36-82E3-4465-B3B1-788BFD252DB7}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
{1910BE36-82E3-4465-B3B1-788BFD252DB7}.Release|Any CPU.Build.0 = Release|Any CPU | |||
EndGlobalSection | |||
GlobalSection(SolutionProperties) = preSolution | |||
HideSolutionNode = FALSE | |||
EndGlobalSection | |||
GlobalSection(ExtensibilityGlobals) = postSolution | |||
SolutionGuid = {CD7D5F34-42AE-4CCB-BDFA-1619B3A84708} | |||
EndGlobalSection | |||
EndGlobal |
@@ -0,0 +1,39 @@ | |||
<Project Sdk="Microsoft.NET.Sdk"> | |||
<PropertyGroup> | |||
<TargetFramework>netstandard2.0</TargetFramework> | |||
<RuntimeIdentifier>linux-x64</RuntimeIdentifier> | |||
<AssemblyName>SciSharp.Tensorflow-Cpu.Redist</AssemblyName> | |||
<PackageId>runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist</PackageId> | |||
<Version>1.0.0</Version> | |||
<Authors>SciSharp team</Authors> | |||
<Company>SciSharp STACK</Company> | |||
<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl> | |||
<RepositoryType>git</RepositoryType> | |||
<Description> | |||
Distribution of the Linux CPU TensorFlow library.
The libraries can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/ | |||
</Description> | |||
<PackageLicenseExpression>Apache-2.0</PackageLicenseExpression> | |||
<PackageProjectUrl>https://github.com/SciSharp/TensorFlow.NET</PackageProjectUrl> | |||
<PackageTags>native;tensorflow;machine-learning;ML</PackageTags> | |||
<PackageOutputPath>../../packages</PackageOutputPath> | |||
<GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||
<EnableDefaultCompileItems>false</EnableDefaultCompileItems> | |||
<ProduceReferenceAssembly>false</ProduceReferenceAssembly> | |||
<IncludeBuildOutput>false</IncludeBuildOutput> | |||
</PropertyGroup> | |||
<ItemGroup> | |||
<!-- All dlls will be dropped in a folder available through DllImport.--> | |||
<Content Include="*.so.*"> | |||
<PackagePath>runtimes/$(RuntimeIdentifier)/native/%(Filename)%(Extension)</PackagePath> | |||
<Pack>true</Pack> | |||
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> | |||
</Content> | |||
</ItemGroup> | |||
</Project> |
@@ -0,0 +1,40 @@ | |||
<Project Sdk="Microsoft.NET.Sdk"> | |||
<PropertyGroup> | |||
<OutputType>Library</OutputType> | |||
<TargetFramework>netstandard2.0</TargetFramework> | |||
<RuntimeIdentifier>linux-x64</RuntimeIdentifier> | |||
<AssemblyName>SciSharp.Tensorflow-Gpu.Redist</AssemblyName> | |||
<PackageId>runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist</PackageId> | |||
<Version>1.0.0</Version> | |||
<Authors>SciSharp team</Authors> | |||
<Company>SciSharp STACK</Company> | |||
<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl> | |||
<RepositoryType>git</RepositoryType> | |||
<Description> | |||
Distribution of the Linux GPU TensorFlow library.
Dll can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/ | |||
</Description> | |||
<PackageLicenseExpression>Apache-2.0</PackageLicenseExpression> | |||
<PackageProjectUrl>https://github.com/SciSharp/TensorFlow.NET</PackageProjectUrl> | |||
<PackageTags>native;tensorflow;machine-learning;ML</PackageTags> | |||
<PackageOutputPath>../../packages</PackageOutputPath> | |||
<GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||
<EnableDefaultCompileItems>false</EnableDefaultCompileItems> | |||
<ProduceReferenceAssembly>false</ProduceReferenceAssembly> | |||
<IncludeBuildOutput>false</IncludeBuildOutput> | |||
</PropertyGroup> | |||
<ItemGroup> | |||
<!-- All dlls will be dropped in a folder available through DllImport.--> | |||
<Content Include="*.so.*"> | |||
<PackagePath>runtimes/$(RuntimeIdentifier)/native/%(Filename)%(Extension)</PackagePath> | |||
<Pack>true</Pack> | |||
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> | |||
</Content> | |||
</ItemGroup> | |||
</Project> |
@@ -0,0 +1,39 @@ | |||
<Project Sdk="Microsoft.NET.Sdk"> | |||
<PropertyGroup> | |||
<TargetFramework>netstandard2.0</TargetFramework> | |||
<RuntimeIdentifier>win-x64</RuntimeIdentifier> | |||
<AssemblyName>SciSharp.Tensorflow-Cpu.Redist</AssemblyName> | |||
<PackageId>runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist</PackageId> | |||
<Version>1.0.0</Version> | |||
<Authors>SciSharp team</Authors> | |||
<Company>SciSharp STACK</Company> | |||
<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl> | |||
<RepositoryType>git</RepositoryType> | |||
<Description> | |||
Distribution of the Windows CPU TensorFlow library.
The libraries can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/ | |||
</Description> | |||
<PackageLicenseExpression>Apache-2.0</PackageLicenseExpression> | |||
<PackageProjectUrl>https://github.com/SciSharp/TensorFlow.NET</PackageProjectUrl> | |||
<PackageTags>native;tensorflow;machine-learning;ML</PackageTags> | |||
<PackageOutputPath>../../packages</PackageOutputPath> | |||
<GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||
<EnableDefaultCompileItems>false</EnableDefaultCompileItems> | |||
<ProduceReferenceAssembly>false</ProduceReferenceAssembly> | |||
<IncludeBuildOutput>false</IncludeBuildOutput> | |||
</PropertyGroup> | |||
<ItemGroup> | |||
<!-- All dlls will be dropped in a folder available through DllImport.--> | |||
<Content Include="*.dll"> | |||
<PackagePath>runtimes/$(RuntimeIdentifier)/native/%(Filename)%(Extension)</PackagePath> | |||
<Pack>true</Pack> | |||
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> | |||
</Content> | |||
</ItemGroup> | |||
</Project> |
@@ -0,0 +1 @@ | |||
tensorflow.dll |
@@ -0,0 +1,40 @@ | |||
<Project Sdk="Microsoft.NET.Sdk"> | |||
<PropertyGroup> | |||
<OutputType>Library</OutputType> | |||
<TargetFramework>netstandard2.0</TargetFramework> | |||
<RuntimeIdentifier>win-x64</RuntimeIdentifier> | |||
<AssemblyName>SciSharp.Tensorflow-Gpu.Redist</AssemblyName> | |||
<PackageId>runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist</PackageId> | |||
<Version>1.0.0</Version> | |||
<Authors>SciSharp team</Authors> | |||
<Company>SciSharp STACK</Company> | |||
<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl> | |||
<RepositoryType>git</RepositoryType> | |||
<Description> | |||
Distribution of the Windows GPU TensorFlow library.
Dll can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/ | |||
</Description> | |||
<PackageLicenseExpression>Apache-2.0</PackageLicenseExpression> | |||
<PackageProjectUrl>https://github.com/SciSharp/TensorFlow.NET</PackageProjectUrl> | |||
<PackageTags>native;tensorflow;machine-learning;ML</PackageTags> | |||
<PackageOutputPath>../../packages</PackageOutputPath> | |||
<GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||
<EnableDefaultCompileItems>false</EnableDefaultCompileItems> | |||
<ProduceReferenceAssembly>false</ProduceReferenceAssembly> | |||
<IncludeBuildOutput>false</IncludeBuildOutput> | |||
</PropertyGroup> | |||
<ItemGroup> | |||
<!-- All dlls will be dropped in a folder available through DllImport.--> | |||
<Content Include="*.dll"> | |||
<PackagePath>runtimes/$(RuntimeIdentifier)/native/%(Filename)%(Extension)</PackagePath> | |||
<Pack>true</Pack> | |||
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> | |||
</Content> | |||
</ItemGroup> | |||
</Project> |
@@ -0,0 +1,167 @@ | |||
<#
.SYNOPSIS
Copy the native TensorFlow libraries locally so that NuGet packages bundling
them for TensorFlow.NET can be produced.
.DESCRIPTION
The TensorFlow libraries are downloaded and extracted for Windows and Linux,
which makes it possible to bundle meta-packages containing them.
.PARAMETER SkipCpuLibraries
Setting this to true skips the downloading of the CPU version of the
TensorFlow libraries.
By default the CPU version of the libraries are downloaded and put in the
relevant projects.
.PARAMETER SkipGpuLibraries
Setting this to true skips the downloading of the GPU version of the
TensorFlow libraries.
By default the GPU version of the libraries are downloaded and put in the
relevant projects.
#>
param(
[switch] $SkipCpuLibraries = $false,
[switch] $SkipGpuLibraries = $false
)
function Expand-TarGzFiles {
<#
.SYNOPSIS
Expands the given list of files from the given archive into the given
target directory.
.DESCRIPTION
The .tar.gz archive is first decompressed with 7-Zip into the target
directory (yielding libtensorflow.tar there), the requested entries are
then extracted from that tar, and the intermediate tar file is removed.
Requires 7z to be on the PATH.
.PARAMETER Archive
Path to the .tar.gz archive that should be considered.
.PARAMETER Files
Files that should be extracted from the archive.
.PARAMETER TargetDirectory
Directory into which the files should be expanded.
#>
param
(
[Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)] [string] $Archive,
[Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)] [string []] $Files,
[Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)] [string] $TargetDirectory
)
# Step 1: gunzip the outer archive; 7z "e" extracts without directory
# structure, so libtensorflow.tar lands directly in $TargetDirectory.
& 7z e $Archive -o"$TargetDirectory"
$TarArchive = Join-Path $TargetDirectory "libtensorflow.tar"
# Step 2: pull the requested entries out of the tar, then discard it.
& 7z e $TarArchive $Files -o"$TargetDirectory"
Remove-Item $TarArchive
}
function Expand-ZipFiles {
    <#
    .SYNOPSIS
    Extracts the requested entries from a zip archive into a target directory.
    .DESCRIPTION
    Thin wrapper around 7-Zip's "e" command, which extracts the listed entries
    without recreating their directory structure. Requires 7z on the PATH.
    .PARAMETER Archive
    Path to the zip archive that should be considered.
    .PARAMETER Files
    Files that should be extracted from the archive.
    .PARAMETER TargetDirectory
    Directory into which the files should be expanded.
    #>
    param(
        [Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)]
        [string] $Archive,
        [Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)]
        [string[]] $Files,
        [Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)]
        [string] $TargetDirectory
    )
    & 7z e $Archive $Files -o"$TargetDirectory"
}
function Split-ArchiveFromUrl {
    <#
    .SYNOPSIS
    Returns the archive file name, i.e. the last "/"-separated segment of the URL.
    .PARAMETER ArchiveUrl
    Url of the archive that will be downloaded.
    #>
    param(
        [Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)]
        [string] $ArchiveUrl
    )
    # "/" contains no regex metacharacters, so -split behaves like String.Split here.
    return ($ArchiveUrl -split "/")[-1]
}
function Copy-Archive {
    <#
    .SYNOPSIS
    Downloads the archive at the given URL into the given target directory and
    returns the local path of the archive. If the archive already exists the
    download is skipped and the existing path is returned.
    .PARAMETER ArchiveUrl
    Url where the archive should be downloaded from.
    .PARAMETER TargetDirectory
    Target directory where the archive should be downloaded.
    #>
    param (
        [Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)]
        [string] $ArchiveUrl,
        [Parameter(Mandatory = $true, ValueFromPipeline = $true, ValueFromPipelineByPropertyName = $true)]
        [string] $TargetDirectory
    )
    $ArchiveName = Split-ArchiveFromUrl $ArchiveUrl
    # Fix: honor the TargetDirectory parameter. The original re-derived
    # "$PSScriptRoot\..\packages" here, silently ignoring the argument.
    $TargetPath = Join-Path $TargetDirectory $ArchiveName
    if (Test-Path $TargetPath -PathType Leaf) {
        # Reusing an existing archive is a skip, not a failure, so don't
        # write to the error stream; the function still returns the path.
        Write-Warning "$TargetPath already exists, please remove to download again."
        return $TargetPath
    }
    if (-not (Test-Path $TargetDirectory -PathType Container)) {
        Write-Host "Creating missing $TargetDirectory"
        # Fix: New-Item emits the created DirectoryInfo to the pipeline, which
        # would leak into this function's return value alongside the path.
        New-Item -Path $TargetDirectory -ItemType Directory | Out-Null
    }
    Write-Host "Downloading $ArchiveUrl, this might take a while..."
    $wc = New-Object System.Net.WebClient
    $wc.DownloadFile($ArchiveUrl, $TargetPath)
    return $TargetPath
}
# Archives published by the TensorFlow team for release 1.14.0.
$LinuxGpuArchive = "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-1.14.0.tar.gz"
$LinuxCpuArchive = "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-1.14.0.tar.gz"
# NOTE(review): ".\libtensorflow.tar" is listed among the entries to extract
# from the tar itself, which looks redundant - confirm whether 7z needs it.
$LinuxFiles = @(".\libtensorflow.tar", ".\lib\libtensorflow.so", ".\lib\libtensorflow.so.1", ".\lib\libtensorflow.so.1.14.0", `
".\lib\libtensorflow_framework.so", ".\lib\libtensorflow_framework.so.1", ".\lib\libtensorflow_framework.so.1.14.0")
$WindowsGpuArchive = "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip"
$WindowsCpuArchive = "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip"
$WindowsFiles = @("lib\tensorflow.dll")
$PackagesDirectory = [IO.Path]::Combine($PSScriptRoot, "..", "packages")
if (-not $SkipGpuLibraries) {
    $Archive = Copy-Archive -ArchiveUrl $WindowsGpuArchive -TargetDirectory $PackagesDirectory
    # Fix: use "Tensorflow" casing consistently; the original mixed
    # "TensorFlow" for Windows with "Tensorflow" for Linux, while the redist
    # PackageIds all use "Tensorflow" (harmless on Windows' case-insensitive
    # filesystem, but wrong on a case-sensitive checkout).
    $TargetDirectory = [IO.Path]::Combine($PSScriptRoot, "..", "redist", "runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist")
    Expand-ZipFiles $Archive $WindowsFiles $TargetDirectory
    $Archive = Copy-Archive -ArchiveUrl $LinuxGpuArchive -TargetDirectory $PackagesDirectory
    $TargetDirectory = [IO.Path]::Combine($PSScriptRoot, "..", "redist", "runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist")
    Expand-TarGzFiles $Archive $LinuxFiles $TargetDirectory
}
if (-not $SkipCpuLibraries) {
    $Archive = Copy-Archive -ArchiveUrl $WindowsCpuArchive -TargetDirectory $PackagesDirectory
    $TargetDirectory = [IO.Path]::Combine($PSScriptRoot, "..", "redist", "runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist")
    Expand-ZipFiles $Archive $WindowsFiles $TargetDirectory
    $Archive = Copy-Archive -ArchiveUrl $LinuxCpuArchive -TargetDirectory $PackagesDirectory
    $TargetDirectory = [IO.Path]::Combine($PSScriptRoot, "..", "redist", "runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist")
    Expand-TarGzFiles $Archive $LinuxFiles $TargetDirectory
}
@@ -10,7 +10,7 @@ PM> Install-Package SciSharp.TensorFlow.Redist | |||
* GPU version for Windows | |||
```powershell | |||
PM> Install-Package SciSharp.TensorFlow.Redist | |||
PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU | |||
``` | |||
https://www.nuget.org/packages/SciSharp.TensorFlow.Redist | |||
@@ -21,8 +21,7 @@ Related merged [commits](https://github.com/SciSharp/TensorFlow.NET/commit/854a5 | |||
On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries. | |||
1. Run `dotnet pack` under `src/SciSharp.TensorFlow.Redist` directory in Linux. | |||
1. Run `dotnet pack SciSharp.TensorFlow.Redist-CPU.nupkgproj` under `src/SciSharp.TensorFlow.Redist` directory in Linux. | |||
2. Run `dotnet nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json` | |||
@@ -71,7 +71,7 @@ | |||
DownloadShaFile="$(BinDir)%(FileName)%(Extension).sha" | |||
ExtractDirectory="$(BinDir)%(FileName)" | |||
ExtractSemaphore="$(BinDir)%(FileName)\.extracted" | |||
LocalShaFile="$(MSBuildProjectDirectory)\%(FileName)%(Extension).sha"/> | |||
LocalShaFile="$(BinDir)\%(FileName)%(Extension).sha"/> | |||
</ItemGroup> | |||
<Message Importance="high" Text="%(TensorFlowConfig.Runtime)"/> | |||
</Target> | |||
@@ -0,0 +1,174 @@ | |||
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <ProjDir>$(MSBuildThisFileDirectory)</ProjDir>
    <BinDir>$(ProjDir)bin\</BinDir>
    <ObjDir>$(ProjDir)obj\</ObjDir>
    <TargetArchitecture Condition="'$(TargetArchitecture)' == ''">x64</TargetArchitecture>
    <TargetFramework>netstandard2.0</TargetFramework>
    <TensorFlowVersion>1.14.0</TensorFlowVersion>
    <TensorFlowMajorVersion>1</TensorFlowMajorVersion>
    <PackageAssetsPath>$(BinDir)packages\</PackageAssetsPath>
    <PackageId>$(MSBuildProjectName)</PackageId>
    <PackageVersion>$(TensorFlowVersion)</PackageVersion>
    <NoBuild>true</NoBuild>
    <IncludeBuildOutput>false</IncludeBuildOutput>
    <!-- NOTE(review): the nuspec is named Redist-Windows-GPU but the TensorFlowConfig
         below only includes the linux runtime - confirm this pairing is intended. -->
    <NuspecFile>Redist-Windows-GPU.nuspec</NuspecFile>
    <NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties>
    <NuspecBasePath>$(ProjDir)</NuspecBasePath>
    <GenerateNuspecDependsOn>CopyFilesFromArchive</GenerateNuspecDependsOn>
    <!-- RID of the machine running the build. -->
    <PackageRid Condition="'$(OS)' == 'Windows_NT'">win</PackageRid>
    <PackageRid Condition="'$(OS)' != 'Windows_NT'">linux</PackageRid>
    <PackageRid Condition="$([MSBuild]::IsOSPlatform('osx'))">osx</PackageRid>
    <PackageRid>$(PackageRid)-$(TargetArchitecture)</PackageRid>
  </PropertyGroup>
  <PropertyGroup>
    <IncludeMLNetNotices>false</IncludeMLNetNotices>
  </PropertyGroup>
  <ItemGroup>
    <TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'"
                      Include="linux"
                      FileExtension=".tar.gz"
                      FilesFromArchive="lib\libtensorflow.so;
                                        lib\libtensorflow_framework.so.$(TensorFlowMajorVersion);
                                        include\tensorflow\c\LICENSE"
                      Runtime="linux-x64" />
    <AdditionalDownloadFile Include="https://raw.githubusercontent.com/tensorflow/tensorflow/master/LICENSE"
                            DownloadFile="$(BinDir)LICENSE" />
  </ItemGroup>
  <Target Name="PrepareArchives">
    <ItemGroup>
      <!-- although we could extract all archives on all machines, mac requires a fixup which can only be run on mac
           so we split these per-rid and join during the official build packaging. -->
      <TensorFlowArchive
        Include="@(TensorFlowConfig->'https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-%(Identity)-x86_64-$(TensorFlowVersion)%(FileExtension)')" />
      <!-- set up metadata used by all targets -->
      <TensorFlowArchive DownloadFile="$(BinDir)%(FileName)%(Extension)"
                         DownloadShaFile="$(BinDir)%(FileName)%(Extension).sha"
                         ExtractDirectory="$(BinDir)%(FileName)"
                         ExtractSemaphore="$(BinDir)%(FileName)\.extracted"
                         LocalShaFile="$(BinDir)\%(FileName)%(Extension).sha"/>
    </ItemGroup>
    <Message Importance="high" Text="%(TensorFlowConfig.Runtime)"/>
  </Target>
  <Target Name="DownloadArchives"
          DependsOnTargets="PrepareArchives"
          Inputs="$(MSBuildProjectFile)"
          Outputs="@(TensorFlowArchive->'%(DownloadFile)');@(AdditionalDownloadFile->'%(DownloadFile)')">
    <MakeDir Directories="$(BinDir)" />
    <ItemGroup>
      <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" />
    </ItemGroup>
    <Message Importance="High" Text="Downloading '%(_downloadFiles.Identity)' to '$(BinDir)'." />
    <DownloadFile SourceUrl="%(_downloadFiles.Identity)" DestinationFolder="$(BinDir)">
      <Output TaskParameter="DownloadedFile" ItemName="Content" />
    </DownloadFile>
  </Target>
  <Target Name="ValidateAndExtractArchives"
          DependsOnTargets="DownloadArchives"
          Inputs="@(TensorFlowArchive->'%(DownloadFile)')"
          Outputs="@(TensorFlowArchive->'%(ExtractSemaphore)')">
    <GetFileHash Files="@(TensorFlowArchive->'%(DownloadFile)')" Algorithm="SHA512">
      <Output
        TaskParameter="Items"
        ItemName="FilesWithHashes" />
    </GetFileHash>
    <WriteLinesToFile File="%(FilesWithHashes.Identity).sha" Lines="%(FilesWithHashes.FileHash)" Overwrite="true"/>
    <!-- If specified we'll update the checked in SHAs with the downloaded ones. -->
    <Copy Condition="'$(UpdateSHA)' == 'true'"
          SourceFiles="@(TensorFlowArchive->'%(DownloadShaFile)')"
          DestinationFiles="@(TensorFlowArchive->'%(LocalShaFile)')" />
    <ItemGroup>
      <TensorFlowArchive>
        <DownloadSha>@(FilesWithHashes->'%(FileHash)')</DownloadSha>
        <LocalSha>$([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", ""))</LocalSha>
      </TensorFlowArchive>
    </ItemGroup>
    <Error Condition="!Exists('%(TensorFlowArchive.LocalShaFile)')" Text="SHA file '%(TensorFlowArchive.LocalShaFile)' does not exist. Build with /p:UpdateSHA=true to save it." />
    <!-- Fix: the original text was missing the @(...->'...') item-transform wrapper
         (and its closing quote), so it printed literal text instead of per-archive values. -->
    <Message Importance="High" Text="@(TensorFlowArchive->'%(DownloadFile) - %(LocalSha) - %(DownloadSha)')"/>
    <!-- Validate that the downloaded SHAs match the expected checked in SHAs -->
    <Error Condition="'%(TensorFlowArchive.LocalSha)' != '%(TensorFlowArchive.DownloadSha)'" Text="Downloaded file '%(TensorFlowArchive.DownloadFile)' has unexpected SHA.%0A expected: %(TensorFlowArchive.LocalSha)%0A --actual: %(TensorFlowArchive.DownloadSha)%0ABuild with /p:UpdateSHA=true if you intentionally changed the URL and wish to update the SHAs, otherwise this could indicate an incomplete download or intercepted URL and should be examined." />
    <!-- The archives are valid, let's extract them, ensuring an empty directory -->
    <RemoveDir Directories="@(TensorFlowArchive->'%(ExtractDirectory)')" />
    <MakeDir Directories="@(TensorFlowArchive->'%(ExtractDirectory)')" />
    <Message Importance="High" Text="Decompressing '%(TensorFlowArchive.DownloadFile)' to '%(TensorFlowArchive.ExtractDirectory)'." />
    <Unzip Condition="'%(TensorFlowArchive.FileExtension)' == '.zip'"
           SourceFiles="%(TensorFlowArchive.DownloadFile)"
           DestinationFolder="%(TensorFlowArchive.ExtractDirectory)" />
    <Exec Condition="'$(OS)' != 'Windows_NT' AND '%(TensorFlowArchive.FileExtension)' == '.tar.gz'"
          WorkingDirectory="$(MSBuildThisFileDirectory)"
          Command="tar -xzm --hard-dereference -f %(TensorFlowArchive.DownloadFile) -C %(TensorFlowArchive.ExtractDirectory)" />
    <Exec Condition="'$(OS)' != 'Windows_NT'"
          Command="chmod -R +w %(TensorFlowArchive.ExtractDirectory)" />
    <Touch Files="@(TensorFlowArchive->'%(ExtractSemaphore)')" AlwaysCreate="true" />
  </Target>
  <!-- Select the files we want to copy out of each archive. -->
  <Target Name="GetFilesFromArchive"
          DependsOnTargets="ValidateAndExtractArchives" >
    <ItemGroup>
      <!-- batch rather than transform so that we can split FilesFromArchive metadata -->
      <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" />
      <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/>
      <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" />
      <!-- LICENSE from the package is actually THIRD_PARTY_NOTICES-->
      <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" />
      <!-- copy to packaging location -->
      <FilesFromArchive Include="@(_fileFromArchive->'%(ExtractDirectory)\%(Identity)')"
                        TargetPath="$(PackageAssetsPath)$(MSBuildProjectName)\%(PackagePath)" />
      <!-- include LICENSE that was downloaded from GitHub -->
      <FilesFromArchive Include="$(BinDir)\LICENSE"
                        TargetPath="$(PackageAssetsPath)$(MSBuildProjectName)\LICENSE.txt" />
      <!-- copy to NativeAssets location, only for current RID, so that they may be used by tests -->
      <!--<FilesFromArchive Condition="'$(PackageRID)' == '%(_fileFromArchive.Runtime)'"
          Include="@(_fileFromArchive->'%(ExtractDirectory)\%(Identity)')"
          TargetPath="$(NativeAssetsBuiltPath)\%(_fileFromArchive.DestinationFile)" />-->
    </ItemGroup>
  </Target>
  <Target Name="CopyFilesFromArchive"
          DependsOnTargets="GetFilesFromArchive">
    <Message Importance="High" Text="@(FilesFromArchive) -> %(FilesFromArchive.TargetPath)" />
    <Copy SourceFiles="@(FilesFromArchive)"
          DestinationFiles="@(FilesFromArchive->'%(TargetPath)')" />
  </Target>
  <Target Name="Clean">
    <Message Importance="High" Text="Deleting $(BinDir);$(ObjDir)" />
    <RemoveDir Directories="$(BinDir);$(ObjDir)" />
  </Target>
</Project>
@@ -41,21 +41,6 @@ | |||
include\tensorflow\c\LICENSE" | |||
Runtime="win-x64"/> | |||
<TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'" | |||
Include="linux" | |||
FileExtension=".tar.gz" | |||
FilesFromArchive="lib\libtensorflow.so; | |||
lib\libtensorflow_framework.so.$(TensorFlowMajorVersion); | |||
include\tensorflow\c\LICENSE" | |||
Runtime="linux-x64" /> | |||
<TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'" | |||
Include="darwin" FileExtension=".tar.gz" | |||
FilesFromArchive="lib\libtensorflow.dylib; | |||
lib\libtensorflow_framework.$(TensorFlowMajorVersion).dylib; | |||
include\tensorflow\c\LICENSE" | |||
Runtime="osx-x64" /> | |||
<AdditionalDownloadFile Include="https://raw.githubusercontent.com/tensorflow/tensorflow/master/LICENSE" | |||
DownloadFile="$(BinDir)LICENSE" /> | |||
</ItemGroup> | |||
@@ -71,7 +56,7 @@ | |||
DownloadShaFile="$(BinDir)%(FileName)%(Extension).sha" | |||
ExtractDirectory="$(BinDir)%(FileName)" | |||
ExtractSemaphore="$(BinDir)%(FileName)\.extracted" | |||
LocalShaFile="$(MSBuildProjectDirectory)\%(FileName)%(Extension).sha"/> | |||
LocalShaFile="$(BinDir)\%(FileName)%(Extension).sha"/> | |||
</ItemGroup> | |||
<Message Importance="high" Text="%(TensorFlowConfig.Runtime)"/> | |||
</Target> | |||
@@ -1 +0,0 @@ | |||
7002EF701BD23C5EF5FF94192E935F0DDF960A21BE2531CEE158586830C00E0BA889900F7F6E8AB568BEE0ACF1F5A6A246BB43D11C4109E9DC782B46377D8142 |
@@ -1 +0,0 @@ | |||
E3F6D0309117E9E45780ECF8BC4D0268B3FC9F12E3E38FFE58496789330A4ACD2DC8FF721F3B8900357F6155F8A54000E45B99495F823486B558E8B42532392D |
@@ -1 +0,0 @@ | |||
59A2B80B441439B851202358CE4A65BA0DDDB319A8A29E87B135DCD9954BC5B0628F2C0C8E72D6942EA3CDCE172805C2BD5421815B3D0210B62BC0936DC59A08 |
@@ -1 +0,0 @@ | |||
850A27858FA951DF77A78CD1BD78B54F6EE2532DD5A49F0579A7B02C795C62F0212F20177EAEA2BD77BD451A57FBBD1348362492F9E14BFE5CA5028C71711293 |
@@ -54,6 +54,15 @@ namespace Tensorflow | |||
// Argument record handed to the native tensor deallocator callback.
public struct DeallocatorArgs
{
// Process-lifetime singleton used when no per-tensor deallocator state is needed.
internal static unsafe c_api.DeallocatorArgs* EmptyPtr;
// The same singleton exposed as an IntPtr for APIs that take an opaque pointer.
internal static unsafe IntPtr Empty;
static unsafe DeallocatorArgs()
{
// Allocated once and never freed - appears intentional for a process-lifetime
// singleton; freeing would invalidate Empty/EmptyPtr held on the native side.
Empty = new IntPtr(EmptyPtr = (DeallocatorArgs*) Marshal.AllocHGlobal(Marshal.SizeOf<DeallocatorArgs>()));
*EmptyPtr = new DeallocatorArgs() {gc_handle = IntPtr.Zero, deallocator_called = false};
}
// Presumably flipped to true once the deallocator has run - TODO confirm at call sites.
public bool deallocator_called;
// Opaque handle for managed state (IntPtr.Zero when none) - presumably a GCHandle; verify against callers.
public IntPtr gc_handle;
}
@@ -14,6 +14,7 @@ | |||
limitations under the License. | |||
******************************************************************************/ | |||
using NumSharp; | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Linq; | |||
@@ -22,6 +23,39 @@ namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary>
/// A convenient alias for None, useful for indexing arrays
/// (the numpy-style `np.newaxis`, backed by <c>Slice.NewAxis</c>).
/// </summary>
public Slice newaxis = Slice.NewAxis;
/// <summary>
/// BatchToSpace for N-D tensors of type T; thin wrapper over
/// <c>gen_array_ops.batch_to_space_nd</c>.
/// </summary>
/// <typeparam name="T">Type of the input value (a tensor or value convertible to one).</typeparam>
/// <param name="input">The input to rearrange from the batch dimension into spatial blocks.</param>
/// <param name="block_shape">Block size for each spatial dimension.</param>
/// <param name="crops">Per-dimension [begin, end] amounts to crop from the result.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>The batch-to-space result tensor.</returns>
public Tensor batch_to_space_nd<T>(T input, int[] block_shape, int[,] crops, string name = null)
=> gen_array_ops.batch_to_space_nd(input, block_shape, crops, name: name);
/// <summary>
/// Apply boolean mask to tensor.
/// </summary>
/// <typeparam name="T1">Type of the tensor argument.</typeparam>
/// <typeparam name="T2">Type of the mask argument.</typeparam>
/// <param name="tensor">N-D tensor.</param>
/// <param name="mask">K-D boolean tensor, K &lt;= N and K must be known statically.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <param name="axis">A 0-D int Tensor representing the axis in tensor to mask from.</param>
/// <returns>(N-K+1)-dimensional tensor populated by entries in tensor corresponding to True values in mask.</returns>
public Tensor boolean_mask<T1, T2>(T1 tensor, T2 mask, string name = "boolean_mask", int axis = 0)
=> array_ops.boolean_mask(tensor, mask, name: name, axis: axis);
/// <summary>
/// Checks a tensor for NaN and Inf values; thin wrapper over
/// <c>gen_array_ops.check_numerics</c>.
/// </summary>
/// <param name="tensor">The tensor to validate.</param>
/// <param name="message">Prefix for the error report when bad values are found.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>The input tensor, allowing the check to be chained inline.</returns>
public Tensor check_numerics(Tensor tensor, string message, string name = null)
=> gen_array_ops.check_numerics(tensor, message, name: name);
/// <summary> | |||
/// Concatenates tensors along one dimension. | |||
/// </summary> | |||
@@ -61,6 +95,26 @@ namespace Tensorflow | |||
/// <summary>
/// Creates a tensor of the shape given by <paramref name="dims"/> filled with
/// a scalar value; thin wrapper over <c>gen_array_ops.fill</c>.
/// </summary>
/// <typeparam name="T">Type of the fill value.</typeparam>
/// <param name="dims">Tensor describing the shape of the output.</param>
/// <param name="value">The value to fill the returned tensor with.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>The filled tensor.</returns>
public Tensor fill<T>(Tensor dims, T value, string name = null)
=> gen_array_ops.fill(dims, value, name: name);
/// <summary>
/// Return a tensor with the same shape and contents as input.
/// </summary>
/// <param name="input">The tensor to pass through.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>A tensor with the same shape and contents as <paramref name="input"/>.</returns>
/// <remarks>
/// NOTE(review): declared <c>static</c> while sibling wrappers such as gather
/// and rank are instance methods - confirm this asymmetry is intentional.
/// </remarks>
public static Tensor identity(Tensor input, string name = null)
=> array_ops.identity(input, name: name);
/// <summary>
/// Gather slices from <paramref name="params"/>, along <paramref name="axis"/>,
/// according to <paramref name="indices"/>.
/// </summary>
/// <param name="params">The tensor to gather values from (written <c>@params</c> because `params` is a C# keyword).</param>
/// <param name="indices">Index tensor selecting the slices to gather.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <param name="axis">The axis in <paramref name="params"/> to gather along.</param>
/// <returns>The gathered tensor.</returns>
public Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0)
=> array_ops.gather(@params, indices, name: name, axis: axis);
/// <summary> | |||
/// Return the elements, either from `x` or `y`, depending on the `condition`. | |||
/// </summary> | |||
@@ -79,6 +133,39 @@ namespace Tensorflow | |||
/// <summary>
/// Transposes <paramref name="a"/>, permuting its dimensions according to
/// <paramref name="perm"/>; thin wrapper over <c>array_ops.transpose</c>.
/// </summary>
/// <typeparam name="T1">Type of the value to transpose.</typeparam>
/// <param name="a">The value to transpose.</param>
/// <param name="perm">Permutation of the dimensions; semantics of null are delegated to array_ops.transpose (per tf.transpose this reverses the dimensions - TODO confirm).</param>
/// <param name="name">A name for the operation (optional).</param>
/// <param name="conjugate">Whether to also conjugate (for complex tensors).</param>
/// <returns>The transposed tensor.</returns>
public Tensor transpose<T1>(T1 a, int[] perm = null, string name = "transpose", bool conjugate = false)
=> array_ops.transpose(a, perm, name, conjugate);
/// <summary>
/// Reverses specific dimensions of a tensor.
/// </summary>
/// <param name="tensor">The tensor to reverse.</param>
/// <param name="axis">Indices of the dimensions to reverse.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>A tensor with the given dimensions reversed.</returns>
/// <remarks>
/// NOTE(review): both overloads are <c>static</c> while most sibling wrappers
/// are instance methods - confirm this is intentional.
/// </remarks>
public static Tensor reverse(Tensor tensor, int[] axis, string name = null)
=> gen_array_ops.reverse(tensor, axis, name: name);
/// <summary>
/// Overload of <see cref="reverse(Tensor, int[], string)"/> taking the axes as a tensor.
/// </summary>
public static Tensor reverse(Tensor tensor, Tensor axis, string name = null)
=> gen_array_ops.reverse(tensor, axis, name: name);
/// <summary>
/// Returns the rank of a tensor.
/// </summary>
/// <param name="input">The tensor whose rank to compute.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>Returns a 0-D `int32` `Tensor` representing the rank of `input`.</returns>
public Tensor rank(Tensor input, string name = null)
=> array_ops.rank(input, name: name);
/// <summary>
/// Extracts a slice from a tensor.
/// </summary>
/// <typeparam name="Tb">Element type of the `begin` offsets.</typeparam>
/// <typeparam name="Ts">Element type of the `size` extents.</typeparam>
/// <param name="input">A `Tensor`.</param>
/// <param name="begin">An `int32` or `int64` `Tensor`: starting offset per dimension.</param>
/// <param name="size">An `int32` or `int64` `Tensor`: number of elements per dimension.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>A `Tensor` the same type as `input`.</returns>
public Tensor slice<Tb, Ts>(Tensor input, Tb[] begin, Ts[] size, string name = null)
=> array_ops.slice(input, begin, size, name: name);
/// <summary>
/// Removes dimensions of size 1 from the shape of a tensor; thin wrapper over
/// <c>gen_array_ops.squeeze</c>.
/// </summary>
/// <param name="input">The tensor to squeeze.</param>
/// <param name="axis">Optional specific axes to squeeze.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <param name="squeeze_dims">
/// NOTE(review): this parameter is accepted but never forwarded to
/// gen_array_ops.squeeze, so passing it has no effect - confirm whether it is
/// a deliberately ignored deprecated alias for <paramref name="axis"/>.
/// </param>
public Tensor squeeze(Tensor input, int[] axis = null, string name = null, int squeeze_dims = -1)
=> gen_array_ops.squeeze(input, axis, name);
@@ -92,6 +179,20 @@ namespace Tensorflow | |||
/// <summary>
/// Stacks the given values along <paramref name="axis"/> into a single tensor;
/// thin wrapper over <c>array_ops.stack</c>.
/// </summary>
/// <param name="values">The values to stack.</param>
/// <param name="axis">Axis along which to stack.</param>
/// <param name="name">A name for the operation (optional).</param>
public Tensor stack(object values, int axis = 0, string name = "stack")
=> array_ops.stack(values, axis, name: name);
/// <summary>
/// Creates a tensor with all elements set to 1.
/// </summary>
/// <param name="tensor">The tensor whose shape (and, by default, dtype) the result mimics.</param>
/// <param name="dtype">Element type of the result; DtInvalid presumably means "inherit from tensor" - TODO confirm in array_ops.ones_like.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <param name="optimize">
/// if true, attempt to statically determine the shape of 'tensor' and
/// encode it as a constant.
/// </param>
/// <returns>A `Tensor` with all elements set to 1.</returns>
public Tensor ones_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
=> array_ops.ones_like(tensor, dtype: dtype, name: name, optimize: optimize);
public Tensor one_hot(Tensor indices, int depth, | |||
Tensor on_value = null, | |||
Tensor off_value = null, | |||
@@ -99,6 +200,18 @@ namespace Tensorflow | |||
int axis = -1, | |||
string name = null) => array_ops.one_hot(indices, depth, dtype: dtype, axis: axis, name: name); | |||
/// <summary> | |||
/// Pads a tensor | |||
/// </summary> | |||
/// <param name="tensor"></param> | |||
/// <param name="paddings"></param> | |||
/// <param name="mode"></param> | |||
/// <param name="name"></param> | |||
/// <param name="constant_values"></param> | |||
/// <returns></returns> | |||
public Tensor pad(Tensor tensor, Tensor paddings, string mode = "CONSTANT", string name = null, int constant_values = 0)
{
    return array_ops.pad(tensor, paddings, mode: mode, name: name, constant_values: constant_values);
}
/// <summary> | |||
/// A placeholder op that passes through `input` when its output is not fed. | |||
/// </summary> | |||
@@ -112,5 +225,47 @@ namespace Tensorflow | |||
/// <returns>A `Tensor`. Has the same type as `input`.</returns> | |||
public Tensor placeholder_with_default<T>(T input, int[] shape, string name = null)
{
    return gen_array_ops.placeholder_with_default(input, shape, name: name);
}
/// <summary> | |||
/// Returns the shape of a tensor. | |||
/// </summary> | |||
/// <param name="input"></param> | |||
/// <param name="name"></param> | |||
/// <param name="out_type"></param> | |||
/// <returns></returns> | |||
public Tensor shape(Tensor input, string name = null, TF_DataType out_type = TF_DataType.TF_INT32)
{
    return array_ops.shape_internal(input, name, optimize: true, out_type: out_type);
}
/// <summary> | |||
/// Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor. | |||
/// </summary> | |||
/// <param name="values"></param> | |||
/// <param name="axis"></param> | |||
/// <param name="name"></param> | |||
/// <returns>A stacked `Tensor` with the same type as `values`.</returns> | |||
public Tensor stack(Tensor[] values, int axis = 0, string name = "stack")
{
    return array_ops.stack(values, axis: axis, name: name);
}
/// <summary> | |||
/// Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. | |||
/// </summary> | |||
/// <param name="value"></param> | |||
/// <param name="num"></param> | |||
/// <param name="axis"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public Tensor[] unstack(Tensor value, int? num = null, int axis = 0, string name = "unstack")
{
    return array_ops.unstack(value, num: num, axis: axis, name: name);
}
/// <summary> | |||
/// Creates a tensor with all elements set to zero. | |||
/// </summary> | |||
/// <param name="tensor"></param> | |||
/// <param name="dtype"></param> | |||
/// <param name="name"></param> | |||
/// <param name="optimize"></param> | |||
/// <returns>A `Tensor` with all elements set to zero.</returns> | |||
public Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
{
    return array_ops.zeros_like(tensor, dtype: dtype, name: name, optimize: optimize);
}
} | |||
} |
@@ -20,6 +20,23 @@ namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary>
/// Returns `true_fn()` if the predicate `pred` is true, else `false_fn()`.
/// </summary>
public Tensor cond(Tensor pred,
    Func<ITensorOrOperation> true_fn = null,
    Func<ITensorOrOperation> false_fn = null,
    bool strict = false,
    string name = null)
{
    return control_flow_ops.cond(pred, true_fn, false_fn, strict: strict, name: name);
}
/// <summary> | |||
/// Create an op that groups multiple operations. | |||
/// </summary> | |||
/// <typeparam name="T"></typeparam> | |||
/// <param name="inputs"></param> | |||
/// <param name="name"></param> | |||
/// <returns>An Operation that executes all its inputs.</returns> | |||
public Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation
{
    return control_flow_ops.group(inputs, name: name);
}
public Tensor while_loop(Func<Tensor, Tensor> cond, Func<Tensor, Tensor> body, Tensor[] loop_vars, | |||
TensorShape shape_invariants = null, | |||
int parallel_iterations = 10, | |||
@@ -37,7 +54,7 @@ namespace Tensorflow | |||
maximum_iterations: maximum_iterations, | |||
return_same_structure: return_same_structure); | |||
public _ControlDependenciesController control_dependencies(Operation[] control_inputs) | |||
public _ControlDependenciesController control_dependencies(ITensorOrOperation[] control_inputs) | |||
=> ops.control_dependencies(control_inputs); | |||
} | |||
} |
@@ -0,0 +1,33 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary> | |||
/// Interleave the values from the data tensors into a single tensor. | |||
/// </summary> | |||
/// <param name="indices"></param> | |||
/// <param name="data"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
// Instance (not static) so it is reachable through the `tf` binding object
// like every other op exposed on this partial class; the same change was
// applied to is_jpeg elsewhere in this API surface.
public Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null)
    => gen_data_flow_ops.dynamic_stitch(indices, data, name: name);
} | |||
} |
@@ -0,0 +1,44 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary> | |||
/// Assert the condition `x == y` holds element-wise. | |||
/// </summary> | |||
/// <typeparam name="T1"></typeparam> | |||
/// <typeparam name="T2"></typeparam> | |||
/// <param name="t1"></param> | |||
/// <param name="t2"></param> | |||
/// <param name="data"></param> | |||
/// <param name="message"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public Tensor assert_equal<T1, T2>(T1 t1,
    T2 t2,
    object[] data = null,
    string message = null,
    string name = null)
{
    return check_ops.assert_equal(t1, t2, data: data, message: message, name: name);
}
} | |||
} |
@@ -0,0 +1,57 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using static Tensorflow.Binding; | |||
using Tensorflow.Estimators; | |||
using Tensorflow.Data; | |||
namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
public Estimator_Internal estimator { get; } = new Estimator_Internal(); | |||
/// <summary>
/// Factory surface mirroring the `tf.estimator` namespace.
/// </summary>
public class Estimator_Internal
{
    /// <summary>Builds an estimator from a model function and run configuration.</summary>
    public Estimator Estimator(Action model_fn, RunConfig config)
    {
        return new Estimator(model_fn: model_fn, config: config);
    }

    /// <summary>Builds a run configuration rooted at `model_dir`.</summary>
    public RunConfig RunConfig(string model_dir)
    {
        return new RunConfig(model_dir: model_dir);
    }

    /// <summary>Trains and evaluates the `estimator` using the given train and eval specs.</summary>
    public void train_and_evaluate(Estimator estimator, TrainSpec train_spec, EvalSpec eval_spec)
    {
        Training.train_and_evaluate(estimator: estimator, train_spec: train_spec, eval_spec: eval_spec);
    }

    /// <summary>Builds a training specification from an input function and a step limit.</summary>
    public TrainSpec TrainSpec(Func<DatasetV1Adapter> input_fn, int max_steps)
    {
        return new TrainSpec(input_fn: input_fn, max_steps: max_steps);
    }

    /// <summary>
    /// Create an `Exporter` to use with `tf.estimator.EvalSpec`.
    /// </summary>
    /// <param name="name">Name of the exporter.</param>
    /// <param name="serving_input_receiver_fn">Builds the serving input receiver.</param>
    /// <param name="as_text">Whether to export the SavedModel proto in text format.</param>
    public FinalExporter FinalExporter(string name, Action serving_input_receiver_fn, bool as_text = false)
    {
        return new FinalExporter(name: name,
            serving_input_receiver_fn: serving_input_receiver_fn,
            as_text: as_text);
    }

    /// <summary>Builds an evaluation specification.</summary>
    public EvalSpec EvalSpec(string name, Action input_fn, FinalExporter exporters)
    {
        return new EvalSpec(name: name, input_fn: input_fn, exporters: exporters);
    }
}
} | |||
} |
@@ -29,7 +29,19 @@ namespace Tensorflow | |||
return ops.get_default_graph(); | |||
} | |||
public Graph Graph() | |||
/// <summary>
/// Equivalent to <see cref="get_default_graph"/> but does not create a new graph when there is none.
/// </summary>
/// <returns>The current default graph, or whatever <c>peak_controller</c> yields when the stack is empty.</returns>
// NOTE(review): "peak" is a misspelling of "peek", but it is part of the
// public name (and of default_graph_stack.peak_controller) — renaming would
// break callers; keep until a coordinated rename.
public Graph peak_default_graph()
{
return ops.default_graph_stack.peak_controller();
}
/// <summary>
/// Creates a new graph.
/// </summary>
/// <remarks>Has no interaction with graph defaulting; equivalent to <c>new Graph()</c>.</remarks>
public Graph Graph()
=> new Graph();
} | |||
} | |||
} |
@@ -40,6 +40,11 @@ namespace Tensorflow | |||
/// <summary>
/// Resizes `images` to `size` using bilinear interpolation.
/// </summary>
public Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, string name = null)
{
    return gen_image_ops.resize_bilinear(images, size, align_corners: align_corners, name: name);
}
/// <summary>
/// Resizes `images` to `size` using the given <see cref="ResizeMethod"/>.
/// </summary>
public Tensor resize_images(Tensor images, Tensor size, ResizeMethod method = ResizeMethod.BILINEAR,
    bool align_corners = false, bool preserve_aspect_ratio = false, string name = null)
{
    return image_ops_impl.resize_images(images,
        size,
        method: method,
        align_corners: align_corners,
        preserve_aspect_ratio: preserve_aspect_ratio,
        name: name);
}
/// <summary>
/// Converts `image` to `dtype`, optionally saturating before the cast.
/// </summary>
public Tensor convert_image_dtype(Tensor image, TF_DataType dtype, bool saturate = false, string name = null)
{
    return gen_image_ops.convert_image_dtype(image, dtype, saturate: saturate, name: name);
}
@@ -54,8 +59,22 @@ namespace Tensorflow | |||
/// <param name="contents"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor is_jpeg(Tensor contents, string name = null) | |||
public Tensor is_jpeg(Tensor contents, string name = null) | |||
=> image_ops_impl.is_jpeg(contents, name: name); | |||
/// <summary> | |||
/// Resize `images` to `size` using nearest neighbor interpolation. | |||
/// </summary> | |||
/// <param name="images"></param> | |||
/// <param name="size"></param> | |||
/// <param name="align_corners"></param> | |||
/// <param name="name"></param> | |||
/// <param name="half_pixel_centers"></param> | |||
/// <returns></returns> | |||
public Tensor resize_nearest_neighbor<Tsize>(Tensor images, Tsize size, bool align_corners = false,
    string name = null, bool half_pixel_centers = false)
{
    return image_ops_impl.resize_nearest_neighbor(images,
        size,
        align_corners: align_corners,
        name: name,
        half_pixel_centers: half_pixel_centers);
}
} | |||
} | |||
} |
@@ -20,6 +20,8 @@ namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary>
/// Returns an initializer that sets every element to `value`.
/// </summary>
public IInitializer constant_initializer<T>(T value, TF_DataType dtype = TF_DataType.TF_FLOAT, bool verify_shape = false)
{
    return new Constant<T>(value, dtype: dtype, verify_shape: verify_shape);
}
public IInitializer zeros_initializer => new Zeros(); | |||
public IInitializer ones_initializer => new Ones(); | |||
public IInitializer glorot_uniform_initializer => new GlorotUniform(); | |||
@@ -60,5 +62,25 @@ namespace Tensorflow | |||
stddev: stddev, | |||
seed: seed, | |||
dtype: dtype); | |||
/// <summary> | |||
/// Initializer capable of adapting its scale to the shape of weights tensors. | |||
/// </summary> | |||
/// <param name="scale"></param> | |||
/// <param name="mode"></param> | |||
/// <param name="distribution"></param> | |||
/// <param name="seed"></param> | |||
/// <param name="dtype"></param> | |||
/// <returns></returns> | |||
public IInitializer variance_scaling_initializer(float scale = 1.0f,
    string mode = "fan_in",
    string distribution = "truncated_normal",
    int? seed = null,
    TF_DataType dtype = TF_DataType.TF_FLOAT)
{
    return new VarianceScaling(scale: scale,
        mode: mode,
        distribution: distribution,
        seed: seed,
        dtype: dtype);
}
} | |||
} |
@@ -14,6 +14,9 @@ | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System.Collections.Generic; | |||
using System.Linq; | |||
using NumSharp; | |||
using Tensorflow.Keras.Layers; | |||
using Tensorflow.Operations.Activation; | |||
using static Tensorflow.Binding; | |||
@@ -143,6 +146,20 @@ namespace Tensorflow | |||
return layer.apply(inputs); | |||
} | |||
/// <summary> | |||
/// Densely-connected layer class. aka fully-connected<br></br> | |||
/// `outputs = activation(inputs * kernel + bias)` | |||
/// </summary> | |||
/// <param name="inputs"></param> | |||
/// <param name="units">Python integer, dimensionality of the output space.</param> | |||
/// <param name="activation"></param> | |||
/// <param name="use_bias">Boolean, whether the layer uses a bias.</param> | |||
/// <param name="kernel_initializer"></param> | |||
/// <param name="bias_initializer"></param> | |||
/// <param name="trainable"></param> | |||
/// <param name="name"></param> | |||
/// <param name="reuse"></param> | |||
/// <returns></returns> | |||
public Tensor dense(Tensor inputs, | |||
int units, | |||
IActivation activation = null, | |||
@@ -159,10 +176,60 @@ namespace Tensorflow | |||
var layer = new Dense(units, activation, | |||
use_bias: use_bias, | |||
bias_initializer: bias_initializer, | |||
kernel_initializer: kernel_initializer); | |||
kernel_initializer: kernel_initializer, | |||
trainable: trainable); | |||
return layer.apply(inputs); | |||
} | |||
/// <summary> | |||
/// Flattens an input tensor while preserving the batch axis (axis 0). | |||
/// </summary> | |||
/// <param name="inputs">Tensor input.</param> | |||
/// <param name="name">The name of the layer.</param> | |||
/// <param name="data_format"> | |||
/// A string, one of `channels_last` (default) or `channels_first`. <br></br> | |||
/// The ordering of the dimensions in the inputs. <br></br> | |||
/// `channels_last` corresponds to inputs with shape <br></br> | |||
/// `(batch, height, width, channels)` while `channels_first` corresponds to <br></br> | |||
/// inputs with shape `(batch, channels, height, width)`. | |||
/// </param> | |||
/// <returns></returns> | |||
public Tensor flatten(Tensor inputs,
    string name = null,
    string data_format = "channels_last")
{
    // Capture the shape up-front: the output shape must be computed from the
    // caller-visible layout, before any channels_first transpose below.
    var input_shape = inputs.shape;
    if (input_shape.Length == 0)
        throw new ValueError($"Input 0 of layer flatten is incompatible with the layer: : expected min_ndim={1}, found ndim={0}. Full shape received: ()");

    if (data_format == "channels_first" && inputs.NDims > 1)
    {
        // Move the channel axis (1) to the end: permutation (0, 2, ..., N-1, 1).
        var permutation = new List<int>() { 0 };
        permutation.AddRange(Binding.range(2, inputs.NDims));
        permutation.Add(1);
        inputs = array_ops.transpose(inputs, permutation.ToArray());
    }

    return array_ops.reshape(inputs, compute_output_shape(input_shape));

    // Collapses every axis except the batch axis (0) into a single dimension.
    int[] compute_output_shape(int[] inputshape)
    {
        if (inputshape == null || inputshape.Length == 0)
            inputshape = new int[] { 1 };

        if (inputshape.Skip(1).All(d => d > 0))
        {
            // All non-batch dimensions are statically known: fold their product.
            int[] output_shape = new int[2];
            output_shape[0] = inputshape[0];
            output_shape[1] = inputshape.Skip(1).Aggregate(1, (acc, rhs) => acc * rhs);
            return output_shape;
        }

        // At least one unknown dimension: let reshape infer it (-1 == Binding.None).
        return new int[] { inputshape[0], -1 };
    }
}
} | |||
} | |||
} |
@@ -14,6 +14,8 @@ | |||
limitations under the License. | |||
******************************************************************************/ | |||
using Tensorflow.Operations; | |||
namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
@@ -42,6 +44,15 @@ namespace Tensorflow | |||
public Tensor add<Tx, Ty>(Tx a, Ty b, string name = null) | |||
=> gen_math_ops.add(a, b, name: name); | |||
/// <summary> | |||
/// Adds all input tensors element-wise. | |||
/// </summary> | |||
/// <param name="inputs"></param> | |||
/// <param name="name"></param> | |||
/// <returns>A `Tensor` of same shape and type as the elements of `inputs`.</returns> | |||
public Tensor add_n(Tensor[] inputs, string name = null)
{
    return math_ops.add_n(inputs, name: name);
}
/// <summary> | |||
/// Computes atan of x element-wise. | |||
/// </summary> | |||
@@ -211,6 +222,36 @@ namespace Tensorflow | |||
/// <returns></returns> | |||
public Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null) | |||
=> gen_math_ops._clip_by_value(t, clip_value_min, clip_value_max); | |||
/// <summary> | |||
/// Clips tensor values to a specified min and max. | |||
/// </summary> | |||
/// <param name="t"> | |||
/// A <c>Tensor</c>. | |||
/// </param> | |||
/// <param name="clip_value_min"> | |||
/// A 0-D (scalar) <c>Tensor</c>, or a <c>Tensor</c> with the same shape | |||
/// as <c>t</c>. The minimum value to clip by. | |||
/// </param> | |||
/// <param name="clip_value_max"> | |||
/// A 0-D (scalar) <c>Tensor</c>, or a <c>Tensor</c> with the same shape | |||
/// as <c>t</c>. The maximum value to clip by. | |||
/// </param> | |||
/// <param name="name"> | |||
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ClipByValue'. | |||
/// </param> | |||
/// <returns> | |||
/// A clipped <c>Tensor</c> with the same shape as input 't'. | |||
/// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. | |||
/// </returns> | |||
/// <remarks> | |||
/// Given a tensor <c>t</c>, this operation returns a tensor of the same type and | |||
/// shape as <c>t</c> with its values clipped to <c>clip_value_min</c> and <c>clip_value_max</c>. | |||
/// Any values less than <c>clip_value_min</c> are set to <c>clip_value_min</c>. Any values | |||
/// greater than <c>clip_value_max</c> are set to <c>clip_value_max</c>. | |||
/// </remarks> | |||
public Tensor clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = "ClipByValue")
{
    return gen_ops.clip_by_value(t, clip_value_min, clip_value_max, name);
}
/// <summary>
/// Returns `a - b` element-wise.
/// </summary>
public Tensor sub(Tensor a, Tensor b)
{
    return gen_math_ops.sub(a, b);
}
@@ -299,6 +340,16 @@ namespace Tensorflow | |||
public Tensor negative(Tensor x, string name = null) | |||
=> gen_math_ops.neg(x, name); | |||
/// <summary> | |||
/// Returns the truth value of (x != y) element-wise. | |||
/// </summary> | |||
/// <param name="x"></param> | |||
/// <param name="y"></param> | |||
/// <param name="name"></param> | |||
/// <returns>A `Tensor` of type bool with the same size as that of x or y.</returns> | |||
public Tensor not_equal<Tx, Ty>(Tx x, Ty y, string name = null)
{
    return math_ops.not_equal(x, y, name: name);
}
/// <summary> | |||
/// Divides x / y elementwise (using Python 2 division operator semantics). | |||
/// </summary> | |||
@@ -315,21 +366,84 @@ namespace Tensorflow | |||
public Tensor pow<T1, T2>(T1 x, T2 y) | |||
=> gen_math_ops.pow(x, y); | |||
/// <summary> | |||
/// Divides `x / y` elementwise, rounding toward the most negative integer. | |||
/// </summary> | |||
/// <param name="x"></param> | |||
/// <param name="y"></param> | |||
/// <param name="name"></param> | |||
/// <returns>`x / y` rounded down.</returns> | |||
public Tensor floordiv(Tensor x, Tensor y, string name = null)
{
    return math_ops.floordiv(x, y, name: name);
}
/// <summary> | |||
/// Divides x / y elementwise (using Python 3 division operator semantics). | |||
/// </summary> | |||
/// <param name="x"></param> | |||
/// <param name="y"></param> | |||
/// <param name="name"></param> | |||
/// <returns>`x / y` evaluated in floating point.</returns> | |||
// Instance (not static) so it is reachable through the `tf` binding object,
// consistent with floordiv and the other math ops on this partial class.
public Tensor truediv(Tensor x, Tensor y, string name = null)
    => math_ops.truediv(x, y, name: name);
/// <summary>
/// Creates a sequence of numbers from `start` to `limit` (exclusive) stepping by `delta`.
/// </summary>
public Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range")
{
    return math_ops.range(start, limit: limit, delta: delta, dtype: dtype, name: name);
}
/// <summary> | |||
/// Computes the "logical or" of elements across dimensions of a tensor. | |||
/// </summary> | |||
/// <param name="input_tensor">The boolean tensor to reduce.</param> | |||
/// <param name="axis">The dimensions to reduce.</param> | |||
/// <param name="keepdims">If true, retains reduced dimensions with length 1.</param> | |||
/// <param name="name"></param> | |||
/// <returns>The reduced tensor.</returns> | |||
public Tensor reduce_any(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null) | |||
=> math_ops.reduce_any(input_tensor, axis: axis, keepdims: keepdims, name: name); | |||
public Tensor reduce_any(Tensor input_tensor, int axis = 0, bool keepdims = false, string name = null) | |||
=> math_ops.reduce_any(input_tensor, axis: new[] { axis }, keepdims: keepdims, name: name); | |||
/// <summary> | |||
/// Computes the "logical and" of elements across dimensions of a tensor. | |||
/// </summary> | |||
/// <param name="input_tensor"></param> | |||
/// <param name="axis"></param> | |||
/// <param name="keepdims"></param> | |||
/// <param name="name"></param> | |||
/// <returns>The reduced tensor.</returns> | |||
public Tensor reduce_all(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null)
{
    return math_ops.reduce_all(input_tensor, axis: axis, keepdims: keepdims, name: name);
}
/// <summary> | |||
/// Computes the product of elements across dimensions of a tensor. | |||
/// </summary> | |||
/// <param name="input_tensor"></param> | |||
/// <param name="axis"></param> | |||
/// <param name="keepdims"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public Tensor reduce_prod(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null)
{
    return math_ops.reduce_prod(input_tensor, axis: axis, keepdims: keepdims, name: name);
}
/// <summary> | |||
/// Computes the sum of elements across dimensions of a tensor. | |||
/// </summary> | |||
/// <param name="input"></param> | |||
/// <param name="axis"></param> | |||
/// <returns></returns> | |||
public Tensor reduce_sum(Tensor input, int? axis = null, int? reduction_indices = null) | |||
public Tensor reduce_sum(Tensor input, int? axis = null, int? reduction_indices = null, | |||
bool keepdims = false, string name = null) | |||
{ | |||
if(!axis.HasValue && reduction_indices.HasValue) | |||
return math_ops.reduce_sum(input, reduction_indices.Value); | |||
return math_ops.reduce_sum(input); | |||
else if (axis.HasValue && !reduction_indices.HasValue) | |||
return math_ops.reduce_sum(input, axis.Value); | |||
return math_ops.reduce_sum(input, keepdims: keepdims, name: name); | |||
} | |||
public Tensor reduce_sum(Tensor input, int axis, int? reduction_indices = null) | |||
=> math_ops.reduce_sum(input, axis); | |||
public Tensor reduce_sum(Tensor input, int[] axis, int? reduction_indices = null, | |||
bool keepdims = false, string name = null) | |||
=> math_ops.reduce_sum(input, axis, keepdims: keepdims, name: name); | |||
/// <summary> | |||
/// Computes the maximum of elements across dimensions of a tensor. | |||
@@ -168,6 +168,9 @@ namespace Tensorflow | |||
public rnn_cell_impl rnn_cell => new rnn_cell_impl(); | |||
/// <summary>
/// Computes sigmoid cross entropy between `labels` and `logits`.
/// </summary>
public Tensor sigmoid_cross_entropy_with_logits(Tensor labels, Tensor logits, string name = null)
{
    return nn_impl.sigmoid_cross_entropy_with_logits(labels: labels, logits: logits, name: name);
}
/// <summary>
/// Computes softmax activations for `logits`.
/// </summary>
/// <param name="logits">A non-empty `Tensor`.</param>
/// <param name="axis">The dimension softmax would be performed on.</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>A `Tensor` of the same type and shape as `logits`.</returns>
// NOTE(review): `axis` is accepted but never forwarded — gen_nn_ops.softmax
// is invoked with (logits, name) only, so the reduction always happens on the
// op's default axis. Confirm and wire `axis` through, or drop the parameter.
public Tensor softmax(Tensor logits, int axis = -1, string name = null)
=> gen_nn_ops.softmax(logits, name);
@@ -14,15 +14,36 @@ | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Collections.Generic; | |||
namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary>
/// Stores `value` in the default graph's collection with the given `name`.
/// </summary>
public void add_to_collection<T>(string name, T value)
{
    get_default_graph().add_to_collection(name, value);
}
/// <summary>
/// Stores `value` in each of the default graph's collections named in `names`.
/// </summary>
public void add_to_collections<T>(List<string> names, T value)
{
    get_default_graph().add_to_collections(names, value);
}
/// <summary>
/// Assigns `value` to `ref` via <c>state_ops.assign</c>.
/// </summary>
public Tensor assign(Tensor @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
{
    return state_ops.assign(@ref, value, validate_shape, use_locking, name);
}
/// <summary>
/// Returns the values in the default graph's collection `key`, optionally filtered by `scope`.
/// </summary>
public object get_collection(string key, string scope = "")
{
    return get_default_graph().get_collection(key, scope: scope);
}
/// <summary>
/// Assigns `value` to the variable `ref` via <c>state_ops.assign</c>.
/// </summary>
public Tensor assign(RefVariable @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
{
    return state_ops.assign(@ref, value, validate_shape, use_locking, name);
}
/// <summary>
/// Forwards to <c>Graph.device</c> on the default graph.
/// </summary>
public void device(string device_name)
{
    get_default_graph().device(device_name);
}
/// <summary>
/// Returns the typed values in the default graph's collection `key`, optionally filtered by `scope`.
/// </summary>
public List<T> get_collection<T>(string key, string scope = "")
{
    return get_default_graph().get_collection<T>(key, scope: scope);
}
/// <summary> | |||
/// A context manager that lifts ops out of control-flow scopes and function-building graphs. | |||
/// </summary> | |||
public void init_scope()
{
    ops.init_scope();
}
/// <summary> | |||
/// Returns a context manager that creates hierarchical names for operations. | |||
@@ -39,7 +60,36 @@ namespace Tensorflow | |||
/// </summary> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public Tensor no_op(string name = null) | |||
public Operation no_op(string name = null) | |||
=> gen_control_flow_ops.no_op(name: name); | |||
/// <summary> | |||
/// map on the list of tensors unpacked from `elems` on dimension 0. | |||
/// </summary> | |||
/// <param name="fn"></param> | |||
/// <param name="elems"></param> | |||
/// <param name="dtype"></param> | |||
/// <param name="parallel_iterations"></param> | |||
/// <param name="back_prop"></param> | |||
/// <param name="swap_memory"></param> | |||
/// <param name="infer_shape"></param> | |||
/// <param name="name"></param> | |||
/// <returns>A tensor or (possibly nested) sequence of tensors.</returns> | |||
public Tensor map_fn(Func<Tensor, Tensor> fn,
    Tensor elems,
    TF_DataType dtype = TF_DataType.DtInvalid,
    int parallel_iterations = -1,
    bool back_prop = true,
    bool swap_memory = false,
    bool infer_shape = true,
    string name = null)
{
    return Operation.map_fn(fn, elems, dtype,
        parallel_iterations: parallel_iterations,
        back_prop: back_prop,
        swap_memory: swap_memory,
        infer_shape: infer_shape,
        name: name);
}
} | |||
} |
@@ -0,0 +1,127 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using Tensorflow.Queues; | |||
namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary> | |||
/// A FIFOQueue that supports batching variable-sized tensors by padding. | |||
/// </summary> | |||
/// <param name="capacity"></param> | |||
/// <param name="dtypes"></param> | |||
/// <param name="shapes"></param> | |||
/// <param name="names"></param> | |||
/// <param name="shared_name"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public PaddingFIFOQueue PaddingFIFOQueue(int capacity,
    TF_DataType[] dtypes,
    TensorShape[] shapes,
    string[] names = null,
    string shared_name = null,
    string name = "padding_fifo_queue")
{
    return new PaddingFIFOQueue(capacity, dtypes, shapes, names,
        shared_name: shared_name,
        name: name);
}
/// <summary>
/// Single-component convenience overload of <see cref="PaddingFIFOQueue(int, TF_DataType[], TensorShape[], string[], string, string)"/>.
/// </summary>
public PaddingFIFOQueue PaddingFIFOQueue(int capacity,
    TF_DataType dtype,
    TensorShape shape,
    string shared_name = null,
    string name = "padding_fifo_queue")
{
    return new PaddingFIFOQueue(capacity,
        new[] { dtype },
        new[] { shape },
        shared_name: shared_name,
        name: name);
}
/// <summary> | |||
/// A queue implementation that dequeues elements in first-in first-out order. | |||
/// </summary> | |||
/// <param name="capacity"></param> | |||
/// <param name="dtypes"></param> | |||
/// <param name="shapes"></param> | |||
/// <param name="names"></param> | |||
/// <param name="shared_name"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public FIFOQueue FIFOQueue(int capacity,
    TF_DataType[] dtypes,
    TensorShape[] shapes = null,
    string[] names = null,
    string shared_name = null,
    string name = "fifo_queue")
{
    return new FIFOQueue(capacity, dtypes, shapes, names,
        shared_name: shared_name,
        name: name);
}
/// <summary>
/// Single-component convenience overload; a null `shape` becomes an unknown <see cref="TensorShape"/>.
/// </summary>
public FIFOQueue FIFOQueue(int capacity,
    TF_DataType dtype,
    TensorShape shape = null,
    string shared_name = null,
    string name = "fifo_queue")
{
    return new FIFOQueue(capacity,
        new[] { dtype },
        new[] { shape ?? new TensorShape() },
        shared_name: shared_name,
        name: name);
}
/// <summary>
/// Creates a queue that dequeues elements in priority order
/// (despite the earlier wording, this is not first-in first-out).
/// </summary>
/// <param name="capacity">Upper bound on the number of elements in the queue.</param>
/// <param name="dtype">Data type of the single queue component.</param>
/// <param name="shape">Shape of the component; null means an unknown shape.</param>
/// <param name="shared_name">If non-null, the queue is shared under this name across sessions.</param>
/// <param name="name">A name for the queue operation.</param>
/// <returns>A new <see cref="Queues.PriorityQueue"/> instance.</returns>
public PriorityQueue PriorityQueue(int capacity,
TF_DataType dtype,
TensorShape shape = null,
string shared_name = null,
string name = "priority_queue")
=> new PriorityQueue(capacity,
new[] { dtype },
new[] { shape ?? new TensorShape() },
shared_name: shared_name,
name: name);
/// <summary>
/// Creates a queue that dequeues elements in a random order.
/// </summary>
public RandomShuffleQueue RandomShuffleQueue(int capacity,
    int min_after_dequeue,
    TF_DataType dtype,
    TensorShape shape = null,
    int? seed = null,
    string shared_name = null,
    string name = "random_shuffle_queue")
{
    // Arguments passed positionally in declaration order (the original mixed a
    // named argument with later positional ones, which is fragile to reorder).
    return new RandomShuffleQueue(capacity,
        min_after_dequeue,
        new[] { dtype },
        new[] { shape ?? new TensorShape() },
        seed: seed,
        shared_name: shared_name,
        name: name);
}
} | |||
} |
@@ -49,5 +49,18 @@ namespace Tensorflow | |||
int? seed = null, | |||
string name = null) | |||
=> random_ops.truncated_normal(shape, mean, stddev, dtype, seed, name); | |||
/// <summary>
/// Randomly shuffles a tensor along its first dimension
/// (delegates to <see cref="random_ops.random_shuffle"/>).
/// </summary>
/// <param name="value">The tensor to shuffle.</param>
/// <param name="seed">Optional seed, forwarded as-is.</param>
/// <param name="name">Optional op name, forwarded as-is.</param>
/// <returns>
/// A tensor of the same shape and type as <paramref name="value"/>,
/// shuffled along its first dimension.
/// </returns>
public Tensor random_shuffle(Tensor value, int? seed = null, string name = null)
{
    return random_ops.random_shuffle(value, seed: seed, name: name);
}
} | |||
} |
@@ -18,8 +18,8 @@ namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
public Tensor reshape(Tensor tensor, | |||
Tensor shape, | |||
public Tensor reshape<T1, T2>(T1 tensor, | |||
T2 shape, | |||
string name = null) => gen_array_ops.reshape(tensor, shape, name); | |||
public Tensor reshape(Tensor tensor, | |||
@@ -0,0 +1,61 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using Tensorflow.Framework; | |||
namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary>
/// Builds a <see cref="SparseTensor{T}"/> from coordinate-format data:
/// a 2-D array of indices, the corresponding values, and the dense shape.
/// </summary>
public SparseTensor<T> SparseTensor<T>(long[,] indices, T[] values, long[] dense_shape)
    => new SparseTensor<T>(indices, values, dense_shape);
/// <summary>
/// Converts a <see cref="SparseTensor{T}"/> into a dense tensor by forwarding
/// its indices, dense shape and values to <see cref="gen_sparse_ops.sparse_to_dense"/>.
/// Positions not listed in the sparse input receive <paramref name="default_value"/>.
/// </summary>
public Tensor sparse_tensor_to_dense<T>(SparseTensor<T> sp_input,
    T default_value = default,
    bool validate_indices = true,
    string name = null)
{
    return gen_sparse_ops.sparse_to_dense(sp_input.indices,
        sp_input.dense_shape,
        sp_input.values,
        default_value: default_value,
        validate_indices: validate_indices,
        name: name);
}
/// <summary>
/// Converts a sparse representation into a dense tensor
/// (delegates to <see cref="gen_sparse_ops.sparse_to_dense"/>).
/// </summary>
/// <typeparam name="T">Element type of the values.</typeparam>
/// <param name="sparse_indices">Indices of the non-default entries.</param>
/// <param name="output_shape">Shape of the dense output tensor.</param>
/// <param name="sparse_values">Value(s) placed at the given indices.</param>
/// <param name="default_value">Value used for all other positions.</param>
/// <param name="validate_indices">Forwarded to the underlying op.</param>
/// <param name="name">Optional op name.</param>
/// <returns>Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`.</returns>
public Tensor sparse_to_dense<T>(Tensor sparse_indices,
    TensorShape output_shape,
    T sparse_values,
    T default_value = default,
    bool validate_indices = true,
    string name = null)
{
    return gen_sparse_ops.sparse_to_dense(sparse_indices,
        output_shape,
        sparse_values,
        default_value: default_value,
        validate_indices: validate_indices,
        name: name);
}
} | |||
} |
@@ -0,0 +1,25 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
/// <summary>
/// Adds <paramref name="value"/> to the variable <paramref name="ref"/>
/// (delegates to <see cref="state_ops.assign_add"/>).
/// </summary>
public Tensor assign_add<T>(RefVariable @ref, T value,
    bool use_locking = false, string name = null)
{
    return state_ops.assign_add(@ref, value, use_locking: use_locking, name: name);
}
} | |||
} |
@@ -18,8 +18,8 @@ namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
public Tensor convert_to_tensor(object value, | |||
string name = null) => ops.convert_to_tensor(value, name: name); | |||
public Tensor convert_to_tensor(object value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid) | |||
=> ops.convert_to_tensor(value, dtype, name, preferred_dtype); | |||
public Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides = null, | |||
int begin_mask = 0, | |||
@@ -54,5 +54,23 @@ namespace Tensorflow | |||
new_axis_mask: new_axis_mask, | |||
shrink_axis_mask: shrink_axis_mask, | |||
name: name); | |||
/// <summary>
/// Splits a tensor into sub tensors (delegates to <see cref="gen_array_ops.split"/>).
/// </summary>
/// <param name="value">The Tensor to split.</param>
/// <param name="num_split">Number of splits along the split dimension; it must
/// evenly divide the size of that dimension.</param>
/// <param name="axis">A scalar int32 Tensor giving the dimension along which to split.
/// Must be in the range [-rank(value), rank(value)).</param>
/// <param name="name">A name for the operation (optional).</param>
/// <returns>The <paramref name="num_split"/> tensors resulting from splitting
/// <paramref name="value"/>.</returns>
public Tensor[] split(Tensor value, int num_split, Tensor axis, string name = null)
{
    return gen_array_ops.split(value: value,
        axis: axis,
        num_split: num_split,
        name: name);
}
} | |||
} |
@@ -20,12 +20,8 @@ namespace Tensorflow | |||
{ | |||
public partial class tensorflow | |||
{ | |||
public Tensor tile(Tensor input, | |||
Tensor multiples, | |||
public Tensor tile<T>(Tensor input, | |||
T multiples, | |||
string name = null) => gen_array_ops.tile(input, multiples, name); | |||
public Tensor tile(NDArray input, | |||
int[] multiples, | |||
string name = null) => gen_array_ops.tile(input, multiples, name); | |||
} | |||
} |
@@ -25,16 +25,26 @@ namespace Tensorflow | |||
public class train_internal | |||
{ | |||
/// <summary>
/// Creates the global step variable in <paramref name="graph"/>
/// (delegates to <see cref="TrainingUtil.create_global_step"/>).
/// </summary>
public RefVariable create_global_step(Graph graph)
    => TrainingUtil.create_global_step(graph);
/// <summary>
/// Looks up the global step variable of <paramref name="graph"/>
/// (delegates to <see cref="TrainingUtil.get_global_step"/>).
/// </summary>
public RefVariable get_global_step(Graph graph)
    => TrainingUtil.get_global_step(graph);
/// <summary>
/// Constructs a <see cref="GradientDescentOptimizer"/> with the given learning rate.
/// </summary>
public Optimizer GradientDescentOptimizer(float learning_rate)
    => new GradientDescentOptimizer(learning_rate);
/// <summary>
/// Constructs an <see cref="AdamOptimizer"/> with a fixed learning rate.
/// </summary>
public Optimizer AdamOptimizer(float learning_rate, string name = "Adam")
    => new AdamOptimizer(learning_rate, name: name);
/// <summary>
/// Constructs an <see cref="AdamOptimizer"/> whose learning rate is a tensor
/// (e.g. a scheduled or decayed rate computed in the graph).
/// </summary>
public Optimizer AdamOptimizer(Tensor learning_rate, string name = "Adam")
    => new AdamOptimizer(learning_rate, name: name);
/// <summary>
/// Constructs an <see cref="ExponentialMovingAverage"/> with the given decay factor.
/// </summary>
public ExponentialMovingAverage ExponentialMovingAverage(float decay)
    => new ExponentialMovingAverage(decay);
public Saver Saver(VariableV1[] var_list = null) => new Saver(var_list: var_list); | |||
/// <summary>
/// Constructs a <see cref="Saver"/> for the given variables, retaining at most
/// <paramref name="max_to_keep"/> recent checkpoints.
/// </summary>
public Saver Saver(VariableV1[] var_list = null, int max_to_keep = 5)
    => new Saver(var_list: var_list, max_to_keep: max_to_keep);
/// <summary>
/// Writes <paramref name="graph"/> to a file named <paramref name="name"/>
/// under <paramref name="logdir"/> (delegates to <see cref="graph_io.write_graph"/>).
/// </summary>
public string write_graph(Graph graph, string logdir, string name, bool as_text = true)
    => graph_io.write_graph(graph, logdir, name, as_text);
@@ -45,7 +55,7 @@ namespace Tensorflow | |||
clear_devices, | |||
import_scope).Item1; | |||
public (MetaGraphDef, Dictionary<string, RefVariable>) export_meta_graph(string filename = "", | |||
public (MetaGraphDef, Dictionary<string, VariableV1>) export_meta_graph(string filename = "", | |||
bool as_text = false, | |||
bool clear_devices = false, | |||
bool clear_extraneous_savers = false, | |||
@@ -54,6 +64,12 @@ namespace Tensorflow | |||
clear_devices: clear_devices, | |||
clear_extraneous_savers: clear_extraneous_savers, | |||
strip_default_attrs: strip_default_attrs); | |||
/// <summary>
/// Finds the filename of the latest saved checkpoint
/// (delegates to <see cref="checkpoint_management.latest_checkpoint"/>).
/// </summary>
public string latest_checkpoint(string checkpoint_dir, string latest_filename = null)
    => checkpoint_management.latest_checkpoint(checkpoint_dir, latest_filename: latest_filename);
/// <summary>
/// Reads the <see cref="CheckpointState"/> proto for a checkpoint directory
/// (delegates to <see cref="checkpoint_management.get_checkpoint_state"/>).
/// </summary>
public CheckpointState get_checkpoint_state(string checkpoint_dir, string latest_filename = null)
    => checkpoint_management.get_checkpoint_state(checkpoint_dir, latest_filename: latest_filename);
} | |||
} | |||
} |
@@ -46,6 +46,7 @@ namespace Tensorflow | |||
TF_DataType dtype = TF_DataType.DtInvalid, | |||
object initializer = null, // IInitializer or Tensor | |||
bool? trainable = null, | |||
List<string> collections = null, | |||
bool? use_resource = null, | |||
bool validate_shape = true, | |||
VariableSynchronization synchronization = VariableSynchronization.Auto, | |||
@@ -60,7 +61,11 @@ namespace Tensorflow | |||
use_resource: use_resource, | |||
validate_shape: validate_shape, | |||
initializer: initializer, | |||
trainable: trainable); | |||
trainable: trainable, | |||
collections: collections); | |||
} | |||
public VariableScope get_variable_scope() | |||
=> Tensorflow.variable_scope.get_variable_scope(); | |||
} | |||
} |
@@ -21,6 +21,7 @@ using System.Collections.Generic; | |||
using System.ComponentModel; | |||
using System.Diagnostics; | |||
using System.Linq; | |||
using NumSharp.Utilities; | |||
namespace Tensorflow | |||
{ | |||
@@ -29,9 +30,37 @@ namespace Tensorflow | |||
/// </summary> | |||
public static partial class Binding | |||
{ | |||
/// <summary>
/// Renders an object Python-style: NDArrays use their own formatting,
/// .NET arrays become "[a, b, ...]" (flattened first when multi-dimensional
/// or jagged), and anything else falls back to ToString(). Null renders as "null".
/// </summary>
private static string _tostring(object obj)
{
    switch (obj)
    {
        case NDArray nd:
            return nd.ToString(false);
        case Array arr:
            // Flatten multi-dimensional and jagged arrays to one dimension first.
            var flat = (arr.Rank != 1 || arr.GetType().GetElementType()?.IsArray == true)
                ? Arrays.Flatten(arr)
                : arr;
            var rendered = flat.Cast<object>().Select(_tostring);
            return $"[{string.Join(", ", rendered)}]";
        default:
            return obj?.ToString() ?? "null";
    }
}
/// <summary>
/// Python-style print: writes the rendered form of <paramref name="obj"/>
/// (via <see cref="_tostring"/>) to the console.
/// Fix: the body previously contained both the old
/// <c>Console.WriteLine(obj.ToString())</c> call and the new
/// <c>_tostring</c> call, printing every object twice (and throwing on null);
/// only the <see cref="_tostring"/> call is kept.
/// </summary>
public static void print(object obj)
{
    Console.WriteLine(_tostring(obj));
}
public static int len(object a) | |||
@@ -77,11 +106,6 @@ namespace Tensorflow | |||
py.__enter__(); | |||
action(py); | |||
} | |||
catch (Exception ex) | |||
{ | |||
Console.WriteLine(ex.ToString()); | |||
throw; | |||
} | |||
finally | |||
{ | |||
py.__exit__(); | |||
@@ -97,11 +121,6 @@ namespace Tensorflow | |||
py.__enter__(); | |||
action(py); | |||
} | |||
catch (Exception ex) | |||
{ | |||
Console.WriteLine(ex.ToString()); | |||
throw; | |||
} | |||
finally | |||
{ | |||
py.__exit__(); | |||
@@ -117,11 +136,6 @@ namespace Tensorflow | |||
py.__enter__(); | |||
return action(py); | |||
} | |||
catch (Exception ex) | |||
{ | |||
Console.WriteLine(ex.ToString()); | |||
return default(TOut); | |||
} | |||
finally | |||
{ | |||
py.__exit__(); | |||
@@ -139,7 +153,7 @@ namespace Tensorflow | |||
{ | |||
var a = t1.AsIterator<T>(); | |||
var b = t2.AsIterator<T>(); | |||
while (a.HasNext()) | |||
while (a.HasNext() && b.HasNext()) | |||
yield return (a.MoveNext(), b.MoveNext()); | |||
} | |||
@@ -155,19 +169,13 @@ namespace Tensorflow | |||
{ | |||
var a = t1.AsIterator<T1>(); | |||
var b = t2.AsIterator<T2>(); | |||
while(a.HasNext()) | |||
while(a.HasNext() && b.HasNext()) | |||
yield return (a.MoveNext(), b.MoveNext()); | |||
} | |||
public static IEnumerable<(T1, T2)> zip<T1, T2>(IEnumerable<T1> e1, IEnumerable<T2> e2) | |||
{ | |||
var iter2 = e2.GetEnumerator(); | |||
foreach (var v1 in e1) | |||
{ | |||
iter2.MoveNext(); | |||
var v2 = iter2.Current; | |||
yield return (v1, v2); | |||
} | |||
return e1.Zip(e2, (t1, t2) => (t1, t2)); | |||
} | |||
public static IEnumerable<(TKey, TValue)> enumerate<TKey, TValue>(Dictionary<TKey, TValue> values) | |||
@@ -277,39 +285,6 @@ namespace Tensorflow | |||
return (__memberobject__.Length > 0) ? true : false; | |||
} | |||
public delegate object __object__(params object[] args); | |||
public static __object__ getattr(object obj, string key, params Type[] ___parameter_type__) | |||
{ | |||
var __dyn_obj__ = obj.GetType().GetMember(key); | |||
if (__dyn_obj__.Length == 0) | |||
throw new Exception("The object \"" + nameof(obj) + "\" doesnot have a defination \"" + key + "\""); | |||
var __type__ = __dyn_obj__[0]; | |||
if (__type__.MemberType == System.Reflection.MemberTypes.Method) | |||
{ | |||
try | |||
{ | |||
var __method__ = (___parameter_type__.Length > 0) ? obj.GetType().GetMethod(key, ___parameter_type__) : obj.GetType().GetMethod(key); | |||
return (object[] args) => __method__.Invoke(obj, args); | |||
} | |||
catch (System.Reflection.AmbiguousMatchException ex) | |||
{ | |||
throw new Exception("AmbigousFunctionMatchFound : (Probable cause : Function Overloading) Please add parameter types of the function."); | |||
} | |||
} | |||
else if (__type__.MemberType == System.Reflection.MemberTypes.Field) | |||
{ | |||
var __field__ = obj.GetType().GetField(key).GetValue(obj); | |||
return (object[] args) => { return __field__; }; | |||
} | |||
else if (__type__.MemberType == System.Reflection.MemberTypes.Property) | |||
{ | |||
var __property__ = obj.GetType().GetProperty(key).GetValue(obj); | |||
return (object[] args) => { return __property__; }; | |||
} | |||
return (object[] args) => { return "NaN"; }; | |||
} | |||
public static IEnumerable TupleToEnumerable(object tuple) | |||
{ | |||
Type t = tuple.GetType(); | |||
@@ -338,5 +313,15 @@ namespace Tensorflow | |||
return true; | |||
return false; | |||
} | |||
/// <summary>
/// Returns a delegate that simply forwards its argument to <paramref name="func"/>.
/// NOTE(review): the bound <paramref name="args"/> value is never used by the
/// returned delegate — confirm whether true partial application was intended.
/// </summary>
public static Func<Tin1, Tout> partial<Tin1, Tout>(Func<Tin1, Tout> func, Tin1 args)
    => input => func(input);
} | |||
} |
@@ -1,5 +1,6 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Dynamic; | |||
using System.Text; | |||
namespace Tensorflow | |||
@@ -7,5 +8,17 @@ namespace Tensorflow | |||
public static partial class Binding
{
    // Singleton entry point mirroring Python's `import tensorflow as tf`.
    public static tensorflow tf { get; } = New<tensorflow>();

    /// <summary>
    /// Alias for <c>null</c>, similar to Python's <c>None</c>.
    /// For an unknown <see cref="TensorShape"/> dimension, use <see cref="Unknown"/> instead.
    /// </summary>
    public static readonly object None = null;

    /// <summary>
    /// Sentinel value (-1) representing an unknown <see cref="TensorShape"/>
    /// dimension (Python's <c>None</c> in a shape).
    /// </summary>
    ///
    public static readonly int Unknown = -1;
}
} |
@@ -70,7 +70,7 @@ namespace Tensorflow | |||
public Buffer() => _handle = TF_NewBuffer(); | |||
internal Buffer(IntPtr handle) | |||
public Buffer(IntPtr handle) | |||
{ | |||
if (handle == IntPtr.Zero) | |||
throw new ArgumentException("Handle (IntPtr) can't be zero.", nameof(handle)); | |||
@@ -0,0 +1,19 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Contrib.Train | |||
{ | |||
/// <summary>
/// Class to hold a set of hyperparameters as name-value pairs.
/// </summary>
public class HParams
{
    // NOTE(review): the only hyperparameter carried so far is `load_pretrained`.
    public bool load_pretrained { get; set; }

    public HParams(bool load_pretrained)
        => this.load_pretrained = load_pretrained;
}
} |
@@ -1,36 +0,0 @@ | |||
using System; | |||
using Tensorflow.Data; | |||
namespace Tensorflow.Estimator | |||
{ | |||
/// <summary> | |||
/// Estimator class to train and evaluate TensorFlow models. | |||
/// <see cref="tensorflow_estimator\python\estimator\estimator.py"/> | |||
/// </summary> | |||
public class EstimatorV2 : IEstimator | |||
{ | |||
public EstimatorV2(string model_dir = null) | |||
{ | |||
} | |||
/// <summary> | |||
/// Calls the input function. | |||
/// </summary> | |||
/// <param name="mode"></param> | |||
public void call_input_fn(string mode = null) | |||
{ | |||
} | |||
public void train_model_default(Func<string, string, HyperParams, bool, DatasetV1Adapter> input_fn) | |||
{ | |||
} | |||
public void get_features_and_labels_from_input_fn() | |||
{ | |||
} | |||
} | |||
} |
@@ -1,7 +0,0 @@ | |||
namespace Tensorflow.Estimator | |||
{ | |||
public interface IEstimator | |||
{ | |||
} | |||
} |
@@ -1,15 +0,0 @@ | |||
namespace Tensorflow.Estimator | |||
{ | |||
/// <summary> | |||
/// The executor to run `Estimator` training and evaluation. | |||
/// <see cref="tensorflow_estimator\python\estimator\training.py"/> | |||
/// </summary> | |||
public class TrainingExecutor | |||
{ | |||
private IEstimator _estimator; | |||
public TrainingExecutor(IEstimator estimator) | |||
{ | |||
_estimator = estimator; | |||
} | |||
} | |||
} |
@@ -0,0 +1,138 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Diagnostics; | |||
using System.IO; | |||
using System.Text; | |||
using Tensorflow.Data; | |||
using Tensorflow.Train; | |||
using static Tensorflow.Binding; | |||
namespace Tensorflow.Estimators | |||
{ | |||
/// <summary>
/// Estimator class to train and evaluate TensorFlow models.
/// Port of tensorflow_estimator's Estimator; the core training loop is still
/// unimplemented (see <see cref="_train_model_default"/>).
/// </summary>
public class Estimator : IObjectLife
{
    // Run configuration supplied by the caller.
    RunConfig _config;
    public RunConfig config => _config;

    // Session configuration taken from the run config.
    ConfigProto _session_config;
    public ConfigProto session_config => _session_config;

    // Checkpoint/model directory, taken from the run config.
    string _model_dir;

    // Model-building callback; stored but not yet invoked anywhere in this class.
    Action _model_fn;

    public Estimator(Action model_fn, RunConfig config)
    {
        _config = config;
        _model_dir = _config.model_dir;
        _session_config = _config.session_config;
        _model_fn = model_fn;
    }

    /// <summary>
    /// Trains the model on data produced by <paramref name="input_fn"/>.
    /// Skips training entirely when the checkpoint directory already contains
    /// at least <paramref name="max_steps"/> saved steps.
    /// </summary>
    /// <param name="input_fn">Factory producing the training dataset.</param>
    /// <param name="max_steps">Total step count to train up to.</param>
    /// <param name="hooks">Currently accepted but unused.</param>
    /// <param name="saving_listeners">Currently accepted but unused.</param>
    /// <returns>This estimator, enabling call chaining.</returns>
    public Estimator train(Func<DatasetV1Adapter> input_fn, int max_steps = 1, Action[] hooks = null,
        _NewCheckpointListenerForEvaluate[] saving_listeners = null)
    {
        if(max_steps > 0)
        {
            var start_step = _load_global_step_from_checkpoint_dir(_model_dir);
            if (max_steps <= start_step)
            {
                Console.WriteLine("Skipping training since max_steps has already saved.");
                return this;
            }
        }

        var loss = _train_model(input_fn);
        print($"Loss for final step: {loss}.");
        return this;
    }

    // Approximates the last saved global step by counting checkpoint paths.
    // NOTE(review): the step should really be read from the checkpoint via a
    // NewCheckpointReader (not implemented); Count - 1 is only a proxy, and
    // this throws if no checkpoint state exists — confirm intended behavior.
    private int _load_global_step_from_checkpoint_dir(string checkpoint_dir)
    {
        // var cp = tf.train.latest_checkpoint(checkpoint_dir);
        // should use NewCheckpointReader (not implemented)
        var cp = tf.train.get_checkpoint_state(checkpoint_dir);
        return cp.AllModelCheckpointPaths.Count - 1;
    }

    private Tensor _train_model(Func<DatasetV1Adapter> input_fn)
    {
        return _train_model_default(input_fn);
    }

    // Default training path: builds a fresh graph, creates/validates the global
    // step and pulls features/labels from the input function. The remainder of
    // the loop (building the model, running the session) is not implemented yet.
    private Tensor _train_model_default(Func<DatasetV1Adapter> input_fn)
    {
        using (var g = tf.Graph().as_default())
        {
            var global_step_tensor = _create_and_assert_global_step(g);
            // Skip creating a read variable if _create_and_assert_global_step
            // returns None (e.g. tf.contrib.estimator.SavedModelEstimator).
            if (global_step_tensor != null)
                TrainingUtil._get_or_create_global_step_read(g);
            var (features, labels) = _get_features_and_labels_from_input_fn(input_fn, "train");
        }

        throw new NotImplementedException("");
    }

    // Invokes the input function and unpacks its result into feature/label dicts.
    private (Dictionary<string, Tensor>, Dictionary<string, Tensor>) _get_features_and_labels_from_input_fn(Func<DatasetV1Adapter> input_fn, string mode)
    {
        var result = _call_input_fn(input_fn, mode);
        return EstimatorUtil.parse_input_fn_result(result);
    }

    /// <summary>
    /// Calls the input function.
    /// </summary>
    /// <param name="input_fn">Dataset factory to invoke.</param>
    /// <param name="mode">Currently unused; mirrors the Python signature.</param>
    private DatasetV1Adapter _call_input_fn(Func<DatasetV1Adapter> input_fn, string mode)
    {
        return input_fn();
    }

    // Creates the global step and sanity-checks it in debug builds.
    private Tensor _create_and_assert_global_step(Graph graph)
    {
        var step = _create_global_step(graph);
        Debug.Assert(step == tf.train.get_global_step(graph));
        Debug.Assert(step.dtype.is_integer());
        return step;
    }

    private RefVariable _create_global_step(Graph graph)
    {
        return tf.train.create_global_step(graph);
    }

    // IObjectLife lifecycle hooks — none implemented yet.
    public void __init__()
    {
        throw new NotImplementedException();
    }

    public void __enter__()
    {
        throw new NotImplementedException();
    }

    public void __del__()
    {
        throw new NotImplementedException();
    }

    public void __exit__()
    {
        throw new NotImplementedException();
    }

    public void Dispose()
    {
        throw new NotImplementedException();
    }
}
} |
@@ -0,0 +1,15 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
using Tensorflow.Data; | |||
namespace Tensorflow.Estimators | |||
{ | |||
public class EstimatorUtil
{
    /// <summary>
    /// Unpacks the result of an input function into (features, labels)
    /// dictionaries. Not implemented yet.
    /// </summary>
    /// <exception cref="NotImplementedException">Always thrown.</exception>
    public static (Dictionary<string, Tensor>, Dictionary<string, Tensor>) parse_input_fn_result(DatasetV1Adapter result)
    {
        throw new NotImplementedException("");
    }
}
} |
@@ -0,0 +1,16 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Estimators | |||
{ | |||
public class EvalSpec
{
    // Name used to distinguish this evaluation run.
    string _name;

    // NOTE(review): input_fn and exporters are accepted but not stored —
    // confirm whether they should be kept for later use.
    public EvalSpec(string name, Action input_fn, FinalExporter exporters)
    {
        _name = name;
    }
}
} |
@@ -0,0 +1,11 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Estimators | |||
{ | |||
/// <summary>
/// Base type for strategies that export a trained <see cref="Estimator"/>.
/// </summary>
public abstract class Exporter
{
    /// <summary>
    /// Exports <paramref name="estimator"/> from <paramref name="checkpoint_path"/>
    /// into <paramref name="export_path"/>.
    /// </summary>
    public abstract void export(Estimator estimator, string export_path, string checkpoint_path);
}
} |
@@ -0,0 +1,14 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Estimators | |||
{ | |||
public class FinalExporter
{
    // NOTE(review): all constructor arguments are currently ignored; this type
    // is a stub mirroring tf.estimator.FinalExporter's signature.
    public FinalExporter(string name, Action serving_input_receiver_fn, bool as_text = false)
    {
    }
}
} |
@@ -1,6 +1,6 @@ | |||
using System.IO; | |||
namespace Tensorflow.Estimator | |||
namespace Tensorflow.Estimators | |||
{ | |||
public class HyperParams | |||
{ |
@@ -1,6 +1,7 @@ | |||
### TensorFlow Estimator | |||
# TensorFlow Estimator | |||
TensorFlow Estimator is a high-level TensorFlow API that greatly simplifies machine learning programming. Estimators encapsulate training, evaluation, prediction, and exporting for your model. | |||
Guide: <https://www.tensorflow.org/guide/estimators> | |||
https://github.com/tensorflow/estimator |
@@ -0,0 +1,101 @@ | |||
using System; | |||
namespace Tensorflow.Estimators | |||
{ | |||
/// <summary>
/// Specifies the configuration for an <see cref="Estimator"/> run
/// (checkpointing cadence, summary cadence, distribution settings, …).
/// </summary>
public class RunConfig
{
    // A list of the property names in RunConfig that the user is allowed to change.
    private static readonly string[] _DEFAULT_REPLACEABLE_LIST = new []
    {
        "model_dir",
        "tf_random_seed",
        "save_summary_steps",
        "save_checkpoints_steps",
        "save_checkpoints_secs",
        "session_config",
        "keep_checkpoint_max",
        "keep_checkpoint_every_n_hours",
        "log_step_count_steps",
        "train_distribute",
        "device_fn",
        "protocol",
        "eval_distribute",
        "experimental_distribute",
        "experimental_max_worker_delay_secs",
        "session_creation_timeout_secs"
    };

    #region const values
    private const string _SAVE_CKPT_ERR = "`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.";
    private const string _TF_CONFIG_ENV = "TF_CONFIG";
    private const string _TASK_ENV_KEY = "task";
    private const string _TASK_TYPE_KEY = "type";
    private const string _TASK_ID_KEY = "index";
    private const string _CLUSTER_KEY = "cluster";
    private const string _SERVICE_KEY = "service";
    private const string _SESSION_MASTER_KEY = "session_master";
    private const string _EVAL_SESSION_MASTER_KEY = "eval_session_master";
    private const string _MODEL_DIR_KEY = "model_dir";
    private const string _LOCAL_MASTER = "";
    private const string _GRPC_SCHEME = "grpc://";
    #endregion

    public string model_dir { get; set; }
    public ConfigProto session_config { get; set; }
    public int? tf_random_seed { get; set; }
    public int save_summary_steps { get; set; } = 100;
    public int save_checkpoints_steps { get; set; }
    public int save_checkpoints_secs { get; set; } = 600;
    public int keep_checkpoint_max { get; set; } = 5;
    public int keep_checkpoint_every_n_hours { get; set; } = 10000;
    public int log_step_count_steps { get; set; } = 100;
    public object train_distribute { get; set; }
    public object device_fn { get; set; }
    public object protocol { get; set; }
    public object eval_distribute { get; set; }
    public object experimental_distribute { get; set; }
    public object experimental_max_worker_delay_secs { get; set; }
    public int session_creation_timeout_secs { get; set; } = 7200;
    public object service { get; set; }

    public RunConfig()
    {
        Initialize();
    }

    public RunConfig(string model_dir)
    {
        this.model_dir = model_dir;
        Initialize();
    }

    /// <summary>
    /// Full constructor mirroring the Python tf.estimator.RunConfig signature.
    /// Fix: arguments are now stored on the corresponding properties — the
    /// previous implementation silently discarded everything except
    /// <paramref name="model_dir"/> — and the declared mutual-exclusivity of
    /// the two checkpoint-cadence settings is enforced.
    /// </summary>
    /// <exception cref="ArgumentException">
    /// When both <paramref name="save_checkpoints_steps"/> and
    /// <paramref name="save_checkpoints_secs"/> are provided.
    /// </exception>
    public RunConfig(
        string model_dir = null,
        int? tf_random_seed = null,
        int save_summary_steps = 100,
        object save_checkpoints_steps = null, // _USE_DEFAULT
        object save_checkpoints_secs = null, // _USE_DEFAULT
        object session_config = null,
        int keep_checkpoint_max = 5,
        int keep_checkpoint_every_n_hours = 10000,
        int log_step_count_steps = 100,
        object train_distribute = null,
        object device_fn = null,
        object protocol = null,
        object eval_distribute = null,
        object experimental_distribute = null,
        object experimental_max_worker_delay_secs = null,
        int session_creation_timeout_secs = 7200)
    {
        // Mirrors the Python-side validation behind _SAVE_CKPT_ERR.
        if (save_checkpoints_steps != null && save_checkpoints_secs != null)
            throw new ArgumentException(_SAVE_CKPT_ERR);

        this.model_dir = model_dir;
        this.tf_random_seed = tf_random_seed;
        this.save_summary_steps = save_summary_steps;
        // `object` parameters act as "use default" markers when null.
        if (save_checkpoints_steps is int ckpt_steps)
            this.save_checkpoints_steps = ckpt_steps;
        if (save_checkpoints_secs is int ckpt_secs)
            this.save_checkpoints_secs = ckpt_secs;
        if (session_config is ConfigProto config_proto)
            this.session_config = config_proto;
        this.keep_checkpoint_max = keep_checkpoint_max;
        this.keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours;
        this.log_step_count_steps = log_step_count_steps;
        this.train_distribute = train_distribute;
        this.device_fn = device_fn;
        this.protocol = protocol;
        this.eval_distribute = eval_distribute;
        this.experimental_distribute = experimental_distribute;
        this.experimental_max_worker_delay_secs = experimental_max_worker_delay_secs;
        this.session_creation_timeout_secs = session_creation_timeout_secs;

        Initialize();
    }

    // Shared post-construction hook; intentionally empty for now.
    private void Initialize()
    {
    }
}
} |
@@ -0,0 +1,22 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
using Tensorflow.Data; | |||
namespace Tensorflow.Estimators | |||
{ | |||
/// <summary>
/// Immutable configuration for the training phase: the dataset factory and
/// the total number of training steps.
/// </summary>
public class TrainSpec
{
    // Total number of steps to train for.
    public int max_steps { get; }

    // Factory producing the training dataset.
    public Func<DatasetV1Adapter> input_fn { get; }

    public TrainSpec(Func<DatasetV1Adapter> input_fn, int max_steps)
    {
        this.max_steps = max_steps;
        this.input_fn = input_fn;
    }
}
} |
@@ -0,0 +1,17 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Estimators | |||
{ | |||
public class Training
{
    /// <summary>
    /// Trains and evaluates <paramref name="estimator"/> according to the given
    /// train/eval specifications via a local <see cref="_TrainingExecutor"/>.
    /// Fix: removed an unused local (`var config = estimator.config;`) that was
    /// read and then discarded.
    /// </summary>
    public static void train_and_evaluate(Estimator estimator, TrainSpec train_spec, EvalSpec eval_spec)
    {
        var executor = new _TrainingExecutor(estimator: estimator, train_spec: train_spec, eval_spec: eval_spec);
        executor.run();
    }
}
} |
@@ -0,0 +1,14 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Estimators | |||
{ | |||
public class _Evaluator
{
    // NOTE(review): stub — all constructor arguments are currently ignored;
    // mirrors the Python _Evaluator signature only.
    public _Evaluator(Estimator estimator, EvalSpec eval_spec, int max_training_steps)
    {
    }
}
} |
@@ -0,0 +1,16 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Estimators | |||
{ | |||
public class _NewCheckpointListenerForEvaluate
{
    // Evaluator to run when a new checkpoint appears.
    _Evaluator _evaluator;

    // NOTE(review): eval_throttle_secs is accepted but not stored — confirm
    // whether throttling is meant to be implemented here.
    public _NewCheckpointListenerForEvaluate(_Evaluator evaluator, int eval_throttle_secs)
    {
        _evaluator = evaluator;
    }
}
} |
@@ -0,0 +1,14 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Estimators | |||
{ | |||
public class _SavedModelExporter : Exporter
{
    /// <summary>
    /// Export implementation; currently a no-op stub.
    /// </summary>
    public override void export(Estimator estimator, string export_path, string checkpoint_path)
    {
    }
}
} |
@@ -0,0 +1,48 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Estimators | |||
{ | |||
/// <summary>
/// The executor to run `Estimator` training and evaluation.
/// Only the local (non-distributed) path is implemented.
/// </summary>
internal class _TrainingExecutor
{
    Estimator _estimator;
    EvalSpec _eval_spec;
    TrainSpec _train_spec;

    public _TrainingExecutor(Estimator estimator, TrainSpec train_spec, EvalSpec eval_spec)
    {
        _estimator = estimator;
        _train_spec = train_spec;
        _eval_spec = eval_spec;
    }

    // Entry point: always falls through to the local path.
    public void run()
    {
        var config = _estimator.config;
        Console.WriteLine("Running training and evaluation locally (non-distributed).");
        run_local();
    }

    /// <summary>
    /// Runs training and evaluation locally (non-distributed).
    /// </summary>
    private void run_local()
    {
        // No training hooks are wired up yet.
        var train_hooks = new Action[0];
        Console.WriteLine("Start train and evaluate loop. The evaluate will happen " +
            "after every checkpoint. Checkpoint frequency is determined " +
            $"based on RunConfig arguments: save_checkpoints_steps {_estimator.config.save_checkpoints_steps} or " +
            $"save_checkpoints_secs {_estimator.config.save_checkpoints_secs}.");

        var evaluator = new _Evaluator(_estimator, _eval_spec, _train_spec.max_steps);
        // NOTE(review): the listeners array is empty, so `evaluator` is never
        // actually invoked — confirm it should be wrapped in a listener here.
        var saving_listeners = new _NewCheckpointListenerForEvaluate[0];
        _estimator.train(input_fn: _train_spec.input_fn,
            max_steps: _train_spec.max_steps,
            hooks: train_hooks,
            saving_listeners: saving_listeners);
    }
}
} |
@@ -0,0 +1,17 @@ | |||
using System; | |||
namespace Tensorflow | |||
{ | |||
/// <summary>
/// Exception type mirroring Python's <c>LookupError</c>.
/// </summary>
public class LookupError : TensorflowException
{
    public LookupError()
    {
    }

    public LookupError(string message)
        : base(message)
    {
    }
}
} |
@@ -0,0 +1,17 @@ | |||
using System; | |||
namespace Tensorflow | |||
{ | |||
/// <summary>
/// Exception type mirroring Python's <c>StopIteration</c>.
/// </summary>
public class StopIteration : TensorflowException
{
    public StopIteration()
    {
    }

    public StopIteration(string message)
        : base(message)
    {
    }
}
} |
@@ -167,7 +167,7 @@ namespace Tensorflow | |||
/// <param name="strip_default_attrs"></param> | |||
/// <param name="meta_info_def"></param> | |||
/// <returns></returns> | |||
public static (MetaGraphDef, Dictionary<string, RefVariable>) export_scoped_meta_graph(string filename = "", | |||
public static (MetaGraphDef, Dictionary<string, VariableV1>) export_scoped_meta_graph(string filename = "", | |||
GraphDef graph_def = null, | |||
bool as_text = false, | |||
string unbound_inputs_col_name = "unbound_inputs", | |||
@@ -179,8 +179,8 @@ namespace Tensorflow | |||
{ | |||
var graph = ops.get_default_graph(); | |||
var var_list = new Dictionary<string, RefVariable>(); | |||
var variables = graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) as List<RefVariable>; | |||
var var_list = new Dictionary<string, VariableV1>(); | |||
var variables = graph.get_collection<VariableV1>(tf.GraphKeys.GLOBAL_VARIABLES); | |||
if (variables != null) | |||
{ |
@@ -20,15 +20,24 @@ namespace Tensorflow.Framework | |||
{ | |||
public class smart_module | |||
{ | |||
public static Tensor[] smart_cond<T>(Tensor pred, | |||
Func<T[]> true_fn = null, | |||
Func<T[]> false_fn = null, | |||
public static Tensor[] smart_cond<T>(Tensor pred, | |||
Func<T[]> true_fn = null, | |||
Func<T[]> false_fn = null, | |||
string name = null) | |||
{ | |||
return control_flow_ops.cond(pred, | |||
true_fn: true_fn, | |||
false_fn: false_fn, | |||
name: name); | |||
var pred_value = smart_constant_value(pred); | |||
if (pred_value.HasValue) | |||
{ | |||
if (pred_value.Value) | |||
return true_fn() as Tensor[]; | |||
else | |||
return false_fn() as Tensor[]; | |||
} | |||
else | |||
return control_flow_ops.cond(pred, | |||
true_fn: true_fn, | |||
false_fn: false_fn, | |||
name: name); | |||
} | |||
public static bool? smart_constant_value(Tensor pred) | |||
@@ -1,19 +1,63 @@ | |||
namespace Tensorflow.Framework | |||
{ | |||
public interface _TensorLike | |||
{ } | |||
using System; | |||
using System.Linq; | |||
using static Tensorflow.Binding; | |||
public class SparseTensor : CompositeTensor, _TensorLike | |||
namespace Tensorflow.Framework | |||
{ | |||
/// <summary> | |||
/// Represents a sparse tensor. | |||
/// </summary> | |||
public class SparseTensor<T> : CompositeTensor, _TensorLike | |||
{ | |||
private static Tensor _dense_shape { get; set; } | |||
long[,] _indices; | |||
public Tensor indices; | |||
T[] _values; | |||
public Tensor values; | |||
long[] _dense_shape; | |||
public Tensor dense_shape; | |||
TensorShape _shape; | |||
public TensorShape shape => _shape; | |||
public TF_DataType dtype => dtypes.as_dtype(typeof(T)); | |||
public SparseTensor(long[,] indices_, T[] values_, long[] dense_shape_) | |||
{ | |||
tf_with(ops.name_scope(null, "SparseTensor", new { }), delegate | |||
{ | |||
indices = ops.convert_to_tensor( | |||
indices_, name: "indices", dtype: dtypes.int64); | |||
values = ops.internal_convert_to_tensor(values_, name: "values"); | |||
dense_shape = ops.convert_to_tensor( | |||
dense_shape_, name: "dense_shape", dtype: dtypes.int64); | |||
}); | |||
_indices = indices_; | |||
_values = values_; | |||
_dense_shape = dense_shape_; | |||
var indices_shape = indices.TensorShape.with_rank(2); | |||
var values_shape = values.TensorShape.with_rank(1); | |||
var dense_shape_shape = dense_shape.TensorShape.with_rank(1); | |||
indices_shape[0].merge_with(values_shape.dims[0]); | |||
indices_shape[1].merge_with(dense_shape_shape.dims[0]); | |||
_shape = new TensorShape(_dense_shape.Select(x => Convert.ToInt32(x)).ToArray()); | |||
} | |||
} | |||
public interface _TensorLike | |||
{ | |||
} | |||
public static class sparse_tensor | |||
public static class sparse_tensor_extension | |||
{ | |||
public static bool is_sparse(this _TensorLike x) | |||
{ | |||
return x is SparseTensor; | |||
return x.GetType().Name.Contains("SparseTensor"); | |||
} | |||
} | |||
} |
@@ -0,0 +1,33 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
namespace Tensorflow.Gradients | |||
{ | |||
/// <summary> | |||
/// REGISTER_NO_GRADIENT_OP(""); | |||
/// </summary> | |||
public class RegisterNoGradient : Attribute | |||
{ | |||
public string Name { get; set; } | |||
public RegisterNoGradient(string name) | |||
{ | |||
Name = name; | |||
} | |||
} | |||
} |
@@ -190,6 +190,26 @@ namespace Tensorflow.Gradients | |||
return new Tensor[] { array_ops.reshape(grads[0], array_ops.shape(op.inputs[0])), null }; | |||
} | |||
[RegisterGradient("Pad")] | |||
public static Tensor[] _PadGrad(Operation op, Tensor[] grads) | |||
{ | |||
var grad = grads[0]; | |||
var x = op.inputs[0]; | |||
var a = op.inputs[1]; | |||
var size = array_ops.stack(new object[] { array_ops.rank(x), 1 }); | |||
var pad_before = array_ops.slice(a, new[] { 0, 0 }, size); | |||
// Make it a 1-D tensor. | |||
var begin = array_ops.reshape(pad_before, new[] { -1 }); | |||
var sizes = array_ops.shape(x); | |||
var x_grad = array_ops.slice(grad, begin, sizes); | |||
if (len(op.inputs) == 3) | |||
return new Tensor[] { x_grad, null, null }; | |||
else | |||
return new Tensor[] { x_grad, null }; | |||
} | |||
[RegisterGradient("Squeeze")] | |||
public static Tensor[] _SqueezeGrad(Operation op, Tensor[] grads) | |||
{ | |||
@@ -1,28 +1,30 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Linq; | |||
using Tensorflow.Operations; | |||
using static Tensorflow.Binding; | |||
namespace Tensorflow.Gradients | |||
{ | |||
/// <summary> | |||
/// Gradients for operators defined in control_flow_ops.py.cs | |||
/// </summary> | |||
[RegisterGradient("control_flow_grad")] | |||
public class control_flow_grad | |||
{ | |||
/// <summary> | |||
@@ -33,118 +35,117 @@ namespace Tensorflow.Gradients | |||
/// on the second visit. A next_iteration is also added on second visit. | |||
/// </summary> | |||
/// <returns></returns> | |||
public Tensor[] _SwitchGrad(Tensor op, Tensor[] grads) | |||
[RegisterGradient("Switch")] | |||
public static Tensor[] _SwitchGrad(Operation op, Tensor[] grads) | |||
{ | |||
var grad = grads[0]; | |||
var graph = ops.get_default_graph(); | |||
var op_ctxt = op._get_control_flow_context(); | |||
var grad_ctxt = graph._get_control_flow_context(); | |||
switch (op_ctxt) | |||
{ | |||
case WhileContext cwhile: | |||
throw new NotImplementedException("_SwitchGrad WhileContext"); | |||
case CondContext ccond: | |||
{ | |||
var zero_grad = grads[1 - op_ctxt.branch]; | |||
// At this point, we have created zero_grad guarded by the right switch. | |||
// Unfortunately, we may still get None here for not trainable data types. | |||
if(zero_grad == null) | |||
{ | |||
throw new NotImplementedException("_SwitchGrad CondContext zero_grad"); | |||
} | |||
return new Tensor[] | |||
{ | |||
merge(grads, name: "cond_grad")[0], | |||
null | |||
}; | |||
} | |||
default: | |||
throw new NotImplementedException("_SwitchGrad WhileContext"); | |||
} | |||
throw new NotImplementedException("_SwitchGrad"); | |||
//graph = ops.get_default_graph() | |||
//# pylint: disable=protected-access | |||
//op_ctxt = op._get_control_flow_context() | |||
//grad_ctxt = graph._get_control_flow_context() | |||
//# pylint: enable=protected-access | |||
//if isinstance(op_ctxt, WhileContext): | |||
// merge_grad = grad_ctxt.grad_state.switch_map.get(op) | |||
// if merge_grad is not None: | |||
// # This is the second time this Switch is visited. It comes from | |||
// # the non-exit branch of the Switch, so update the second input | |||
// # to the Merge. | |||
// # TODO(yuanbyu): Perform shape inference with this new input. | |||
// if grad[1] is not None: | |||
// # pylint: disable=protected-access | |||
// control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1], | |||
// enforce_shape_invariant=False) | |||
// # pylint: enable=protected-access | |||
// return None, None | |||
// elif grad[0] is not None: | |||
// # This is the first time this Switch is visited. It comes from | |||
// # the Exit branch, which is grad[0]. grad[1] is empty at this point. | |||
// # Use grad[0] for both inputs to merge for now, but update the second | |||
// # input of merge when we see this Switch the second time. | |||
// merge_grad = merge([grad[0], grad[0]], name="b_switch")[0] | |||
// grad_ctxt.grad_state.switch_map[op] = merge_grad | |||
// return merge_grad, None | |||
// else: | |||
// # This is the first time this Switch is visited. It comes from the | |||
// # Identity branch. Such a Switch has `None` gradient for the Exit branch, | |||
// # meaning the output is not differentiable. | |||
// return None, None | |||
//elif isinstance(op_ctxt, CondContext): | |||
// zero_grad = grad[1 - op_ctxt.branch] | |||
// # At this point, we have created zero_grad guarded by the right switch. | |||
// # Unfortunately, we may still get None here for not trainable data types. | |||
// if zero_grad is None: | |||
// # For resource variables we get None always on the other branch, so bypass | |||
// # this. | |||
// if op.inputs[0].dtype == dtypes.resource: | |||
// return merge( | |||
// [grad[op_ctxt.branch]] * 2, name="cond_resource_grad")[0], None | |||
// return None, None | |||
// return merge(grad, name="cond_grad")[0], None | |||
//else: | |||
// false_grad = switch(grad[0], op.inputs[1])[0] | |||
// true_grad = switch(grad[1], op.inputs[1])[1] | |||
// return merge([false_grad, true_grad])[0], None | |||
} | |||
} | |||
/// <summary> | |||
/// Returns the value of an available element of `inputs`. | |||
/// </summary> | |||
/// <param name="inputs"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
internal static Tensor[] merge(Tensor[] inputs, string name = null) | |||
{ | |||
return tf_with(ops.name_scope(name, "Merge", inputs), scope => | |||
{ | |||
name = scope; | |||
if (inputs.Count(x => x.dtype.is_ref_dtype()) == inputs.Length) | |||
return gen_control_flow_ops.ref_merge(inputs, name: name); | |||
else | |||
return gen_control_flow_ops.merge(inputs, name: name); | |||
}); | |||
} | |||
/// <summary> | |||
/// Gradients for a Merge op are calculated using a Switch op. | |||
/// </summary> | |||
[RegisterGradient("Merge")] | |||
[RegisterGradient("Merge")] | |||
public static Tensor[] _MergeGrad(Operation op, Tensor[] grads) | |||
{ | |||
var grad = grads[0]; | |||
var _ = grads[1]; | |||
var input_op = op.inputs[0].op; | |||
var graph = ops.get_default_graph(); | |||
var op_ctxt = control_flow_util.GetOutputContext(input_op); | |||
var grad_ctxt = graph._get_control_flow_context(); | |||
switch (op_ctxt) | |||
{ | |||
case WhileContext cwhile: | |||
{ | |||
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot); | |||
case WhileContext cwhile: | |||
{ | |||
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot); | |||
} | |||
case CondContext ccond: | |||
{ | |||
var pred = ccond.pred; | |||
if (grad_ctxt != null && grad_ctxt.grad_state != null) | |||
{ | |||
//# This Merge node is part of a cond within a loop. | |||
//# The backprop needs to have the value of this predicate for every | |||
//# iteration. So we must have its values accumulated in the forward, and | |||
//# use the accumulated values as the predicate for this backprop switch. | |||
var grad_state = grad_ctxt.grad_state; | |||
var real_pred = grad_state.history_map[pred.name] as Tensor; | |||
if (real_pred == null) | |||
{ | |||
//# Remember the value of pred for every iteration. | |||
grad_ctxt = grad_state.grad_context; | |||
grad_ctxt.Exit(); | |||
var history_pred = grad_state.AddForwardAccumulator(pred); | |||
grad_ctxt.Enter(); | |||
//# Add the stack pop op. If pred.op is in a (outer) CondContext, | |||
//# the stack pop will be guarded with a switch. | |||
real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred); | |||
grad_state.history_map[pred.name] = real_pred; | |||
} | |||
pred = real_pred; | |||
} | |||
var results = control_flow_ops._SwitchRefOrTensor(grad, pred, name: "cond_grad"); | |||
return results; | |||
case CondContext ccond: | |||
{ | |||
var pred = ccond.pred; | |||
if (grad_ctxt != null && grad_ctxt.grad_state != null) | |||
{ | |||
//# This Merge node is part of a cond within a loop. | |||
//# The backprop needs to have the value of this predicate for every | |||
//# iteration. So we must have its values accumulated in the forward, and | |||
//# use the accumulated values as the predicate for this backprop switch. | |||
var grad_state = grad_ctxt.grad_state; | |||
var real_pred = grad_state.history_map[pred.name] as Tensor; | |||
if (real_pred == null) | |||
{ | |||
//# Remember the value of pred for every iteration. | |||
grad_ctxt = grad_state.grad_context; | |||
grad_ctxt.Exit(); | |||
var history_pred = grad_state.AddForwardAccumulator(pred); | |||
grad_ctxt.Enter(); | |||
//# Add the stack pop op. If pred.op is in a (outer) CondContext, | |||
//# the stack pop will be guarded with a switch. | |||
real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred); | |||
grad_state.history_map[pred.name] = real_pred; | |||
} | |||
pred = real_pred; | |||
} | |||
var results = control_flow_ops._SwitchRefOrTensor(grad, pred, name: "cond_grad"); | |||
return results; | |||
} | |||
default: | |||
{ | |||
var num_inputs = op.inputs.Length; | |||
var cond = new Tensor[num_inputs]; | |||
for (int i = 0; i < num_inputs; i++) | |||
cond[i] = math_ops.equal(op.outputs[1], i); | |||
var result = cond.Select(t => control_flow_ops._SwitchRefOrTensor(grad, t)[1]).ToArray(); | |||
return result; | |||
default: | |||
{ | |||
var num_inputs = op.inputs.Length; | |||
var cond = new Tensor[num_inputs]; | |||
for (int i = 0; i < num_inputs; i++) | |||
cond[i] = math_ops.equal(op.outputs[1], i); | |||
var result = cond.Select(t => control_flow_ops._SwitchRefOrTensor(grad, t)[1]).ToArray(); | |||
return result; | |||
} | |||
} | |||
} | |||
[RegisterGradient("RefMerge")] | |||
public Tensor[] _RefMergeGrad(Operation op, Tensor[] grads) | |||
{ | |||
return _MergeGrad(op, grads); | |||
@@ -153,6 +154,7 @@ namespace Tensorflow.Gradients | |||
/// <summary> | |||
/// Gradients for an exit op are calculated using an Enter op. | |||
/// </summary> | |||
[RegisterGradient("Exit")] | |||
public Tensor[] _ExitGrad(Operation op, Tensor[] grads) | |||
{ | |||
throw new NotImplementedException("_ExitGrad"); | |||
@@ -197,14 +199,16 @@ namespace Tensorflow.Gradients | |||
/// | |||
/// Note that the backprop next_iteration is added in switch grad. | |||
/// </summary> | |||
public (object, Tensor[]) _NextIterationGrad(object _, Tensor[] grad) | |||
[RegisterGradient("NextIteration")] | |||
public Tensor[] _NextIterationGrad(object _, Tensor[] grad) | |||
{ | |||
return (_, grad); | |||
return grad; | |||
} | |||
public (object, Tensor[]) _RefNextIterationGrad(object _, Tensor[] grad) | |||
[RegisterGradient("RefNextIteration")] | |||
public Tensor[] _RefNextIterationGrad(object _, Tensor[] grad) | |||
{ | |||
return (_, grad); | |||
return grad; | |||
} | |||
/// <summary> | |||
@@ -213,7 +217,8 @@ namespace Tensorflow.Gradients | |||
/// For loop variables, grad is the gradient so just add an exit. | |||
/// For loop invariants, we need to add an accumulator loop. | |||
/// </summary> | |||
public (object, Tensor[]) _EnterGrad(Tensor op, Tensor[] grad) | |||
[RegisterGradient("Enter")] | |||
public Tensor[] _EnterGrad(Tensor op, Tensor[] grad) | |||
{ | |||
throw new NotImplementedException("_EnterGrad"); | |||
// graph = ops.get_default_graph() | |||
@@ -242,7 +247,9 @@ namespace Tensorflow.Gradients | |||
// return result | |||
} | |||
public (object, Tensor[]) _RefEnterGrad(Tensor op, Tensor[] grad) | |||
[RegisterGradient("RefEnter")] | |||
public Tensor[] _RefEnterGrad(Tensor op, Tensor[] grad) | |||
{ | |||
return _EnterGrad(op, grad); | |||
} | |||
@@ -250,10 +257,11 @@ namespace Tensorflow.Gradients | |||
/// <summary> | |||
/// Stop backprop for the predicate of a while loop. | |||
/// </summary> | |||
public object _LoopCondGrad(object _) | |||
[RegisterGradient("LoopCond")] | |||
public Tensor[] _LoopCondGrad(Tensor op, Tensor[] grad) | |||
{ | |||
return null; | |||
} | |||
} | |||
} | |||
} | |||
} |
@@ -61,7 +61,7 @@ namespace Tensorflow | |||
string grad_scope = scope; | |||
// Get a uid for this call to gradients that can be used to help | |||
// cluster ops for compilation. | |||
var gradient_uid = ops.get_default_graph().unique_name("uid"); | |||
var gradient_uid = curr_graph.unique_name("uid"); | |||
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name: "y"); | |||
xs = ops.internal_convert_n_to_tensor_or_indexed_slices(xs, name: "x", as_ref: true); | |||
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops, gradient_uid); | |||
@@ -80,7 +80,7 @@ namespace Tensorflow | |||
var to_ops = ys.Select(x => x.op).ToList(); | |||
var from_ops = xs.Select(x => x.op).ToList(); | |||
var stop_gradient_ops = stop_gradients.Select(x => x.op).ToList(); | |||
(var reachable_to_ops, var pending_count, var loop_state) = _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, new List<object>(), xs); | |||
var (reachable_to_ops, pending_count, loop_state) = _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, new List<object>(), xs); | |||
foreach (var (y, grad_y) in zip(ys, grad_ys)) | |||
_SetGrad(grads, y, grad_y); | |||
@@ -117,23 +117,52 @@ namespace Tensorflow | |||
Tensor[] in_grads = null; | |||
var is_partitioned_call = _IsPartitionedCall(op); | |||
var is_func_call = false; | |||
var has_out_grads = true; | |||
var has_out_grads = out_grads.Exists(x => x != null); | |||
if (has_out_grads && !stop_ops.Contains(op)) | |||
{ | |||
// A grad_fn must be defined, either as a function or as None | |||
// for ops that do not have gradients. | |||
var grad_fn = ops.get_gradient_function(op); | |||
if (is_func_call) | |||
Func<Operation, Tensor[], Tensor[]> grad_fn = null; | |||
try | |||
{ | |||
grad_fn = ops.get_gradient_function(op); | |||
} | |||
catch (LookupError) | |||
{ | |||
if (is_func_call) | |||
{ | |||
if (is_partitioned_call) | |||
{ | |||
} | |||
else | |||
{ | |||
} | |||
} | |||
else | |||
{ | |||
throw new LookupError($"No gradient defined for operation '{op.name}' (op type: {op.type})"); | |||
} | |||
} | |||
else | |||
// if (loop_state) | |||
//loop_state.EnterGradWhileContext(op, before: false); | |||
if ((is_func_call || grad_fn != null) && has_out_grads) | |||
{ | |||
// NOTE: If _AggregatedGrads didn't compute a value for the i'th | |||
// output, it means that the cost does not depend on output[i], | |||
// therefore dC/doutput[i] is 0. | |||
foreach (var (i, out_grad) in enumerate(out_grads)) | |||
{ | |||
if (out_grad == null) | |||
if (out_grad == null && | |||
(grad_fn == null || _IsTrainable(op.outputs[i]))) | |||
{ | |||
// Only trainable outputs or outputs for a function call that | |||
// will use SymbolicGradient get a zero gradient. Gradient | |||
// functions should ignore the gradient for other outputs. | |||
if (loop_state != null) | |||
; | |||
else | |||
@@ -143,13 +172,19 @@ namespace Tensorflow | |||
tf_with(ops.name_scope(op.name + "_grad"), scope1 => | |||
{ | |||
string name1 = scope1; | |||
if (grad_fn != null) | |||
{ | |||
in_grads = _MaybeCompile(grad_scope, op, out_grads.Select(x => x[0]).ToArray(), null, grad_fn); | |||
_VerifyGeneratedGradients(in_grads, op); | |||
in_grads = _MaybeCompile(grad_scope, | |||
op, | |||
out_grads.Where(x => x != null).Select(x => x[0]).ToArray(), | |||
null, | |||
grad_fn); | |||
} | |||
else | |||
{ | |||
throw new NotImplementedException("lambda: _SymGrad(op, out_grads)"); | |||
} | |||
_VerifyGeneratedGradients(in_grads, op); | |||
if (gate_gradients && in_grads.Count(x => x != null) > 1) | |||
{ | |||
ops._colocate_with_for_gradient(null, gradient_uid, ignore_existing: true); | |||
@@ -157,6 +192,12 @@ namespace Tensorflow | |||
} | |||
}); | |||
} | |||
else | |||
{ | |||
// If no grad_fn is defined or none of out_grads is available, | |||
// just propagate a list of None backwards. | |||
in_grads = new Tensor[_NonEagerInputs(op, xs).Count()]; | |||
} | |||
} | |||
else | |||
{ | |||
@@ -168,11 +209,11 @@ namespace Tensorflow | |||
{ | |||
if (in_grad != null) | |||
{ | |||
if (in_grad is Tensor && | |||
if (!(in_grad is null) && | |||
in_grad.Tag == null && // maybe a IndexedSlice | |||
t_in.dtype != TF_DataType.TF_RESOURCE) | |||
{ | |||
in_grad.shape = t_in.shape; | |||
in_grad.set_shape(t_in.TensorShape); | |||
} | |||
_SetGrad(grads, t_in, in_grad); | |||
@@ -0,0 +1,54 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Linq; | |||
using System.Text; | |||
using Tensorflow.Framework; | |||
using static Tensorflow.Binding; | |||
namespace Tensorflow.Gradients | |||
{ | |||
[RegisterGradient("image_grad")] | |||
public class image_grad | |||
{ | |||
[RegisterGradient("ResizeNearestNeighbor")] | |||
public static Tensor[] _ResizeNearestNeighborGrad(Operation op, Tensor[] grads) | |||
{ | |||
var grad = grads[0]; | |||
var image = op.inputs[0]; | |||
var shape = new TensorShape(image.shape.Skip(1).Take(2).ToArray()); | |||
Tensor image_shape = null; | |||
if (shape.is_fully_defined()) | |||
throw new NotImplementedException("_ResizeNearestNeighborGrad shape.is_fully_defined"); | |||
else | |||
image_shape = array_ops.shape(image)["1:3"]; | |||
grad = gen_image_ops.resize_nearest_neighbor_grad( | |||
grad, | |||
image_shape, | |||
align_corners: op.get_attr<bool>("align_corners"), | |||
half_pixel_centers: op.get_attr<bool>("half_pixel_centers")); | |||
return new Tensor[] | |||
{ | |||
grad, | |||
null | |||
}; | |||
} | |||
} | |||
} |
@@ -42,7 +42,8 @@ namespace Tensorflow.Gradients | |||
var x = op.inputs[0]; | |||
var y = op.inputs[1]; | |||
var grad = grads[0]; | |||
if (grad is Tensor && _ShapesFullySpecifiedAndEqual(x, y, grad)) | |||
if (grad is Tensor && | |||
_ShapesFullySpecifiedAndEqual(x, y, grad)) | |||
return new Tensor[] { grad, grad }; | |||
var sx = array_ops.shape(x); | |||
@@ -96,6 +97,12 @@ namespace Tensorflow.Gradients | |||
}); | |||
} | |||
[RegisterNoGradient("GreaterEqual")] | |||
public static Tensor[] _GreaterEqualGrad(Operation op, Tensor[] grads) => null; | |||
[RegisterNoGradient("ZerosLike")] | |||
public static Tensor[] _ZerosLike(Operation op, Tensor[] grads) => null; | |||
[RegisterGradient("Identity")] | |||
public static Tensor[] _IdGrad(Operation op, Tensor[] grads) | |||
{ | |||
@@ -124,6 +131,17 @@ namespace Tensorflow.Gradients | |||
}); | |||
} | |||
[RegisterGradient("Log1p")] | |||
public static Tensor[] _Log1pGrad(Operation op, Tensor[] grads) | |||
{ | |||
var grad = grads[0]; | |||
var x = op.inputs[0]; | |||
return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => { | |||
x = math_ops.conj(x); | |||
return new Tensor[] { grad * math_ops.reciprocal(1 + x) }; | |||
}); | |||
} | |||
[RegisterGradient("Mul")] | |||
public static Tensor[] _MulGrad(Operation op, Tensor[] grads) | |||
{ | |||
@@ -332,6 +350,21 @@ namespace Tensorflow.Gradients | |||
return new Tensor[] { -grads[0] }; | |||
} | |||
[RegisterGradient("Select")] | |||
public static Tensor[] _SelectGrad(Operation op, Tensor[] grads) | |||
{ | |||
var grad = grads[0]; | |||
var c = op.inputs[0]; | |||
var x = op.inputs[1]; | |||
var zeros = array_ops.zeros_like(x); | |||
return new Tensor[] | |||
{ | |||
null, | |||
array_ops.where(c, grad, zeros), | |||
array_ops.where(c, zeros, grad) | |||
}; | |||
} | |||
private static Tensor _safe_shape_div(Tensor x, Tensor y) | |||
{ | |||
return math_ops.floordiv(x, gen_math_ops.maximum(y, 1)); | |||
@@ -343,7 +376,8 @@ namespace Tensorflow.Gradients | |||
var grad = grads[0]; | |||
var x = op.inputs[0]; | |||
var y = op.inputs[1]; | |||
if (grad is Tensor && _ShapesFullySpecifiedAndEqual(x, y, grad)) | |||
if (grad is Tensor && | |||
_ShapesFullySpecifiedAndEqual(x, y, grad)) | |||
return new Tensor[] { grad, -grad }; | |||
var sx = array_ops.shape(x); | |||
@@ -361,9 +395,10 @@ namespace Tensorflow.Gradients | |||
var x_shape = x._shape_tuple(); | |||
var y_shape = y._shape_tuple(); | |||
var grad_shape = grad._shape_tuple(); | |||
return Enumerable.SequenceEqual(x_shape, y_shape) && | |||
return x_shape != null && | |||
y_shape != null && | |||
Enumerable.SequenceEqual(x_shape, y_shape) && | |||
Enumerable.SequenceEqual(y_shape, grad_shape) && | |||
x.NDims != -1 && | |||
!x_shape.Contains(-1); | |||
} | |||
@@ -382,7 +417,9 @@ namespace Tensorflow.Gradients | |||
var rank = input_0_shape.Length; | |||
if (Enumerable.SequenceEqual(Enumerable.Range(0, rank), axes.Data<int>())) | |||
{ | |||
grad = array_ops.reshape(grad, new int[] { 1 }); | |||
var new_shape = range(rank).Select(x => 1).ToArray(); | |||
grad = array_ops.reshape(grad, new_shape); | |||
// If shape is not fully defined (but rank is), we use Shape. | |||
if (!input_0_shape.Contains(-1)) | |||
input_shape = constant_op.constant(input_0_shape); | |||
else | |||
@@ -47,6 +47,15 @@ namespace Tensorflow.Gradients | |||
return new Tensor[] { gen_nn_ops.relu_grad(grads[0], op.outputs[0]) }; | |||
} | |||
[RegisterGradient("LeakyRelu")] | |||
public static Tensor[] _LeakyReluGrad(Operation op, Tensor[] grads) | |||
{ | |||
var grad = grads[0]; | |||
var x = op.inputs[0]; | |||
var alpha = (float)op.get_attr("alpha"); | |||
return new Tensor[] { gen_nn_ops.leaky_relu_grad(grad, x, alpha: alpha)}; | |||
} | |||
/// <summary> | |||
/// The derivative of the softmax nonlinearity. | |||
/// </summary> | |||
@@ -157,6 +166,94 @@ namespace Tensorflow.Gradients | |||
}; | |||
} | |||
[RegisterGradient("FusedBatchNorm")] | |||
public static Tensor[] _FusedBatchNormGrad(Operation op, Tensor[] grads) | |||
=> _BaseFusedBatchNormGrad(op, 0, grads); | |||
/// <summary> | |||
/// Return the gradients for the 3 inputs of BatchNorm. | |||
/// </summary> | |||
/// <param name="op"></param> | |||
/// <param name="version"></param> | |||
/// <param name="grads"></param> | |||
/// <returns></returns> | |||
public static Tensor[] _BaseFusedBatchNormGrad(Operation op, int version, Tensor[] grads) | |||
{ | |||
var x = op.inputs[0]; | |||
var grad_y = grads[0]; | |||
var scale = op.inputs[1]; | |||
var epsilon = op.get_attr<float>("epsilon"); | |||
var data_format = op.get_attr<string>("data_format"); | |||
var is_training = op.get_attr<bool>("is_training"); | |||
Func<FusedBatchNormParams, Tensor[]> grad_fun = null; | |||
switch (version) | |||
{ | |||
case 2: | |||
throw new NotImplementedException(""); | |||
case 1: | |||
throw new NotImplementedException(""); | |||
default: | |||
grad_fun = gen_nn_ops.fused_batch_norm_grad; | |||
break; | |||
} | |||
if (is_training) | |||
{ | |||
return grad_fun(new FusedBatchNormParams | |||
{ | |||
YBackprop = grad_y, | |||
X = x, | |||
Scale = scale, | |||
ReserveSpace1 = op.outputs[3], | |||
ReserveSpace2 = op.outputs[4], | |||
ReserveSpace3 = version == 2 ? op.outputs[5] : null, | |||
Epsilon = epsilon, | |||
DataFormat = data_format, | |||
IsTraining = is_training | |||
}); | |||
} | |||
else | |||
{ | |||
var pop_mean = op.inputs[3]; | |||
var pop_var = op.inputs[4]; | |||
if (data_format == "NCHW") | |||
throw new NotImplementedException(""); | |||
var results = grad_fun(new FusedBatchNormParams | |||
{ | |||
YBackprop = grad_y, | |||
X = x, | |||
Scale = scale, | |||
ReserveSpace1 = op.outputs[3], | |||
ReserveSpace2 = op.outputs[4], | |||
ReserveSpace3 = version == 2 ? op.outputs[5] : null, | |||
Epsilon = epsilon, | |||
DataFormat = data_format, | |||
IsTraining = is_training | |||
}); | |||
var (dx, dscale, doffset) = (results[0], results[1], results[2]); | |||
if (data_format == "NCHW") | |||
throw new NotImplementedException(""); | |||
return new Tensor[] | |||
{ | |||
dx, | |||
dscale, | |||
doffset, | |||
null, | |||
null | |||
}; | |||
} | |||
} | |||
[RegisterGradient("BatchNormWithGlobalNormalization")] | |||
public static Tensor _BatchNormWithGlobalNormalizationGrad(Operation op, Tensor[] grads) | |||
{ | |||
throw new NotImplementedException("BatchNormWithGlobalNormalization"); | |||
} | |||
private static bool IsZero(Tensor g) | |||
{ | |||
if (new string[] { "ZerosLike", "Zeros" }.Contains(g.op.type)) | |||
@@ -39,6 +39,14 @@ namespace Tensorflow | |||
gradientFunctions[name] = func; | |||
} | |||
public static void RegisterNoGradientFunction(string name) | |||
{ | |||
if (gradientFunctions == null) | |||
gradientFunctions = new Dictionary<string, Func<Operation, Tensor[], Tensor[]>>(); | |||
gradientFunctions[name] = null; | |||
} | |||
public static Func<Operation, Tensor[], Tensor[]> get_gradient_function(Operation op) | |||
{ | |||
if (op.inputs == null) return null; | |||
@@ -68,11 +76,18 @@ namespace Tensorflow | |||
args: new object[] { oper, out_grads }) as Tensor[] | |||
); | |||
} | |||
// REGISTER_NO_GRADIENT_OP | |||
methods = g.GetMethods().Where(x => x.GetCustomAttribute<RegisterNoGradient>() != null) | |||
.ToArray(); | |||
foreach (var m in methods) | |||
RegisterNoGradientFunction(m.GetCustomAttribute<RegisterNoGradient>().Name); | |||
} | |||
} | |||
if (!gradientFunctions.ContainsKey(op.type)) | |||
throw new NotImplementedException($"can't get graident function through get_gradient_function {op.type}"); | |||
throw new LookupError($"can't get graident function through get_gradient_function {op.type}"); | |||
return gradientFunctions[op.type]; | |||
} | |||
@@ -21,11 +21,10 @@ using static Tensorflow.Binding; | |||
namespace Tensorflow | |||
{ | |||
/// <summary> | |||
/// Serves as a stack for determining current default graph. | |||
/// </summary> | |||
public class DefaultGraphStack | |||
public class DefaultGraphStack | |||
{ | |||
private readonly List<StackModel> _stack = new List<StackModel>(); | |||
@@ -40,7 +39,7 @@ namespace Tensorflow | |||
public Graph get_controller() | |||
{ | |||
if (_stack.Count(x => x.IsDefault) == 0) | |||
if (_stack.Count == 0 || _stack.Count(x => x.IsDefault) == 0) | |||
_stack.Add(new StackModel {Graph = tf.Graph(), IsDefault = true}); | |||
for (var i = _stack.Count - 1; i >= 0; i--) | |||
{ | |||
@@ -52,6 +51,20 @@ namespace Tensorflow | |||
throw new TensorflowException("Unable to find a default graph"); | |||
} | |||
/// <summary>
/// Returns the top-most default graph on the stack without creating one;
/// returns null when no default graph has been registered.
/// </summary>
public Graph peak_controller()
{
    // Scan from the most recently pushed entry downwards.
    for (var i = _stack.Count - 1; i >= 0; i--)
    {
        var model = _stack[i];
        if (model.IsDefault)
            return model.Graph;
    }

    // Empty stack or no entry flagged as default.
    return null;
}
public bool remove(Graph g) | |||
{ | |||
if (_stack.Count == 0) | |||
@@ -14,6 +14,7 @@ | |||
limitations under the License. | |||
******************************************************************************/ | |||
using Google.Protobuf; | |||
using System.IO; | |||
using Tensorflow.Util; | |||
@@ -37,7 +38,9 @@ namespace Tensorflow | |||
using (var buffer = ToGraphDef(status)) | |||
{ | |||
status.Check(true); | |||
def = GraphDef.Parser.ParseFrom(buffer.MemoryBlock.Stream()); | |||
// limit size to 250M, recursion to max 100 | |||
var inputStream = CodedInputStream.CreateWithLimits(buffer.MemoryBlock.Stream(), 250 * 1024 * 1024, 100); | |||
def = GraphDef.Parser.ParseFrom(inputStream); | |||
} | |||
// Strip the experimental library field iff it's empty. | |||
@@ -54,19 +54,35 @@ namespace Tensorflow | |||
var handle = return_oper_handle.node + tf_op_size * i; | |||
return_opers[i] = new Operation(*(IntPtr*)handle); | |||
} | |||
} | |||
} | |||
return return_opers; | |||
} | |||
/// <summary>
/// Get operation with given <paramref name="operName"/>.
/// </summary>
/// <exception cref="ArgumentNullException">When <paramref name="operName"/> is null.</exception>
/// <exception cref="ValueError">When <paramref name="operName"/> is not found in the current graph.</exception>
/// <exception cref="RuntimeError">When tf.get_default_graph() is not the current graph.</exception>
/// <example>
/// graph.GetOperationByName("CustomInputName");
/// </example>
public Operation OperationByName(string operName)
{
    if (operName == null)
        throw new ArgumentNullException(nameof(operName));

    var handle = c_api.TF_GraphOperationByName(_handle, operName);
    if (handle == IntPtr.Zero)
        throw new ValueError($"Could not find operation \"{operName}\" inside graph \"{_graph_key}\".");

    // Resolving an operation against a graph that is not the current default is
    // almost always a caller bug, so fail loudly with both keys in the message.
    // (Removed unresolved-merge residue: the superseded Console.WriteLine branch
    // was still interleaved with this check.)
    var defaultKey = tf.get_default_graph().graph_key;
    if (graph_key != defaultKey)
    {
        throw new RuntimeError($"Current graph is not default graph. Default Graph Key: {defaultKey}, Current Graph Key: {graph_key}");
    }

    return new Operation(handle, g: this);
}
@@ -175,6 +175,10 @@ namespace Tensorflow | |||
if (_nodes_by_name.ContainsKey(op_name)) | |||
return _nodes_by_name[op_name].outputs[out_n]; | |||
else | |||
throw new KeyError($"The name {name} refers to a Tensor which does not " + | |||
$"exist. The operation, {op_name}, does not exist in the " + | |||
"graph."); | |||
} | |||
else if (!name.Contains(":") & allow_operation) | |||
{ | |||
@@ -223,6 +227,10 @@ namespace Tensorflow | |||
public void add_to_collection<T>(string name, T value) | |||
{ | |||
if(name == "update_ops") | |||
{ | |||
} | |||
_check_not_finalized(); | |||
if (_collections.ContainsKey(name)) | |||
(_collections[name] as List<T>).Add(value); | |||
@@ -242,7 +250,7 @@ namespace Tensorflow | |||
throw new RuntimeError("Graph is finalized and cannot be modified."); | |||
} | |||
public unsafe Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes, | |||
public Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes, | |||
TF_DataType[] input_types = null, string name = null, | |||
Dictionary<string, AttrValue> attrs = null, OpDef op_def = null) | |||
{ | |||
@@ -284,6 +292,11 @@ namespace Tensorflow | |||
return op; | |||
} | |||
// Placeholder for graph-level device placement (Python's tf.Graph.device).
// Not implemented yet; any call fails fast.
public void device(string device_name)
{
    throw new NotImplementedException("");
}
private void _create_op_helper(Operation op, bool compute_device = true) | |||
{ | |||
_record_op_seen_by_control_dependencies(op); | |||
@@ -420,14 +433,36 @@ namespace Tensorflow | |||
/// <summary>
/// Returns the values stored in collection <paramref name="name"/>, converted to
/// <typeparamref name="T"/> via boxing casts where the stored element type differs.
/// Returns an empty list when the collection does not exist.
/// </summary>
/// <param name="name">Collection key.</param>
/// <param name="scope">Not used yet. NOTE(review): scope filtering is unimplemented — confirm callers do not rely on it.</param>
public List<T> get_collection<T>(string name, string scope = null)
{
    // Fix: an absent collection previously produced an empty placeholder that fell
    // through the type switch below and threw NotImplementedException for any
    // element type not in the list. A missing collection is simply empty.
    if (!_collections.ContainsKey(name))
        return new List<T>();

    switch (_collections[name])
    {
        case List<VariableV1> list:
            return list.Select(x => (T)(object)x).ToList();
        case List<ResourceVariable> list:
            return list.Select(x => (T)(object)x).ToList();
        case List<RefVariable> list:
            return list.Select(x => (T)(object)x).ToList();
        case List<Tensor> list:
            return list.Select(x => (T)(object)x).ToList();
        case List<Operation> list:
            return list.Select(x => (T)(object)x).ToList();
        default:
            // Unknown stored element type: fail loudly rather than return garbage.
            throw new NotImplementedException($"get_collection<{typeof(T).FullName}>");
    }
}
/// <summary>
/// Returns the live list backing collection <paramref name="name"/>, creating an
/// empty one on first access. Unlike get_collection, mutations to the returned
/// list are visible to the graph. (Removed unresolved-merge residue: the old
/// non-generic signature and body were still interleaved with this version.)
/// </summary>
public List<T> get_collection_ref<T>(string name)
{
    if (!_collections.ContainsKey(name))
        _collections[name] = new List<T>();

    // NOTE(review): when the stored list has a different element type, `as`
    // yields null here — confirm callers prefer that over an InvalidCastException.
    return _collections[name] as List<T>;
}
public void prevent_feeding(Tensor tensor) | |||
@@ -28,7 +28,7 @@ namespace Tensorflow.Keras | |||
//Func<Array, bool> py_any = any; | |||
//Func<double, double, double, IEnumerable<double>> py_slice = slice; | |||
public static Session _SESSION = tf.defaultSession; | |||
public static Session _SESSION = ops.get_default_session(); | |||
public static Graph _GRAPH = null; | |||
public static Dictionary<Graph, GraphLearningPhase> _GRAPH_LEARNING_PHASES; | |||
//Dictionary<Graph, Dictionary<string, int>> PER_GRAPH_LAYER_NAME_UIDS; | |||
@@ -90,7 +90,7 @@ namespace Tensorflow.Layers | |||
{ | |||
foreach(var name in collection_list) | |||
{ | |||
var collection = ops.get_collection_ref(name) as List<object>; | |||
var collection = ops.get_collection_ref<Operation>(name); | |||
foreach (var element in elements) | |||
if (!collection.Contains(element)) | |||
@@ -14,20 +14,192 @@ | |||
limitations under the License. | |||
******************************************************************************/ | |||
using System; | |||
using static Tensorflow.Binding; | |||
namespace Tensorflow.Operations.Activation | |||
{ | |||
/// <summary>
/// Sigmoid activation; thin wrapper around <c>tf.sigmoid</c>.
/// </summary>
public class sigmoid : IActivation
{
    // `name` is accepted for interface compatibility but not forwarded.
    public Tensor Activate(Tensor x, string name = null) => tf.sigmoid(x);
}
/// <summary>
/// Hyperbolic tangent activation; thin wrapper around <c>tf.tanh</c>.
/// </summary>
public class tanh : IActivation
{
    // `name` is accepted for interface compatibility but not forwarded.
    public Tensor Activate(Tensor x, string name = null) => tf.tanh(x);
}
/// <summary>
/// Leaky ReLU activation with a configurable slope for negative inputs.
/// </summary>
public class leakyrelu : IActivation
{
    // Slope applied to negative inputs (0.3 matches the Keras default).
    private readonly float _alpha;

    public leakyrelu(float alpha = 0.3f) => _alpha = alpha;

    public Tensor Activate(Tensor x, string name = null) => nn_ops.leaky_relu(x, _alpha);
}
// Exponential Linear Unit activation.
public class elu : IActivation
{
    // Scale applied to the negative-side output when it differs from the default.
    private readonly float _alpha;
    public elu(float alpha = 0.1f)
    {
        _alpha = alpha;
    }
    public Tensor Activate(Tensor x, string name = null)
    {
        var res = gen_ops.elu(x);
        // NOTE(review): Keras skips the rescale when alpha == 1.0; this compares
        // against the local default 0.1 instead — confirm this is intentional.
        if (Math.Abs(_alpha - 0.1f) < 0.00001f)
        {
            return res;
        }
        // Rescale only the negative part; positive inputs keep the raw elu output.
        return array_ops.@where(x > 0, res, _alpha * res);
    }
}
/// <summary>
/// Softmax activation computed along a configurable axis.
/// </summary>
public class softmax : IActivation
{
    // Axis along which the normalization runs (-1 = last axis).
    private readonly int _axis;

    public softmax(int axis = -1) => _axis = axis;

    public Tensor Activate(Tensor x, string name = null) => nn_ops.softmax(x, _axis);
}
/// <summary>
/// Softplus activation; thin wrapper around <c>gen_ops.softplus</c>.
/// </summary>
public class softplus : IActivation
{
    public Tensor Activate(Tensor x, string name = null) => gen_ops.softplus(x);
}
/// <summary>
/// Softsign activation; thin wrapper around <c>gen_ops.softsign</c>.
/// </summary>
public class softsign : IActivation
{
    public Tensor Activate(Tensor x, string name = null) => gen_ops.softsign(x);
}
/// <summary>
/// Identity (linear) activation: returns the input tensor unchanged.
/// </summary>
public class linear : IActivation
{
    public Tensor Activate(Tensor x, string name = null) => x;
}
/// <summary>
/// Exponential activation; thin wrapper around <c>tf.exp</c>.
/// </summary>
public class exponential : IActivation
{
    public Tensor Activate(Tensor x, string name = null) => tf.exp(x, name: name);
}
/// <summary>
/// ReLU activation modeled on keras/backend.py relu: supports a leaky slope
/// (<c>alpha</c>), an activation <c>threshold</c>, and an output cap (<c>max_value</c>).
/// This body resolves the unmerged-diff residue that left the superseded
/// OpDefLibrary-based implementation interleaved with the new one.
/// </summary>
public class relu : IActivation
{
    private readonly float _threshold;
    private readonly float _alpha;
    private readonly float? _maxValue;

    public relu(float threshold = 0f, float alpha = 0.2f, float? max_value = null)
    {
        _threshold = threshold;
        _alpha = alpha;
        _maxValue = max_value;
    }

    public Tensor Activate(Tensor x, string name = null)
    {
        // based on keras/backend.py
        if (Math.Abs(_alpha) > 0.000001f)
        {
            // Fast path: plain leaky relu when there is no cap and no threshold.
            if (!_maxValue.HasValue && Math.Abs(_threshold) < 0.0001f)
                return nn_ops.leaky_relu(x, _alpha);
        }

        // Negative part used by the leaky slope. Fix: both branches previously
        // computed relu(-x + threshold); without a threshold it should be relu(-x).
        Tensor negative_part;
        if (Math.Abs(_threshold) > 0.000001f)
            negative_part = gen_ops.relu(-x + _threshold);
        else
            negative_part = gen_ops.relu(-x);

        if (Math.Abs(_threshold) > 0.000001f)
        {
            // Zero out everything at or below the threshold.
            x = x * math_ops.cast(tf.greater(x, _threshold), TF_DataType.TF_FLOAT);
        }
        else if (_maxValue.HasValue && Math.Abs(_maxValue.Value - 6f) < 0.0001f)
        {
            // Fix: guard HasValue before dereferencing — the original threw a
            // NullReferenceException here when max_value was null. relu6 fuses
            // the clip for the common max_value == 6 case.
            x = gen_ops.relu6(x);
        }
        else
        {
            x = gen_ops.relu(x);
        }

        if (_maxValue.HasValue)
        {
            // Clip the output to [0, max_value].
            var maxval = constant_op.constant(_maxValue.Value, x.dtype.as_base_dtype());
            var zero = constant_op.constant(0.0f, x.dtype.as_base_dtype());
            x = gen_ops.clip_by_value(x, zero, maxval);
        }

        if (Math.Abs(_alpha) > 0.00001f)
        {
            // Apply the leaky slope to the negative part.
            var a = constant_op.constant(_alpha, x.dtype.as_base_dtype());
            x -= a * negative_part;
        }

        return x;
    }
}
/// <summary>
/// Scaled Exponential Linear Unit: scale * elu(x, alpha) with the
/// self-normalizing constants from Klambauer et al. (2017).
/// </summary>
public class selu : IActivation
{
    // Fixed constants from the SELU paper; not configurable by design.
    private const float Alpha = 1.6732632423543772848170429916717f;
    private const float Scale = 1.0507009873554804934193349852946f;

    public Tensor Activate(Tensor x, string name = null)
        => Scale * new elu(Alpha).Activate(x, name);
}
/// <summary>
/// Hard sigmoid: the piecewise-linear approximation clip(0.2 * x + 0.5, 0, 1).
/// </summary>
public class hard_sigmoid : IActivation
{
    public Tensor Activate(Tensor x, string name = null)
    {
        var scaled = (0.2 * x) + 0.5;
        var lower = tf.convert_to_tensor(0.0f, scaled.dtype.as_base_dtype());
        var upper = tf.convert_to_tensor(1.0f, scaled.dtype.as_base_dtype());
        return tf.clip_by_value(scaled, lower, upper);
    }
}
} | |||
} |
@@ -27,20 +27,6 @@ namespace Tensorflow.Operations | |||
/// </summary> | |||
public class CondContext : ControlFlowContext, IProtoBuf<CondContextDef, CondContext> | |||
{ | |||
/// <summary> | |||
/// The boolean tensor for the cond predicate | |||
/// </summary> | |||
private Tensor _pred; | |||
public Tensor pred => _pred; | |||
/// <summary> | |||
/// 0 or 1 representing this branch | |||
/// </summary> | |||
private int _branch; | |||
private Dictionary<string, Tensor> _external_values = new Dictionary<string, Tensor>(); | |||
/// <summary> | |||
@@ -45,10 +45,19 @@ namespace Tensorflow.Operations | |||
/// The predicate tensor in this branch | |||
/// </summary> | |||
protected Tensor _pivot; | |||
public Tensor pivot | |||
{ | |||
get => _pivot; | |||
} | |||
public Tensor pivot => _pivot; | |||
/// <summary> | |||
/// The boolean tensor for the cond predicate | |||
/// </summary> | |||
protected Tensor _pred; | |||
public Tensor pred => _pred; | |||
/// <summary> | |||
/// 0 or 1 representing this branch | |||
/// </summary> | |||
protected int _branch; | |||
public int branch => _branch; | |||
protected Stack<ControlFlowContext> _context_stack; | |||
protected ControlFlowContext _outer_context; | |||
@@ -0,0 +1,55 @@ | |||
/***************************************************************************** | |||
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
******************************************************************************/ | |||
namespace Tensorflow.Operations.Initializers | |||
{ | |||
/// <summary>
/// Initializer that produces tensors filled with a single fixed value.
/// </summary>
public class Constant<T> : IInitializer
{
    private readonly TF_DataType _dtype;
    private readonly T _value;
    private readonly bool _verifyShape;

    public Constant(T value, TF_DataType dtype = TF_DataType.TF_FLOAT, bool verify_shape = false)
    {
        _value = value;
        _dtype = dtype;
        _verifyShape = verify_shape;
    }

    /// <summary>
    /// Builds a constant tensor of the given shape. Call-site dtype/verify_shape
    /// override the instance defaults when supplied.
    /// </summary>
    public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid, bool? verify_shape = null)
    {
        var effectiveDtype = dtype == TF_DataType.DtInvalid ? _dtype : dtype;
        var effectiveVerify = verify_shape ?? _verifyShape;

        return constant_op._constant_impl(_value, effectiveDtype, shape,
            name: "Const",
            verify_shape: effectiveVerify,
            allow_broadcast: false);
    }

    // Anonymous-object property names (value, dtype) match the original config shape.
    public object get_config() => new
    {
        value = _value,
        dtype = _dtype.name()
    };
}
} |
@@ -18,7 +18,7 @@ namespace Tensorflow | |||
{ | |||
public interface IInitializer | |||
{ | |||
Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid); | |||
Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid, bool? verify_shape = null); | |||
object get_config(); | |||
} | |||
} |
@@ -25,7 +25,7 @@ namespace Tensorflow.Operations.Initializers | |||
this.dtype = dtype; | |||
} | |||
public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid) | |||
public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid, bool? verify_shape = null) | |||
{ | |||
if (dtype == TF_DataType.DtInvalid) | |||
dtype = this.dtype; | |||
@@ -38,7 +38,7 @@ namespace Tensorflow.Operations.Initializers | |||
this.dtype = dtype; | |||
} | |||
public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid) | |||
public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid, bool? verify_shape = null) | |||
{ | |||
if (dtype == TF_DataType.DtInvalid) | |||
dtype = this.dtype; | |||
@@ -28,7 +28,7 @@ namespace Tensorflow.Operations.Initializers | |||
} | |||
public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid) | |||
public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid, bool? verify_shape = null) | |||
{ | |||
return random_ops.random_uniform(shape, | |||
minval: minval, | |||
@@ -34,7 +34,7 @@ namespace Tensorflow.Operations.Initializers | |||
this.dtype = dtype; | |||
} | |||
public Tensor call(TensorShape shape, TF_DataType dtype) | |||
public Tensor call(TensorShape shape, TF_DataType dtype, bool? verify_shape = null) | |||
{ | |||
return random_ops.truncated_normal(shape, mean, stddev, dtype : dtype, seed: seed); | |||
} | |||
@@ -45,7 +45,7 @@ namespace Tensorflow.Operations.Initializers | |||
_dtype = dtype; | |||
} | |||
public Tensor call(TensorShape shape, TF_DataType dtype) | |||
public Tensor call(TensorShape shape, TF_DataType dtype, bool? verify_shape = null) | |||
{ | |||
var (fan_in, fan_out) = _compute_fans(shape); | |||
if (_mode == "fan_in") | |||
@@ -25,7 +25,7 @@ namespace Tensorflow.Operations.Initializers | |||
this.dtype = dtype; | |||
} | |||
public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid) | |||
public Tensor call(TensorShape shape, TF_DataType dtype = TF_DataType.DtInvalid, bool? verify_shape = null) | |||
{ | |||
if (dtype == TF_DataType.DtInvalid) | |||
dtype = this.dtype; | |||
@@ -0,0 +1,27 @@ | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
namespace Tensorflow.Operations | |||
{ | |||
/// <summary>
/// Argument bundle for the FusedBatchNorm gradient ops. Defaults are applied
/// via property initializers (the implicit default constructor is unchanged).
/// </summary>
public class FusedBatchNormParams
{
    public string Name { get; set; }
    public Tensor YBackprop { get; set; }
    public Tensor X { get; set; }
    public Tensor Scale { get; set; }
    public Tensor ReserveSpace1 { get; set; }
    public Tensor ReserveSpace2 { get; set; }
    public Tensor ReserveSpace3 { get; set; }
    public float Epsilon { get; set; } = 0.0001f;
    public string DataFormat { get; set; } = "NHWC";
    public bool IsTraining { get; set; } = true;
}
} |