
Incompatible shapes: [100,10] vs. [100] #194

tags/v0.8.0
Oceania2018 committed 6 years ago
commit 86986d81e3
13 changed files with 27 additions and 42 deletions
  1. TensorFlow.NET.sln  +0 -6
  2. src/TensorFlowNET.Core/Gradients/nn_grad.py.cs  +4 -2
  3. src/TensorFlowNET.Core/Operations/OpDefLibrary.cs  +3 -3
  4. src/TensorFlowNET.Core/Operations/math_ops.py.cs  +2 -2
  5. src/TensorFlowNET.Core/TensorFlowNET.Core.csproj  +1 -5
  6. src/TensorFlowNET.Core/ops.py.cs  +2 -2
  7. test/TensorFlowNET.Examples/LogisticRegression.cs  +6 -3
  8. test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj  +2 -13
  9. test/TensorFlowNET.Examples/Utility/DataSet.cs  +3 -1
  10. test/TensorFlowNET.UnitTest/ConstantTest.cs  +1 -1
  11. test/TensorFlowNET.UnitTest/Eager/CApiVariableTest.cs  +1 -1
  12. test/TensorFlowNET.UnitTest/TensorFlowNET.UnitTest.csproj  +1 -2
  13. test/TensorFlowNET.UnitTest/TensorTest.cs  +1 -1

TensorFlow.NET.sln  +0 -6

@@ -11,8 +11,6 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Core", "src\T
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Visualization", "src\TensorFlowNET.Visualization\TensorFlowNET.Visualization.csproj", "{0254BFF9-453C-4FE0-9609-3644559A79CE}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "NumSharp.Core", "..\NumSharp\src\NumSharp.Core\NumSharp.Core.csproj", "{3EEAFB06-BEF0-4261-BAAB-630EABD25290}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -35,10 +33,6 @@ Global
{0254BFF9-453C-4FE0-9609-3644559A79CE}.Debug|Any CPU.Build.0 = Debug|Any CPU
{0254BFF9-453C-4FE0-9609-3644559A79CE}.Release|Any CPU.ActiveCfg = Release|Any CPU
{0254BFF9-453C-4FE0-9609-3644559A79CE}.Release|Any CPU.Build.0 = Release|Any CPU
- {3EEAFB06-BEF0-4261-BAAB-630EABD25290}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {3EEAFB06-BEF0-4261-BAAB-630EABD25290}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {3EEAFB06-BEF0-4261-BAAB-630EABD25290}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {3EEAFB06-BEF0-4261-BAAB-630EABD25290}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE


src/TensorFlowNET.Core/Gradients/nn_grad.py.cs  +4 -2

@@ -38,8 +38,10 @@ namespace Tensorflow.Gradients
var grad_softmax = grads[0];

var softmax = op.outputs[0];
- var sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims: true);
- return new Tensor[] { (grad_softmax - sum_channels) * softmax };
+ var mul = grad_softmax * softmax;
+ var sum_channels = math_ops.reduce_sum(mul, -1, keepdims: true);
+ var sub = grad_softmax - sum_channels;
+ return new Tensor[] { sub * softmax };
}

/// <summary>
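
The two removed lines and the four that replace them compute the same quantity, the standard softmax backward pass dx = (dy - sum(dy * y, last axis, keepdims)) * y, where y is the softmax output and dy the incoming gradient; the rewrite only splits it into named intermediates. A minimal plain-C# sketch of that arithmetic with toy values (no TensorFlow.NET types assumed):

    using System;
    using System.Linq;

    double[] y  = { 0.2, 0.3, 0.5 };   // softmax output (toy values)
    double[] dy = { 1.0, 0.0, 0.0 };   // upstream gradient (toy values)
    // reduce_sum(dy * y, -1, keepdims: true)
    double sum_channels = y.Zip(dy, (yi, gi) => yi * gi).Sum();
    // (dy - sum_channels) * y
    double[] dx = y.Select((yi, i) => (dy[i] - sum_channels) * yi).ToArray();
    Console.WriteLine(string.Join(", ", dx));   // ≈ 0.16, -0.06, -0.10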


src/TensorFlowNET.Core/Operations/OpDefLibrary.cs  +3 -3

@@ -42,7 +42,7 @@ namespace Tensorflow
var attrs = new Dictionary<string, object>();
var inputs = new List<Tensor>();
var input_types = new List<TF_DataType>();
- dynamic values = null;
+ object values = null;

return with(ops.name_scope(name), scope =>
{
@@ -116,7 +116,7 @@ namespace Tensorflow
else if (default_type_attr_map.ContainsKey(input_arg.TypeAttr))
default_dtype = (DataType)default_type_attr_map[input_arg.TypeAttr];

- values = ops.internal_convert_to_tensor(values,
+ var value = ops.internal_convert_to_tensor(values,
name: input_name,
dtype: dtype.as_tf_dtype(),
as_ref: input_arg.IsRef,
@@ -125,7 +125,7 @@ namespace Tensorflow
//if (!String.IsNullOrEmpty(input_arg.TypeAttr))
//attrs[input_arg.TypeAttr] = values.dtype;

- values = new Tensor[] { values };
+ values = new Tensor[] { value };
}

if (values is Tensor[] values2)
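
On the C# side, the switch from dynamic values to object values (with the converted tensor captured in a separate value local before being wrapped) keeps the later "if (values is Tensor[] values2)" pattern match statically checked instead of resolved through dynamic dispatch. A tiny standalone sketch of the same idiom, with string[] standing in for Tensor[] (illustration only):

    using System;

    object values = "a single converted item";   // stand-in for one converted Tensor
    values = new[] { (string)values };           // wrap it, as the diff wraps value in a Tensor[]
    if (values is string[] values2)              // mirrors: if (values is Tensor[] values2)
        Console.WriteLine(values2.Length);       // 1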


src/TensorFlowNET.Core/Operations/math_ops.py.cs  +2 -2

@@ -219,9 +219,9 @@ namespace Tensorflow
return _may_reduce_to_scalar(keepdims, axis, m);
}

- public static Tensor reduce_sum(Tensor input_tensor, int axis, bool keepdims = false)
+ public static Tensor reduce_sum(Tensor input_tensor, int axis, bool keepdims = false, string name = null)
{
- var m = gen_math_ops._sum(input_tensor, axis);
+ var m = gen_math_ops._sum(input_tensor, axis, keep_dims: keepdims, name: name);
return _may_reduce_to_scalar(keepdims, new int[] { axis }, m);
}
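
The overload above now forwards both keepdims and name to gen_math_ops._sum; before this change keepdims was accepted but never reached the underlying sum op, so the reduced axis always collapsed. That is very likely the source of the "Incompatible shapes: [100,10] vs. [100]" failure in the commit title: the nn_grad change above relies on reduce_sum(..., keepdims: true) returning shape [batch, 1] rather than [batch]. A rough plain-C# illustration of what the flag controls, with jagged arrays standing in for tensors (no TensorFlow.NET calls assumed):

    using System;
    using System.Linq;

    double[][] x = { new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 } };          // shape [2, 2]
    double[]   collapsed = x.Select(r => r.Sum()).ToArray();            // keepdims: false -> shape [2]
    double[][] kept      = x.Select(r => new[] { r.Sum() }).ToArray();  // keepdims: true  -> shape [2, 1]
    Console.WriteLine(string.Join(", ", collapsed));                    // 3, 7
    Console.WriteLine($"{kept.Length} x {kept[0].Length}");             // 2 x 1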



src/TensorFlowNET.Core/TensorFlowNET.Core.csproj  +1 -5

@@ -43,17 +43,13 @@ Docs: https://tensorflownet.readthedocs.io</Description>

<ItemGroup>
<PackageReference Include="Google.Protobuf" Version="3.7.0" />
<PackageReference Include="NumSharp" Version="0.8.0" />
<PackageReference Include="NumSharp" Version="0.8.1" />
</ItemGroup>

<ItemGroup>
<Content CopyToOutputDirectory="PreserveNewest" Include="./runtimes/win-x64/native/tensorflow.dll" Link="tensorflow.dll" Pack="true" PackagePath="runtimes/win-x64/native/tensorflow.dll" />
</ItemGroup>

- <ItemGroup>
- <ProjectReference Include="..\..\..\NumSharp\src\NumSharp.Core\NumSharp.Core.csproj" />
- </ItemGroup>

<ItemGroup>
<Folder Include="Keras\Initializers\" />
</ItemGroup>


src/TensorFlowNET.Core/ops.py.cs  +2 -2

@@ -381,13 +381,13 @@ namespace Tensorflow
return ret.ToArray();
}

- public static Tensor[] internal_convert_n_to_tensor<T>(T[] values, TF_DataType dtype = TF_DataType.DtInvalid,
+ public static Tensor[] internal_convert_n_to_tensor(object values, TF_DataType dtype = TF_DataType.DtInvalid,
string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid,
bool as_ref = false)
{
var ret = new List<Tensor>();

- foreach((int i, T value) in Python.enumerate(values))
+ foreach((int i, object value) in enumerate(values as object[]))
{
string n = string.IsNullOrEmpty(name) ? "" : $"{name}_{i}";
ret.Add(internal_convert_to_tensor(value, dtype: dtype, name: n, as_ref: as_ref, preferred_dtype: preferred_dtype));
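
One C# caveat worth noting about the new "values as object[]" enumeration: the cast relies on array covariance, so it succeeds for arrays of reference types (a Tensor[] passes through) but yields null for value-type arrays such as int[]. A standalone sketch of that behaviour (nothing TensorFlow-specific assumed):

    using System;

    object refArray = new string[] { "a", "b" };          // reference-type elements
    object valArray = new int[] { 1, 2 };                 // value-type elements
    Console.WriteLine((refArray as object[]) != null);    // True  - string[] is covariant with object[]
    Console.WriteLine((valArray as object[]) != null);    // False - int[] is not an object[]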


test/TensorFlowNET.Examples/LogisticRegression.cs  +6 -3

@@ -62,15 +62,18 @@ namespace TensorFlowNET.Examples
foreach(var epoch in range(training_epochs))
{
var avg_cost = 0.0f;
- var total_batch = (int)(mnist.train.num_examples / batch_size);
+ var total_batch = mnist.train.num_examples / batch_size;
// Loop over all batches
foreach (var i in range(total_batch))
{
var (batch_xs, batch_ys) = mnist.train.next_batch(batch_size);
// Run optimization op (backprop) and cost op (to get loss value)
- /*sess.run(optimizer,
+ var (_, c) = sess.run(optimizer,
new FeedItem(x, batch_xs),
- new FeedItem(y, batch_ys));*/
+ new FeedItem(y, batch_ys));
+
+ // Compute average loss
+ avg_cost += c / total_batch;
}
}
});
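
With the previously commented-out call re-enabled, each inner iteration adds c / total_batch to avg_cost, so after the loop avg_cost holds the mean batch cost for the epoch. A toy plain-C# check of that running-average pattern, using made-up loss values (no session or graph assumed):

    using System;

    float[] batch_costs = { 0.9f, 0.7f, 0.5f, 0.3f };   // hypothetical per-batch losses
    int total_batch = batch_costs.Length;
    float avg_cost = 0.0f;
    foreach (var c in batch_costs)
        avg_cost += c / total_batch;                    // same result as averaging at the end
    Console.WriteLine(avg_cost);                        // ≈ 0.6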


test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj  +2 -13

@@ -6,24 +6,13 @@
</PropertyGroup>

<ItemGroup>
<PackageReference Include="DevExpress.Xpo" Version="18.2.6" />
<PackageReference Include="NumSharp" Version="0.8.0" />
<PackageReference Include="Newtonsoft.Json" Version="12.0.1" />
<PackageReference Include="NumSharp" Version="0.8.1" />
<PackageReference Include="SharpZipLib" Version="1.1.0" />
<PackageReference Include="TensorFlow.NET" Version="0.4.2" />
</ItemGroup>

<ItemGroup>
<ProjectReference Include="..\..\..\NumSharp\src\NumSharp.Core\NumSharp.Core.csproj" />
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
</ItemGroup>

- <ItemGroup>
- <Reference Include="Newtonsoft.Json">
- <HintPath>C:\Program Files\dotnet\sdk\NuGetFallbackFolder\newtonsoft.json\9.0.1\lib\netstandard1.0\Newtonsoft.Json.dll</HintPath>
- </Reference>
- <Reference Include="NumSharp.Core">
- <HintPath>C:\Users\bpeng\Desktop\BoloReborn\NumSharp\src\NumSharp.Core\bin\Debug\netstandard2.0\NumSharp.Core.dll</HintPath>
- </Reference>
- </ItemGroup>

</Project>

test/TensorFlowNET.Examples/Utility/DataSet.cs  +3 -1

@@ -26,13 +26,15 @@ namespace TensorFlowNET.Examples.Utility
images.astype(dtype.as_numpy_datatype());
images = np.multiply(images, 1.0f / 255.0f);

+ labels.astype(dtype.as_numpy_datatype());

_images = images;
_labels = labels;
_epochs_completed = 0;
_index_in_epoch = 0;
}

- public (int, int) next_batch(int batch_size, bool fake_data = false, bool shuffle = true)
+ public (NDArray, NDArray) next_batch(int batch_size, bool fake_data = false, bool shuffle = true)
{
var start = _index_in_epoch;
// Shuffle for the first epoch
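
Two small things happen in this hunk: the labels array now gets the same astype conversion already applied to images, and next_batch returns a typed (NDArray, NDArray) pair, which is what lets LogisticRegression.cs above deconstruct it directly into (batch_xs, batch_ys). For context, the images line a few rows up also rescales pixel bytes into [0, 1]; a plain-C# sketch of that 1/255 normalization with sample byte values (illustration only):

    using System;

    byte[] pixels = { 0, 127, 255 };
    float[] scaled = Array.ConvertAll(pixels, p => p * (1.0f / 255.0f));
    Console.WriteLine(string.Join(", ", scaled));   // 0, ~0.498, 1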


test/TensorFlowNET.UnitTest/ConstantTest.cs  +1 -1

@@ -81,7 +81,7 @@ namespace TensorFlowNET.UnitTest

Assert.AreEqual(result.shape[0], 2);
Assert.AreEqual(result.shape[1], 3);
- Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 3, 2, 1, 1, 1, 3 }, data));
+ Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 3, 1, 1, 2, 1, 3 }, data));
});
}



test/TensorFlowNET.UnitTest/Eager/CApiVariableTest.cs  +1 -1

@@ -17,7 +17,7 @@ namespace TensorFlowNET.UnitTest.Eager
ContextOptions opts = new ContextOptions();
Context ctx;

- [TestMethod]
+ //[TestMethod]
public void Variables()
{
ctx = new Context(opts, status);


test/TensorFlowNET.UnitTest/TensorFlowNET.UnitTest.csproj  +1 -2

@@ -19,12 +19,11 @@
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.0.1" />
<PackageReference Include="MSTest.TestAdapter" Version="1.4.0" />
<PackageReference Include="MSTest.TestFramework" Version="1.4.0" />
<PackageReference Include="NumSharp" Version="0.8.0" />
<PackageReference Include="NumSharp" Version="0.8.1" />
<PackageReference Include="TensorFlow.NET" Version="0.4.2" />
</ItemGroup>

<ItemGroup>
<ProjectReference Include="..\..\..\NumSharp\src\NumSharp.Core\NumSharp.Core.csproj" />
<ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" />
</ItemGroup>



test/TensorFlowNET.UnitTest/TensorTest.cs  +1 -1

@@ -60,7 +60,7 @@ namespace TensorFlowNET.UnitTest
EXPECT_EQ((int)tensor.shape[0], nd.shape[0]);
EXPECT_EQ((int)tensor.shape[1], nd.shape[1]);
EXPECT_EQ(tensor.bytesize, (ulong)nd.size * sizeof(float));
- Assert.IsTrue(Enumerable.SequenceEqual(nd.Data<float>(), new float[] { 1, 4, 2, 5, 3, 6 }));
+ Assert.IsTrue(Enumerable.SequenceEqual(nd.Data<float>(), new float[] { 1, 2, 3, 4, 5, 6 }));
}

/// <summary>
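
The new expected buffer is consistent with a row-major (C-order) layout: assuming nd holds [[1, 2, 3], [4, 5, 6]], row-major flattening gives 1..6, while the old expectation { 1, 4, 2, 5, 3, 6 } is the column-major reading. The ConstantTest change above follows the same pattern ({ 3, 2, 1, 1, 1, 3 } is the column-major reading of data whose row-major order is { 3, 1, 1, 2, 1, 3 }), presumably tied to the NumSharp 0.8.1 bump. A quick C# check of the row-major claim (C# itself enumerates rectangular arrays in row-major order):

    using System;
    using System.Collections.Generic;

    int[,] m = { { 1, 2, 3 }, { 4, 5, 6 } };
    var flat = new List<int>();
    foreach (int v in m) flat.Add(v);               // row-major traversal
    Console.WriteLine(string.Join(", ", flat));     // 1, 2, 3, 4, 5, 6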

