
Merge pull request #187 from PppBr/master

implemented class nn
tags/v0.8.0
Haiping (GitHub) committed 6 years ago · commit b15b98fbe5
6 changed files with 60 additions and 27 deletions:

  1. src/TensorFlowNET.Core/APIs/tf.nn.cs (+7, -1)
  2. src/TensorFlowNET.Core/Operations/gen_math_ops.cs (+1, -1)
  3. src/TensorFlowNET.Core/Operations/math_ops.py.cs (+25, -1)
  4. src/TensorFlowNET.Core/Operations/nn_impl.py.cs (+5, -11)
  5. test/TensorFlowNET.Examples/NaiveBayesClassifier.cs (+19, -13)
  6. test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj (+3, -0)

src/TensorFlowNET.Core/APIs/tf.nn.cs (+7, -1)

@@ -6,6 +6,12 @@ namespace Tensorflow
 {
     public static partial class tf
     {
-        public static nn_impl nn => new nn_impl();
+        public static class nn
+        {
+            public static (Tensor, Tensor) moments(Tensor x,
+                int[] axes,
+                string name = null,
+                bool keep_dims = false) => nn_impl.moments(x, axes, name: name, keep_dims: keep_dims);
+        }
     }
 }
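
With this change callers reach moments through the static tf.nn class rather than an nn_impl instance property. A minimal usage sketch (illustrative only: the data values and variable names are not part of the commit, and the same usings as the NaiveBayes example below are assumed):

    // Build a 2x2 constant from an NDArray, as the example project does elsewhere in this PR.
    var data = tf.constant(np.array<float>(new float[][] { new float[] { 1.0f, 2.0f }, new float[] { 3.0f, 4.0f } }));
    // Per-row mean and variance along axis 1, returned as a (Tensor, Tensor) value tuple.
    var (mean, variance) = tf.nn.moments(data, new int[] { 1 });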

src/TensorFlowNET.Core/Operations/gen_math_ops.cs (+1, -1)

@@ -22,7 +22,7 @@ namespace Tensorflow
         /// <returns> A `Tensor`. Has the same type as `input`.</returns>
         public static Tensor mean(Tensor input, Tensor axis, bool keep_dims = false, string name = null)
         {
-            var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, axis });
+            var _op = _op_def_lib._apply_op_helper("Mean", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims });
             return _op.outputs[0];
         }
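
The previous call registered the axis tensor under the anonymous-object name axis and never forwarded keep_dims, so the Mean op's reduction_indices input and keep_dims attribute were not populated. A rough sketch of the intended behavior after the fix (shapes in the comments reflect standard TensorFlow reduction semantics and are an assumption, not a test result from this repository):

    // x has shape [2, 2]; reduce along axis 1.
    var x = tf.constant(np.array<float>(new float[][] { new float[] { 1.0f, 2.0f }, new float[] { 3.0f, 4.0f } }));
    var axis = new Tensor(new int[] { 1 });
    var kept = gen_math_ops.mean(x, axis, keep_dims: true);     // expected shape [2, 1]
    var dropped = gen_math_ops.mean(x, axis, keep_dims: false); // expected shape [2]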


src/TensorFlowNET.Core/Operations/math_ops.py.cs (+25, -1)

@@ -39,7 +39,7 @@ namespace Tensorflow
         {
             var r = _ReductionDims(input_tensor, new Tensor(axis));
             var m = gen_math_ops.mean(input_tensor, r);
-            return _may_reduce_to_scalar(keepdims, m);
+            return _may_reduce_to_scalar(keepdims, axis, m);
         }
         /// <summary>
         /// Returns (x - y)(x - y) element-wise.
@@ -117,6 +117,12 @@
             return output;
         }
 
+        private static Tensor _may_reduce_to_scalar(bool keepdims, int[] axos, Tensor output)
+        {
+            output.shape = new long[0];
+            return output;
+        }
+
         private static Tensor _ReductionDims(Tensor x, Tensor axis)
         {
             if (axis != null)
@@ -130,6 +136,24 @@
             }
         }
 
+        private static int[] _ReductionDims(Tensor x, int[] axis)
+        {
+            if (axis != null)
+            {
+                return axis;
+            }
+            else
+            {
+                var rank = array_ops.rank(x);
+                if (rank != null)
+                {
+                    // return constant_op.constant();
+                }
+                // return range(0, rank, 1);
+                throw new NotFiniteNumberException();
+            }
+        }
+
         public static Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range")
         {
             if(limit == null)
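
The new int[] overload of _may_reduce_to_scalar currently forces the output shape to a scalar regardless of keepdims or the axes passed, and _ReductionDims(Tensor, int[]) still throws when axis is null. In the Python original that null branch reduces over every dimension via range(0, rank(x)). A sketch of what the missing branch could look like once the static rank is available (hypothetical helper, not part of the commit; it assumes Tensor exposes its static shape as a long[]):

    // Hypothetical completion of the axis == null case: reduce over all dimensions.
    private static int[] _AllReductionDims(Tensor x)
    {
        int rank = x.shape.Length;      // assumes the static shape is known
        var axes = new int[rank];
        for (int i = 0; i < rank; i++)
            axes[i] = i;                // equivalent to Python's range(0, rank)
        return axes;
    }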


src/TensorFlowNET.Core/Operations/nn_impl.py.cs (+5, -11)

@@ -14,13 +14,12 @@ namespace Tensorflow
         /// <param name="name"> Name used to scope the operations that compute the moments.</param>
         /// <param name="keep_dims"> Produce moments with the same dimensionality as the input.</param>
         /// <returns> Two `Tensor` objects: `mean` and `variance`.</returns>
-        public Tuple<Tensor, Tensor> moments(Tensor x,
+        public static (Tensor, Tensor) moments(Tensor x,
             int[] axes,
             string name = null,
             bool keep_dims = false)
         {
-            Tuple<Tensor, Tensor> t = null;
-            with(new ops.name_scope(name, "moments", new { x, axes }), scope =>
+            return with<ops.name_scope, (Tensor, Tensor)>(new ops.name_scope(name, "moments", new { x, axes }), scope =>
             {
                 // The dynamic range of fp16 is too limited to support the collection of
                 // sufficient statistics. As a workaround we simply perform the operations
@@ -40,15 +39,10 @@ namespace Tensorflow
                 }
                 // TODO: if x.dtype == dtypes.float16:
                 if (x.dtype == TF_DataType.TF_FLOAT)
-                {
-                    t = Tuple.Create(math_ops.cast(mean, x.dtype), math_ops.cast(variance, x.dtype));
-                    return;
-                }
-                else {
-                    t = Tuple.Create(mean, variance);
-                }
+                    return (math_ops.cast(mean, x.dtype), math_ops.cast(variance, x.dtype));
+                else
+                    return (mean, variance);
             });
-            return t;
         }
     }
 }
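
moments computes the mean and the variance of x over the requested axes: mean = E[x] and variance = E[(x - mean)^2]. The rewrite returns those two tensors as a value tuple straight out of the name scope instead of staging them in a Tuple field. A plain-array check of the two definitions, with no Tensors involved (requires System.Linq; values follow directly from the arithmetic):

    float[] row = { 1f, 2f, 3f, 4f };
    float mean = row.Average();                                           // (1+2+3+4)/4 = 2.5
    float variance = row.Select(v => (v - mean) * (v - mean)).Average();  // (2.25+0.25+0.25+2.25)/4 = 1.25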

test/TensorFlowNET.Examples/NaiveBayesClassifier.cs (+19, -13)

@@ -19,7 +19,6 @@ namespace TensorFlowNET.Examples
             // var X = np.array<float[]>(new float[][] { new float[] { 1.0f, 1.0f}, new float[] { 2.0f, 2.0f }, new float[] { -1.0f, -1.0f }, new float[] { -2.0f, -2.0f }, new float[] { 1.0f, -1.0f }, new float[] { 2.0f, -2.0f }, });
             var X = np.array<float>(new float[][] { new float[] { 1.0f, 1.0f }, new float[] { 2.0f, 2.0f }, new float[] { -1.0f, -1.0f }, new float[] { -2.0f, -2.0f }, new float[] { 1.0f, -1.0f }, new float[] { 2.0f, -2.0f }, });
             var y = np.array<int>(0,0,1,1,2,2);
-
             fit(X, y);
             // Create a regular grid and classify each point
@@ -28,12 +27,12 @@ namespace TensorFlowNET.Examples
         public void fit(NDArray X, NDArray y)
         {
             NDArray unique_y = y.unique<long>();
-            Dictionary<long, List<NDArray>> dic = new Dictionary<long, List<NDArray>>();
+            Dictionary<long, List<List<float>>> dic = new Dictionary<long, List<List<float>>>();
             // Init uy in dic
             foreach (int uy in unique_y.Data<int>())
             {
-                dic.Add(uy, new List<NDArray>());
+                dic.Add(uy, new List<List<float>>());
             }
             // Separate training points by class
             // Shape : nb_classes * nb_samples * nb_features
@@ -41,28 +40,35 @@ namespace TensorFlowNET.Examples
             for (int i = 0; i < y.size; i++)
             {
                 long curClass = (long)y[i];
-                List<NDArray> l = dic[curClass];
-                l.Add(X[i] as NDArray);
+                List<List<float>> l = dic[curClass];
+                List<float> pair = new List<float>();
+                pair.Add((float)X[i, 0]);
+                pair.Add((float)X[i, 1]);
+                l.Add(pair);
                 if (l.Count > maxCount)
                 {
                     maxCount = l.Count;
                 }
                 dic[curClass] = l;
             }
-            NDArray points_by_class = np.zeros(new int[] { dic.Count, maxCount, X.shape[1] });
-            foreach (KeyValuePair<long, List<NDArray>> kv in dic)
+            float[,,] points = new float[dic.Count, maxCount, X.shape[1]];
+            foreach (KeyValuePair<long, List<List<float>>> kv in dic)
             {
-                var cls = kv.Value.ToArray();
-                for (int i = 0; i < dic.Count; i++)
+                int j = (int)kv.Key;
+                for (int i = 0; i < maxCount; i++)
                 {
-                    points_by_class[i] = dic[i];
+                    for (int k = 0; k < X.shape[1]; k++)
+                    {
+                        points[j, i, k] = kv.Value[i][k];
+                    }
                 }
             }
+            NDArray points_by_class = np.array<float>(points);
 
             // estimate mean and variance for each class / feature
             // shape : nb_classes * nb_features
             var cons = tf.constant(points_by_class);
-            Tuple<Tensor, Tensor> tup = tf.nn.moments(cons, new int[]{1});
+            var tup = tf.nn.moments(cons, new int[]{1});
             var mean = tup.Item1;
             var variance = tup.Item2;
             // Create a 3x2 univariate normal distribution with the
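
The fit method now collects each sample's two features into a List<float>, groups those lists by class label, and copies them into a dense [nb_classes, maxCount, nb_features] float array before wrapping it as an NDArray for tf.constant. An equivalent grouping written with LINQ, shown only as a sketch of the intent (it assumes two features per sample, as the example data has, and is not the code that was committed):

    // using System.Linq;
    var grouped = Enumerable.Range(0, (int)y.size)
        .Select(i => (label: (long)y[i], point: new[] { (float)X[i, 0], (float)X[i, 1] }))
        .GroupBy(s => s.label)
        .ToDictionary(g => g.Key, g => g.Select(s => s.point).ToList());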


test/TensorFlowNET.Examples/TensorFlowNET.Examples.csproj (+3, -0)

@@ -20,6 +20,9 @@
     <Reference Include="Newtonsoft.Json">
       <HintPath>C:\Program Files\dotnet\sdk\NuGetFallbackFolder\newtonsoft.json\9.0.1\lib\netstandard1.0\Newtonsoft.Json.dll</HintPath>
     </Reference>
+    <Reference Include="NumSharp.Core">
+      <HintPath>C:\Users\bpeng\Desktop\BoloReborn\NumSharp\src\NumSharp.Core\bin\Debug\netstandard2.0\NumSharp.Core.dll</HintPath>
+    </Reference>
   </ItemGroup>
 
 </Project>
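
The added reference resolves NumSharp.Core from an absolute Debug build path on one developer's machine, so the project only builds there. A more portable sketch, assuming NumSharp is consumed as a NuGet package (the version number is illustrative, not taken from this commit):

  <ItemGroup>
    <PackageReference Include="NumSharp" Version="0.8.1" />
  </ItemGroup>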
