
Implementing the log_prob() API in normal.py.cs for NB classifier prediction

tags/v0.8.0
Bo Peng · 6 years ago
commit d19e51300d
4 changed files with 55 additions and 4 deletions
  1. +36 -1  src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs
  2. +14 -0  src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs
  3. +5  -0  src/TensorFlowNET.Core/Operations/math_ops.py.cs
  4. +0  -3  test/TensorFlowNET.Examples/NaiveBayesClassifier.cs

+36 -1  src/TensorFlowNET.Core/Operations/Distributions/distribution.py.cs

@@ -27,6 +27,37 @@ namespace Tensorflow
public List<Tensor> _graph_parents {get;set;}
public string _name {get;set;}



/// <summary>
/// Log probability density/mass function.
/// </summary>
/// <param name="value"> `Tensor`.</param>
/// <param name="name"> Python `str` prepended to names of ops created by this function.</param>
/// <returns>log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`.</returns>

/*
public Tensor log_prob(Tensor value, string name = "log_prob")
{
    return _call_log_prob(value, name);
}

private Tensor _call_log_prob(Tensor value, string name)
{
    with(ops.name_scope(name, "moments", new { value }), scope =>
    {
        value = _convert_to_tensor(value, "value", _dtype);
    });

    throw new NotImplementedException();
}

private Tensor _convert_to_tensor(Tensor value, string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid)
{
    throw new NotImplementedException();
}
*/
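
The commented-out block above mirrors the dispatch in TensorFlow's Python distribution.py: the public log_prob opens a name scope, converts the input to a tensor, and hands off to a subclass hook. A minimal sketch of how it could look once enabled; the protected _log_prob hook, the returned result plumbing, and the "log_prob" scope string are assumptions, not part of this commit:

// Sketch only: base-class dispatch; concrete distributions (e.g. Normal)
// override _log_prob with the actual density computation.
public Tensor log_prob(Tensor value, string name = "log_prob")
{
    return _call_log_prob(value, name);
}

private Tensor _call_log_prob(Tensor value, string name)
{
    Tensor result = null;
    with(ops.name_scope(name, "log_prob", new { value }), scope =>
    {
        value = _convert_to_tensor(value, "value", _dtype);
        result = _log_prob(value);   // supplied by the concrete distribution
    });
    return result;
}

protected virtual Tensor _log_prob(Tensor value)
    => throw new NotImplementedException();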

/// <summary>
/// Constructs the `Distribution`.
/// **This is a private method for subclass use.**
@@ -47,7 +78,7 @@ namespace Tensorflow
/// <param name = "name"> Name prefixed to Ops created by this class. Default: subclass name.</param>
/// <returns> Two `Tensor` objects: `mean` and `variance`.</returns>


/*
private Distribution (
    TF_DataType dtype,
    ReparameterizationType reparameterization_type,
@@ -66,6 +97,10 @@ namespace Tensorflow
    this._name = name;
}
*/




}


/// <summary>


+14 -0  src/TensorFlowNET.Core/Operations/Distributions/normal.py.cs

@@ -78,5 +78,19 @@ namespace Tensorflow
return array_ops.broadcast_static_shape(new Tensor(_loc.shape), new Tensor(_scale.shape));
}


/// <summary>
/// Log probability density of the Normal distribution, up to the normalization constant.
/// </summary>
private Tensor _log_prob(Tensor x)
{
    // _log_unnormalized_prob standardizes x itself, so pass x through directly.
    return _log_unnormalized_prob(x);
}

private Tensor _log_unnormalized_prob(Tensor x)
{
    return -0.5 * math_ops.square(_z(x));
}

/// <summary>
/// Standardizes the input: z = (x - loc) / scale.
/// </summary>
private Tensor _z(Tensor x)
{
    return (x - this._loc) / this._scale;
}
}
}
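
For comparison, the Python normal.py that this file ports also subtracts a normalization term so that the density integrates to one: log p(x) = -0.5 * z^2 - log(sigma) - 0.5 * log(2*pi), with z = (x - mu) / sigma. A sketch of that term in the same style follows; math_ops.log and the double-plus-Tensor operator overload are assumptions here, not part of this commit:

// Sketch only: mirrors Normal._log_normalization in TensorFlow's normal.py.
// Assumes math_ops.log(Tensor) and an operator+(double, Tensor) overload exist.
private Tensor _log_normalization()
{
    return 0.5 * Math.Log(2.0 * Math.PI) + math_ops.log(this._scale);
}

// Full (normalized) log density: unnormalized part minus the normalization term.
private Tensor _log_prob_normalized(Tensor x)
{
    return _log_unnormalized_prob(x) - _log_normalization();
}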

+5 -0  src/TensorFlowNET.Core/Operations/math_ops.py.cs

@@ -55,6 +55,11 @@ namespace Tensorflow
return m;
}


public static Tensor square(Tensor x, string name = null)
{
    throw new NotImplementedException();
}

/// <summary>
/// Helper function for reduction ops.
/// </summary>
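
The new square helper is left as a stub in this commit. One way it could be filled in, assuming the Tensor '*' operator overload (already relied on by the Tensor arithmetic in normal.py.cs) works element-wise; this is a sketch, not the committed implementation:

public static Tensor square(Tensor x, string name = null)
{
    // Element-wise square via the assumed Tensor '*' overload;
    // 'name' is ignored in this sketch.
    return x * x;
}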


+0 -3  test/TensorFlowNET.Examples/NaiveBayesClassifier.cs

@@ -15,13 +15,10 @@ namespace TensorFlowNET.Examples
public void Run()
{
    np.array<float>(1.0f, 1.0f);
    // var X = np.array<float>(np.array<float>(1.0f, 1.0f), np.array<float>(2.0f, 2.0f), np.array<float>(1.0f, -1.0f), np.array<float>(2.0f, -2.0f), np.array<float>(-1.0f, -1.0f), np.array<float>(-1.0f, 1.0f),);
    // var X = np.array<float[]>(new float[][] { new float[] { 1.0f, 1.0f}, new float[] { 2.0f, 2.0f }, new float[] { -1.0f, -1.0f }, new float[] { -2.0f, -2.0f }, new float[] { 1.0f, -1.0f }, new float[] { 2.0f, -2.0f }, });
    var X = np.array<float>(new float[][] { new float[] { 1.0f, 1.0f }, new float[] { 2.0f, 2.0f }, new float[] { -1.0f, -1.0f }, new float[] { -2.0f, -2.0f }, new float[] { 1.0f, -1.0f }, new float[] { 2.0f, -2.0f }, });
    var y = np.array<int>(0,0,1,1,2,2);
    fit(X, y);
    // Create a regular grid and classify each point
}


public void fit(NDArray X, NDArray y)
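
The commit message ties log_prob() to Naive Bayes prediction. As a plain-C# illustration of the math the graph-side log_prob feeds into, the predict step picks the class c that maximizes log P(c) plus the sum over features of log N(x_f; mu_cf, sigma_cf). Everything below bypasses the TensorFlow graph, and every name in it is hypothetical, not part of this commit:

using System;

static class GaussianNbSketch
{
    // Hypothetical helper: log density of N(mu, sigma) at x,
    // i.e. what Normal.log_prob(x) computes on the graph side.
    static double LogProb(double x, double mu, double sigma)
    {
        double z = (x - mu) / sigma;
        return -0.5 * z * z - Math.Log(sigma) - 0.5 * Math.Log(2.0 * Math.PI);
    }

    // argmax over c of [ logPriors[c] + sum over f of LogProb(x[f], mean[c,f], std[c,f]) ].
    static int Predict(double[] x, double[] logPriors, double[,] mean, double[,] std)
    {
        int best = 0;
        double bestScore = double.NegativeInfinity;
        for (int c = 0; c < logPriors.Length; c++)
        {
            double score = logPriors[c];
            for (int f = 0; f < x.Length; f++)
                score += LogProb(x[f], mean[c, f], std[c, f]);
            if (score > bestScore) { bestScore = score; best = c; }
        }
        return best;
    }
}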

