@@ -0,0 +1,35 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Layers;
+using Tensorflow.Operations.Activation;
+
+namespace Tensorflow
+{
+    public static partial class tf
+    {
+        public static class layers
+        {
+            public static Tensor conv2d(Tensor inputs,
+                int filters,
+                int[] kernel_size,
+                int[] strides = null,
+                string padding = "valid",
+                string data_format = "channels_last",
+                int[] dilation_rate = null,
+                bool use_bias = true,
+                IActivation activation = null,
+                IInitializer kernel_initializer = null)
+            {
+                if (strides == null)
+                    strides = new int[] { 1, 1 };
+                if (dilation_rate == null)
+                    dilation_rate = new int[] { 1, 1 };
+
+                // Forward the full configuration; constructing Conv2D from
+                // filters and kernel_size alone would silently drop the rest.
+                var layer = new Conv2D(filters,
+                    kernel_size,
+                    strides: strides,
+                    padding: padding,
+                    data_format: data_format,
+                    dilation_rate: dilation_rate,
+                    activation: activation,
+                    use_bias: use_bias,
+                    kernel_initializer: kernel_initializer);
+
+                return layer.apply(inputs);
+            }
+        }
+    }
+}
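
A quick usage sketch for the new wrapper, in the same style as the VdCnn
example further down. The placeholder dtype/shape and the `images` name are
hypothetical, and Conv2D.apply still throws, so this only shows the intended
call shape:

    // Hypothetical NHWC input: a batch of 28x28 single-channel images.
    var images = tf.placeholder(tf.float32, new TensorShape(-1, 28, 28, 1), name: "images");

    // 16 filters of 3x3; strides and dilation_rate fall back to { 1, 1 }.
    var conv = tf.layers.conv2d(images,
        filters: 16,
        kernel_size: new int[] { 3, 3 },
        padding: "valid",
        activation: tf.nn.relu);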
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Tensorflow.Operations.Activation;

 namespace Tensorflow
 {
@@ -9,9 +10,22 @@ namespace Tensorflow
         public static class nn
         {
             public static (Tensor, Tensor) moments(Tensor x,
-                int[] axes,
-                string name = null,
-                bool keep_dims = false) => nn_impl.moments(x, axes, name: name, keep_dims: keep_dims);
+                int[] axes,
+                string name = null,
+                bool keep_dims = false) => nn_impl.moments(x,
+                    axes,
+                    name: name,
+                    keep_dims: keep_dims);
+
+            public static Tensor embedding_lookup(RefVariable @params,
+                Tensor ids,
+                string partition_strategy = "mod",
+                string name = null) => embedding_ops._embedding_lookup_and_transform(@params,
+                    ids,
+                    partition_strategy: partition_strategy,
+                    name: name);
+
+            public static IActivation relu => new relu();
         }
     }
 }
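
For reference, a minimal sketch of how the reworked nn surface is meant to be
consumed; `input`, `embeddings` and `ids` are assumed to be an existing
Tensor, RefVariable and int Tensor respectively:

    // Mean and variance over axis 0, returned as a (Tensor, Tensor) tuple.
    var (mean, variance) = tf.nn.moments(input, new int[] { 0 });

    // One embedding row per id; uses the single-partition path in embedding_ops.
    var vectors = tf.nn.embedding_lookup(embeddings, ids);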
@@ -0,0 +1,35 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Operations.Activation;
+
+namespace Tensorflow.Layers
+{
+    public class Conv2D
+    {
+        private int filters;
+        private int[] kernel_size;
+        private int[] strides;
+
+        public Conv2D(int filters,
+            int[] kernel_size,
+            int[] strides = null,
+            string padding = "valid",
+            string data_format = "channels_last",
+            int[] dilation_rate = null,
+            IActivation activation = null,
+            bool use_bias = true,
+            IInitializer kernel_initializer = null,
+            IInitializer bias_initializer = null,
+            bool trainable = true,
+            string name = null)
+        {
+            // Record the declared configuration; the remaining arguments are
+            // accepted for API parity but not stored until apply() needs them.
+            this.filters = filters;
+            this.kernel_size = kernel_size;
+            this.strides = strides ?? new int[] { 1, 1 };
+        }
+
+        public Tensor apply(Tensor inputs)
+        {
+            // Graph construction for the convolution is not wired up yet.
+            throw new NotImplementedException("apply");
+        }
+    }
+}
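
Because apply() is still a stub, here is a hedged sketch of what its body
might eventually look like, modeled on how gen_array_ops wraps "GatherV2"
elsewhere in this diff. It assumes the layer also stores padding and
kernel_initializer, and can reach tf.get_variable and the op-def library;
none of that is final:

    public Tensor apply(Tensor inputs)
    {
        // Kernel layout for the raw Conv2D op: [h, w, in_channels, out_channels].
        var kernel = tf.get_variable("kernel", initializer: kernel_initializer);

        // The raw op expects NHWC strides ([1, s_h, s_w, 1]) and upper-case padding.
        var _op = _op_def_lib._apply_op_helper("Conv2D", name: name, args: new
        {
            input = inputs,
            filter = kernel,
            strides = new int[] { 1, strides[0], strides[1], 1 },
            padding = "VALID"
        });

        return _op.outputs[0];
    }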
@@ -0,0 +1,11 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Operations.Activation
+{
+    /// <summary>
+    /// Marker interface for activation functions that layers can accept.
+    /// </summary>
+    public interface IActivation
+    {
+    }
+}
@@ -0,0 +1,11 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Operations.Activation
+{
+    /// <summary>
+    /// Rectified linear unit activation; currently a marker type so that
+    /// layers can carry the activation choice around.
+    /// </summary>
+    public class relu : IActivation
+    {
+    }
+}
@@ -364,8 +364,13 @@ namespace Tensorflow
                 object obj = propertyDescriptor.GetValue(dyn);
                 string name = propertyDescriptor.Name;

                 // avoid .net keyword
-                if (name == "_ref_")
-                    name = "ref";
+                switch (name)
+                {
+                    case "_ref_":
+                        name = "ref";
+                        break;
+                }

                 dictionary.Add(name, obj);
             }

             return dictionary;
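
Context for the mapping: op-defs such as "Assign" name an input "ref", which
is a reserved word in C#, so call sites write "_ref_" and this converter
rewrites the key before the dictionary reaches the op machinery. A minimal
round-trip sketch; ConvertToDict stands in for the enclosing method, whose
name is not visible in this hunk:

    // The anonymous type avoids the keyword; the converter restores it.
    var args = new { _ref_ = variable, value = new_value };
    var dict = ConvertToDict(args);   // keys become "ref" and "value"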
@@ -268,5 +268,9 @@ namespace Tensorflow
             return Framework.common_shapes.broadcast_shape(shape_x, shape_y);
         }
+
+        public static Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0)
+        {
+            return gen_array_ops.gather_v2(@params, indices, axis, name: name);
+        }
     }
 }
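
gather slices @params along the given axis by index: with @params of shape
[4, 3] and indices [2, 0], the result has shape [2, 3] and holds rows 2 and 0
in that order. A minimal sketch, with `embeddings` and `ids` assumed to be
existing tensors:

    var rows = array_ops.gather(embeddings, ids);           // axis 0 (default): pick rows
    var cols = array_ops.gather(embeddings, ids, axis: 1);  // axis 1: pick columns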
@@ -4,11 +4,45 @@ using System.Text;
 namespace Tensorflow
 {
-    public class embedding_ops
+    public class embedding_ops : Python
     {
-        public Tensor _embedding_lookup_and_transform()
+        /// <summary>
+        /// Helper function for embedding_lookup and _compute_sampled_logits.
+        /// </summary>
+        /// <param name="params">A single embedding tensor; partitioned lookups are not supported yet.</param>
+        /// <param name="ids">A Tensor of indices into the first dimension of params.</param>
+        /// <param name="partition_strategy">"mod" or "div"; only meaningful once partitioning is supported.</param>
+        /// <param name="name">Optional name for the returned op.</param>
+        /// <returns>The gathered, optionally clipped, embedding rows.</returns>
+        public static Tensor _embedding_lookup_and_transform(RefVariable @params,
+            Tensor ids,
+            string partition_strategy = "mod",
+            string name = null,
+            string max_norm = null)
         {
-            throw new NotImplementedException("");
+            return with(new ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope =>
+            {
+                name = scope;
+                int np = 1; // number of partitions; only the single-partition case is handled
+                ids = ops.convert_to_tensor(ids, name: "ids");
+                if (np == 1)
+                {
+                    // Clip the gathered rows, not the raw variable.
+                    var gather = array_ops.gather(@params, ids, name: name);
+                    var result = _clip(gather, ids, max_norm);
+                    return array_ops.identity(result);
+                }
+
+                throw new NotImplementedException("_embedding_lookup_and_transform");
+            });
+        }
+
+        public static Tensor _clip(Tensor @params, Tensor ids, string max_norm = null)
+        {
+            if (max_norm == null)
+                return @params;
+
+            throw new NotImplementedException("_clip");
+        }
         }
     }
 }
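
A worked shape example for the single-partition path above, assuming
`embeddings` is a [vocab_size, embedding_size] variable and `ids` an int
tensor of shape [batch, seq_len]:

    // embeddings: [10000, 128]   (hypothetical vocab_size x embedding_size)
    // ids:        [64, 100]      (hypothetical batch x seq_len)
    // lookup:     [64, 100, 128] - one 128-dim row per id
    var lookup = embedding_ops._embedding_lookup_and_transform(embeddings, ids);

With max_norm left null, _clip is the identity, so the lookup reduces to a
gather wrapped in array_ops.identity under the "embedding_lookup" name scope.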
@@ -19,6 +19,13 @@ namespace Tensorflow
             return _op.outputs[0];
         }
+        public static Tensor gather_v2(Tensor @params, Tensor indices, int axis, string name = null)
+        {
+            var _op = _op_def_lib._apply_op_helper("GatherV2", name: name, args: new { @params, indices, axis });
+
+            return _op.outputs[0];
+        }
+
         public static Tensor greater<Tx, Ty>(Tx x, Ty y, string name = null)
         {
             var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y });
@@ -51,7 +51,7 @@ namespace Tensorflow
             VariableSynchronization synchronization = VariableSynchronization.AUTO,
             VariableAggregation aggregation = VariableAggregation.NONE)
         {
-            bool is_scalar = shape.NDim == 0;
+            bool is_scalar = !(shape is null) && shape.NDim == 0;

             if (initializer is IInitializer init)
             {
@@ -19,6 +19,7 @@ namespace TensorFlowNET.Examples.TextClassification
         private RefVariable global_step;
         private RefVariable embeddings;
         private Tensor x_emb;
+        private Tensor x_expanded;

         public VdCnn(int alphabet_size, int document_max_len, int num_class)
         {
@@ -33,11 +34,23 @@ namespace TensorFlowNET.Examples.TextClassification
             is_training = tf.placeholder(tf.boolean, new TensorShape(), name: "is_training");
             global_step = tf.Variable(0, trainable: false);

+            // Embedding Layer
             with(tf.name_scope("embedding"), delegate
             {
                 var init_embeddings = tf.random_uniform(new int[] { alphabet_size, embedding_size }, -1.0f, 1.0f);
                 embeddings = tf.get_variable("embeddings", initializer: init_embeddings);
-                // x_emb = tf.nn.embedding_lookup(embeddings, x);
+                x_emb = tf.nn.embedding_lookup(embeddings, x);
+                x_expanded = tf.expand_dims(x_emb, -1);
+            });
+
+            // First Convolution Layer
+            with(tf.variable_scope("conv-0"), delegate
+            {
+                var conv0 = tf.layers.conv2d(x_expanded,
+                    filters: num_filters[0],
+                    kernel_size: new int[] { filter_sizes[0], embedding_size },
+                    kernel_initializer: cnn_initializer,
+                    activation: tf.nn.relu);
             });
         }
     }
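
A shape walk-through of the two new blocks, assuming x is the
[batch, document_max_len] int placeholder defined earlier in this class:

    // x:          [batch, document_max_len]                     token ids
    // x_emb:      [batch, document_max_len, embedding_size]     embedding_lookup
    // x_expanded: [batch, document_max_len, embedding_size, 1]  NHWC input for conv2d
    // conv0:      [batch, document_max_len - filter_sizes[0] + 1, 1, num_filters[0]]
    //             given "valid" padding and a [filter_sizes[0], embedding_size] kernel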