@@ -17,7 +17,7 @@ namespace Tensorflow | |||
/// A `Tensor` with the same data as `input`, but its shape has an additional | |||
/// dimension of size 1 added. | |||
/// </returns> | |||
public static Tensor expand_dims(Tensor input, int axis = -1, string name = "", int dim = -1) | |||
public static Tensor expand_dims(Tensor input, int axis = -1, string name = null, int dim = -1) | |||
=> array_ops.expand_dims(input, axis, name, dim); | |||
} | |||
} |
@@ -6,14 +6,14 @@ namespace Tensorflow | |||
{ | |||
public partial class tf | |||
{ | |||
public static Tensor read_file(string filename, string name = "") => gen_io_ops.read_file(filename, name); | |||
public static Tensor read_file(string filename, string name = null) => gen_io_ops.read_file(filename, name); | |||
public static gen_image_ops image => new gen_image_ops(); | |||
public static void import_graph_def(GraphDef graph_def, | |||
Dictionary<string, Tensor> input_map = null, | |||
string[] return_elements = null, | |||
string name = "", | |||
string name = null, | |||
OpList producer_op_list = null) => importer.import_graph_def(graph_def, input_map, return_elements, name, producer_op_list); | |||
} | |||
} |
@@ -10,12 +10,12 @@ namespace Tensorflow | |||
public static Tensor sub(Tensor a, Tensor b) => gen_math_ops.sub(a, b); | |||
public static Tensor subtract<T>(Tensor x, T[] y, string name = "") where T : struct | |||
public static Tensor subtract<T>(Tensor x, T[] y, string name = null) where T : struct | |||
=> gen_math_ops.sub(x, ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y"), name); | |||
public static Tensor multiply(Tensor x, Tensor y) => gen_math_ops.mul(x, y); | |||
public static Tensor divide<T>(Tensor x, T[] y, string name = "") where T : struct | |||
public static Tensor divide<T>(Tensor x, T[] y, string name = null) where T : struct | |||
=> x / ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y"); | |||
public static Tensor pow<T1, T2>(T1 x, T2 y) => gen_math_ops.pow(x, y); | |||
@@ -28,7 +28,7 @@ namespace Tensorflow | |||
/// <returns></returns> | |||
public static Tensor reduce_sum(Tensor input, int[] axis = null) => math_ops.reduce_sum(input); | |||
public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = "") | |||
public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
=> math_ops.cast(x, dtype, name); | |||
} | |||
} |
@@ -21,6 +21,6 @@ namespace Tensorflow | |||
float stddev = 1.0f, | |||
TF_DataType dtype = TF_DataType.TF_FLOAT, | |||
int? seed = null, | |||
string name = "") => random_ops.random_normal(shape, mean, stddev, dtype, seed, name); | |||
string name = null) => random_ops.random_normal(shape, mean, stddev, dtype, seed, name); | |||
} | |||
} |
@@ -6,7 +6,7 @@ namespace Tensorflow.Eager | |||
{ | |||
public class Execute | |||
{ | |||
public void record_gradient(string op_name, InputList inputs, Dictionary<string, object> attrs, Tensor[] results, string name = "") | |||
public void record_gradient(string op_name, InputList inputs, Dictionary<string, object> attrs, Tensor[] results, string name = null) | |||
{ | |||
pywrap_tfe_src.RecordGradient(op_name, inputs._inputs, attrs, results, name); | |||
} | |||
@@ -10,7 +10,7 @@ namespace Tensorflow.Eager | |||
/// </summary> | |||
public class pywrap_tfe_src | |||
{ | |||
public static void RecordGradient(string op_name, Tensor[] inputs, Dictionary<string, object> attrs, Tensor[] results, string name = "") | |||
public static void RecordGradient(string op_name, Tensor[] inputs, Dictionary<string, object> attrs, Tensor[] results, string name = null) | |||
{ | |||
var input_ids = inputs.Select(x => x.Id).ToArray(); | |||
var input_dtypes = inputs.Select(x => x.dtype).ToArray(); | |||
@@ -12,7 +12,7 @@ namespace Tensorflow | |||
public static ITensorOrOperation[] import_graph_def(GraphDef graph_def, | |||
Dictionary<string, Tensor> input_map = null, | |||
string[] return_elements = null, | |||
string name = "", | |||
string name = null, | |||
OpList producer_op_list = null) | |||
{ | |||
var op_dict = op_def_registry.get_registered_ops(); | |||
@@ -123,7 +123,7 @@ namespace Tensorflow | |||
/// <param name="strip_default_attrs"></param> | |||
/// <param name="meta_info_def"></param> | |||
/// <returns></returns> | |||
public static MetaGraphDef export_scoped_meta_graph(string filename = "", | |||
public static (MetaGraphDef, Dictionary<string, RefVariable>) export_scoped_meta_graph(string filename = "", | |||
GraphDef graph_def = null, | |||
bool as_text = false, | |||
string unbound_inputs_col_name = "unbound_inputs", | |||
@@ -138,7 +138,7 @@ namespace Tensorflow | |||
var var_list = new Dictionary<string, RefVariable>(); | |||
var variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES); | |||
foreach(var v in variables as RefVariable[]) | |||
foreach(var v in variables as List<RefVariable>) | |||
{ | |||
var_list[v.name] = v; | |||
} | |||
@@ -151,7 +151,10 @@ namespace Tensorflow | |||
saver_def: saver_def, | |||
strip_default_attrs: strip_default_attrs); | |||
throw new NotImplementedException("meta_graph.export_scoped_meta_graph"); | |||
if (!string.IsNullOrEmpty(filename)) | |||
graph_io.write_graph(scoped_meta_graph_def, "", filename, as_text: as_text); | |||
return (scoped_meta_graph_def, var_list); | |||
} | |||
private static bool _should_include_node() | |||
@@ -159,7 +162,7 @@ namespace Tensorflow | |||
return true; | |||
} | |||
private static byte[] create_meta_graph_def(MetaInfoDef meta_info_def = null, | |||
private static MetaGraphDef create_meta_graph_def(MetaInfoDef meta_info_def = null, | |||
GraphDef graph_def = null, | |||
string export_scope = "", | |||
string exclude_nodes = "", | |||
@@ -168,7 +171,7 @@ namespace Tensorflow | |||
bool strip_default_attrs = false) | |||
{ | |||
// Sets graph to default graph if it's not passed in. | |||
var graph = ops.get_default_graph(); | |||
var graph = ops.get_default_graph().as_default(); | |||
// Creates a MetaGraphDef proto. | |||
var meta_graph_def = new MetaGraphDef(); | |||
if (meta_info_def == null) | |||
@@ -186,10 +189,55 @@ namespace Tensorflow | |||
meta_graph_def.GraphDef = graph_def; | |||
// Fills in meta_info_def.stripped_op_list using the ops from graph_def. | |||
if (meta_graph_def.MetaInfoDef.StrippedOpList.Op.Count == 0) | |||
if (meta_graph_def.MetaInfoDef.StrippedOpList == null || | |||
meta_graph_def.MetaInfoDef.StrippedOpList.Op.Count == 0) | |||
meta_graph_def.MetaInfoDef.StrippedOpList = stripped_op_list_for_graph(meta_graph_def.GraphDef); | |||
throw new NotImplementedException("create_meta_graph_def"); | |||
var clist = graph.get_all_collection_keys(); | |||
foreach(var ctype in clist) | |||
{ | |||
if (clear_extraneous_savers) | |||
{ | |||
throw new NotImplementedException("create_meta_graph_def clear_extraneous_savers"); | |||
} | |||
else | |||
{ | |||
add_collection_def(meta_graph_def, ctype, graph); | |||
} | |||
} | |||
return meta_graph_def; | |||
} | |||
private static void add_collection_def(MetaGraphDef meta_graph_def, | |||
string key, | |||
Graph graph = null, | |||
string export_scope = "") | |||
{ | |||
if (!meta_graph_def.CollectionDef.ContainsKey(key)) | |||
meta_graph_def.CollectionDef[key] = new CollectionDef(); | |||
var col_def = meta_graph_def.CollectionDef[key]; | |||
switch (graph.get_collection(key)) | |||
{ | |||
case List<RefVariable> collection_list: | |||
col_def.BytesList = new Types.BytesList(); | |||
foreach (var x in collection_list) | |||
{ | |||
var proto = x.to_proto(export_scope); | |||
col_def.BytesList.Value.Add(proto.ToByteString()); | |||
} | |||
break; | |||
case List<object> collection_list: | |||
col_def.NodeList = new Types.NodeList(); | |||
foreach (var x in collection_list) | |||
if (x is ITensorOrOperation x2) | |||
col_def.NodeList.Value.Add(ops.strip_name_scope(x2.name, export_scope)); | |||
break; | |||
case List<Operation> collection_list: | |||
break; | |||
} | |||
} | |||
private static OpList stripped_op_list_for_graph(GraphDef graph_def) | |||
@@ -118,7 +118,7 @@ namespace Tensorflow | |||
if (obj is Tensor tensor && allow_tensor) | |||
{ | |||
if (tensor.Graph.Equals(this)) | |||
if (tensor.graph.Equals(this)) | |||
{ | |||
return tensor; | |||
} | |||
@@ -164,7 +164,7 @@ namespace Tensorflow | |||
} | |||
public unsafe Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes, | |||
TF_DataType[] input_types = null, string name = "", | |||
TF_DataType[] input_types = null, string name = null, | |||
Dictionary<string, AttrValue> attrs = null, OpDef op_def = null) | |||
{ | |||
if (inputs == null) | |||
@@ -1,4 +1,5 @@
using System;
using Google.Protobuf;
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
@@ -9,12 +10,22 @@ namespace Tensorflow
{
public static string write_graph(Graph graph, string logdir, string name, bool as_text = true)
{
var def = graph._as_graph_def();
var graph_def = graph._as_graph_def();
string path = Path.Combine(logdir, name);
string text = def.ToString();
if (as_text)
File.WriteAllText(path, text);
File.WriteAllText(path, graph_def.ToString());
else
File.WriteAllBytes(path, graph_def.ToByteArray());
return path;
}
public static string write_graph(MetaGraphDef graph_def, string logdir, string name, bool as_text = true)
{
string path = Path.Combine(logdir, name);
if (as_text)
File.WriteAllText(path, graph_def.ToString());
else
File.WriteAllBytes(path, graph_def.ToByteArray());
return path;
}
}
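Editor's note (not part of the diff): a minimal sketch of how the two write_graph overloads above could be called. The Graph and MetaGraphDef instances and the "logs" directory are assumed placeholders.

using Tensorflow;

public static class WriteGraphSketch
{
    public static void Save(Graph graph, MetaGraphDef meta_graph_def)
    {
        // Existing overload: serializes the GraphDef, as a text proto when as_text is true.
        graph_io.write_graph(graph, "logs", "graph.pbtxt", as_text: true);

        // Overload added in this change: serializes a MetaGraphDef, binary when as_text is false.
        graph_io.write_graph(meta_graph_def, "logs", "model.meta", as_text: false);
    }
}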
@@ -12,7 +12,7 @@ namespace Tensorflow | |||
{ | |||
public class OpDefLibrary | |||
{ | |||
public Operation _apply_op_helper(string op_type_name, string name = "", dynamic args = null) | |||
public Operation _apply_op_helper(string op_type_name, string name = null, dynamic args = null) | |||
{ | |||
Dictionary<string, object> keywords = ConvertToDict(args); | |||
var g = ops.get_default_graph(); | |||
@@ -1,5 +1,7 @@ | |||
using System; | |||
using Newtonsoft.Json; | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Linq; | |||
using System.Runtime.InteropServices; | |||
using System.Text; | |||
@@ -13,6 +15,8 @@ namespace Tensorflow | |||
private Tensor[] _outputs; | |||
public Tensor[] outputs => _outputs; | |||
[JsonIgnore] | |||
public Tensor output => _outputs.FirstOrDefault(); | |||
public int NumControlOutputs => c_api.TF_OperationNumControlOutputs(_handle); | |||
public int OutputNumConsumers(int index) => c_api.TF_OperationOutputNumConsumers(new TF_Output(_handle, index)); | |||
@@ -1,4 +1,5 @@ | |||
using Google.Protobuf.Collections; | |||
using Newtonsoft.Json; | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Linq; | |||
@@ -11,11 +12,16 @@ namespace Tensorflow | |||
{ | |||
private readonly IntPtr _handle; // _c_op in python | |||
public Graph graph { get; } | |||
private Graph _graph; | |||
[JsonIgnore] | |||
public Graph graph => _graph; | |||
[JsonIgnore] | |||
public int _id => _id_value; | |||
[JsonIgnore] | |||
public int _id_value; | |||
public string type => OpType; | |||
[JsonIgnore] | |||
public Operation op => this; | |||
public TF_DataType dtype => TF_DataType.DtInvalid; | |||
private Status status = new Status(); | |||
@@ -42,7 +48,7 @@ namespace Tensorflow | |||
return; | |||
_handle = handle; | |||
this.graph = ops.get_default_graph(); | |||
_graph = ops.get_default_graph(); | |||
_outputs = new Tensor[NumOutputs]; | |||
for (int i = 0; i < NumOutputs; i++) | |||
_outputs[i] = new Tensor(this, i, OutputType(i)); | |||
@@ -50,7 +56,7 @@ namespace Tensorflow | |||
public Operation(Graph g, string opType, string oper_name) | |||
{ | |||
graph = g; | |||
_graph = g; | |||
var desc = c_api.TF_NewOperation(g, opType, oper_name); | |||
c_api.TF_SetAttrType(desc, "dtype", TF_DataType.TF_INT32); | |||
@@ -78,7 +84,7 @@ namespace Tensorflow | |||
/// <param name="op_def"></param> | |||
public Operation(NodeDef node_def, Graph g, Tensor[] inputs = null, TF_DataType[] output_types = null, ITensorOrOperation[] control_inputs = null, TF_DataType[] input_types = null, string original_op = "", OpDef op_def = null) | |||
{ | |||
graph = g; | |||
_graph = g; | |||
// Build the list of control inputs. | |||
var control_input_ops = new List<Operation>(); | |||
@@ -7,9 +7,9 @@ namespace Tensorflow | |||
{ | |||
public class array_ops | |||
{ | |||
public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = "") => gen_array_ops.placeholder_with_default(input, shape, name); | |||
public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null) => gen_array_ops.placeholder_with_default(input, shape, name); | |||
public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = "") | |||
public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | |||
{ | |||
dtype = dtype.as_base_dtype(); | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "zeros", shape), scope => | |||
@@ -46,11 +46,11 @@ namespace Tensorflow | |||
} | |||
} | |||
public static Tensor expand_dims(Tensor input, int axis = -1, string name = "", int dim = -1) => expand_dims_v2(input, axis, name); | |||
public static Tensor expand_dims(Tensor input, int axis = -1, string name = null, int dim = -1) => expand_dims_v2(input, axis, name); | |||
private static Tensor expand_dims_v2(Tensor input, int axis, string name = "") => gen_array_ops.expand_dims(input, axis, name); | |||
private static Tensor expand_dims_v2(Tensor input, int axis, string name = null) => gen_array_ops.expand_dims(input, axis, name); | |||
public static Tensor rank(Tensor input, string name = "") | |||
public static Tensor rank(Tensor input, string name = null) | |||
{ | |||
return math_ops.rank_internal(input, name, optimize: true); | |||
} | |||
@@ -63,7 +63,7 @@ namespace Tensorflow | |||
/// <param name="name"></param> | |||
/// <param name="optimize"></param> | |||
/// <returns></returns> | |||
public static Tensor ones_like<T>(T tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = "", bool optimize = true) | |||
public static Tensor ones_like<T>(T tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) | |||
=> ones_like_impl(tensor, dtype, name, optimize); | |||
private static Tensor ones_like_impl<T>(T tensor, TF_DataType dtype, string name, bool optimize = true) | |||
@@ -81,7 +81,7 @@ namespace Tensorflow | |||
}); | |||
} | |||
public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = "") | |||
public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | |||
{ | |||
dtype = dtype.as_base_dtype(); | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "ones", new { shape }), scope => | |||
@@ -92,7 +92,7 @@ namespace Tensorflow | |||
}); | |||
} | |||
public static Tensor where(Tensor condition, Tensor x = null, Tensor y = null, string name = "") | |||
public static Tensor where(Tensor condition, Tensor x = null, Tensor y = null, string name = null) | |||
{ | |||
if( x == null && y == null) | |||
{ | |||
@@ -118,19 +118,19 @@ namespace Tensorflow | |||
/// (`int32` or `int64`). Defaults to `tf.int32`. | |||
/// </param> | |||
/// <returns>A `Tensor` of type `out_type`.</returns> | |||
public static Tensor shape(Tensor input, string name = "", TF_DataType out_type = TF_DataType.TF_INT32) | |||
public static Tensor shape(Tensor input, string name = null, TF_DataType out_type = TF_DataType.TF_INT32) | |||
{ | |||
return shape_internal(input, name, optimize: true, out_type: out_type); | |||
} | |||
public static Tensor size(Tensor input, string name = "", bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | |||
public static Tensor size(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | |||
{ | |||
return size_internal(input, name, optimize: optimize, out_type: out_type); | |||
} | |||
private static Tensor shape_internal(Tensor input, string name = "", bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | |||
private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | |||
{ | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "Shape", new Tensor[] { input }), scope => | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "Shape", new { input }), scope => | |||
{ | |||
name = scope; | |||
@@ -138,18 +138,18 @@ namespace Tensorflow | |||
{ | |||
var input_tensor = ops.convert_to_tensor(input); | |||
var input_shape = tensor_util.to_shape(input_tensor.shape); | |||
if (optimize && input_shape.is_fully_defined()) | |||
if (optimize && input_tensor.NDims > -1 && input_shape.is_fully_defined()) | |||
{ | |||
var nd = np.array(input_tensor.shape, out_type.as_numpy_datatype()); | |||
return constant_op.constant(nd, name: name); | |||
} | |||
} | |||
return gen_array_ops.shape(input); | |||
return gen_array_ops.shape(input, name: name, out_type: out_type); | |||
}); | |||
} | |||
private static Tensor size_internal(Tensor input, string name = "", bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | |||
private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | |||
{ | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "Size", new Tensor[] { input }), scope => | |||
{ | |||
@@ -180,7 +180,7 @@ namespace Tensorflow | |||
}); | |||
} | |||
public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = "", bool optimize = true) | |||
public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) | |||
{ | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => | |||
{ | |||
@@ -9,7 +9,7 @@ namespace Tensorflow | |||
public static (Tensor, Tensor, float) remove_squeezable_dimensions(Tensor labels, | |||
Tensor predictions, | |||
int expected_rank_diff = 0, | |||
string name = "") | |||
string name = null) | |||
{ | |||
throw new NotImplementedException("remove_squeezable_dimensions"); | |||
} | |||
@@ -5,11 +5,11 @@ using System.Text; | |||
namespace Tensorflow | |||
{ | |||
public class control_flow_ops | |||
public class control_flow_ops : Python | |||
{ | |||
public static Operation group<T>(T[] inputs, string name = "") where T : ITensorOrOperation | |||
public static Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation | |||
{ | |||
return Python.with<ops.name_scope, Operation>(new ops.name_scope(name, "group_deps", inputs), scope => | |||
return with<ops.name_scope, Operation>(new ops.name_scope(name, "group_deps", inputs), scope => | |||
{ | |||
name = scope; | |||
@@ -37,7 +37,7 @@ namespace Tensorflow | |||
}); | |||
} | |||
private static Operation _GroupControlDeps(string dev, Operation[] deps, string name = "") | |||
private static Operation _GroupControlDeps(string dev, Operation[] deps, string name = null) | |||
{ | |||
return Python.with<_ControlDependenciesController, Operation>(ops.control_dependencies(deps), ctl => | |||
{ | |||
@@ -81,7 +81,7 @@ namespace Tensorflow | |||
return op.OpType == "Exit" || op.OpType == "RefExit"; | |||
} | |||
public static Tensor[] tuple(Tensor[] tensors, string name = "", Operation[] control_inputs = null) | |||
public static Tensor[] tuple(Tensor[] tensors, string name = null, Operation[] control_inputs = null) | |||
{ | |||
return Python.with<ops.name_scope, Tensor[]>(new ops.name_scope(name, "tuple", tensors), scope => | |||
{ | |||
@@ -109,7 +109,7 @@ namespace Tensorflow | |||
}); | |||
} | |||
public static Tensor with_dependencies(Operation[] dependencies, Tensor output_tensor, string name = "") | |||
public static Tensor with_dependencies(Operation[] dependencies, Tensor output_tensor, string name = null) | |||
{ | |||
var values = new List<object>(); | |||
values.AddRange(dependencies); | |||
@@ -127,7 +127,7 @@ namespace Tensorflow | |||
}); | |||
} | |||
public static Tensor _Identity(Tensor data, string name = "") | |||
public static Tensor _Identity(Tensor data, string name = null) | |||
{ | |||
data = ops.internal_convert_to_tensor_or_composite(data, as_ref: true); | |||
if ((int)data.dtype > 100) | |||
@@ -12,28 +12,28 @@ namespace Tensorflow | |||
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
public static Execute _execute = new Execute(); | |||
public static Tensor expand_dims(Tensor input, int axis, string name = "") | |||
public static Tensor expand_dims(Tensor input, int axis, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("ExpandDims", name: name, args: new { input, dim = axis }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor greater<Tx, Ty>(Tx x, Ty y, string name = "") | |||
public static Tensor greater<Tx, Ty>(Tx x, Ty y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Greater", name: name, args: new { x, y }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor less<Tx, Ty>(Tx x, Ty y, string name = "") | |||
public static Tensor less<Tx, Ty>(Tx x, Ty y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Less", name: name, args: new { x, y }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = "") | |||
public static Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Placeholder", args: new { dtype, shape }); | |||
var _result = _op.outputs; | |||
@@ -53,21 +53,21 @@ namespace Tensorflow | |||
/// </summary> | |||
/// <param name="input"></param> | |||
/// <param name="name"></param> | |||
public static Tensor identity(Tensor input, string name = "") | |||
public static Tensor identity(Tensor input, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Identity", name, new { input }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor log(Tensor x, string name = "") | |||
public static Tensor log(Tensor x, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Log", name: name, args: new { x }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor rank(Tensor input, string name = "") | |||
public static Tensor rank(Tensor input, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Rank", name: name, args: new { input }); | |||
@@ -81,7 +81,7 @@ namespace Tensorflow | |||
/// <param name="value">A `Tensor`. 0-D (scalar). Value to fill the returned tensor.</param> | |||
/// <param name="name">A name for the operation (optional).</param> | |||
/// <returns>A `Tensor`. Has the same type as `value`.</returns> | |||
public static Tensor fill<T>(Tensor dims, T value, string name = "") | |||
public static Tensor fill<T>(Tensor dims, T value, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Fill", name, new { dims, value }); | |||
@@ -102,7 +102,7 @@ namespace Tensorflow | |||
return (_op.outputs[0], _op.outputs[1]); | |||
} | |||
public static Tensor reshape(Tensor tensor, Tensor shape, string name = "") | |||
public static Tensor reshape(Tensor tensor, Tensor shape, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Reshape", name, new { tensor, shape }); | |||
return _op.outputs[0]; | |||
@@ -120,37 +120,37 @@ namespace Tensorflow | |||
/// <param name="shape"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = "") | |||
public static Tensor placeholder_with_default<T>(T input, int[] shape, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor select(Tensor condition, Tensor t, Tensor e, string name = "") | |||
public static Tensor select(Tensor condition, Tensor t, Tensor e, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Select", name, new { condition, t, e }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = "") | |||
public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Shape", name, new { input, out_type }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = "") | |||
public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Size", name, new { input, out_type }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor tile(Tensor input, Tensor multiples, string name = "") | |||
public static Tensor tile(Tensor input, Tensor multiples, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Tile", name, new { input, multiples }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor zeros_like(Tensor x, string name = "") | |||
public static Tensor zeros_like(Tensor x, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("ZerosLike", name, new { x }); | |||
return _op.outputs[0]; | |||
@@ -8,7 +8,7 @@ namespace Tensorflow | |||
{ | |||
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
public static Operation no_op(string name = "") | |||
public static Operation no_op(string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("NoOp", name, null); | |||
@@ -8,7 +8,7 @@ namespace Tensorflow | |||
{ | |||
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
public static Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = "") | |||
public static Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null) | |||
{ | |||
var _attr_N = indices.Length; | |||
var _op = _op_def_lib._apply_op_helper("DynamicStitch", name, new { indices, data }); | |||
@@ -15,7 +15,7 @@ namespace Tensorflow | |||
bool try_recover_truncated = false, | |||
float acceptable_fraction = 1, | |||
string dct_method = "", | |||
string name = "") | |||
string name = null) | |||
{ | |||
// Add nodes to the TensorFlow graph. | |||
if (tf.context.executing_eagerly()) | |||
@@ -39,7 +39,7 @@ namespace Tensorflow | |||
} | |||
} | |||
public Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, string name = "") | |||
public Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, string name = null) | |||
{ | |||
if (tf.context.executing_eagerly()) | |||
{ | |||
@@ -8,21 +8,21 @@ namespace Tensorflow | |||
{ | |||
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
public static Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, Tensor[] tensors, string name = "") | |||
public static Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, Tensor[] tensors, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("SaveV2", name: name, args: new { prefix, tensor_names, shape_and_slices, tensors }); | |||
return _op; | |||
} | |||
public static Tensor[] restore_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, TF_DataType[] dtypes, string name = "") | |||
public static Tensor[] restore_v2(Tensor prefix, string[] tensor_names, string[] shape_and_slices, TF_DataType[] dtypes, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("RestoreV2", name: name, args: new { prefix, tensor_names, shape_and_slices, dtypes }); | |||
return _op.outputs; | |||
} | |||
public static Tensor read_file(string filename, string name = "") | |||
public static Tensor read_file(string filename, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("ReadFile", name: name, args: new { filename }); | |||
@@ -10,7 +10,7 @@ namespace Tensorflow | |||
{ | |||
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
public static Tensor add(Tensor x, Tensor y, string name = "") | |||
public static Tensor add(Tensor x, Tensor y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Add", name, args: new { x, y }); | |||
@@ -24,42 +24,42 @@ namespace Tensorflow | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor neg(Tensor x, string name = "") | |||
public static Tensor neg(Tensor x, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Neg", name, args: new { x }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor sub<Tx, Ty>(Tx x, Ty y, string name = "") | |||
public static Tensor sub<Tx, Ty>(Tx x, Ty y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Sub", name, args: new { x, y }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor mul(Tensor x, Tensor y, string name = "") | |||
public static Tensor mul(Tensor x, Tensor y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Mul", name, args: new { x, y }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor real_div(Tensor x, Tensor y, string name = "") | |||
public static Tensor real_div(Tensor x, Tensor y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("RealDiv", name, args: new { x, y }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor floor_mod(Tensor x, Tensor y, string name = "") | |||
public static Tensor floor_mod(Tensor x, Tensor y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("FloorMod", name, args: new { x, y }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor floor_div(Tensor x, Tensor y, string name = "") | |||
public static Tensor floor_div(Tensor x, Tensor y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("FloorDiv", name, args: new { x, y }); | |||
@@ -75,7 +75,7 @@ namespace Tensorflow | |||
/// <param name="transpose_b"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = "") | |||
public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("MatMul", name, args: new { a, b, transpose_a, transpose_b }); | |||
@@ -89,21 +89,21 @@ namespace Tensorflow | |||
/// <param name="y"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor maximum<T1, T2>(T1 x, T2 y, string name = "") | |||
public static Tensor maximum<T1, T2>(T1 x, T2 y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Maximum", name, args: new { x, y }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = "") | |||
public static Tensor pow<Tx, Ty>(Tx x, Ty y, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Pow", name, args: new { x, y }); | |||
return _op.outputs[0]; | |||
} | |||
public static Tensor sum(Tensor input, Tensor axis = null, bool keep_dims = false, string name = "") | |||
public static Tensor sum(Tensor input, Tensor axis = null, bool keep_dims = false, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Sum", name, args: new { input, reduction_indices = axis, keep_dims }); | |||
@@ -118,7 +118,7 @@ namespace Tensorflow | |||
/// <param name="delta"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = "") | |||
public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Range", name, new { start, limit, delta }); | |||
@@ -17,7 +17,7 @@ namespace Tensorflow | |||
/// <param name="seed2"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor random_standard_normal(Tensor shape, TF_DataType dtype = TF_DataType.DtInvalid, int? seed = null, int? seed2 = null, string name = "") | |||
public static Tensor random_standard_normal(Tensor shape, TF_DataType dtype = TF_DataType.DtInvalid, int? seed = null, int? seed2 = null, string name = null) | |||
{ | |||
if (!seed.HasValue) | |||
seed = 0; | |||
@@ -6,9 +6,9 @@ namespace Tensorflow | |||
{ | |||
public class math_ops : Python | |||
{ | |||
public static Tensor add(Tensor x, Tensor y, string name = "") => gen_math_ops.add(x, y, name); | |||
public static Tensor add(Tensor x, Tensor y, string name = null) => gen_math_ops.add(x, y, name); | |||
public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = "") | |||
public static Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
{ | |||
var base_type = dtype.as_base_dtype(); | |||
if(base_type == x.dtype) | |||
@@ -64,7 +64,7 @@ namespace Tensorflow | |||
/// <param name="dtype"></param> | |||
/// <param name="name"></param> | |||
/// <returns>A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and same type as `dtype`.</returns> | |||
public static Tensor __case__(Tensor x, TF_DataType dtype, string name = "") | |||
public static Tensor __case__(Tensor x, TF_DataType dtype, string name = null) | |||
{ | |||
var base_type = dtype.as_base_dtype(); | |||
if (x is Tensor && base_type == x.dtype) | |||
@@ -122,15 +122,15 @@ namespace Tensorflow | |||
}); | |||
} | |||
public static Tensor floordiv(Tensor x, Tensor y, string name = "") | |||
public static Tensor floordiv(Tensor x, Tensor y, string name = null) | |||
{ | |||
return with<ops.name_scope, Tensor>(new ops.name_scope("", "floordiv", new { x, y }), scope => | |||
return with<ops.name_scope, Tensor>(new ops.name_scope(name, "floordiv", new { x, y }), scope => | |||
{ | |||
return gen_math_ops.floor_div(x, y, scope); | |||
}); | |||
} | |||
public static Tensor rank_internal(Tensor input, string name = "", bool optimize = true) | |||
public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true) | |||
{ | |||
return with<ops.name_scope, Tensor>(new ops.name_scope(name, "Rank", new List<Tensor> { input }), scope => | |||
{ | |||
@@ -148,7 +148,7 @@ namespace Tensorflow | |||
bool transpose_a = false, bool transpose_b = false, | |||
bool adjoint_a = false, bool adjoint_b = false, | |||
bool a_is_sparse = false, bool b_is_sparse = false, | |||
string name = "") | |||
string name = null) | |||
{ | |||
Tensor result = null; | |||
@@ -176,7 +176,7 @@ namespace Tensorflow | |||
/// <param name="x">`Tensor` to conjugate. Must have numeric or variant type.</param> | |||
/// <param name="name">A name for the operation (optional).</param> | |||
/// <returns>A `Tensor` that is the conjugate of `x` (with the same type).</returns> | |||
public static Tensor conj(Tensor x, string name = "") | |||
public static Tensor conj(Tensor x, string name = null) | |||
{ | |||
var dt = x.dtype; | |||
if (dt.is_floating() || dt.is_integer()) | |||
@@ -11,7 +11,7 @@ namespace Tensorflow | |||
float stddev = 1.0f, | |||
TF_DataType dtype = TF_DataType.TF_FLOAT, | |||
int? seed = null, | |||
string name = "") | |||
string name = null) | |||
{ | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope(name, "random_normal", new object[] { shape, mean, stddev }), scope => | |||
{ | |||
@@ -0,0 +1,31 @@
using System;
using System.Collections.Generic;
using System.Text;
namespace Tensorflow
{
/// <summary>
/// In order for a object to be serialized to and from MetaGraphDef,
/// the class must implement to_proto() and from_proto() methods
/// </summary>
public interface IProtoBuf
{
string name { get; }
/// <summary>
/// Converts a `Variable` to a `VariableDef` protocol buffer.
/// </summary>
/// <param name="export_scope"></param>
/// <returns></returns>
VariableDef to_proto(string export_scope);
/// <summary>
/// Returns a `Variable` object created from `variable_def`.
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="variable_def"></param>
/// <param name="import_scope"></param>
/// <returns></returns>
T from_proto<T>(VariableDef variable_def, string import_scope);
}
}
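Editor's note (illustrative, not from the diff): a sketch of consuming the new interface generically. RefVariable is the implementer added later in this change; the scope arguments are hypothetical.

using Tensorflow;

public static class ProtoBufSketch
{
    // Any IProtoBuf implementer (e.g. RefVariable) can be exported this way.
    public static VariableDef Export(IProtoBuf item, string export_scope = "")
        => item.to_proto(export_scope);

    // from_proto is declared as an instance method, so an existing instance is used
    // here purely to reach it; the generic parameter names the concrete type to rebuild.
    public static T Import<T>(IProtoBuf item, VariableDef variable_def, string import_scope = "")
        => item.from_proto<T>(variable_def, import_scope);
}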
@@ -52,4 +52,10 @@ Upgraded to TensorFlow 1.13 RC2. | |||
<Content CopyToOutputDirectory="PreserveNewest" Include="./runtimes/win-x64/native/tensorflow.dll" Link="tensorflow.dll" Pack="true" PackagePath="runtimes/win-x64/native/tensorflow.dll" /> | |||
</ItemGroup> | |||
<ItemGroup> | |||
<Reference Include="Newtonsoft.Json"> | |||
<HintPath>C:\Program Files\dotnet\sdk\NuGetFallbackFolder\newtonsoft.json\9.0.1\lib\netstandard1.0\Newtonsoft.Json.dll</HintPath> | |||
</Reference> | |||
</ItemGroup> | |||
</Project> |
@@ -41,7 +41,7 @@ namespace Tensorflow | |||
if( y is Tensor tr) | |||
dtype = tr.dtype.as_base_dtype(); | |||
var namescope = new ops.name_scope("", name, new { x, y }); | |||
var namescope = new ops.name_scope(null, name, new { x, y }); | |||
return Python.with<ops.name_scope, Tensor>(namescope, scope => | |||
{ | |||
Tensor result = null; | |||
@@ -1,4 +1,5 @@ | |||
using NumSharp.Core; | |||
using Newtonsoft.Json; | |||
using NumSharp.Core; | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Linq; | |||
@@ -17,10 +18,13 @@ namespace Tensorflow | |||
private readonly IntPtr _handle; | |||
private int _id; | |||
[JsonIgnore] | |||
public int Id => _id; | |||
public Graph Graph => op?.graph; | |||
[JsonIgnore] | |||
public Graph graph => op?.graph; | |||
[JsonIgnore] | |||
public Operation op { get; } | |||
[JsonIgnore] | |||
public Tensor[] outputs => op.outputs; | |||
/// <summary> | |||
@@ -63,9 +67,9 @@ namespace Tensorflow | |||
set | |||
{ | |||
if (value == null) | |||
c_api.TF_GraphSetTensorShape(this.Graph, this._as_tf_output(), null, -1, status); | |||
c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status); | |||
else | |||
c_api.TF_GraphSetTensorShape(this.Graph, this._as_tf_output(), value, value.Length, status); | |||
c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), value, value.Length, status); | |||
} | |||
} | |||
@@ -100,6 +104,7 @@ namespace Tensorflow | |||
public int NDims => rank; | |||
[JsonIgnore] | |||
public Operation[] Consumers => consumers(); | |||
public string Device => op.Device; | |||
@@ -108,7 +113,7 @@ namespace Tensorflow | |||
{ | |||
var output = _as_tf_output(); | |||
var consumer_names = c_api.TF_OperationOutputConsumers_wrapper(output); | |||
return consumer_names.Select(x => Graph.OperationByName(x)).ToArray(); | |||
return consumer_names.Select(x => graph.OperationByName(x)).ToArray(); | |||
} | |||
public TF_Output _as_tf_output() | |||
@@ -154,7 +159,7 @@ namespace Tensorflow | |||
/// <returns></returns> | |||
public NDArray eval(FeedItem[] feed_dict = null, Session session = null) | |||
{ | |||
return ops._eval_using_default_session(this, feed_dict, Graph, session); | |||
return ops._eval_using_default_session(this, feed_dict, graph, session); | |||
} | |||
public TF_DataType ToTFDataType(Type type) | |||
@@ -65,7 +65,7 @@ namespace Tensorflow | |||
/// <param name="name"></param> | |||
/// <param name="as_ref"></param> | |||
/// <returns></returns> | |||
public static Tensor _tensor_shape_tensor_conversion_function(TensorShape s, TF_DataType dtype = TF_DataType.DtInvalid, string name = "", bool as_ref = false) | |||
public static Tensor _tensor_shape_tensor_conversion_function(TensorShape s, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool as_ref = false) | |||
{ | |||
var s_list = s.Dimensions; | |||
var int64_value = 0; | |||
@@ -105,10 +105,16 @@ namespace Tensorflow | |||
nparray = Convert.ToSingle(values); | |||
break; | |||
case "Double": | |||
nparray = Convert.ToDouble(values); | |||
if (values.GetType().IsArray) | |||
nparray = np.array((double[])values, np_dt); | |||
else | |||
nparray = Convert.ToDouble(values); | |||
break; | |||
case "String": | |||
nparray = Convert.ToString(values); | |||
if (values.GetType().IsArray) | |||
nparray = np.array((string[])values, np_dt); | |||
else | |||
nparray = Convert.ToString(values); | |||
break; | |||
default: | |||
throw new NotImplementedException("make_tensor_proto Not Implemented"); | |||
@@ -20,10 +20,10 @@ namespace Tensorflow | |||
verify_shape: verify_shape, | |||
allow_broadcast: false); | |||
public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = "") => array_ops.zeros(shape, dtype, name); | |||
public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) => array_ops.zeros(shape, dtype, name); | |||
public static Tensor size(Tensor input, | |||
string name = "", | |||
string name = null, | |||
TF_DataType out_type = TF_DataType.TF_INT32) => array_ops.size(input, | |||
name, | |||
optimize: true, | |||
@@ -27,7 +27,7 @@ namespace Tensorflow | |||
public Dictionary<string, object> _non_slot_dict; | |||
public Dictionary<string, object> _deferred_slot_restorations; | |||
public Optimizer(float learning_rate, bool use_locking, string name = "") | |||
public Optimizer(float learning_rate, bool use_locking, string name = null) | |||
{ | |||
if (String.IsNullOrEmpty(name)) | |||
throw new NotImplementedException("Must specify the optimizer name"); | |||
@@ -64,7 +64,7 @@ namespace Tensorflow | |||
return apply_gradients(grads_and_vars); | |||
} | |||
public Operation apply_gradients(Tuple<Tensor, RefVariable>[] grads_and_vars, Tensor global_step = null, string name = "") | |||
public Operation apply_gradients(Tuple<Tensor, RefVariable>[] grads_and_vars, Tensor global_step = null, string name = null) | |||
{ | |||
// No DistributionStrategy case. | |||
var converted_grads_and_vars = new List<Tuple<Tensor, RefVariable, _OptimizableVariable>>(); | |||
@@ -15,7 +15,7 @@ namespace Tensorflow | |||
bool sharded = false, | |||
int max_to_keep = 5, | |||
float keep_checkpoint_every_n_hours = 10000, | |||
string name = "", | |||
string name = null, | |||
bool restore_sequentially = false, | |||
string filename = "model", | |||
bool build_save = true, | |||
@@ -282,7 +282,7 @@ namespace Tensorflow | |||
clear_devices: clear_devices, | |||
clear_extraneous_savers: clear_extraneous_savers, | |||
strip_default_attrs: strip_default_attrs); | |||
return meta_graph_def; | |||
return meta_graph_def.Item1; | |||
} | |||
/// <summary> | |||
@@ -75,7 +75,7 @@ namespace Tensorflow | |||
} | |||
else | |||
{ | |||
string name = ""; | |||
string name = null; | |||
Tensor tensor = null; | |||
if (convert_variable_to_tensor) | |||
@@ -8,7 +8,7 @@ namespace Tensorflow | |||
{ | |||
public static OpDefLibrary _op_def_lib = new OpDefLibrary(); | |||
public static Tensor apply_gradient_descent(RefVariable var, Tensor alpha, Tensor delta, bool use_locking = false, string name = "") | |||
public static Tensor apply_gradient_descent(RefVariable var, Tensor alpha, Tensor delta, bool use_locking = false, string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("ApplyGradientDescent", name, new | |||
{ | |||
@@ -20,6 +20,16 @@ namespace Tensorflow
string import_scope = "") => saver._import_meta_graph_with_return_elements(meta_graph_or_file,
clear_devices,
import_scope).Item1;
public static (MetaGraphDef, Dictionary<string, RefVariable>) export_meta_graph(string filename = "",
bool as_text = false,
bool clear_devices = false,
bool clear_extraneous_savers = false,
bool strip_default_attrs = false) => meta_graph.export_scoped_meta_graph(filename: filename,
as_text: as_text,
clear_devices: clear_devices,
clear_extraneous_savers: clear_extraneous_savers,
strip_default_attrs: strip_default_attrs);
}
}
}
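Editor's note (assumed usage, not part of the diff): a sketch of calling the export_meta_graph wrapper added above. The graph contents are hypothetical; whether var_list is non-empty depends on how the graph was built.

using System;
using Tensorflow;

namespace TensorFlowNET.Examples
{
    public class ExportMetaGraphSketch
    {
        public void Run()
        {
            // Hypothetical graph: one trainable variable so there is something to capture.
            var W = tf.Variable(0.1f, name: "weight");

            // Returns the MetaGraphDef plus the RefVariables it collected; a non-empty
            // filename also writes the proto via graph_io.write_graph.
            var (meta_graph_def, var_list) = tf.train.export_meta_graph(
                filename: "linear_regression.meta", as_text: true);

            Console.WriteLine($"exported {var_list.Count} variable(s)");
        }
    }
}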
@@ -17,7 +17,7 @@ namespace Tensorflow | |||
private static Tensor op_helper<T>(string default_name, RefVariable x, T y) | |||
{ | |||
var tensor1 = x.value(); | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope("", default_name, new object[] { tensor1, y }), scope => { | |||
return Python.with<ops.name_scope, Tensor>(new ops.name_scope(null, default_name, new object[] { tensor1, y }), scope => { | |||
var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y"); | |||
return gen_math_ops.add(tensor1, tensor2, scope); | |||
}); | |||
@@ -5,7 +5,7 @@ using System.Text; | |||
namespace Tensorflow | |||
{ | |||
public partial class RefVariable : VariableV1 | |||
public partial class RefVariable : VariableV1, IProtoBuf | |||
{ | |||
public bool _in_graph_mode = true; | |||
public Tensor _initial_value; | |||
@@ -13,11 +13,12 @@ namespace Tensorflow | |||
public bool _trainable; | |||
public Tensor _variable; | |||
public Tensor _snapshot; | |||
public bool _save_slice_info; | |||
private Operation _initializer_op; | |||
public Operation initializer => _initializer_op; | |||
public Operation op => _variable.op; | |||
public Graph graph => _variable.Graph; | |||
public Graph graph => _variable.graph; | |||
public TF_DataType dtype => _variable.dtype; | |||
public TensorShape shape => tensor_util.to_shape(_variable.shape); | |||
@@ -28,7 +29,7 @@ namespace Tensorflow | |||
List<string> collections = null, | |||
bool validate_shape = true, | |||
string caching_device = "", | |||
string name = "", | |||
string name = null, | |||
VariableDef variable_def = null, | |||
TF_DataType dtype = TF_DataType.DtInvalid, | |||
string import_scope = "") : base(initial_value, | |||
@@ -92,7 +93,7 @@ namespace Tensorflow | |||
List<string> collections = null, | |||
bool validate_shape = true, | |||
string caching_device = "", | |||
string name = "", | |||
string name = null, | |||
TF_DataType dtype = TF_DataType.DtInvalid) | |||
{ | |||
if (initial_value is null) | |||
@@ -235,7 +236,7 @@ namespace Tensorflow | |||
/// A `Tensor` that will hold the new value of this variable after | |||
/// the assignment has completed. | |||
/// </returns> | |||
public ITensorOrOperation assign(object value, bool use_locking = false, string name = "", bool read_value = true) | |||
public ITensorOrOperation assign(object value, bool use_locking = false, string name = null, bool read_value = true) | |||
{ | |||
var assign = gen_state_ops.assign(_variable, value, use_locking: use_locking, name: name); | |||
if (read_value) | |||
@@ -247,5 +248,30 @@ namespace Tensorflow
{
return $"tf.Variable '{name}' shape={shape} dtype={dtype}";
}
public VariableDef to_proto(string export_scope)
{
if(string.IsNullOrEmpty(export_scope) || _variable.name.StartsWith(export_scope))
{
var var_def = new VariableDef();
var_def.VariableName = ops.strip_name_scope(_variable.name, export_scope);
if (_initial_value != null)
var_def.InitialValueName = ops.strip_name_scope(_initial_value.name, export_scope);
var_def.Trainable = _trainable;
var_def.InitializerName = ops.strip_name_scope(initializer.name, export_scope);
var_def.SnapshotName = ops.strip_name_scope(_snapshot.name, export_scope);
if (_save_slice_info)
throw new NotImplementedException("to_proto _save_slice_info");
return var_def;
}
throw new NotImplementedException("to_proto RefVariable");
}
public T from_proto<T>(VariableDef variable_def, string import_scope)
{
throw new NotImplementedException();
}
}
}
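Editor's note (not part of the change): a small sketch of what to_proto exposes. The variable and the node names it produces are assumptions that depend on the surrounding graph.

using System;
using Tensorflow;

public class ToProtoSketch
{
    public static void Run()
    {
        // Hypothetical variable; the generated node names ("weight", "weight/read", ...)
        // depend on how the graph was built.
        RefVariable w = tf.Variable(1.0f, name: "weight");

        // An empty export scope keeps fully qualified names in the VariableDef.
        VariableDef def = w.to_proto(export_scope: "");
        Console.WriteLine($"{def.VariableName}, trainable={def.Trainable}, initializer={def.InitializerName}");
    }
}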
@@ -21,7 +21,7 @@ namespace Tensorflow | |||
List<string> collections = null, | |||
bool validate_shape = true, | |||
string caching_device = "", | |||
string name = "", | |||
string name = null, | |||
TF_DataType dtype = TF_DataType.DtInvalid) | |||
{ | |||
@@ -21,7 +21,7 @@ namespace Tensorflow | |||
/// <param name="container"></param> | |||
/// <param name="shared_name"></param> | |||
/// <returns></returns> | |||
public static Tensor variable_v2(long[] shape, TF_DataType dtype, string name = "", string container = "", string shared_name = "") | |||
public static Tensor variable_v2(long[] shape, TF_DataType dtype, string name = null, string container = "", string shared_name = "") | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("VariableV2", name: name, args: new { dtype, shape, container, shared_name }); | |||
@@ -50,7 +50,7 @@ namespace Tensorflow | |||
public static Tensor assign(Tensor tensor, object value, | |||
bool validate_shape = true, | |||
bool use_locking = true, | |||
string name = "") | |||
string name = null) | |||
{ | |||
var _op = _op_def_lib._apply_op_helper("Assign", name: name, args: new { _ref_ = tensor, value, validate_shape, use_locking }); | |||
@@ -11,7 +11,7 @@ namespace Tensorflow | |||
public static bool _DEFAULT_USE_RESOURCE = false; | |||
public static RefVariable default_variable_creator(object initial_value, | |||
string name = "", | |||
string name = null, | |||
bool? trainable = null, | |||
TF_DataType dtype = TF_DataType.DtInvalid, | |||
bool validate_shape = false, | |||
@@ -81,22 +81,22 @@ namespace Tensorflow | |||
/// <param name="dtype"></param> | |||
/// <param name="name"></param> | |||
/// <returns></returns> | |||
public static Tensor convert_to_tensor(object value, TF_DataType dtype = TF_DataType.DtInvalid, string name = "", TF_DataType preferred_dtype = TF_DataType.DtInvalid) | |||
public static Tensor convert_to_tensor(object value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid) | |||
{ | |||
return convert_to_tensor_v2(value, dtype, preferred_dtype, name); | |||
} | |||
public static Tensor convert_to_tensor_v2(object value, TF_DataType dtype = TF_DataType.DtInvalid, TF_DataType dtype_hint = TF_DataType.DtInvalid, string name = "") | |||
public static Tensor convert_to_tensor_v2(object value, TF_DataType dtype = TF_DataType.DtInvalid, TF_DataType dtype_hint = TF_DataType.DtInvalid, string name = null) | |||
{ | |||
return internal_convert_to_tensor(value, dtype: dtype, name: name, preferred_dtype: dtype_hint, as_ref: false); | |||
} | |||
public static Tensor convert_to_tensor_or_composite(Tensor value, TF_DataType dtype = TF_DataType.DtInvalid, string name = "") | |||
public static Tensor convert_to_tensor_or_composite(Tensor value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
{ | |||
return internal_convert_to_tensor_or_composite(value: value, dtype: dtype, name: name, as_ref: false); | |||
} | |||
public static Tensor internal_convert_to_tensor_or_composite(Tensor value, TF_DataType dtype = TF_DataType.DtInvalid, string name = "", bool as_ref = false) | |||
public static Tensor internal_convert_to_tensor_or_composite(Tensor value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool as_ref = false) | |||
{ | |||
return internal_convert_to_tensor(value, dtype: dtype, name: name, as_ref: as_ref); | |||
} | |||
@@ -382,22 +382,22 @@ namespace Tensorflow | |||
}; | |||
} | |||
public static Tensor[] convert_n_to_tensor_or_indexed_slices(Tensor[] values, TF_DataType dtype = TF_DataType.DtInvalid, string name = "") | |||
public static Tensor[] convert_n_to_tensor_or_indexed_slices(Tensor[] values, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
{ | |||
return internal_convert_n_to_tensor_or_indexed_slices(values, dtype: dtype, name: name); | |||
} | |||
public static Tensor convert_to_tensor_or_indexed_slices(Tensor value, TF_DataType dtype = TF_DataType.DtInvalid, string name = "") | |||
public static Tensor convert_to_tensor_or_indexed_slices(Tensor value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null) | |||
{ | |||
return internal_convert_to_tensor_or_indexed_slices(value: value, dtype: dtype, name: name, as_ref: false); | |||
} | |||
public static Tensor internal_convert_to_tensor_or_indexed_slices(Tensor value, TF_DataType dtype = TF_DataType.DtInvalid, string name = "", bool as_ref = false) | |||
public static Tensor internal_convert_to_tensor_or_indexed_slices(Tensor value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool as_ref = false) | |||
{ | |||
return value; | |||
} | |||
public static Tensor[] internal_convert_n_to_tensor_or_indexed_slices(Tensor[] values, TF_DataType dtype = TF_DataType.DtInvalid, string name = "", bool as_ref = false) | |||
public static Tensor[] internal_convert_n_to_tensor_or_indexed_slices(Tensor[] values, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool as_ref = false) | |||
{ | |||
var ret = new List<Tensor>(); | |||
@@ -418,7 +418,7 @@ namespace Tensorflow | |||
} | |||
public static Tensor[] internal_convert_n_to_tensor<T>(T[] values, TF_DataType dtype = TF_DataType.DtInvalid, | |||
string name = "", TF_DataType preferred_dtype = TF_DataType.DtInvalid, | |||
string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid, | |||
bool as_ref = false) | |||
{ | |||
var ret = new List<Tensor>(); | |||
@@ -433,7 +433,7 @@ namespace Tensorflow | |||
} | |||
public static Tensor internal_convert_to_tensor(object value, TF_DataType dtype = TF_DataType.DtInvalid, | |||
string name = "", TF_DataType preferred_dtype = TF_DataType.DtInvalid, | |||
string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid, | |||
bool as_ref = false) | |||
{ | |||
if (dtype == TF_DataType.DtInvalid) | |||
@@ -18,7 +18,7 @@ namespace Tensorflow | |||
public static Session defaultSession; | |||
public static RefVariable Variable<T>(T data, string name = "", TF_DataType dtype = TF_DataType.DtInvalid) | |||
public static RefVariable Variable<T>(T data, string name = null, TF_DataType dtype = TF_DataType.DtInvalid) | |||
{ | |||
return variable_scope.default_variable_creator(data, name: name, dtype: TF_DataType.DtInvalid); | |||
} | |||
@@ -1,4 +1,5 @@ | |||
using NumSharp.Core; | |||
using Newtonsoft.Json; | |||
using NumSharp.Core; | |||
using System; | |||
using System.Collections.Generic; | |||
using System.Text; | |||
@@ -31,7 +32,7 @@ namespace TensorFlowNET.Examples | |||
var n_samples = train_X.shape[0]; | |||
// tf Graph Input | |||
/*var X = tf.placeholder(tf.float32); | |||
var X = tf.placeholder(tf.float32); | |||
var Y = tf.placeholder(tf.float32); | |||
// Set model weights | |||
@@ -50,25 +51,32 @@ namespace TensorFlowNET.Examples | |||
var reduce = tf.reduce_sum(pow); | |||
var cost = reduce / (2.0f * n_samples); | |||
// import graph | |||
// radient descent | |||
// Note, minimize() knows to modify W and b because Variable objects are trainable=True by default | |||
var grad = tf.train.GradientDescentOptimizer(learning_rate); | |||
var optimizer = grad.minimize(cost);*/ | |||
var optimizer = grad.minimize(cost); | |||
var new_saver = tf.train.import_meta_graph("linear_regression.meta"); | |||
//tf.train.export_meta_graph(filename: "linear_regression.meta.bin"); | |||
// import meta | |||
// var new_saver = tf.train.import_meta_graph("linear_regression.meta.bin"); | |||
var text = JsonConvert.SerializeObject(graph, new JsonSerializerSettings | |||
{ | |||
Formatting = Formatting.Indented | |||
}); | |||
var X = graph.OperationByName("Placeholder"); | |||
var Y = graph.OperationByName("Placeholder_1"); | |||
var W = graph.OperationByName("weight"); | |||
/*var cost = graph.OperationByName("truediv").output; | |||
var pred = graph.OperationByName("Add").output; | |||
var optimizer = graph.OperationByName("GradientDescent"); | |||
var X = graph.OperationByName("Placeholder").output; | |||
var Y = graph.OperationByName("Placeholder_1").output; | |||
var W = graph.OperationByName("weight").output; | |||
var b = graph.OperationByName("bias").output;*/ | |||
// Initialize the variables (i.e. assign their default value) | |||
var init = tf.global_variables_initializer(); | |||
// Start training | |||
Python.with<Session>(tf.Session(graph), sess => | |||
with<Session>(tf.Session(graph), sess => | |||
{ | |||
// Run the initializer | |||
sess.run(init); | |||
@@ -78,11 +86,10 @@ namespace TensorFlowNET.Examples | |||
{ | |||
foreach (var (x, y) in zip<float>(train_X, train_Y)) | |||
{ | |||
var w = sess.run(W); | |||
sess.run(optimizer, | |||
sess.run(optimizer, | |||
new FeedItem(X, x), | |||
new FeedItem(Y, y)); | |||
w = sess.run(W); | |||
var rW = sess.run(W); | |||
} | |||
// Display logs per epoch step | |||
@@ -6,6 +6,7 @@ | |||
</PropertyGroup> | |||
<ItemGroup> | |||
<PackageReference Include="Newtonsoft.Json" Version="12.0.1" /> | |||
<PackageReference Include="NumSharp" Version="0.7.3" /> | |||
<PackageReference Include="TensorFlow.NET" Version="0.3.0" /> | |||
</ItemGroup> | |||
@@ -49,7 +49,7 @@ if False: | |||
# tf.train.export_meta_graph(filename='save_model.meta'); | |||
else: | |||
# tf Graph Input | |||
new_saver = tf.train.import_meta_graph("save_model.meta") | |||
new_saver = tf.train.import_meta_graph("linear_regression.meta") | |||
nodes = tf.get_default_graph()._nodes_by_name; | |||
optimizer = nodes["GradientDescent"] | |||
cost = nodes["truediv"].outputs[0] | |||