@@ -115,7 +115,7 @@ namespace Keras
var init = tf.global_variables_initializer();
float loss_value = 0;
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
sess.run(init);
var step = 0;
@@ -133,7 +133,7 @@ namespace Keras
Console.WriteLine($"Step {step} loss: {loss_value}");
}
Console.WriteLine($"Final loss: {loss_value}");
- });
+ }
return loss_value;
}
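The hunk above swaps the library's Python-style `with(IPython, Action)` session helper for a plain C# `using` block: `Session` holds a native handle behind `IDisposable`, so both forms release it deterministically, but `using` is the idiomatic one once `tf_with` is reserved for graph building. A minimal sketch of the two shapes, reusing names from the hunk above:

// Before: the helper enters the session, runs the lambda, then disposes it.
with(tf.Session(graph), sess =>
{
    sess.run(init);
});

// After: plain C#; Dispose() runs when the block exits, even on exceptions.
using (var sess = tf.Session(graph))
{
    sess.run(init);
}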
@@ -136,7 +136,7 @@ namespace Tensorflow
public static Tensor bias_add(Tensor value, RefVariable bias, string data_format = null, string name = null)
{
- return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
+ return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
{
name = scope;
return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name);
@@ -169,7 +169,7 @@ namespace Tensorflow
/// <returns></returns>
public static Tensor softmax_cross_entropy_with_logits(Tensor labels, Tensor logits, int dim = -1, string name = null)
{
- with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope =>
+ tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope =>
{
name = scope;
labels = array_ops.stop_gradient(labels, name: "labels_stop_gradient");
@@ -66,7 +66,7 @@ namespace Tensorflow
return buffer.Data;
}
- protected override void DisposeUnManagedState()
- => c_api.TF_DeleteBuffer(_handle);
+ protected override void DisposeUnManagedState(IntPtr handle)
+ => c_api.TF_DeleteBuffer(handle);
}
}
@@ -68,7 +68,7 @@ namespace Tensorflow.Clustering
private Tensor _initialize()
{
- return with(ops.control_dependencies(new Operation[]
+ return tf_with(ops.control_dependencies(new Operation[]
{
check_ops.assert_positive(_num_remaining)
}), delegate
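`tf_with` also wraps `ops.control_dependencies`, in which case every op created inside the delegate gains a control edge from the listed operations. A sketch of the contract in `_initialize` (the delegate body below is hypothetical, since the hunk truncates it):

// Ops built inside the delegate run only after the assertion executes,
// so initialization cannot proceed when _num_remaining <= 0.
return tf_with(ops.control_dependencies(new Operation[]
{
    check_ops.assert_positive(_num_remaining)
}), delegate
{
    return array_ops.identity(_num_remaining); // hypothetical body, for illustration
});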
@@ -34,38 +34,27 @@ namespace Tensorflow
_handle = handle;
}
private bool disposedValue = false; // To detect redundant calls
- protected virtual void DisposeManagedState()
- {
- }
- protected abstract void DisposeUnManagedState();
+ protected abstract void DisposeUnManagedState(IntPtr handle);
protected virtual void Dispose(bool disposing)
{
- if (!disposedValue)
+ if (disposing)
{
- if (disposing)
+ // free unmanaged resources (unmanaged objects) and override a finalizer below.
+ if (_handle != IntPtr.Zero)
{
- // dispose managed state (managed objects).
- DisposeManagedState();
- }
- // free unmanaged resources (unmanaged objects) and override a finalizer below.
- /*IntPtr h = IntPtr.Zero;
- lock (this)
- {
- h = _handle;
- _handle = IntPtr.Zero;
- }*/
- if (_handle != IntPtr.Zero)
- DisposeUnManagedState();
- // set large fields to null.
- _handle = IntPtr.Zero;
+ // set large fields to null.
+ DisposeUnManagedState(_handle);
disposedValue = true;
+ _handle = IntPtr.Zero;
}
}
}
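With this change a subclass no longer reads `_handle` itself: the base class passes the stored handle in, deletes it exactly once, and then zeroes the field. A minimal sketch of the revised contract, assuming a base type along the lines of the class in this hunk:

public abstract class DisposableObject : IDisposable
{
    protected IntPtr _handle;
    private bool disposedValue; // To detect redundant calls

    // Subclasses release their specific native resource,
    // e.g. c_api.TF_DeleteBuffer(handle) or c_api.TF_DeleteTensor(handle).
    protected abstract void DisposeUnManagedState(IntPtr handle);

    protected virtual void Dispose(bool disposing)
    {
        if (disposing && _handle != IntPtr.Zero)
        {
            DisposeUnManagedState(_handle);
            disposedValue = true;
            _handle = IntPtr.Zero; // a second Dispose() becomes a no-op
        }
    }

    public void Dispose()
    {
        Dispose(disposing: true);
        GC.SuppressFinalize(this);
    }
}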
@@ -42,7 +42,7 @@ namespace Tensorflow
string prefix = "";
var graph = ops.get_default_graph();
- with(ops.name_scope(name, "import", input_map.Values), scope =>
+ tf_with(ops.name_scope(name, "import", input_map.Values), scope =>
{
prefix = scope;
/*if (!string.IsNullOrEmpty(prefix))
@@ -55,7 +55,7 @@ namespace Tensorflow
**/
var grads = new Dictionary<string, List<List<Tensor>>>();
- with(ops.name_scope(name, "gradients",
+ tf_with(ops.name_scope(name, "gradients",
values: ys.Concat(xs).Concat(stop_gradients).Concat(grad_ys)), scope =>
{
string grad_scope = scope;
@@ -141,7 +141,7 @@ namespace Tensorflow
}
}
- with(ops.name_scope(op.name + "_grad"), scope1 =>
+ tf_with(ops.name_scope(op.name + "_grad"), scope1 =>
{
string name1 = scope1;
if (grad_fn != null)
@@ -90,7 +90,7 @@ namespace Tensorflow.Gradients
{
var grad = grads[0];
var y = op.outputs[0]; // y = e^x
- return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+ return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
y = math_ops.conj(y);
return new Tensor[] { math_ops.mul_no_nan(y, grad) };
});
@@ -107,7 +107,7 @@ namespace Tensorflow.Gradients
{
var grad = grads[0];
var x = op.inputs[0];
- return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+ return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
x = math_ops.conj(x);
return new Tensor[] { grad * math_ops.digamma(x) };
});
@@ -118,7 +118,7 @@ namespace Tensorflow.Gradients
{
var grad = grads[0];
var x = op.inputs[0];
- return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+ return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
x = math_ops.conj(x);
return new Tensor[] { grad * math_ops.reciprocal(x) };
});
@@ -431,7 +431,7 @@ namespace Tensorflow.Gradients
var grad = grads[0];
var y = op.outputs[0];
- return with(ops.control_dependencies(grads), delegate
+ return tf_with(ops.control_dependencies(grads), delegate
{
y = math_ops.conj(y);
return new Tensor[] { gen_math_ops.sigmoid_grad(y, grad) };
@@ -453,7 +453,7 @@ namespace Tensorflow.Gradients
var grad = grads[0];
var x = op.inputs[0];
- return with(ops.control_dependencies(grads), delegate
+ return tf_with(ops.control_dependencies(grads), delegate
{
x = math_ops.conj(x);
var y = constant_op.constant(2.0f, dtype: x.dtype);
@@ -467,7 +467,7 @@ namespace Tensorflow.Gradients
var grad = grads[0];
var y = op.outputs[0];
- return with(ops.control_dependencies(grads), delegate
+ return tf_with(ops.control_dependencies(grads), delegate
{
y = math_ops.conj(y);
return new Tensor[] { gen_math_ops.tanh_grad(y, grad) };
@@ -207,7 +207,7 @@ namespace Tensorflow.Keras.Layers
public Tensor _assign_moving_average(RefVariable variable, Tensor value, Tensor momentum)
{
- return Python.with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope =>
+ return Python.tf_with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope =>
{
// var cm = ops.colocate_with(variable);
var decay = ops.convert_to_tensor(1.0f - momentum, name: "decay");
@@ -125,7 +125,7 @@ namespace Tensorflow.Keras.Layers
// Symbolic execution on symbolic tensors. We will attempt to build
// the corresponding TF subgraph inside `backend.get_graph()`
var graph = backend.get_graph().as_default();
- with(ops.name_scope(_name_scope()), delegate
+ tf_with(ops.name_scope(_name_scope()), delegate
{
// Build layer if applicable (if the `build` method has been
// overridden).
@@ -72,7 +72,7 @@ namespace Tensorflow.Layers
}
Tensor outputs = null;
- with(scope_context_manager, scope2 =>
+ tf_with(scope_context_manager, scope2 =>
{
_current_scope = scope2;
// Actually call layer
@@ -136,12 +136,12 @@ namespace Tensorflow.Layers
_set_scope();
var reuse = built || (_reuse != null && _reuse.Value);
- return with(tf.variable_scope(_scope,
+ return tf_with(tf.variable_scope(_scope,
reuse: reuse,
auxiliary_name_scope: false), scope =>
{
_current_scope = scope;
- return with(ops.name_scope(_name_scope()), delegate
+ return tf_with(ops.name_scope(_name_scope()), delegate
{
var variable = base.add_weight(name,
shape,
@@ -183,7 +183,7 @@ namespace Tensorflow.Layers
}
else
{
- with(tf.variable_scope(scope, default_name: _base_name), captured_scope =>
+ tf_with(tf.variable_scope(scope, default_name: _base_name), captured_scope =>
{
// convert variable_scope to VariableScope
_scope = captured_scope;
@@ -122,7 +122,7 @@ namespace Tensorflow.Operations
_external_values[result.name] = result;
}
- with(ops.control_dependencies(null), ctrl =>
+ tf_with(ops.control_dependencies(null), ctrl =>
{
var results = control_flow_ops._SwitchRefOrTensor(result, _pred);
result = results[_branch];
@@ -58,7 +58,7 @@ namespace Tensorflow
private Tensor _call_log_prob (Tensor value, string name)
{
- return with(ops.name_scope(name, "moments", new { value }), scope =>
+ return tf_with(ops.name_scope(name, "moments", new { value }), scope =>
{
try
{
@@ -50,9 +50,9 @@ namespace Tensorflow
parameters.Add("validate_args", validate_args);
parameters.Add("allow_nan_stats", allow_nan_stats);
- with(ops.name_scope(name, "", new { loc, scale }), scope =>
+ tf_with(ops.name_scope(name, "", new { loc, scale }), scope =>
{
- with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd =>
+ tf_with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd =>
{
this._loc = array_ops.identity(loc, name);
this._scale = array_ops.identity(scale, name);
@@ -24,7 +24,7 @@ namespace Tensorflow
public Tensor compute_weighted_loss(Tensor losses, Tensor weights = null, string scope = null,
string loss_collection = ops.GraphKeys.LOSSES, string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS)
{
- return with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate
+ return tf_with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate
{
// Save the `reduction` argument for loss normalization when distributing
// to multiple replicas. Used only for estimator + v1 optimizer flow.
@@ -77,7 +77,7 @@ namespace Tensorflow
public Tensor _num_present(Tensor losses, Tensor weights, bool per_batch = false)
{
- return with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope =>
+ return tf_with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope =>
{
string scope = name_scope;
weights = math_ops.cast(weights, dtype: dtypes.float32);
@@ -104,7 +104,7 @@ namespace Tensorflow
string loss_collection= ops.GraphKeys.LOSSES,
string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS)
{
- return with(ops.name_scope(scope,
+ return tf_with(ops.name_scope(scope,
"sparse_softmax_cross_entropy_loss",
(logits, labels, weights)),
name_scope =>
@@ -30,7 +30,7 @@ namespace Tensorflow.Operations
string data_format = "NHWC",
string name = null)
{
- return with(ops.name_scope(name, "MaxPool", value), scope =>
+ return tf_with(ops.name_scope(name, "MaxPool", value), scope =>
{
name = scope;
value = ops.convert_to_tensor(value, name: "input");
@@ -30,7 +30,7 @@ namespace Tensorflow.Operations
TF_DataType dtype = TF_DataType.DtInvalid,
int? parallel_iterations = null, bool swap_memory = false, bool time_major = false)
{
- with(tf.variable_scope("rnn"), scope =>
+ tf_with(tf.variable_scope("rnn"), scope =>
{
VariableScope varscope = scope;
var flat_input = nest.flatten(inputs_tensor);
@@ -140,7 +140,7 @@ namespace Tensorflow.Operations
var time = array_ops.constant(0, dtype: dtypes.int32, name: "time");
string base_name = null;
- with(ops.name_scope("dynamic_rnn"), scope => base_name = scope);
+ tf_with(ops.name_scope("dynamic_rnn"), scope => base_name = scope);
Func<string, TensorShape, TF_DataType, Tensor> _create_ta = (name, element_shape, dtype_) =>
{
@@ -58,7 +58,7 @@ namespace Tensorflow
var input_types = new List<TF_DataType>();
object values = null;
- return with(ops.name_scope(name), scope =>
+ return tf_with(ops.name_scope(name), scope =>
{
var inferred_from = new Dictionary<string, object>();
var base_types = new List<TF_DataType>();
@@ -82,7 +82,7 @@ namespace Tensorflow
{
Tensor output = null;
var state_size = this.state_size;
- with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate
+ tf_with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate
{
output = _zero_state_tensors(state_size, batch_size, dtype);
});
@@ -66,7 +66,7 @@ namespace Tensorflow.Operations
_element_shape = new List<TensorShape> { };
}
- with(ops.name_scope(name, "", new { handle, size, flow }), scope =>
+ tf_with(ops.name_scope(name, "", new { handle, size, flow }), scope =>
{
if(handle != null)
{
@@ -43,7 +43,7 @@ namespace Tensorflow
public static Tensor zeros(TensorShape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return with(ops.name_scope(name, "zeros", shape), scope =>
+ return tf_with(ops.name_scope(name, "zeros", shape), scope =>
{
name = scope;
switch (dtype)
@@ -67,7 +67,7 @@ namespace Tensorflow
public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return with(ops.name_scope(name, "zeros", shape), scope =>
+ return tf_with(ops.name_scope(name, "zeros", shape), scope =>
{
name = scope;
switch (dtype)
@@ -140,7 +140,7 @@ namespace Tensorflow
{
var must_pack = false;
var converted_elems = new List<object>();
- return with(ops.name_scope(name), scope =>
+ return tf_with(ops.name_scope(name), scope =>
{
foreach (var (i, elem) in enumerate(list_or_tuple))
{
@@ -189,7 +189,7 @@ namespace Tensorflow
public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true)
{
- return with(ops.name_scope(name, "Rank", new List<Tensor> { input }), scope =>
+ return tf_with(ops.name_scope(name, "Rank", new List<Tensor> { input }), scope =>
{
name = scope;
var input_tensor = ops.convert_to_tensor(input);
@@ -217,7 +217,7 @@ namespace Tensorflow
private static Tensor ones_like_impl<T>(T tensor, TF_DataType dtype, string name, bool optimize = true)
{
- return with(ops.name_scope(name, "ones_like", new { tensor }), scope =>
+ return tf_with(ops.name_scope(name, "ones_like", new { tensor }), scope =>
{
name = scope;
var tensor1 = ops.convert_to_tensor(tensor, name: "tensor");
@@ -233,7 +233,7 @@ namespace Tensorflow
public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return with(ops.name_scope(name, "ones", new { shape }), scope =>
+ return tf_with(ops.name_scope(name, "ones", new { shape }), scope =>
{
name = scope;
var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name);
@@ -244,7 +244,7 @@ namespace Tensorflow
public static Tensor ones(Tensor[] shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return with(ops.name_scope(name, "ones", new { shape }), scope =>
+ return tf_with(ops.name_scope(name, "ones", new { shape }), scope =>
{
name = scope;
var output = _constant_if_small(1, shape[0]);
@@ -257,7 +257,7 @@ namespace Tensorflow
public static Tensor ones(int[] dims, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
{
dtype = dtype.as_base_dtype();
- return with(ops.name_scope(name, "ones", new { dims }), scope =>
+ return tf_with(ops.name_scope(name, "ones", new { dims }), scope =>
{
name = scope;
var shape = ops.convert_to_tensor(dims, dtype: TF_DataType.TF_INT32);
@@ -273,7 +273,7 @@ namespace Tensorflow
int axis = -1,
string name = null)
{
- return with(ops.name_scope(name, "one_hot", new { indices, depth, dtype }), scope =>
+ return tf_with(ops.name_scope(name, "one_hot", new { indices, depth, dtype }), scope =>
{
name = scope;
var on_exists = false;
@@ -341,7 +341,7 @@ namespace Tensorflow
private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32)
{
- return with(ops.name_scope(name, "Shape", new { input }), scope =>
+ return tf_with(ops.name_scope(name, "Shape", new { input }), scope =>
{
name = scope;
@@ -362,7 +362,7 @@ namespace Tensorflow
private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32)
{
- return with(ops.name_scope(name, "Size", new { input }), scope =>
+ return tf_with(ops.name_scope(name, "Size", new { input }), scope =>
{
name = scope;
@@ -382,7 +382,7 @@ namespace Tensorflow
public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
{
- return with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope =>
+ return tf_with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope =>
{
name = scope;
tensor = ops.convert_to_tensor(tensor, name: "tensor");
@@ -516,7 +516,7 @@ namespace Tensorflow
{
if(values.Length == 1) // Degenerate case of one tensor.
{
- return with(ops.name_scope(name), scope => {
+ return tf_with(ops.name_scope(name), scope => {
var t = ops.convert_to_tensor(axis, name: "concat_dim", dtype: TF_DataType.TF_INT32);
return identity(values[0], name: scope);
});
@@ -535,7 +535,7 @@ namespace Tensorflow
public static Tensor transpose<T1, T2>(T1 a, T2 perm, string name = "transpose", bool conjugate = false)
{
- return with(ops.name_scope(name, "transpose", new { a }), scope =>
+ return tf_with(ops.name_scope(name, "transpose", new { a }), scope =>
{
return gen_array_ops.transpose(a, perm, name: scope);
});
@@ -31,7 +31,7 @@ namespace Tensorflow
if (message == null)
message = "";
- return with(ops.name_scope(name, "assert_equal", new { t1, t2, data }), delegate
+ return tf_with(ops.name_scope(name, "assert_equal", new { t1, t2, data }), delegate
{
var x = ops.convert_to_tensor(t1, name: "x");
var y = ops.convert_to_tensor(t2, name: "y");
@@ -62,7 +62,7 @@ namespace Tensorflow
if (message == null)
message = "";
- return with(ops.name_scope(name, "assert_positive", new { x, data }), delegate
+ return tf_with(ops.name_scope(name, "assert_positive", new { x, data }), delegate
{
x = ops.convert_to_tensor(x, name: "x");
if (data == null)
@@ -86,7 +86,7 @@ namespace Tensorflow
if (message == null)
message = "";
- return with(ops.name_scope(name, "assert_less", new { x, y, data }), delegate
+ return tf_with(ops.name_scope(name, "assert_less", new { x, y, data }), delegate
{
x = ops.convert_to_tensor(x, name: "x");
y = ops.convert_to_tensor(y, name: "y");
@@ -34,7 +34,7 @@ namespace Tensorflow
int expected_rank_diff = 0,
string name = null)
{
- return with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate
+ return tf_with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate
{
predictions = ops.convert_to_tensor(predictions);
labels = ops.convert_to_tensor(labels);
@@ -28,7 +28,7 @@ namespace Tensorflow
{
public static Operation Assert(Tensor condition, object[] data, int? summarize = null, string name = null)
{
- return with(ops.name_scope(name, "Assert", new { condition, data }), scope =>
+ return tf_with(ops.name_scope(name, "Assert", new { condition, data }), scope =>
{
name = scope;
var xs = ops.convert_n_to_tensor(data);
@@ -53,7 +53,7 @@ namespace Tensorflow
public static Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation
{
- return with(ops.name_scope(name, "group_deps", inputs), scope =>
+ return tf_with(ops.name_scope(name, "group_deps", inputs), scope =>
{
name = scope;
@@ -91,7 +91,7 @@ namespace Tensorflow
private static Operation _GroupControlDeps(string dev, Operation[] deps, string name = null)
{
- return with(ops.control_dependencies(deps), ctl =>
+ return tf_with(ops.control_dependencies(deps), ctl =>
{
if (dev == null)
{
@@ -135,7 +135,7 @@ namespace Tensorflow
public static Tensor[] tuple(Tensor[] tensors, string name = null, Operation[] control_inputs = null)
{
- return with(ops.name_scope(name, "tuple", tensors), scope =>
+ return tf_with(ops.name_scope(name, "tuple", tensors), scope =>
{
name = scope;
var gating_ops = tensors.Where(x => x != null).Select(x => x.op).ToList();
@@ -189,13 +189,13 @@ namespace Tensorflow
values.AddRange(dependencies);
values.Add(output_tensor);
- return with(ops.name_scope(name, "control_dependency", values), scope =>
+ return tf_with(ops.name_scope(name, "control_dependency", values), scope =>
{
name = scope;
// TODO: missing original code
//with ops.colocate_with(output_tensor):
{
- return with(ops.control_dependencies(dependencies), ctl =>
+ return tf_with(ops.control_dependencies(dependencies), ctl =>
{
output_tensor = ops.convert_to_tensor_or_composite(output_tensor);
return _Identity(output_tensor, name: name);
@@ -306,7 +306,7 @@ namespace Tensorflow
bool strict = false,
string name = null)
{
- return with(ops.name_scope(name, "cond", new { pred }), delegate
+ return tf_with(ops.name_scope(name, "cond", new { pred }), delegate
{
// TODO: here a chunk of original code is missing
/*
@@ -398,7 +398,7 @@ namespace Tensorflow
bool strict = false,
string name = null)
{
- return with(ops.name_scope(name, "cond", new { pred }), delegate
+ return tf_with(ops.name_scope(name, "cond", new { pred }), delegate
{
// Add the Switch to the graph.
var switch_result = @switch(pred, pred);
@@ -467,7 +467,7 @@ namespace Tensorflow
{
if (inputs.Any(x => x == null))
throw new ValueError($"At least one of the merge inputs is null: {inputs}");
- return with(ops.name_scope(name, "Merge", inputs), scope =>
+ return tf_with(ops.name_scope(name, "Merge", inputs), scope =>
{
name = scope;
inputs = inputs.Select(inp =>
@@ -489,7 +489,7 @@ namespace Tensorflow
TF_DataType dtype = TF_DataType.DtInvalid,
string name = null)
{
- return with(ops.name_scope(name, "Switch", new { data, pred }), scope =>
+ return tf_with(ops.name_scope(name, "Switch", new { data, pred }), scope =>
{
name = scope;
data = ops.internal_convert_to_tensor_or_indexed_slices(data,
@@ -35,7 +35,7 @@ namespace Tensorflow
string name = null,
string max_norm = null)
{
- return with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope =>
+ return tf_with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope =>
{
name = scope;
int np = 1;
@@ -58,7 +58,7 @@ namespace Tensorflow
string name = null,
string max_norm = null)
{
- return with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope =>
+ return tf_with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope =>
{
name = scope;
int np = @params.Length;
@@ -28,7 +28,7 @@ namespace Tensorflow
if (dtype == image.dtype)
return array_ops.identity(image, name: name);
- return with(ops.name_scope(name, "convert_image", image), scope =>
+ return tf_with(ops.name_scope(name, "convert_image", image), scope =>
{
name = scope;
@@ -29,7 +29,7 @@ namespace Tensorflow
{
public static Tensor abs(Tensor x, string name = null)
{
- return with(ops.name_scope(name, "Abs", new { x }), scope =>
+ return tf_with(ops.name_scope(name, "Abs", new { x }), scope =>
{
x = ops.convert_to_tensor(x, name: "x");
if (x.dtype.is_complex())
@@ -69,7 +69,7 @@ namespace Tensorflow
if(base_type == x.dtype)
return x;
- return with(ops.name_scope(name, "Cast", new { x }), scope =>
+ return tf_with(ops.name_scope(name, "Cast", new { x }), scope =>
{
name = scope;
x = ops.convert_to_tensor(x, name: "x");
@@ -82,7 +82,7 @@ namespace Tensorflow
public static Tensor cumsum(Tensor x, int axis = 0, bool exclusive = false, bool reverse = false, string name = null)
{
- return with(ops.name_scope(name, "Cumsum", new {x}), scope =>
+ return tf_with(ops.name_scope(name, "Cumsum", new {x}), scope =>
{
name = scope;
x = ops.convert_to_tensor(x, name: "x");
@@ -110,7 +110,7 @@ namespace Tensorflow
/// <returns>`x / y` returns the quotient of x and y.</returns>
public static Tensor div(Tensor x, Tensor y, string name = null)
{
- return with(ops.name_scope(name, "div", (x, y)), name_scope =>
+ return tf_with(ops.name_scope(name, "div", (x, y)), name_scope =>
{
name = name_scope;
x = ops.convert_to_tensor(x, name: "x");
@@ -146,7 +146,7 @@ namespace Tensorflow
/// </remarks>
public static Tensor div_no_nan(Tensor x, Tensor y, string name = null)
{
- return with(ops.name_scope(name, "div_no_nan", (x, y)), name_scope =>
+ return tf_with(ops.name_scope(name, "div_no_nan", (x, y)), name_scope =>
{
name = name_scope;
x = ops.convert_to_tensor(x, name: "x");
@@ -229,7 +229,7 @@ namespace Tensorflow
public static Tensor sign(Tensor x, string name = null)
{
- return with(ops.name_scope(name, "Sign", new {x}), scope =>
+ return tf_with(ops.name_scope(name, "Sign", new {x}), scope =>
{
x = ops.convert_to_tensor(x, name: "x");
return gen_math_ops.sign(x);
@@ -337,7 +337,7 @@ namespace Tensorflow
/// <returns> The reduced tensor.</returns>
public static Tensor reduce_logsumexp(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null)
{
- return with(ops.name_scope(name, "ReduceLogSumExp", new { input_tensor }), scope =>
+ return tf_with(ops.name_scope(name, "ReduceLogSumExp", new { input_tensor }), scope =>
{
var raw_max = reduce_max(input_tensor, axis, true);
var my_max = array_ops.stop_gradient(array_ops.where(gen_math_ops.is_finite(raw_max), raw_max, array_ops.zeros_like(raw_max)));
@@ -497,7 +497,7 @@ namespace Tensorflow
if (delta == null)
delta = 1;
- return with(ops.name_scope(name, "Range", new { start, limit, delta }), scope =>
+ return tf_with(ops.name_scope(name, "Range", new { start, limit, delta }), scope =>
{
name = scope;
var start1 = ops.convert_to_tensor(start, name: "start");
@@ -510,7 +510,7 @@ namespace Tensorflow
public static Tensor floordiv(Tensor x, Tensor y, string name = null)
{
- return with(ops.name_scope(name, "floordiv", new { x, y }), scope =>
+ return tf_with(ops.name_scope(name, "floordiv", new { x, y }), scope =>
{
return gen_math_ops.floor_div(x, y, scope);
});
@@ -527,7 +527,7 @@ namespace Tensorflow
{
Tensor result = null;
- with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope =>
+ tf_with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope =>
{
name = scope;
@@ -551,7 +551,7 @@ namespace Tensorflow
{
Tensor result = null;
- with(ops.name_scope(name, "MatMul", new Tensor[] { x, y }), scope =>
+ tf_with(ops.name_scope(name, "MatMul", new Tensor[] { x, y }), scope =>
{
name = scope;
@@ -576,7 +576,7 @@ namespace Tensorflow
if (dt.is_floating() || dt.is_integer())
return x;
- return with(ops.name_scope(name, "Conj", new List<Tensor> { x }), scope =>
+ return tf_with(ops.name_scope(name, "Conj", new List<Tensor> { x }), scope =>
{
return x;
@@ -591,7 +591,7 @@ namespace Tensorflow
public static Tensor _truediv_python3(Tensor x, Tensor y, string name = null)
{
- return with(ops.name_scope(name, "truediv", new { x, y }), scope =>
+ return tf_with(ops.name_scope(name, "truediv", new { x, y }), scope =>
{
name = scope;
var x_dtype = x.dtype.as_base_dtype();
@@ -34,7 +34,7 @@ namespace Tensorflow
float epsilon = 1e-12f,
string name = null)
{
- return with(ops.name_scope(name, "l2_normalize", new { x }), scope =>
+ return tf_with(ops.name_scope(name, "l2_normalize", new { x }), scope =>
{
x = ops.convert_to_tensor(x, name: "x");
var sq = math_ops.square(x);
@@ -57,7 +57,7 @@ namespace Tensorflow
string name = null,
bool keep_dims = false)
{
- return with(ops.name_scope(name, "moments", new { x, axes }), scope =>
+ return tf_with(ops.name_scope(name, "moments", new { x, axes }), scope =>
{
// The dynamic range of fp16 is too limited to support the collection of
// sufficient statistics. As a workaround we simply perform the operations
@@ -123,7 +123,7 @@ namespace Tensorflow
/// <returns>number of nonzero values with type dtype</returns>
private static Tensor _count_nonzero(Tensor input_tensor, TF_DataType dtype = TF_DataType.TF_INT64)
{
- return with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope =>
+ return tf_with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope =>
{
var zero = array_ops.zeros(new NumSharp.Shape(), dtype: input_tensor.dtype);
var nonzero_count = math_ops.reduce_sum(
@@ -140,7 +140,7 @@ namespace Tensorflow
/// <returns>The fraction of zeros in value, with type float32.</returns>
public static Tensor zero_fraction(Tensor value, string name = null)
{
- return with(ops.name_scope(name, "zero_fraction", new { value }), scope =>
+ return tf_with(ops.name_scope(name, "zero_fraction", new { value }), scope =>
{
value = ops.convert_to_tensor(value, name: "value");
Tensor size = array_ops.size(value, out_type: dtypes.int64);
@@ -153,7 +153,7 @@ namespace Tensorflow
() => _count_nonzero(value, dtype: dtypes.int64)
);
- with(ops.name_scope("counts_to_fraction"), count_scope =>
+ tf_with(ops.name_scope("counts_to_fraction"), count_scope =>
{
var num_zero = math_ops.subtract(math_ops.cast(size, TF_DataType.TF_INT64), num_nonzero);
var num_zero_float32 = math_ops.cast(num_zero, dtype: dtypes.float32);
@@ -50,7 +50,7 @@ namespace Tensorflow
string data_format = null,
string name = null)
{
- return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
+ return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
{
name = scope;
value = ops.convert_to_tensor(value, name: "input");
@@ -70,7 +70,7 @@ namespace Tensorflow
/// <returns></returns>
public static Tensor dropout_v2(Tensor x, Tensor rate, Tensor noise_shape = null, int? seed = null, string name = null)
{
- return with(ops.name_scope(name, "dropout", x), scope =>
+ return tf_with(ops.name_scope(name, "dropout", x), scope =>
{
name = scope;
x = ops.convert_to_tensor(x, name: "x");
@@ -134,7 +134,7 @@ namespace Tensorflow
/// <returns></returns>
public static Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null)
{
- return with(ops.name_scope(name, "MaxPool", value), scope =>
+ return tf_with(ops.name_scope(name, "MaxPool", value), scope =>
{
name = scope;
value = ops.convert_to_tensor(value, name: "input");
@@ -171,7 +171,7 @@ namespace Tensorflow
Tensor logits = null, string name = null)
{
// Reshape logits and labels to rank 2.
- return with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate
+ return tf_with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate
{
labels = ops.convert_to_tensor(labels);
logits = ops.convert_to_tensor(logits);
@@ -206,7 +206,7 @@ namespace Tensorflow
int axis = -1,
string name = null)
{
- return with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { logits, labels }), scope =>
+ return tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { logits, labels }), scope =>
{
name = scope;
var precise_logits = logits;
@@ -37,7 +37,7 @@ namespace Tensorflow
int? seed = null,
string name = null)
{
- return with(ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope =>
+ return tf_with(ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope =>
{
var shape_tensor = _ShapeTensor(shape);
var mean_tensor = ops.convert_to_tensor(mean, dtype: dtype, name: "mean");
@@ -67,7 +67,7 @@ namespace Tensorflow
int? seed = null,
string name = null)
{
- return with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope =>
+ return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope =>
{
name = scope;
var tensorShape = _ShapeTensor(shape);
@@ -85,7 +85,7 @@ namespace Tensorflow
int? seed = null,
string name = null)
{
- return with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope =>
+ return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope =>
{
name = scope;
var minTensor = ops.convert_to_tensor(minval, dtype: dtype, name: "min");
@@ -110,7 +110,7 @@ namespace Tensorflow
int? seed = null,
string name = null)
{
- return with(ops.name_scope(name, "truncated_normal", new { shape, mean, stddev }), scope =>
+ return tf_with(ops.name_scope(name, "truncated_normal", new { shape, mean, stddev }), scope =>
{
name = scope;
var shape_tensor = _ShapeTensor(shape);
@@ -22,7 +22,7 @@ namespace Tensorflow
{
public static Tensor broadcast_weights(Tensor weights, Tensor values)
{
- return with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope =>
+ return tf_with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope =>
{
values = ops.convert_to_tensor(values, name: "values");
weights = ops.convert_to_tensor(
@@ -76,7 +76,7 @@ namespace Tensorflow
}
[DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception
- public static void with(IPython py, Action<IPython> action)
+ public static void tf_with(IPython py, Action<IPython> action)
{
try
{
@@ -96,7 +96,7 @@ namespace Tensorflow
}
[DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception
- public static void with<T>(T py, Action<T> action) where T : IPython
+ public static void tf_with<T>(T py, Action<T> action) where T : IPython
{
try
{
@@ -116,7 +116,7 @@ namespace Tensorflow
}
[DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception
- public static TOut with<TIn, TOut>(TIn py, Func<TIn, TOut> action) where TIn : IPython
+ public static TOut tf_with<TIn, TOut>(TIn py, Func<TIn, TOut> action) where TIn : IPython
{
try
{
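These three overloads are the graph-building analogue of Python's `with` statement: enter the context object, run the delegate, and dispose the context even if the delegate throws. The rename to `tf_with` signals that the helper is now only for graph construction. Typical call shapes, as seen throughout this diff:

// Func overload: the scope's value is threaded out as the return value.
var y = tf_with(ops.name_scope(name, "zeros", shape), scope =>
{
    name = scope; // name_scope yields the resolved scope string
    return gen_array_ops.fill(shape, constant_op.constant(0f), name: name);
});

// Action overload: used purely for side effects inside the scope.
tf_with(ops.name_scope("Initializer"), scope2 =>
{
    _initial_value = ops.convert_to_tensor(initial_value, name: "initial_value");
});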
@@ -365,11 +365,11 @@ namespace Tensorflow
Dispose();
}
- protected override void DisposeUnManagedState()
+ protected override void DisposeUnManagedState(IntPtr handle)
{
using (var status = new Status())
{
- c_api.TF_DeleteSession(_handle, status);
+ c_api.TF_DeleteSession(handle, status);
status.Check(true);
}
}
@@ -32,8 +32,8 @@ namespace Tensorflow
_handle = handle;
}
- protected override void DisposeUnManagedState()
- => c_api.TF_DeleteSessionOptions(_handle);
+ protected override void DisposeUnManagedState(IntPtr handle)
+ => c_api.TF_DeleteSessionOptions(handle);
public void SetConfig(ConfigProto config)
{
@@ -65,7 +65,7 @@ namespace Tensorflow
return status._handle;
}
- protected override void DisposeUnManagedState()
- => c_api.TF_DeleteStatus(_handle);
+ protected override void DisposeUnManagedState(IntPtr handle)
+ => c_api.TF_DeleteStatus(handle);
}
}
@@ -55,7 +55,7 @@ namespace Tensorflow.Summaries
/// <returns></returns>
public Tensor merge(Tensor[] inputs, string[] collections = null, string name = null)
{
- return with(ops.name_scope(name, "Merge", inputs), delegate
+ return tf_with(ops.name_scope(name, "Merge", inputs), delegate
{
var val = gen_logging_ops.merge_summary(inputs: inputs, name: name);
collect(val, collections?.ToList(), new List<string>());
@@ -88,7 +88,7 @@ namespace Tensorflow.Summaries
public (string, string) summary_scope(string name, string family = null, string default_name = null, Tensor[] values = null)
{
string scope_base_name = string.IsNullOrEmpty(family) ? name : $"{family}/{name}";
- return with(ops.name_scope(scope_base_name, default_name: default_name, values), scope =>
+ return tf_with(ops.name_scope(scope_base_name, default_name: default_name, values), scope =>
{
var tag = scope._name_scope;
if (string.IsNullOrEmpty(family))
@@ -5,7 +5,7 @@
<AssemblyName>TensorFlow.NET</AssemblyName>
<RootNamespace>Tensorflow</RootNamespace>
<TargetTensorFlow>1.14.0</TargetTensorFlow>
- <Version>0.10.7.2</Version>
+ <Version>0.10.8</Version>
<Authors>Haiping Chen, Meinrad Recheis</Authors>
<Company>SciSharp STACK</Company>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
@@ -17,7 +17,7 @@
<PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#</PackageTags>
<Description>Google's TensorFlow full binding in .NET Standard.
Docs: https://tensorflownet.readthedocs.io</Description>
- <AssemblyVersion>0.10.7.2</AssemblyVersion>
+ <AssemblyVersion>0.10.8.0</AssemblyVersion>
<PackageReleaseNotes>Changes since v0.9.0:
1. Added fully connected Convolutional Neural Network example.
@@ -34,9 +34,10 @@ Docs: https://tensorflownet.readthedocs.io</Description>
12. Add Tensor operator overloads.
13. Fix default graph and operation issue when importing model.
14. Fix TF_String encode and decode.
- 15. Fix Tensor memory leak.</PackageReleaseNotes>
+ 15. Fix Tensor memory leak.
+ 16. Rename with to tf_with, which is only used for graph building.</PackageReleaseNotes>
<LangVersion>7.3</LangVersion>
- <FileVersion>0.10.7.2</FileVersion>
+ <FileVersion>0.10.8.0</FileVersion>
<PackageLicenseFile>LICENSE</PackageLicenseFile>
<PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance>
<SignAssembly>true</SignAssembly>
@@ -123,7 +123,7 @@ namespace Tensorflow
dtype = tr.dtype.as_base_dtype();
var namescope = ops.name_scope(null, name, new { x, y });
- return with(namescope, scope =>
+ return tf_with(namescope, scope =>
{
Tensor result = null;
var x1 = ops.convert_to_tensor(x, dtype: dtype, name: "x");
@@ -300,7 +300,7 @@ namespace Tensorflow
index += 1;
}
- return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope =>
+ return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope =>
{
string name = scope;
if (begin != null)
@@ -349,7 +349,7 @@ namespace Tensorflow
index += 1;
}
- return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope =>
+ return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope =>
{
string name = scope;
if (begin != null)
@@ -392,8 +392,13 @@ namespace Tensorflow
return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}";
}
- protected override void DisposeUnManagedState()
- => c_api.TF_DeleteTensor(_handle);
+ protected override void DisposeUnManagedState(IntPtr handle)
+ {
+ if(handle != IntPtr.Zero)
+ {
+ c_api.TF_DeleteTensor(handle);
+ }
+ }
public bool IsDisposed
{
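With the handle passed in by the base class and the added `IntPtr.Zero` guard, redundant disposal of a tensor is harmless. A short usage sketch, assuming the base class zeroes `_handle` after the first call:

var t = tf.constant(1.0f);
t.Dispose();
t.Dispose(); // no-op: _handle was zeroed, so TF_DeleteTensor is never called twice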
@@ -81,7 +81,7 @@ namespace Tensorflow.Train
var m = get_slot(var, "m");
var m_scaled_g_values = grad * (1 - beta1_t);
var m_t = state_ops.assign(m, m * beta1_t, use_locking: _use_locking);
- with(ops.control_dependencies(new[] { m_t }), delegate
+ tf_with(ops.control_dependencies(new[] { m_t }), delegate
{
m_t = scatter_add(m, indices, m_scaled_g_values);
});
@@ -89,7 +89,7 @@ namespace Tensorflow.Train
var v = get_slot(var, "v");
var v_scaled_g_values = (grad * grad) * (1 - beta2_t);
var v_t = state_ops.assign(v, v * beta2_t, use_locking: _use_locking);
- with(ops.control_dependencies(new[] { v_t }), delegate
+ tf_with(ops.control_dependencies(new[] { v_t }), delegate
{
v_t = scatter_add(v, indices, v_scaled_g_values);
});
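The two control-dependency blocks above encode the ordering Adam needs for sparse gradients: the accumulator is first decayed by `assign`, and only after that does `scatter_add` fold the new gradient into the touched rows; reassigning `m_t` (and `v_t`) inside the delegate makes the scattered result the value downstream ops depend on. In update-rule form, restricted to the rows in `indices`: m_t = beta1 * m + (1 - beta1) * g and v_t = beta2 * v + (1 - beta2) * g^2.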
@@ -117,7 +117,7 @@ namespace Tensorflow.Train
var operations = new List<ITensorOrOperation>();
operations.AddRange(update_ops);
- with(ops.control_dependencies(update_ops), delegate
+ tf_with(ops.control_dependencies(update_ops), delegate
{
var (beta1_power, beta2_power) = _get_beta_accumulators();
ops.colocate_with(beta1_power);
@@ -151,7 +151,7 @@ namespace Tensorflow
_create_slots(var_list);
var update_ops = new List<Operation>();
- return with(ops.name_scope(name, Name), scope =>
+ return tf_with(ops.name_scope(name, Name), scope =>
{
name = scope;
_prepare();
@@ -162,7 +162,7 @@ namespace Tensorflow
continue;
var scope_name = var.op.name;
- with(ops.name_scope("update_" + scope_name), scope2 =>
+ tf_with(ops.name_scope("update_" + scope_name), scope2 =>
{
var op = processor.update_op(this, grad);
update_ops.Add(op);
@@ -176,7 +176,7 @@ namespace Tensorflow
}
else
{
- with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep =>
+ tf_with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep =>
{
ops.colocate_with(global_step);
// TODO: port this if branch once ResourceVariable has been ported!
@@ -102,7 +102,7 @@ namespace Tensorflow
Tensor save_tensor = null;
Operation restore_op = null;
- return with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope =>
+ return tf_with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope =>
{
name = scope;
@@ -57,7 +57,7 @@ namespace Tensorflow.Train
{
var validate_shape = shape.is_fully_defined();
var prefix = primary.op.name;
- return with(new variable_scope(string.Empty, prefix + "/" + name), delegate
+ return tf_with(new variable_scope(string.Empty, prefix + "/" + name), delegate
{
return _create_slot_var(primary, initializer, "", validate_shape, shape, dtype);
});
@@ -32,7 +32,7 @@ namespace Tensorflow
private static Tensor op_helper<T>(string default_name, RefVariable x, T y)
{
var tensor1 = x.value();
- return with(ops.name_scope(null, default_name, new { tensor1, y }), scope => {
+ return tf_with(ops.name_scope(null, default_name, new { tensor1, y }), scope => {
var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y");
return gen_math_ops.add(tensor1, tensor2, scope);
});
@@ -134,7 +134,7 @@ namespace Tensorflow
ops.init_scope();
var values = init_from_fn ? new object[0] : new object[] { initial_value };
- with(ops.name_scope(name, "Variable", values), scope =>
+ tf_with(ops.name_scope(name, "Variable", values), scope =>
{
name = scope;
if (init_from_fn)
@@ -148,7 +148,7 @@ namespace Tensorflow
List = new AttrValue.Types.ListValue()
};
attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{true_name}"));
- with(ops.name_scope("Initializer"), scope2 =>
+ tf_with(ops.name_scope("Initializer"), scope2 =>
{
_initial_value = (initial_value as Func<Tensor>)();
_initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype);
@@ -56,7 +56,7 @@ namespace Tensorflow
VariableAggregation aggregation= VariableAggregation.None)
{
string full_name = !string.IsNullOrEmpty(this.name) ? this.name + "/" + name : name;
- return with(ops.name_scope(null), scope =>
+ return tf_with(ops.name_scope(null), scope =>
{
if (dtype == TF_DataType.DtInvalid)
dtype = _dtype;
@@ -295,7 +295,7 @@ namespace Tensorflow
// inner_device_stack = default_graph._device_function_stack
// var outer_context = default_graph.as_default;
- with(ops.control_dependencies(null), delegate
+ tf_with(ops.control_dependencies(null), delegate
{
var outer_graph = get_default_graph();
// outer_device_stack = None
@@ -77,7 +77,7 @@ let run()=
let init = tf.global_variables_initializer()
- Tensorflow.Python.``with``(tf.Session(), fun (sess:Session) ->
+ Tensorflow.Python.``tf_with``(tf.Session(), fun (sess:Session) ->
sess.run(init) |> ignore
// Loop over epochs
for epoch in [0..training_epochs] do
@@ -52,10 +52,10 @@ namespace TensorFlowNET.Examples
{
PrepareData();
var graph = ImportGraph();
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
Train(sess);
- });
+ }
return accuray_test > 0.70;
}
@@ -71,7 +71,7 @@ namespace TensorFlowNET.Examples
var init = tf.global_variables_initializer();
// Start training
- return with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);
@@ -114,7 +114,7 @@ namespace TensorFlowNET.Examples
Console.WriteLine($"Absolute mean square loss difference: {diff}");
return diff < 0.01;
- });
+ }
}
public void PrepareData()
@@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples
var sw = new Stopwatch();
- return with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);
@@ -119,7 +119,7 @@ namespace TensorFlowNET.Examples
print($"Accuracy: {acc.ToString("F4")}");
return acc > 0.9;
- });
+ }
}
public void PrepareData()
@@ -48,14 +48,14 @@ namespace TensorFlowNET.Examples
float y_max = X.amax(0).Data<float>()[1] + 0.5f;
var (xx, yy) = np.meshgrid(np.linspace(x_min, x_max, 30), np.linspace(y_min, y_max, 30));
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
//var samples = np.vstack<float>(xx.ravel(), yy.ravel());
//samples = np.transpose(samples);
var array = np.Load<double[,]>(Path.Join("nb", "nb_example.npy"));
var samples = np.array(array).astype(np.float32);
var Z = sess.run(predict(samples));
- });
+ }
return true;
}
@@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples
float accuracy = 0f;
// Initialize the variables (i.e. assign their default value)
var init = tf.global_variables_initializer();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
// Run the initializer
sess.run(init);
@@ -77,7 +77,7 @@ namespace TensorFlowNET.Examples
}
print($"Accuracy: {accuracy}");
- });
+ }
return accuracy > 0.8;
}
@@ -90,7 +90,7 @@ namespace TensorFlowNET.Examples
var init = tf.global_variables_initializer();
float loss_value = 0;
// Start tf session
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
sess.run(init);
var step = 0;
@@ -110,7 +110,7 @@ namespace TensorFlowNET.Examples
Console.WriteLine($"Step {step} loss: {loss_value}");
}
Console.WriteLine($"Final loss: {loss_value}");
- });
+ }
return loss_value;
}
@@ -128,7 +128,7 @@ namespace TensorFlowNET.Examples
float loss_value = 0;
// Start tf session
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
sess.run(init);
var step = 0;
@@ -143,7 +143,7 @@ namespace TensorFlowNET.Examples
Console.WriteLine($"Step {step} loss: {loss_value}");
}
Console.WriteLine($"Final loss: {loss_value}");
- });
+ }
return loss_value;
}
@@ -134,7 +134,7 @@ namespace TensorFlowNET.Examples
3, 3, 2));
var batchMul = tf.batch_matmul(firstTensor, secondTensor);
var checkTensor = np.array<float>(0, 6, 0, 15, 0, 24, 3, 1, 6, 4, 9, 7, 6, 0, 15, 0, 24, 0);
- return with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
var result = sess.run(batchMul);
Console.WriteLine(result.ToString());
@@ -152,7 +152,7 @@ namespace TensorFlowNET.Examples
// [24, 0]]])
return np.reshape(result, 18)
.array_equal(checkTensor);
- });
+ }
}
public void PrepareData()
@@ -25,13 +25,13 @@ namespace TensorFlowNET.Examples
var hello = tf.constant(str);
// Start tf session
- return with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
// Run the op
var result = sess.run(hello);
Console.WriteLine(result.ToString());
return result.ToString().Equals(str);
- });
+ }
}
public void PrepareData()
@@ -79,11 +79,11 @@ namespace TensorFlowNET.Examples
PrepareData();
BuildGraph();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
Train(sess);
Test(sess);
- });
+ }
return loss_test < 0.05 && accuracy_test > 0.98;
}
@@ -92,7 +92,7 @@ namespace TensorFlowNET.Examples
{
var graph = new Graph().as_default();
- with(tf.name_scope("Input"), delegate
+ tf_with(tf.name_scope("Input"), delegate
{
// Placeholders for inputs (x) and outputs(y)
x = tf.placeholder(tf.float32, shape: (-1, img_h, img_w, n_channels), name: "X");
@@ -107,25 +107,25 @@ namespace TensorFlowNET.Examples
var fc1 = fc_layer(layer_flat, h1, "FC1", use_relu: true);
var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false);
- with(tf.variable_scope("Train"), delegate
+ tf_with(tf.variable_scope("Train"), delegate
{
- with(tf.variable_scope("Loss"), delegate
+ tf_with(tf.variable_scope("Loss"), delegate
{
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits), name: "loss");
});
- with(tf.variable_scope("Optimizer"), delegate
+ tf_with(tf.variable_scope("Optimizer"), delegate
{
optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss);
});
- with(tf.variable_scope("Accuracy"), delegate
+ tf_with(tf.variable_scope("Accuracy"), delegate
{
var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred");
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy");
});
- with(tf.variable_scope("Prediction"), delegate
+ tf_with(tf.variable_scope("Prediction"), delegate
{
cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions");
});
@@ -204,7 +204,7 @@ namespace TensorFlowNET.Examples
/// <returns>The output array</returns>
private Tensor conv_layer(Tensor x, int filter_size, int num_filters, int stride, string name)
{
- return with(tf.variable_scope(name), delegate {
+ return tf_with(tf.variable_scope(name), delegate {
var num_in_channel = x.shape[x.NDims - 1];
var shape = new[] { filter_size, filter_size, num_in_channel, num_filters };
@@ -244,7 +244,7 @@ namespace TensorFlowNET.Examples
/// <returns>flattened array</returns>
private Tensor flatten_layer(Tensor layer)
{
- return with(tf.variable_scope("Flatten_layer"), delegate
+ return tf_with(tf.variable_scope("Flatten_layer"), delegate
{
var layer_shape = layer.TensorShape;
var num_features = layer_shape[new Slice(1, 4)].size;
@@ -293,7 +293,7 @@ namespace TensorFlowNET.Examples
/// <returns>The output array</returns>
private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true)
{
- return with(tf.variable_scope(name), delegate
+ return tf_with(tf.variable_scope(name), delegate
{
var in_dim = x.shape[1];
@@ -59,11 +59,11 @@ namespace TensorFlowNET.Examples
PrepareData();
BuildGraph();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
Train(sess);
Test(sess);
- });
+ }
return loss_test < 0.09 && accuracy_test > 0.95;
}
@@ -64,11 +64,11 @@ namespace TensorFlowNET.Examples
PrepareData();
BuildGraph();
- with(tf.Session(), sess =>
+ using (var sess = tf.Session())
{
Train(sess);
Test(sess);
- });
+ }
return loss_test < 0.09 && accuracy_test > 0.95;
}
@@ -32,11 +32,11 @@ namespace TensorFlowNET.Examples
Tensor output = graph.OperationByName("SemanticPredictions");
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
// Runs inference on a single image.
sess.run(output, new FeedItem(output, "[np.asarray(resized_image)]"));
- });
+ }
return false;
}
@@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples
var result_labels = new List<string>();
var sw = new Stopwatch();
- with(tf.Session(graph), sess =>
+ using (var sess = tf.Session(graph))
{
foreach (var nd in file_ndarrays)
{
@@ -58,7 +58,7 @@ namespace TensorFlowNET.Examples
Console.WriteLine($"{labels[idx]} {results[idx]} in {sw.ElapsedMilliseconds}ms", Color.Tan);
result_labels.Add(labels[idx]);
}
- });
+ }
return result_labels.Contains("military uniform");
}
@@ -69,19 +69,19 @@ namespace TensorFlowNET.Examples | |||
int input_mean = 117, | |||
int input_std = 1) | |||
{ | |||
return with(tf.Graph().as_default(), graph => | |||
{ | |||
var file_reader = tf.read_file(file_name, "file_reader"); | |||
var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); | |||
var cast = tf.cast(decodeJpeg, tf.float32); | |||
var dims_expander = tf.expand_dims(cast, 0); | |||
var resize = tf.constant(new int[] { input_height, input_width }); | |||
var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||
var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||
var normalized = tf.divide(sub, new float[] { input_std }); | |||
return with(tf.Session(graph), sess => sess.run(normalized)); | |||
}); | |||
var graph = tf.Graph().as_default(); | |||
var file_reader = tf.read_file(file_name, "file_reader"); | |||
var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); | |||
var cast = tf.cast(decodeJpeg, tf.float32); | |||
var dims_expander = tf.expand_dims(cast, 0); | |||
var resize = tf.constant(new int[] { input_height, input_width }); | |||
var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||
var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||
var normalized = tf.divide(sub, new float[] { input_std }); | |||
using (var sess = tf.Session(graph)) | |||
return sess.run(normalized); | |||
} | |||
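Note the ordering in `using (var sess = tf.Session(graph)) return sess.run(normalized);` — the run result is materialized first, `Dispose()` fires in the implicit finally, and only then does control leave the method. The same shape recurs in the ReadTensorFromImageFile variants below, and it assumes the returned NDArray owns or has copied its buffer rather than pointing into session-owned memory. Conceptual expansion (sketch):

    var sess = tf.Session(graph);
    try
    {
        return sess.run(normalized); // NDArray is computed while sess is alive
    }
    finally
    {
        sess.Dispose();              // runs before the method actually returns
    }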
public void PrepareData() | |||
@@ -45,9 +45,12 @@ namespace TensorFlowNET.Examples | |||
var input_operation = graph.get_operation_by_name(input_name); | |||
var output_operation = graph.get_operation_by_name(output_name); | |||
var results = with(tf.Session(graph), | |||
sess => sess.run(output_operation.outputs[0], | |||
new FeedItem(input_operation.outputs[0], nd))); | |||
NDArray results; | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
results = sess.run(output_operation.outputs[0], | |||
new FeedItem(input_operation.outputs[0], nd)); | |||
} | |||
results = np.squeeze(results); | |||
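Where the old helper returned the lambda's value directly, this rewrite hoists the local out of the block instead, so the value stays usable after disposal. The generic shape (`someTensor` is a placeholder name; the pattern is what matters):

    NDArray result;
    using (var sess = tf.Session(graph))
    {
        result = sess.run(someTensor); // assigned while the session is alive
    }
    // result remains valid here, after sess.Dispose() has run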
@@ -69,19 +72,19 @@ namespace TensorFlowNET.Examples | |||
int input_mean = 0, | |||
int input_std = 255) | |||
{ | |||
return with(tf.Graph().as_default(), graph => | |||
{ | |||
var file_reader = tf.read_file(file_name, "file_reader"); | |||
var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||
var caster = tf.cast(image_reader, tf.float32); | |||
var dims_expander = tf.expand_dims(caster, 0); | |||
var resize = tf.constant(new int[] { input_height, input_width }); | |||
var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||
var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||
var normalized = tf.divide(sub, new float[] { input_std }); | |||
return with(tf.Session(graph), sess => sess.run(normalized)); | |||
}); | |||
var graph = tf.Graph().as_default(); | |||
var file_reader = tf.read_file(file_name, "file_reader"); | |||
var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||
var caster = tf.cast(image_reader, tf.float32); | |||
var dims_expander = tf.expand_dims(caster, 0); | |||
var resize = tf.constant(new int[] { input_height, input_width }); | |||
var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||
var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||
var normalized = tf.divide(sub, new float[] { input_std }); | |||
using (var sess = tf.Session(graph)) | |||
return sess.run(normalized); | |||
} | |||
public void PrepareData() | |||
@@ -51,7 +51,8 @@ namespace TensorFlowNET.Examples | |||
var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | |||
with(tf.Session(graph), sess => Predict(sess)); | |||
using (var sess = tf.Session(graph)) | |||
Predict(sess); | |||
return true; | |||
} | |||
@@ -101,14 +102,15 @@ namespace TensorFlowNET.Examples | |||
private NDArray ReadTensorFromImageFile(string file_name) | |||
{ | |||
return with(tf.Graph().as_default(), graph => | |||
{ | |||
var file_reader = tf.read_file(file_name, "file_reader"); | |||
var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); | |||
var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8); | |||
var dims_expander = tf.expand_dims(casted, 0); | |||
return with(tf.Session(graph), sess => sess.run(dims_expander)); | |||
}); | |||
var graph = tf.Graph().as_default(); | |||
var file_reader = tf.read_file(file_name, "file_reader"); | |||
var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); | |||
var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8); | |||
var dims_expander = tf.expand_dims(casted, 0); | |||
using (var sess = tf.Session(graph)) | |||
return sess.run(dims_expander); | |||
} | |||
private void buildOutputImage(NDArray[] resultArr) | |||
@@ -92,10 +92,10 @@ namespace TensorFlowNET.Examples | |||
var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | |||
with(tf.Session(graph), sess => | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
Train(sess); | |||
}); | |||
} | |||
return test_accuracy > 0.75f; | |||
} | |||
@@ -141,20 +141,18 @@ namespace TensorFlowNET.Examples | |||
Tensor evaluation_step = null; | |||
Tensor prediction = null; | |||
with(eval_graph.as_default(), graph => | |||
{ | |||
// Add the new layer for exporting. | |||
var (_, _, bottleneck_input, ground_truth_input, final_tensor) = | |||
add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, | |||
wants_quantization, is_training: false); | |||
var graph = eval_graph.as_default(); | |||
// Add the new layer for exporting. | |||
var (_, _, bottleneck_input, ground_truth_input, final_tensor) = | |||
add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, | |||
wants_quantization, is_training: false); | |||
// Now we need to restore the values from the training graph to the eval | |||
// graph. | |||
tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME); | |||
// Now we need to restore the values from the training graph to the eval | |||
// graph. | |||
tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME); | |||
(evaluation_step, prediction) = add_evaluation_step(final_tensor, | |||
ground_truth_input); | |||
}); | |||
(evaluation_step, prediction) = add_evaluation_step(final_tensor, | |||
ground_truth_input); | |||
return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input, | |||
evaluation_step, prediction); | |||
@@ -180,7 +178,7 @@ namespace TensorFlowNET.Examples | |||
Tensor bottleneck_tensor, bool quantize_layer, bool is_training) | |||
{ | |||
var (batch_size, bottleneck_tensor_size) = (bottleneck_tensor.TensorShape.dims[0], bottleneck_tensor.TensorShape.dims[1]); | |||
with(tf.name_scope("input"), scope => | |||
tf_with(tf.name_scope("input"), scope => | |||
{ | |||
bottleneck_input = tf.placeholder_with_default( | |||
bottleneck_tensor, | |||
@@ -193,10 +191,10 @@ namespace TensorFlowNET.Examples | |||
// Organizing the following ops so they are easier to see in TensorBoard. | |||
string layer_name = "final_retrain_ops"; | |||
Tensor logits = null; | |||
with(tf.name_scope(layer_name), scope => | |||
tf_with(tf.name_scope(layer_name), scope => | |||
{ | |||
RefVariable layer_weights = null; | |||
with(tf.name_scope("weights"), delegate | |||
tf_with(tf.name_scope("weights"), delegate | |||
{ | |||
var initial_value = tf.truncated_normal(new int[] { bottleneck_tensor_size, class_count }, stddev: 0.001f); | |||
layer_weights = tf.Variable(initial_value, name: "final_weights"); | |||
@@ -204,13 +202,13 @@ namespace TensorFlowNET.Examples | |||
}); | |||
RefVariable layer_biases = null; | |||
with(tf.name_scope("biases"), delegate | |||
tf_with(tf.name_scope("biases"), delegate | |||
{ | |||
layer_biases = tf.Variable(tf.zeros(new TensorShape(class_count)), name: "final_biases"); | |||
variable_summaries(layer_biases); | |||
}); | |||
with(tf.name_scope("Wx_plus_b"), delegate | |||
tf_with(tf.name_scope("Wx_plus_b"), delegate | |||
{ | |||
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases; | |||
tf.summary.histogram("pre_activations", logits); | |||
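Nested `tf.name_scope` calls compose their prefixes, which is what makes the retrain ops group cleanly in TensorBoard. Assuming `tf.Variable` honours the enclosing name scope the way upstream TensorFlow does, a variable created in nested scopes like the ones above should surface with a fully qualified name (illustrative sketch with a small shape):

    tf_with(tf.name_scope("final_retrain_ops"), scope =>
    {
        tf_with(tf.name_scope("weights"), delegate
        {
            var w = tf.Variable(tf.truncated_normal(new[] { 4, 2 }, stddev: 0.001f),
                name: "final_weights");
            // expected full name: "final_retrain_ops/weights/final_weights:0"
        });
    });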
@@ -239,7 +237,7 @@ namespace TensorFlowNET.Examples | |||
return (null, null, bottleneck_input, ground_truth_input, final_tensor); | |||
Tensor cross_entropy_mean = null; | |||
with(tf.name_scope("cross_entropy"), delegate | |||
tf_with(tf.name_scope("cross_entropy"), delegate | |||
{ | |||
cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy( | |||
labels: ground_truth_input, logits: logits); | |||
@@ -247,7 +245,7 @@ namespace TensorFlowNET.Examples | |||
tf.summary.scalar("cross_entropy", cross_entropy_mean); | |||
with(tf.name_scope("train"), delegate | |||
tf_with(tf.name_scope("train"), delegate | |||
{ | |||
var optimizer = tf.train.GradientDescentOptimizer(learning_rate); | |||
train_step = optimizer.minimize(cross_entropy_mean); | |||
@@ -259,12 +257,12 @@ namespace TensorFlowNET.Examples | |||
private void variable_summaries(RefVariable var) | |||
{ | |||
with(tf.name_scope("summaries"), delegate | |||
tf_with(tf.name_scope("summaries"), delegate | |||
{ | |||
var mean = tf.reduce_mean(var); | |||
tf.summary.scalar("mean", mean); | |||
Tensor stddev = null; | |||
with(tf.name_scope("stddev"), delegate | |||
tf_with(tf.name_scope("stddev"), delegate | |||
{ | |||
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))); | |||
}); | |||
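For reference, the "stddev" scope computes the population standard deviation, sqrt(mean((var - mean)^2)), so each summarized variable gets both its mean and its spread logged as TensorBoard scalars.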
@@ -279,7 +277,7 @@ namespace TensorFlowNET.Examples | |||
{ | |||
var (height, width) = (299, 299); | |||
return with(tf.Graph().as_default(), graph => | |||
return tf_with(tf.Graph().as_default(), graph => | |||
{ | |||
tf.train.import_meta_graph("graph/InceptionV3.meta"); | |||
Tensor resized_input_tensor = graph.OperationByName("Placeholder"); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3)); | |||
@@ -350,15 +348,15 @@ namespace TensorFlowNET.Examples | |||
{ | |||
Tensor evaluation_step = null, correct_prediction = null, prediction = null; | |||
with(tf.name_scope("accuracy"), scope => | |||
tf_with(tf.name_scope("accuracy"), scope => | |||
{ | |||
with(tf.name_scope("correct_prediction"), delegate | |||
tf_with(tf.name_scope("correct_prediction"), delegate | |||
{ | |||
prediction = tf.argmax(result_tensor, 1); | |||
correct_prediction = tf.equal(prediction, ground_truth_tensor); | |||
}); | |||
with(tf.name_scope("accuracy"), delegate | |||
tf_with(tf.name_scope("accuracy"), delegate | |||
{ | |||
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); | |||
}); | |||
@@ -596,7 +594,7 @@ namespace TensorFlowNET.Examples | |||
create_module_graph(); | |||
// Add the new layer that we'll be training. | |||
with(graph.as_default(), delegate | |||
tf_with(graph.as_default(), delegate | |||
{ | |||
(train_step, cross_entropy, bottleneck_input, | |||
ground_truth_input, final_tensor) = add_final_retrain_ops( | |||
@@ -745,13 +743,13 @@ namespace TensorFlowNET.Examples | |||
Tensor input = graph.OperationByName("Placeholder"); | |||
Tensor output = graph.OperationByName("final_result"); | |||
with(tf.Session(graph), sess => | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
var result = sess.run(output, new FeedItem(input, fileBytes)); | |||
var prob = np.squeeze(result); | |||
var idx = np.argmax(prob); | |||
print($"Prediction result: [{labels[idx]} {prob[idx][0]}] for {img_path}."); | |||
}); | |||
} | |||
} | |||
private NDArray ReadTensorFromImageFile(string file_name, | |||
@@ -760,19 +758,19 @@ namespace TensorFlowNET.Examples | |||
int input_mean = 0, | |||
int input_std = 255) | |||
{ | |||
return with(tf.Graph().as_default(), graph => | |||
{ | |||
var file_reader = tf.read_file(file_name, "file_reader"); | |||
var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||
var caster = tf.cast(image_reader, tf.float32); | |||
var dims_expander = tf.expand_dims(caster, 0); | |||
var resize = tf.constant(new int[] { input_height, input_width }); | |||
var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||
var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||
var normalized = tf.divide(sub, new float[] { input_std }); | |||
return with(tf.Session(graph), sess => sess.run(normalized)); | |||
}); | |||
var graph = tf.Graph().as_default(); | |||
var file_reader = tf.read_file(file_name, "file_reader"); | |||
var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||
var caster = tf.cast(image_reader, tf.float32); | |||
var dims_expander = tf.expand_dims(caster, 0); | |||
var resize = tf.constant(new int[] { input_height, input_width }); | |||
var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||
var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||
var normalized = tf.divide(sub, new float[] { input_std }); | |||
using (var sess = tf.Session(graph)) | |||
return sess.run(normalized); | |||
} | |||
public void Test(Session sess_) | |||
@@ -783,7 +781,7 @@ namespace TensorFlowNET.Examples | |||
var graph = Graph.ImportFromPB(output_graph); | |||
var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding(); | |||
with(tf.Session(graph), sess => | |||
tf_with(tf.Session(graph), sess => | |||
{ | |||
(test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists, | |||
jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, | |||
@@ -64,7 +64,9 @@ namespace TensorFlowNET.Examples | |||
{ | |||
PrepareData(); | |||
var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | |||
with(tf.Session(graph), sess => Train(sess)); | |||
using (var sess = tf.Session(graph)) | |||
Train(sess); | |||
return max_accuracy > 0.9; | |||
} | |||
@@ -63,7 +63,7 @@ namespace TensorFlowNET.Examples.Text.NER | |||
var init = tf.global_variables_initializer(); | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
sess.run(init); | |||
@@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples.Text.NER | |||
loss_value = run_epoch(sess, train, dev, epoch); | |||
print($"train loss: {loss_value}"); | |||
} | |||
}); | |||
} | |||
return loss_value < 0.1; | |||
} | |||
@@ -66,7 +66,7 @@ namespace TensorFlowNET.Examples | |||
// Initialize the variables (i.e. assign their default value) | |||
var init = tf.global_variables_initializer(); | |||
with(tf.Session(graph), sess => | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
// Run the initializer | |||
sess.run(init); | |||
@@ -112,7 +112,7 @@ namespace TensorFlowNET.Examples | |||
} | |||
} | |||
} | |||
}); | |||
} | |||
return average_loss < 100; | |||
} | |||
@@ -26,7 +26,7 @@ namespace TensorFlowNET.Examples.Text | |||
Tensor conv3 = null, conv4 = null, conv5 = null, conv6 = null; | |||
Tensor h_pool = null; | |||
with(tf.name_scope("conv-maxpool-1"), delegate | |||
tf_with(tf.name_scope("conv-maxpool-1"), delegate | |||
{ | |||
var conv1 = tf.layers.conv2d(x_expanded, | |||
filters: num_filters, | |||
@@ -40,7 +40,7 @@ namespace TensorFlowNET.Examples.Text | |||
pool1 = tf.transpose(pool1, new[] { 0, 1, 3, 2 }); | |||
}); | |||
with(tf.name_scope("conv-maxpool-2"), delegate | |||
tf_with(tf.name_scope("conv-maxpool-2"), delegate | |||
{ | |||
var conv2 = tf.layers.conv2d(pool1, | |||
filters: num_filters, | |||
@@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples.Text | |||
pool2 = tf.transpose(pool2, new[] { 0, 1, 3, 2 }); | |||
}); | |||
with(tf.name_scope("conv-3"), delegate | |||
tf_with(tf.name_scope("conv-3"), delegate | |||
{ | |||
conv3 = tf.layers.conv2d(pool2, | |||
filters: num_filters, | |||
@@ -64,7 +64,7 @@ namespace TensorFlowNET.Examples.Text | |||
conv3 = tf.transpose(conv3, new[] { 0, 1, 3, 2 }); | |||
}); | |||
with(tf.name_scope("conv-4"), delegate | |||
tf_with(tf.name_scope("conv-4"), delegate | |||
{ | |||
conv4 = tf.layers.conv2d(conv3, | |||
filters: num_filters, | |||
@@ -74,7 +74,7 @@ namespace TensorFlowNET.Examples.Text | |||
conv4 = tf.transpose(conv4, new[] { 0, 1, 3, 2 }); | |||
}); | |||
with(tf.name_scope("conv-5"), delegate | |||
tf_with(tf.name_scope("conv-5"), delegate | |||
{ | |||
conv5 = tf.layers.conv2d(conv4, | |||
filters: num_filters, | |||
@@ -84,7 +84,7 @@ namespace TensorFlowNET.Examples.Text | |||
conv5 = tf.transpose(conv5, new[] { 0, 1, 3, 2 }); | |||
}); | |||
with(tf.name_scope("conv-maxpool-6"), delegate | |||
tf_with(tf.name_scope("conv-maxpool-6"), delegate | |||
{ | |||
conv6 = tf.layers.conv2d(conv5, | |||
filters: num_filters, | |||
@@ -105,7 +105,7 @@ namespace TensorFlowNET.Examples.Text | |||
Tensor logits = null; | |||
Tensor predictions = null; | |||
with(tf.name_scope("fc-1"), delegate | |||
tf_with(tf.name_scope("fc-1"), delegate | |||
{ | |||
fc1_out = tf.layers.dense(h_pool, | |||
1024, | |||
@@ -113,7 +113,7 @@ namespace TensorFlowNET.Examples.Text | |||
kernel_initializer: kernel_initializer); | |||
}); | |||
with(tf.name_scope("fc-2"), delegate | |||
tf_with(tf.name_scope("fc-2"), delegate | |||
{ | |||
fc2_out = tf.layers.dense(fc1_out, | |||
1024, | |||
@@ -121,7 +121,7 @@ namespace TensorFlowNET.Examples.Text | |||
kernel_initializer: kernel_initializer); | |||
}); | |||
with(tf.name_scope("fc-3"), delegate | |||
tf_with(tf.name_scope("fc-3"), delegate | |||
{ | |||
logits = tf.layers.dense(fc2_out, | |||
num_class, | |||
@@ -129,7 +129,7 @@ namespace TensorFlowNET.Examples.Text | |||
predictions = tf.argmax(logits, -1, output_type: tf.int32); | |||
}); | |||
with(tf.name_scope("loss"), delegate | |||
tf_with(tf.name_scope("loss"), delegate | |||
{ | |||
var y_one_hot = tf.one_hot(y, num_class); | |||
var loss = tf.reduce_mean( | |||
@@ -137,7 +137,7 @@ namespace TensorFlowNET.Examples.Text | |||
var optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step: global_step); | |||
}); | |||
with(tf.name_scope("accuracy"), delegate | |||
tf_with(tf.name_scope("accuracy"), delegate | |||
{ | |||
var correct_predictions = tf.equal(predictions, y); | |||
var accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name: "accuracy"); | |||
@@ -41,7 +41,7 @@ namespace TensorFlowNET.Examples.Text | |||
global_step = tf.Variable(0, trainable: false); | |||
// Embedding Layer | |||
with(tf.name_scope("embedding"), delegate | |||
tf_with(tf.name_scope("embedding"), delegate | |||
{ | |||
var init_embeddings = tf.random_uniform(new int[] { alphabet_size, embedding_size }, -1.0f, 1.0f); | |||
embeddings = tf.get_variable("embeddings", initializer: init_embeddings); | |||
@@ -59,7 +59,7 @@ namespace TensorFlowNET.Examples.Text | |||
Tensor fc2_out = null; | |||
// First Convolution Layer | |||
with(tf.variable_scope("conv-0"), delegate | |||
tf_with(tf.variable_scope("conv-0"), delegate | |||
{ | |||
conv0 = tf.layers.conv2d(x_expanded, | |||
filters: num_filters[0], | |||
@@ -70,25 +70,25 @@ namespace TensorFlowNET.Examples.Text | |||
conv0 = tf.transpose(conv0, new int[] { 0, 1, 3, 2 }); | |||
}); | |||
with(tf.name_scope("conv-block-1"), delegate { | |||
tf_with(tf.name_scope("conv-block-1"), delegate { | |||
conv1 = conv_block(conv0, 1); | |||
}); | |||
with(tf.name_scope("conv-block-2"), delegate { | |||
tf_with(tf.name_scope("conv-block-2"), delegate { | |||
conv2 = conv_block(conv1, 2); | |||
}); | |||
with(tf.name_scope("conv-block-3"), delegate { | |||
tf_with(tf.name_scope("conv-block-3"), delegate { | |||
conv3 = conv_block(conv2, 3); | |||
}); | |||
with(tf.name_scope("conv-block-4"), delegate | |||
tf_with(tf.name_scope("conv-block-4"), delegate | |||
{ | |||
conv4 = conv_block(conv3, 4, max_pool: false); | |||
}); | |||
// ============= k-max Pooling ============= | |||
with(tf.name_scope("k-max-pooling"), delegate | |||
tf_with(tf.name_scope("k-max-pooling"), delegate | |||
{ | |||
var h = tf.transpose(tf.squeeze(conv4, new int[] { -1 }), new int[] { 0, 2, 1 }); | |||
var top_k = tf.nn.top_k(h, k: 8, sorted: false)[0]; | |||
@@ -96,30 +96,30 @@ namespace TensorFlowNET.Examples.Text | |||
}); | |||
// ============= Fully Connected Layers ============= | |||
with(tf.name_scope("fc-1"), scope => | |||
tf_with(tf.name_scope("fc-1"), scope => | |||
{ | |||
fc1_out = tf.layers.dense(h_flat, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer); | |||
}); | |||
with(tf.name_scope("fc-2"), scope => | |||
tf_with(tf.name_scope("fc-2"), scope => | |||
{ | |||
fc2_out = tf.layers.dense(fc1_out, 2048, activation: tf.nn.relu(), kernel_initializer: fc_initializer); | |||
}); | |||
with(tf.name_scope("fc-3"), scope => | |||
tf_with(tf.name_scope("fc-3"), scope => | |||
{ | |||
logits = tf.layers.dense(fc2_out, num_class, activation: null, kernel_initializer: fc_initializer); | |||
predictions = tf.argmax(logits, -1, output_type: tf.int32); | |||
}); | |||
// ============= Loss and Accuracy ============= | |||
with(tf.name_scope("loss"), delegate | |||
tf_with(tf.name_scope("loss"), delegate | |||
{ | |||
var y_one_hot = tf.one_hot(y, num_class); | |||
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits: logits, labels: y_one_hot)); | |||
var update_ops = tf.get_collection(ops.GraphKeys.UPDATE_OPS) as List<object>; | |||
with(tf.control_dependencies(update_ops.Select(x => (Operation)x).ToArray()), delegate | |||
tf_with(tf.control_dependencies(update_ops.Select(x => (Operation)x).ToArray()), delegate | |||
{ | |||
var adam = tf.train.AdamOptimizer(learning_rate); | |||
adam.minimize(loss, global_step: global_step); | |||
@@ -129,13 +129,13 @@ namespace TensorFlowNET.Examples.Text | |||
private Tensor conv_block(Tensor input, int i, bool max_pool = true) | |||
{ | |||
return with(tf.variable_scope($"conv-block-{i}"), delegate | |||
return tf_with(tf.variable_scope($"conv-block-{i}"), delegate | |||
{ | |||
Tensor conv = null; | |||
// Two "conv-batch_norm-relu" layers. | |||
foreach (var j in Enumerable.Range(0, 2)) | |||
{ | |||
with(tf.variable_scope($"conv-{j}"), delegate | |||
tf_with(tf.variable_scope($"conv-{j}"), delegate | |||
{ | |||
// convolution | |||
conv = tf.layers.conv2d( | |||
@@ -36,7 +36,7 @@ namespace TensorFlowNET.Examples.Text | |||
var keep_prob = tf.where(is_training, 0.5f, 1.0f); | |||
Tensor x_emb = null; | |||
with(tf.name_scope("embedding"), scope => | |||
tf_with(tf.name_scope("embedding"), scope => | |||
{ | |||
var init_embeddings = tf.random_uniform(new int[] { vocabulary_size, embedding_size }); | |||
var embeddings = tf.get_variable("embeddings", initializer: init_embeddings); | |||
@@ -68,20 +68,20 @@ namespace TensorFlowNET.Examples.Text | |||
var h_pool = tf.concat(pooled_outputs, 3); | |||
var h_pool_flat = tf.reshape(h_pool, new TensorShape(-1, num_filters * filter_sizes.Rank)); | |||
Tensor h_drop = null; | |||
with(tf.name_scope("dropout"), delegate | |||
tf_with(tf.name_scope("dropout"), delegate | |||
{ | |||
h_drop = tf.nn.dropout(h_pool_flat, keep_prob); | |||
}); | |||
Tensor logits = null; | |||
Tensor predictions = null; | |||
with(tf.name_scope("output"), delegate | |||
tf_with(tf.name_scope("output"), delegate | |||
{ | |||
logits = tf.layers.dense(h_drop, num_class); | |||
predictions = tf.argmax(logits, -1, output_type: tf.int32); | |||
}); | |||
with(tf.name_scope("loss"), delegate | |||
tf_with(tf.name_scope("loss"), delegate | |||
{ | |||
var sscel = tf.nn.sparse_softmax_cross_entropy_with_logits(logits: logits, labels: y); | |||
var loss = tf.reduce_mean(sscel); | |||
@@ -89,7 +89,7 @@ namespace TensorFlowNET.Examples.Text | |||
var optimizer = adam.minimize(loss, global_step: global_step); | |||
}); | |||
with(tf.name_scope("accuracy"), delegate | |||
tf_with(tf.name_scope("accuracy"), delegate | |||
{ | |||
var correct_predictions = tf.equal(predictions, y); | |||
var accuracy = tf.reduce_mean(tf.cast(correct_predictions, TF_DataType.TF_FLOAT), name: "accuracy"); | |||
@@ -12,13 +12,13 @@ namespace TensorFlowNET.UnitTest.Basics | |||
{ | |||
var x = tf.constant(new[,] { { 1, 2 } }); | |||
var neg_x = tf.negative(x); | |||
with(tf.Session(), session => | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = session.run(neg_x); | |||
var result = sess.run(neg_x); | |||
Assert.AreEqual(result[0][0], -1); | |||
Assert.AreEqual(result[0][1], -2); | |||
}); | |||
} | |||
} | |||
} | |||
} |
@@ -94,18 +94,18 @@ namespace TensorFlowNET.UnitTest | |||
{ | |||
// small size | |||
var tensor = tf.zeros(new Shape(3, 2), TF_DataType.TF_INT32, "small"); | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = sess.run(tensor); | |||
Assert.AreEqual(result.shape[0], 3); | |||
Assert.AreEqual(result.shape[1], 2); | |||
Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 0, 0, 0, 0, 0, 0 }, result.Data<int>())); | |||
}); | |||
} | |||
// big size | |||
tensor = tf.zeros(new Shape(200, 100), TF_DataType.TF_INT32, "big"); | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = sess.run(tensor); | |||
@@ -116,21 +116,21 @@ namespace TensorFlowNET.UnitTest | |||
Assert.AreEqual(0, data[0]); | |||
Assert.AreEqual(0, data[500]); | |||
Assert.AreEqual(0, data[result.size - 1]); | |||
}); | |||
} | |||
} | |||
[TestMethod] | |||
public void OnesConst() | |||
{ | |||
var ones = tf.ones(new Shape(3, 2), TF_DataType.TF_DOUBLE, "ones"); | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = sess.run(ones); | |||
Assert.AreEqual(result.shape[0], 3); | |||
Assert.AreEqual(result.shape[1], 2); | |||
Assert.IsTrue(new[] { 1, 1, 1, 1, 1, 1 }.SequenceEqual(result.Data<int>())); | |||
}); | |||
} | |||
} | |||
[TestMethod] | |||
@@ -138,14 +138,14 @@ namespace TensorFlowNET.UnitTest | |||
{ | |||
var ones = tf.ones(new Shape(3, 2), TF_DataType.TF_DOUBLE, "ones"); | |||
var halfes = ones * 0.5; | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = sess.run(halfes); | |||
Assert.AreEqual(result.shape[0], 3); | |||
Assert.AreEqual(result.shape[1], 2); | |||
Assert.IsTrue(new[] { .5, .5, .5, .5, .5, .5 }.SequenceEqual(result.Data<double>())); | |||
}); | |||
} | |||
} | |||
[TestMethod] | |||
@@ -158,7 +158,7 @@ namespace TensorFlowNET.UnitTest | |||
}); | |||
var tensor = tf.constant(nd); | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = sess.run(tensor); | |||
var data = result.Data<int>(); | |||
@@ -166,7 +166,7 @@ namespace TensorFlowNET.UnitTest | |||
Assert.AreEqual(result.shape[0], 2); | |||
Assert.AreEqual(result.shape[1], 3); | |||
Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 3, 1, 1, 2, 1, 3 }, data)); | |||
}); | |||
} | |||
} | |||
[TestMethod] | |||
@@ -33,7 +33,8 @@ namespace TensorFlowNET.UnitTest | |||
public void Gradient2x() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
with(tf.Session(graph), sess => { | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
var x = tf.constant(7.0f); | |||
var y = x * x * tf.constant(0.1f); | |||
@@ -42,14 +43,14 @@ namespace TensorFlowNET.UnitTest | |||
float r = sess.run(grad[0]); | |||
Assert.AreEqual(r, 1.4f); | |||
}); | |||
} | |||
} | |||
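The asserted value is plain calculus: y = 0.1 * x^2, so dy/dx = 0.2 * x, which at x = 7 gives 1.4. The Gradient3x test below differentiates y = 0.1 * x^3, for which dy/dx = 0.3 * x^2 evaluates to 14.7 at the same point.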
[TestMethod] | |||
public void Gradient3x() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
with(tf.Session(graph), sess => { | |||
tf_with(tf.Session(graph), sess => { | |||
var x = tf.constant(7.0f); | |||
var y = x * x * x * tf.constant(0.1f); | |||
@@ -91,14 +92,14 @@ namespace TensorFlowNET.UnitTest | |||
var g = tf.gradients(y, new Tensor[] { slice, slice }); | |||
with(tf.Session(graph), sess => | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
var r = sess.run(slice); | |||
Assert.IsTrue(Enumerable.SequenceEqual(r.shape, new[] { 2, 1, 2 })); | |||
Assert.IsTrue(Enumerable.SequenceEqual(r[0].GetData<int>(), new[] { 11, 13 })); | |||
Assert.IsTrue(Enumerable.SequenceEqual(r[1].GetData<int>(), new[] { 51, 53 })); | |||
}); | |||
} | |||
} | |||
} | |||
} |
@@ -419,7 +419,7 @@ namespace TensorFlowNET.UnitTest | |||
public void ImportGraphMeta() | |||
{ | |||
var dir = "my-save-dir/"; | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var new_saver = tf.train.import_meta_graph(dir + "my-model-10000.meta"); | |||
new_saver.restore(sess, dir + "my-model-10000"); | |||
@@ -428,7 +428,7 @@ namespace TensorFlowNET.UnitTest | |||
var logits = (tf.get_collection("logits") as List<ITensorOrOperation>)[0] as Tensor; | |||
var loss = tf.losses.sparse_softmax_cross_entropy(labels: labels, | |||
logits: logits); | |||
}); | |||
} | |||
} | |||
} | |||
} |
@@ -13,7 +13,7 @@ namespace TensorFlowNET.UnitTest | |||
[TestMethod] | |||
public void NestedNameScope() | |||
{ | |||
with(new ops.NameScope("scope1"), scope1 => | |||
tf_with(new ops.NameScope("scope1"), scope1 => | |||
{ | |||
name = scope1; | |||
Assert.AreEqual("scope1", g._name_stack); | |||
@@ -22,7 +22,7 @@ namespace TensorFlowNET.UnitTest | |||
var const1 = tf.constant(1.0); | |||
Assert.AreEqual("scope1/Const:0", const1.name); | |||
with(new ops.NameScope("scope2"), scope2 => | |||
tf_with(new ops.NameScope("scope2"), scope2 => | |||
{ | |||
name = scope2; | |||
Assert.AreEqual("scope1/scope2", g._name_stack); | |||
@@ -13,12 +13,12 @@ namespace TensorFlowNET.UnitTest | |||
var x = tf.placeholder(tf.int32); | |||
var y = x * 3; | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = sess.run(y, | |||
new FeedItem(x, 2)); | |||
Assert.AreEqual((int)result, 6); | |||
}); | |||
} | |||
} | |||
} | |||
} |
@@ -164,7 +164,7 @@ namespace TensorFlowNET.UnitTest | |||
// return self._eval_helper(tensors) | |||
// else: | |||
{ | |||
with(tf.Session(), s => | |||
using (var sess = tf.Session()) | |||
{ | |||
var ndarray = tensor.eval(); | |||
if (typeof(T) == typeof(double)) | |||
@@ -181,7 +181,8 @@ namespace TensorFlowNET.UnitTest | |||
{ | |||
result = ndarray; | |||
} | |||
}); | |||
} | |||
return (T)result; | |||
} | |||
} | |||
@@ -82,11 +82,11 @@ namespace TensorFlowNET.UnitTest | |||
var a = constant_op.constant(np.array(3.0).reshape(1, 1)); | |||
var b = constant_op.constant(np.array(2.0).reshape(1, 1)); | |||
var c = math_ops.matmul(a, b, name: "matmul"); | |||
with(tf.Session(), delegate | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = c.eval(); | |||
Assert.AreEqual(6, result.Data<double>()[0]); | |||
}); | |||
} | |||
} | |||
} | |||
} |
@@ -12,7 +12,7 @@ namespace TensorFlowNET.UnitTest | |||
[TestClass] | |||
public class TensorTest : CApiTest | |||
{ | |||
//[TestMethod] | |||
[Ignore("Not for mult-thread")] | |||
public void TensorDeallocationThreadSafety() | |||
{ | |||
var tensors = new Tensor[1000]; | |||
@@ -17,10 +17,10 @@ namespace TensorFlowNET.UnitTest | |||
public void ImportGraph() | |||
{ | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var new_saver = tf.train.import_meta_graph("C:/tmp/my-model.meta"); | |||
}); | |||
} | |||
//tf.train.export_meta_graph(filename: "linear_regression.meta.bin"); | |||
// import meta | |||
@@ -42,7 +42,7 @@ namespace TensorFlowNET.UnitTest | |||
public void ImportSavedModel() | |||
{ | |||
with(Session.LoadFromSavedModel("mobilenet"), sess => | |||
tf_with(Session.LoadFromSavedModel("mobilenet"), sess => | |||
{ | |||
}); | |||
@@ -63,14 +63,14 @@ namespace TensorFlowNET.UnitTest | |||
// Add ops to save and restore all the variables. | |||
var saver = tf.train.Saver(); | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
sess.run(init_op); | |||
// Save the variables to disk. | |||
var save_path = saver.save(sess, "/tmp/model1.ckpt"); | |||
Console.WriteLine($"Model saved in path: {save_path}"); | |||
}); | |||
} | |||
} | |||
public void Save2() | |||
@@ -87,7 +87,7 @@ namespace TensorFlowNET.UnitTest | |||
// Add ops to save and restore all the variables. | |||
var saver = tf.train.Saver(); | |||
with<Session>(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
sess.run(init_op); | |||
// Do some work with the model. | |||
@@ -97,7 +97,7 @@ namespace TensorFlowNET.UnitTest | |||
// Save the variables to disk. | |||
var save_path = saver.save(sess, "/tmp/model2.ckpt"); | |||
Console.WriteLine($"Model saved in path: {save_path}"); | |||
}); | |||
} | |||
} | |||
} | |||
} |
@@ -35,9 +35,9 @@ namespace TensorFlowNET.UnitTest | |||
public void VarCreation() | |||
{ | |||
tf.Graph().as_default(); | |||
with(tf.variable_scope("foo"), delegate | |||
tf_with(tf.variable_scope("foo"), delegate | |||
{ | |||
with(tf.variable_scope("bar"), delegate | |||
tf_with(tf.variable_scope("bar"), delegate | |||
{ | |||
var v = tf.get_variable("v", new TensorShape(1)); | |||
Assert.AreEqual(v.name, "foo/bar/v:0"); | |||
@@ -53,14 +53,14 @@ namespace TensorFlowNET.UnitTest | |||
{ | |||
tf.Graph().as_default(); | |||
variable_scope vs = null; | |||
with(tf.variable_scope("foo"), v => vs = v); | |||
tf_with(tf.variable_scope("foo"), v => vs = v); | |||
// Re-enter the variable scope. | |||
with(tf.variable_scope(vs, auxiliary_name_scope: false), v => | |||
tf_with(tf.variable_scope(vs, auxiliary_name_scope: false), v => | |||
{ | |||
var vs1 = (VariableScope)v; | |||
// Restore the original name_scope. | |||
with(tf.name_scope(vs1.original_name_scope), delegate | |||
tf_with(tf.name_scope(vs1.original_name_scope), delegate | |||
{ | |||
var v1 = tf.get_variable("v", new TensorShape(1)); | |||
Assert.AreEqual(v1.name, "foo/v:0"); | |||
@@ -89,21 +89,20 @@ namespace TensorFlowNET.UnitTest | |||
[TestMethod] | |||
public void Assign1() | |||
{ | |||
with(tf.Graph().as_default(), graph => | |||
{ | |||
var variable = tf.Variable(31, name: "tree"); | |||
var init = tf.global_variables_initializer(); | |||
var graph = tf.Graph().as_default(); | |||
var sess = tf.Session(graph); | |||
sess.run(init); | |||
var variable = tf.Variable(31, name: "tree"); | |||
var init = tf.global_variables_initializer(); | |||
var result = sess.run(variable); | |||
Assert.IsTrue((int)result == 31); | |||
var sess = tf.Session(graph); | |||
sess.run(init); | |||
var assign = variable.assign(12); | |||
result = sess.run(assign); | |||
Assert.IsTrue((int)result == 12); | |||
}); | |||
var result = sess.run(variable); | |||
Assert.IsTrue((int)result == 31); | |||
var assign = variable.assign(12); | |||
result = sess.run(assign); | |||
Assert.IsTrue((int)result == 12); | |||
} | |||
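One wrinkle in the flattened Assign1: `tf.Session(graph)` is now created without a `using` block, so unlike the other rewritten call sites this session is never disposed. A version consistent with the rest of the change would be (sketch, not part of the diff):

    var graph = tf.Graph().as_default();
    var variable = tf.Variable(31, name: "tree");
    var init = tf.global_variables_initializer();
    using (var sess = tf.Session(graph))
    {
        sess.run(init);
        var result = sess.run(variable);
        Assert.IsTrue((int)result == 31);
        var assign = variable.assign(12);
        result = sess.run(assign);
        Assert.IsTrue((int)result == 12);
    }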
[TestMethod] | |||
@@ -115,12 +114,12 @@ namespace TensorFlowNET.UnitTest | |||
// Add an op to initialize the variables. | |||
var init_op = tf.global_variables_initializer(); | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
sess.run(init_op); | |||
// Do some work with the model. | |||
inc_v1.op.run(); | |||
}); | |||
} | |||
} | |||
/// <summary> | |||
@@ -16,7 +16,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
with(tf.Session(graph), sess => | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
var x = tf.constant(2, name: "x"); | |||
var y = tf.constant(5, name: "y"); | |||
@@ -27,7 +27,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
int result = z.eval(sess); | |||
assertEquals(result, 22); | |||
}); | |||
} | |||
} | |||
[Ignore("need tesnroflow expose AddControlInput API")] | |||
@@ -36,7 +36,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
with(tf.Session(graph), sess => | |||
using (var sess = tf.Session(graph)) | |||
{ | |||
var x = tf.constant(2, name: "x"); | |||
var y = tf.constant(1, name: "y"); | |||
@@ -47,7 +47,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
int result = z.eval(sess); | |||
assertEquals(result, 11); | |||
}); | |||
} | |||
} | |||
[Ignore("need tesnroflow expose AddControlInput API")] | |||
@@ -11,7 +11,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
private void _testWhileContextHelper(int? maximum_iterations = null) | |||
{ | |||
// TODO: implement missing code dependencies | |||
with<Session>(this.cached_session(), sess => | |||
using (var sess = this.cached_session()) | |||
{ | |||
var i = constant_op.constant(0, name: "i"); | |||
var c = new Func<Tensor, Tensor>(x => gen_math_ops.less(x, 10, name: "c")); | |||
@@ -26,7 +26,7 @@ namespace TensorFlowNET.UnitTest.control_flow_ops_test | |||
WhileContext.from_proto( | |||
control_flow_context.to_proto()).to_proto(), "");*/ | |||
} | |||
}); | |||
} | |||
} | |||
[Ignore("TODO")] | |||
@@ -15,20 +15,18 @@ namespace TensorFlowNET.UnitTest.gradients_test | |||
[TestMethod] | |||
public void testGradients() | |||
{ | |||
with(tf.Graph().as_default(), g => | |||
{ | |||
var inp = tf.constant(1.0, shape: new[] { 32, 100 }, name: "in"); | |||
var w = tf.constant(1.0, shape: new[] { 100, 10 }, name: "w"); | |||
var b = tf.constant(1.0, shape: new[] { 10 }, name: "b"); | |||
var xw = math_ops.matmul(inp, w, name: "xw"); | |||
var h = nn_ops.bias_add(xw, b, name: "h"); | |||
var w_grad = gradients_impl.gradients(new[] { h }, new[] { w })[0]; | |||
self.assertEquals("MatMul", w_grad.op.type); | |||
// TODO: Operation._original_op | |||
//self.assertEquals(w_grad.op._original_op, xw.op); | |||
self.assertTrue((bool)w_grad.op.get_attr("transpose_a")); | |||
self.assertFalse((bool)w_grad.op.get_attr("transpose_b")); | |||
}); | |||
var g = tf.Graph().as_default(); | |||
var inp = tf.constant(1.0, shape: new[] { 32, 100 }, name: "in"); | |||
var w = tf.constant(1.0, shape: new[] { 100, 10 }, name: "w"); | |||
var b = tf.constant(1.0, shape: new[] { 10 }, name: "b"); | |||
var xw = math_ops.matmul(inp, w, name: "xw"); | |||
var h = nn_ops.bias_add(xw, b, name: "h"); | |||
var w_grad = gradients_impl.gradients(new[] { h }, new[] { w })[0]; | |||
self.assertEquals("MatMul", w_grad.op.type); | |||
// TODO: Operation._original_op | |||
//self.assertEquals(w_grad.op._original_op, xw.op); | |||
self.assertTrue((bool)w_grad.op.get_attr("transpose_a")); | |||
self.assertFalse((bool)w_grad.op.get_attr("transpose_b")); | |||
} | |||
[TestMethod] | |||
@@ -104,14 +102,14 @@ namespace TensorFlowNET.UnitTest.gradients_test | |||
tf.constant(new[] { 1 }, tf.int32, new[] { 1 }) | |||
); | |||
var g = tf.gradients(b, a); | |||
with(tf.Session(), sess => | |||
using (var sess = tf.Session()) | |||
{ | |||
var result = sess.run(new object[] { g, b }); | |||
var actualDeriv = np.squeeze(result[0]); | |||
var actual = np.squeeze(result[1]); | |||
self.assertEquals(new float[] { 1, 0 }, new float[] { actualDeriv[0], actualDeriv[1] }); | |||
self.assertEquals(0.9640276f, (float)actual); | |||
}); | |||
} | |||
} | |||
[TestMethod] | |||
@@ -73,12 +73,13 @@ namespace TensorFlowNET.UnitTest.nn_test | |||
{ | |||
var value = array_ops.placeholder(dtype: dtypes.float32); | |||
var sparsity = nn_impl.zero_fraction(value); | |||
with<Session>(self.cached_session(), sess => { | |||
using (var sess = self.cached_session()) | |||
{ | |||
// TODO: make this compile | |||
//self.assertAllClose( | |||
// 0.25, | |||
// sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})); | |||
}); | |||
//self.assertAllClose( | |||
// 0.25, | |||
// sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})); | |||
} | |||
} | |||
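The commented expectation is easy to verify by hand: zero_fraction is the share of exactly-zero entries, and [[0., 1.], [0.3, 2.]] contains one zero among four values, hence 0.25.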
@@ -16,19 +16,18 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
[TestMethod] | |||
public void TestBasic() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
var g = tf.Graph().as_default(); | |||
Tensor a = null, b = null, c = null, d = null, e = null; | |||
with<Graph>(graph, g => | |||
a = constant_op.constant(1.0); | |||
b = constant_op.constant(1.0); | |||
tf_with(g.control_dependencies(new[] { a }), x => | |||
{ | |||
a = constant_op.constant(1.0); | |||
b = constant_op.constant(1.0); | |||
with(g.control_dependencies(new[] { a }), x => | |||
{ | |||
c = constant_op.constant(1.0); | |||
d = array_ops.identity(b); | |||
e = array_ops.identity(c); | |||
}); | |||
c = constant_op.constant(1.0); | |||
d = array_ops.identity(b); | |||
e = array_ops.identity(c); | |||
}); | |||
Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op })); | |||
Assert.IsTrue(Enumerable.SequenceEqual(d.op.control_inputs, new[] { a.op })); | |||
// e should be dominated by c. | |||
@@ -56,7 +55,7 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
// TODO: make this compile (see original Python code below) | |||
a = constant_op.constant(1.0); | |||
b = future; // <--- {henon} obviously, this doesn't compile, looks like control_dependencies needs to be able to take callables as well. | |||
with(ops.control_dependencies(new object[] { a, b }), ctrl => | |||
tf_with(ops.control_dependencies(new object[] { a, b }), ctrl => | |||
{ | |||
return c = constant_op.constant(3.0); | |||
}); | |||
@@ -64,19 +63,15 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
} | |||
else | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
with<Graph>(graph, g => | |||
var g = tf.Graph().as_default(); | |||
a = constant_op.constant(1.0); | |||
var b1 = future(); | |||
tf_with(g.control_dependencies(new[] { a, b }), ctrl => | |||
{ | |||
a = constant_op.constant(1.0); | |||
var b1 = future(); | |||
with(g.control_dependencies(new[] { a, b }), ctrl => | |||
{ | |||
c = constant_op.constant(3.0); | |||
}); | |||
Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); | |||
Assert.AreEqual(1, calls); | |||
c = constant_op.constant(3.0); | |||
}); | |||
Assert.IsTrue(Enumerable.SequenceEqual(c.op.control_inputs, new[] { a.op, b1.op })); | |||
Assert.AreEqual(1, calls); | |||
} | |||
} | |||
/* | |||
@@ -139,17 +134,17 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
var a_3 = constant_op.constant(4.0); | |||
var a_4 = constant_op.constant(5.0); | |||
Tensor b_1 = null, b_2 = null; | |||
with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => | |||
tf_with(g.control_dependencies(new[] { a_1, a_2, a_3, a_4 }), ctrl => | |||
{ | |||
b_1 = constant_op.constant(6.0); | |||
}); | |||
with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
{ | |||
with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
{ | |||
with(g.control_dependencies(new[] { a_3 }), ctrl3 => | |||
tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => | |||
{ | |||
with(g.control_dependencies(new[] { a_4 }), ctrl4 => | |||
tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => | |||
{ | |||
b_2 = constant_op.constant(7.0); | |||
}); | |||
@@ -175,15 +170,15 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
var a_3 = constant_op.constant(4.0); | |||
var a_4 = constant_op.constant(5.0); | |||
Operation b_3_4 = null, b_3 = null, b_none = null, b_1 = null, b_1_2 = null, b_none2 = null; | |||
with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
{ | |||
with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
{ | |||
with(g.control_dependencies(null), ctrl3 => | |||
tf_with(g.control_dependencies(null), ctrl3 => | |||
{ | |||
with(g.control_dependencies(new[] { a_3 }), ctrl4 => | |||
tf_with(g.control_dependencies(new[] { a_3 }), ctrl4 => | |||
{ | |||
with(g.control_dependencies(new[] { a_4 }), ctrl5 => | |||
tf_with(g.control_dependencies(new[] { a_4 }), ctrl5 => | |||
{ | |||
// deps [a_3, a_4] | |||
b_3_4 = constant_op.constant(7.0); | |||
@@ -199,7 +194,7 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
}); | |||
// deps back to [a_1] | |||
b_1 = constant_op.constant(11.0); | |||
with(g.control_dependencies(null), ctrl6 => | |||
tf_with(g.control_dependencies(null), ctrl6 => | |||
{ | |||
// deps are None again | |||
b_none2 = constant_op.constant(12.0); | |||
@@ -233,25 +228,25 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
Operation c_1 = null, c_2 = null, c_3 = null, c_4 = null; | |||
Operation d_1 = null, d_2 = null, d_3 = null, d_4 = null; | |||
Operation e_1 = null, e_2 = null, e_3 = null, e_4 = null; | |||
with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
tf_with(g.control_dependencies(new[] { a_1 }), ctrl1 => | |||
{ | |||
b_1 = tf.multiply(a_3, a_4); | |||
c_1 = tf.multiply(a_1, b_1.output); | |||
d_1 = tf.multiply(b_1.output, c_1.output); | |||
e_1 = constant_op.constant(5.0); | |||
with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
tf_with(g.control_dependencies(new[] { a_2 }), ctrl2 => | |||
{ | |||
b_2 = tf.multiply(a_3, a_4); | |||
c_2 = tf.multiply(a_1, b_1.output); | |||
d_2 = tf.multiply(b_2.output, c_2.output); | |||
e_2 = tf.multiply(e_1.output, e_1.output); | |||
with(g.control_dependencies(new[] { a_3 }), ctrl3 => | |||
tf_with(g.control_dependencies(new[] { a_3 }), ctrl3 => | |||
{ | |||
b_3 = tf.multiply(a_3, a_4); | |||
c_3 = tf.multiply(a_1, b_1.output); | |||
d_3 = tf.multiply(b_3.output, c_3.output); | |||
e_3 = tf.multiply(e_2.output, e_2.output); | |||
with(g.control_dependencies(new[] { a_4 }), ctrl4 => | |||
tf_with(g.control_dependencies(new[] { a_4 }), ctrl4 => | |||
{ | |||
b_4 = tf.multiply(a_3, a_4); | |||
c_4 = tf.multiply(a_1, b_1.output); | |||
@@ -310,7 +305,7 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
var g = tf.Graph().as_default(); | |||
Operation b = null; | |||
var a = constant_op.constant(100.0); | |||
with(g.control_dependencies(new[] { a }), ctrl1 => | |||
tf_with(g.control_dependencies(new[] { a }), ctrl1 => | |||
{ | |||
b = array_ops.identity(a); | |||
}); | |||
@@ -24,81 +24,73 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
[TestMethod] | |||
public void TestShape() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
with<Graph>(graph, g => | |||
{ | |||
var x = constant_op.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } }); | |||
var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] { x }, new Operation[0]); | |||
var op = g._create_op_from_tf_operation(c_op); | |||
Assert.AreEqual("myop", op.name); | |||
Assert.AreEqual("Identity", op.type); | |||
Assert.AreEqual(1, len(op.outputs)); | |||
assertItemsEqual(new[] { 2, 3 }, op.outputs[0].shape); | |||
}); | |||
var g = tf.Graph().as_default(); | |||
var x = constant_op.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } }); | |||
var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), new[] { x }, new Operation[0]); | |||
var op = g._create_op_from_tf_operation(c_op); | |||
Assert.AreEqual("myop", op.name); | |||
Assert.AreEqual("Identity", op.type); | |||
Assert.AreEqual(1, len(op.outputs)); | |||
assertItemsEqual(new[] { 2, 3 }, op.outputs[0].shape); | |||
} | |||
[TestMethod] | |||
public void TestUniqueName() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
with<Graph>(graph, g => | |||
{ | |||
//var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]); | |||
//var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new Tensor[0], new Operation[0]); | |||
//var op = g._create_op_from_tf_operation(c_op); | |||
//var op2 = g._create_op_from_tf_operation(c_op2); | |||
var op = constant_op.constant(0, name: "myop").op; | |||
var op2 = constant_op.constant(0, name: "myop_1").op; | |||
// Create ops with same names as op1 and op2. We expect the new names to be | |||
// uniquified. | |||
var op3 = constant_op.constant(0, name: "myop").op; | |||
var op4 = constant_op.constant(0, name: "myop_1").op; | |||
self.assertEqual(op.name, "myop"); | |||
self.assertEqual(op2.name, "myop_1"); | |||
self.assertEqual(op3.name, "myop_2"); | |||
self.assertEqual(op4.name, "myop_1_1"); | |||
}); | |||
//var (c_op,op_desc) = ops._create_c_op(g, ops._NodeDef("Const", "myop"), new Tensor[0], new Operation[0]); | |||
//var (c_op2, op_desc1) = ops._create_c_op(g, ops._NodeDef("Const", "myop_1"), new Tensor[0], new Operation[0]); | |||
//var op = g._create_op_from_tf_operation(c_op); | |||
//var op2 = g._create_op_from_tf_operation(c_op2); | |||
var op = constant_op.constant(0, name: "myop").op; | |||
var op2 = constant_op.constant(0, name: "myop_1").op; | |||
// Create ops with same names as op1 and op2. We expect the new names to be | |||
// uniquified. | |||
var op3 = constant_op.constant(0, name: "myop").op; | |||
var op4 = constant_op.constant(0, name: "myop_1").op; | |||
self.assertEqual(op.name, "myop"); | |||
self.assertEqual(op2.name, "myop_1"); | |||
self.assertEqual(op3.name, "myop_2"); | |||
self.assertEqual(op4.name, "myop_1_1"); | |||
} | |||
[Ignore("need tesnroflow expose UpdateEdge API")] | |||
[TestMethod] | |||
public void TestCond() | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
with(graph, g => | |||
var g = tf.Graph().as_default(); | |||
var x = constant_op.constant(10); | |||
var true_fn = new Func<Tensor>(() => | |||
{ | |||
var x = constant_op.constant(10); | |||
var true_fn = new Func<Tensor>(() => | |||
{ | |||
var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]); | |||
var new_ops = g._add_new_tf_operations(); | |||
self.assertEqual(len(new_ops), 1); | |||
return x; | |||
}); | |||
control_flow_ops.cond(x < 10, true_fn, () => x); | |||
var op = g.get_operation_by_name("cond/myop"); | |||
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true); | |||
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); | |||
self.assertIsNotNone(op); | |||
self.assertEqual(op.name, "cond/myop"); | |||
self.assertEqual(op.type, "Identity"); | |||
//self.assertEqual(op.outputs, new object[0]); | |||
var op_input = op.inputs[0].op; | |||
self.assertEqual(op_input.type, "Switch"); | |||
self.assertEqual(op_input.inputs[0].name, x.name); | |||
self.assertEqual(op.graph, g); | |||
self.assertIsNotNone(op._get_control_flow_context()); | |||
var cond_text = op._get_control_flow_context() as ControlFlowContext; | |||
self.assertEqual(cond_text.name, "cond/cond_text"); | |||
var (c_op, op_desc) = ops._create_c_op(g, ops._NodeDef("Identity", "cond/myop"), new[] { x }, new Operation[0]); | |||
var new_ops = g._add_new_tf_operations(); | |||
self.assertEqual(len(new_ops), 1); | |||
return x; | |||
}); | |||
control_flow_ops.cond(x < 10, true_fn, () => x); | |||
var op = g.get_operation_by_name("cond/myop"); | |||
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta.txt", as_text:true); | |||
//tf.train.export_meta_graph(@"D:\dev\tensorboard\logdir\sharp.meta", as_text: false); | |||
self.assertIsNotNone(op); | |||
self.assertEqual(op.name, "cond/myop"); | |||
self.assertEqual(op.type, "Identity"); | |||
//self.assertEqual(op.outputs, new object[0]); | |||
var op_input = op.inputs[0].op; | |||
self.assertEqual(op_input.type, "Switch"); | |||
self.assertEqual(op_input.inputs[0].name, x.name); | |||
self.assertEqual(op.graph, g); | |||
self.assertIsNotNone(op._get_control_flow_context()); | |||
var cond_text = op._get_control_flow_context() as ControlFlowContext; | |||
self.assertEqual(cond_text.name, "cond/cond_text"); | |||
} | |||
[Ignore("Todo: Port")] | |||
@@ -107,20 +99,17 @@ namespace TensorFlowNET.UnitTest.ops_test | |||
{ | |||
var graph = tf.Graph().as_default(); | |||
Operation x = null; | |||
with<Graph>(graph, g => | |||
x = constant_op.constant(42); | |||
var body = new Func<int, int>(i => | |||
{ | |||
x = constant_op.constant(42); | |||
var body = new Func<int, int>(i => | |||
{ | |||
ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x}, | |||
new Operation[0]); | |||
var new_ops = g._add_new_tf_operations(); | |||
self.assertEqual(len(new_ops), 1); | |||
return i; | |||
}); | |||
// TODO: port control_flow_ops.while_loop | |||
//control_flow_ops.while_loop( i => i < 10, body, new int[]{0}, name = "myloop"); | |||
ops._create_c_op(ops.get_default_graph(), ops._NodeDef("Identity", "myloop/myop"), new[] {x}, | |||
new Operation[0]); | |||
var new_ops = graph._add_new_tf_operations(); | |||
self.assertEqual(len(new_ops), 1); | |||
return i; | |||
}); | |||
// TODO: port control_flow_ops.while_loop | |||
//control_flow_ops.while_loop( i => i < 10, body, new int[]{0}, name = "myloop"); | |||
var op = graph.get_operation_by_name("myloop/myop"); | |||
self.assertIsNotNone(op); | |||
self.assertEqual(op.name, "myloop/myop"); | |||