You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

tf.layers.cs 10 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248
  1. /*****************************************************************************
  2. Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. ******************************************************************************/
  13. using System.Collections.Generic;
  14. using System.Linq;
  15. using Tensorflow.Keras.ArgsDefinition;
  16. using Tensorflow.Keras.Layers;
  17. using static Tensorflow.Binding;
  18. namespace Tensorflow.Keras
  19. {
  20. public class tensorflow_layers
  21. {
  22. public layers_internal layers { get; } = new layers_internal();
  23. public class layers_internal
  24. {
  25. public Tensor conv2d(Tensor inputs,
  26. int filters,
  27. int[] kernel_size,
  28. int[] strides = null,
  29. string padding = "valid",
  30. string data_format = "channels_last",
  31. int[] dilation_rate = null,
  32. bool use_bias = true,
  33. Activation activation = null,
  34. IInitializer kernel_initializer = null,
  35. IInitializer bias_initializer = null,
  36. bool trainable = true,
  37. string name = null)
  38. {
  39. if (strides == null)
  40. strides = new int[] { 1, 1 };
  41. if (dilation_rate == null)
  42. dilation_rate = new int[] { 1, 1 };
  43. if (bias_initializer == null)
  44. bias_initializer = tf.zeros_initializer;
  45. var layer = new Conv2D(new Conv2DArgs
  46. {
  47. Filters = filters,
  48. KernelSize = kernel_size,
  49. Strides = strides,
  50. Padding = padding,
  51. DataFormat = data_format,
  52. DilationRate = dilation_rate,
  53. Activation = activation,
  54. UseBias = use_bias,
  55. KernelInitializer = kernel_initializer,
  56. BiasInitializer = bias_initializer,
  57. Trainable = trainable,
  58. Name = name
  59. });
  60. return layer.Apply(inputs);
  61. }
  62. /// <summary>
  63. /// Functional interface for the batch normalization layer.
  64. /// http://arxiv.org/abs/1502.03167
  65. /// </summary>
  66. /// <param name="inputs"></param>
  67. /// <param name="axis"></param>
  68. /// <param name="momentum"></param>
  69. /// <param name="epsilon"></param>
  70. /// <param name="center"></param>
  71. /// <param name="scale"></param>
  72. /// <param name="beta_initializer"></param>
  73. /// <param name="gamma_initializer"></param>
  74. /// <param name="moving_mean_initializer"></param>
  75. /// <param name="moving_variance_initializer"></param>
  76. /// <param name="training"></param>
  77. /// <param name="trainable"></param>
  78. /// <param name="name"></param>
  79. /// <param name="renorm"></param>
  80. /// <param name="renorm_momentum"></param>
  81. /// <returns></returns>
  82. public Tensors batch_normalization(Tensor inputs,
  83. int axis = -1,
  84. float momentum = 0.99f,
  85. float epsilon = 0.001f,
  86. bool center = true,
  87. bool scale = true,
  88. IInitializer beta_initializer = null,
  89. IInitializer gamma_initializer = null,
  90. IInitializer moving_mean_initializer = null,
  91. IInitializer moving_variance_initializer = null,
  92. Tensor training = null,
  93. bool trainable = true,
  94. string name = null,
  95. bool renorm = false,
  96. float renorm_momentum = 0.99f)
  97. {
  98. var layer = new BatchNormalization(new BatchNormalizationArgs
  99. {
  100. Axis = axis,
  101. Momentum = momentum,
  102. Epsilon = epsilon,
  103. Center = center,
  104. Scale = scale,
  105. BetaInitializer = beta_initializer,
  106. GammaInitializer = gamma_initializer,
  107. MovingMeanInitializer = moving_mean_initializer,
  108. MovingVarianceInitializer = moving_variance_initializer,
  109. Renorm = renorm,
  110. RenormMomentum = renorm_momentum,
  111. Trainable = trainable,
  112. Name = name
  113. });
  114. return layer.Apply(inputs);
  115. }
  116. /// <summary>
  117. /// Max pooling layer for 2D inputs (e.g. images).
  118. /// </summary>
  119. /// <param name="inputs">The tensor over which to pool. Must have rank 4.</param>
  120. /// <param name="pool_size"></param>
  121. /// <param name="strides"></param>
  122. /// <param name="padding"></param>
  123. /// <param name="data_format"></param>
  124. /// <param name="name"></param>
  125. /// <returns></returns>
  126. public Tensor MaxPooling2D(Tensor inputs,
  127. int[] pool_size,
  128. int[] strides,
  129. string padding = "valid",
  130. string data_format = "channels_last",
  131. string name = null)
  132. {
  133. var layer = new MaxPooling2D(new MaxPooling2DArgs
  134. {
  135. PoolSize = pool_size,
  136. Strides = strides,
  137. Padding = padding,
  138. DataFormat = data_format,
  139. Name = name
  140. });
  141. return layer.Apply(inputs);
  142. }
  143. /// <summary>
  144. /// Densely-connected layer class. aka fully-connected<br></br>
  145. /// `outputs = activation(inputs * kernel + bias)`
  146. /// </summary>
  147. /// <param name="inputs"></param>
  148. /// <param name="units">Python integer, dimensionality of the output space.</param>
  149. /// <param name="activation"></param>
  150. /// <param name="use_bias">Boolean, whether the layer uses a bias.</param>
  151. /// <param name="kernel_initializer"></param>
  152. /// <param name="bias_initializer"></param>
  153. /// <param name="trainable"></param>
  154. /// <param name="name"></param>
  155. /// <param name="reuse"></param>
  156. /// <returns></returns>
  157. public Tensor dense(Tensor inputs,
  158. int units,
  159. Activation activation = null,
  160. bool use_bias = true,
  161. IInitializer kernel_initializer = null,
  162. IInitializer bias_initializer = null,
  163. bool trainable = true,
  164. string name = null,
  165. bool? reuse = null)
  166. {
  167. if (bias_initializer == null)
  168. bias_initializer = tf.zeros_initializer;
  169. var layer = new Dense(new DenseArgs
  170. {
  171. Units = units,
  172. Activation = activation,
  173. UseBias = use_bias,
  174. BiasInitializer = bias_initializer,
  175. KernelInitializer = kernel_initializer,
  176. Trainable = trainable,
  177. Name = name
  178. });
  179. return layer.Apply(inputs);
  180. }
  181. /// <summary>
  182. /// Flattens an input tensor while preserving the batch axis (axis 0).
  183. /// </summary>
  184. /// <param name="inputs">Tensor input.</param>
  185. /// <param name="name">The name of the layer.</param>
  186. /// <param name="data_format">
  187. /// A string, one of `channels_last` (default) or `channels_first`. <br></br>
  188. /// The ordering of the dimensions in the inputs. <br></br>
  189. /// `channels_last` corresponds to inputs with shape <br></br>
  190. /// `(batch, height, width, channels)` while `channels_first` corresponds to <br></br>
  191. /// inputs with shape `(batch, channels, height, width)`.
  192. /// </param>
  193. /// <returns></returns>
  194. public Tensor flatten(Tensor inputs,
  195. string name = null,
  196. string data_format = "channels_last")
  197. {
  198. var input_shape = inputs.shape;
  199. if (inputs.shape.ndim == 0)
  200. throw new ValueError($"Input 0 of layer flatten is incompatible with the layer: : expected min_ndim={1}, found ndim={0}. Full shape received: ()");
  201. var premutation = new List<int>() { 0 };
  202. if (data_format == "channels_first" && inputs.ndim > 1)
  203. {
  204. premutation.AddRange(Binding.range(2, inputs.ndim));
  205. premutation.Add(1);
  206. inputs = array_ops.transpose(inputs, premutation.ToArray());
  207. }
  208. var ret = array_ops.reshape(inputs, compute_output_shape(input_shape));
  209. //ret.set_shape(compute_output_shape(ret.shape));
  210. return ret;
  211. int[] compute_output_shape(int[] inputshape)
  212. {
  213. if (inputshape == null || inputshape.Length == 0)
  214. inputshape = new int[] { 1 };
  215. if (inputshape.Skip(1).All(d => d > 0))
  216. {
  217. int[] output_shape = new int[2];
  218. output_shape[0] = inputshape[0];
  219. output_shape[1] = inputshape.Skip(1).Aggregate(1, (acc, rhs) => acc * rhs); //calculate size of all the rest dimensions
  220. return output_shape;
  221. }
  222. else
  223. return new int[] { inputshape[0], -1 }; //-1 == Binding.None
  224. }
  225. }
  226. }
  227. }
  228. }