You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

LayersApi.cs 15 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380
  1. using NumSharp;
  2. using Tensorflow.Keras.ArgsDefinition;
  3. using Tensorflow.Keras.Engine;
  4. using static Tensorflow.Binding;
  5. using static Tensorflow.KerasApi;
  6. namespace Tensorflow.Keras.Layers
  7. {
  8. public class LayersApi
  9. {
  10. /// <summary>
  11. /// Functional interface for the batch normalization layer.
  12. /// http://arxiv.org/abs/1502.03167
  13. /// </summary>
  14. /// <param name="inputs"></param>
  15. /// <param name="axis"></param>
  16. /// <param name="momentum"></param>
  17. /// <param name="epsilon"></param>
  18. /// <param name="center"></param>
  19. /// <param name="scale"></param>
  20. /// <param name="beta_initializer"></param>
  21. /// <param name="gamma_initializer"></param>
  22. /// <param name="moving_mean_initializer"></param>
  23. /// <param name="moving_variance_initializer"></param>
  24. /// <param name="training"></param>
  25. /// <param name="trainable"></param>
  26. /// <param name="name"></param>
  27. /// <param name="renorm"></param>
  28. /// <param name="renorm_momentum"></param>
  29. /// <returns></returns>
  30. public Tensors batch_normalization(Tensor inputs,
  31. int axis = -1,
  32. float momentum = 0.99f,
  33. float epsilon = 0.001f,
  34. bool center = true,
  35. bool scale = true,
  36. IInitializer beta_initializer = null,
  37. IInitializer gamma_initializer = null,
  38. IInitializer moving_mean_initializer = null,
  39. IInitializer moving_variance_initializer = null,
  40. Tensor training = null,
  41. bool trainable = true,
  42. string name = null,
  43. bool renorm = false,
  44. float renorm_momentum = 0.99f)
  45. {
  46. var layer = new BatchNormalization(new BatchNormalizationArgs
  47. {
  48. Axis = axis,
  49. Momentum = momentum,
  50. Epsilon = epsilon,
  51. Center = center,
  52. Scale = scale,
  53. BetaInitializer = beta_initializer,
  54. GammaInitializer = gamma_initializer,
  55. MovingMeanInitializer = moving_mean_initializer,
  56. MovingVarianceInitializer = moving_variance_initializer,
  57. Renorm = renorm,
  58. RenormMomentum = renorm_momentum,
  59. Trainable = trainable,
  60. Name = name
  61. });
  62. return layer.Apply(inputs);
  63. }
  64. /// <summary>
  65. ///
  66. /// </summary>
  67. /// <param name="filters"></param>
  68. /// <param name="kernel_size"></param>
  69. /// <param name="strides"></param>
  70. /// <param name="padding"></param>
  71. /// <param name="data_format"></param>
  72. /// <param name="dilation_rate"></param>
  73. /// <param name="groups"></param>
  74. /// <param name="activation">tf.keras.activations</param>
  75. /// <param name="use_bias"></param>
  76. /// <param name="kernel_initializer"></param>
  77. /// <param name="bias_initializer"></param>
  78. /// <param name="kernel_regularizer"></param>
  79. /// <param name="bias_regularizer"></param>
  80. /// <param name="activity_regularizer"></param>
  81. /// <returns></returns>
  82. public Conv2D Conv2D(int filters,
  83. TensorShape kernel_size = null,
  84. TensorShape strides = null,
  85. string padding = "valid",
  86. string data_format = null,
  87. TensorShape dilation_rate = null,
  88. int groups = 1,
  89. Activation activation = null,
  90. bool use_bias = true,
  91. IInitializer kernel_initializer = null,
  92. IInitializer bias_initializer = null,
  93. IRegularizer kernel_regularizer = null,
  94. IRegularizer bias_regularizer = null,
  95. IRegularizer activity_regularizer = null)
  96. => new Conv2D(new Conv2DArgs
  97. {
  98. Rank = 2,
  99. Filters = filters,
  100. KernelSize = kernel_size,
  101. Strides = strides == null ? (1, 1) : strides,
  102. Padding = padding,
  103. DataFormat = data_format,
  104. DilationRate = dilation_rate == null ? (1, 1) : dilation_rate,
  105. Groups = groups,
  106. UseBias = use_bias,
  107. KernelRegularizer = kernel_regularizer,
  108. KernelInitializer = kernel_initializer == null ? tf.glorot_uniform_initializer : kernel_initializer,
  109. BiasInitializer = bias_initializer == null ? tf.zeros_initializer : bias_initializer,
  110. BiasRegularizer = bias_regularizer,
  111. ActivityRegularizer = activity_regularizer,
  112. Activation = activation ?? keras.activations.Linear
  113. });
  114. public Tensor conv2d(Tensor inputs,
  115. int filters,
  116. int[] kernel_size,
  117. int[] strides = null,
  118. string padding = "valid",
  119. string data_format = "channels_last",
  120. int[] dilation_rate = null,
  121. bool use_bias = true,
  122. Activation activation = null,
  123. IInitializer kernel_initializer = null,
  124. IInitializer bias_initializer = null,
  125. bool trainable = true,
  126. string name = null)
  127. {
  128. if (strides == null)
  129. strides = new int[] { 1, 1 };
  130. if (dilation_rate == null)
  131. dilation_rate = new int[] { 1, 1 };
  132. if (bias_initializer == null)
  133. bias_initializer = tf.zeros_initializer;
  134. var layer = new Conv2D(new Conv2DArgs
  135. {
  136. Filters = filters,
  137. KernelSize = kernel_size,
  138. Strides = strides,
  139. Padding = padding,
  140. DataFormat = data_format,
  141. DilationRate = dilation_rate,
  142. Activation = activation,
  143. UseBias = use_bias,
  144. KernelInitializer = kernel_initializer,
  145. BiasInitializer = bias_initializer,
  146. Trainable = trainable,
  147. Name = name
  148. });
  149. return layer.Apply(inputs);
  150. }
  151. public Dense Dense(int units,
  152. Activation activation = null,
  153. TensorShape input_shape = null)
  154. => new Dense(new DenseArgs
  155. {
  156. Units = units,
  157. Activation = activation ?? keras.activations.Linear,
  158. InputShape = input_shape
  159. });
  160. /// <summary>
  161. /// Densely-connected layer class. aka fully-connected<br></br>
  162. /// `outputs = activation(inputs * kernel + bias)`
  163. /// </summary>
  164. /// <param name="inputs"></param>
  165. /// <param name="units">Python integer, dimensionality of the output space.</param>
  166. /// <param name="activation"></param>
  167. /// <param name="use_bias">Boolean, whether the layer uses a bias.</param>
  168. /// <param name="kernel_initializer"></param>
  169. /// <param name="bias_initializer"></param>
  170. /// <param name="trainable"></param>
  171. /// <param name="name"></param>
  172. /// <param name="reuse"></param>
  173. /// <returns></returns>
  174. public Tensor dense(Tensor inputs,
  175. int units,
  176. Activation activation = null,
  177. bool use_bias = true,
  178. IInitializer kernel_initializer = null,
  179. IInitializer bias_initializer = null,
  180. bool trainable = true,
  181. string name = null,
  182. bool? reuse = null)
  183. {
  184. if (bias_initializer == null)
  185. bias_initializer = tf.zeros_initializer;
  186. var layer = new Dense(new DenseArgs
  187. {
  188. Units = units,
  189. Activation = activation,
  190. UseBias = use_bias,
  191. BiasInitializer = bias_initializer,
  192. KernelInitializer = kernel_initializer,
  193. Trainable = trainable,
  194. Name = name
  195. });
  196. return layer.Apply(inputs);
  197. }
  198. public Dropout Dropout(float rate, TensorShape noise_shape = null, int? seed = null)
  199. => new Dropout(new DropoutArgs
  200. {
  201. Rate = rate,
  202. NoiseShape = noise_shape,
  203. Seed = seed
  204. });
  205. /// <summary>
  206. /// Turns positive integers (indexes) into dense vectors of fixed size.
  207. /// This layer can only be used as the first layer in a model.
  208. /// e.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
  209. /// https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding
  210. /// </summary>
  211. /// <param name="input_dim">Size of the vocabulary, i.e. maximum integer index + 1.</param>
  212. /// <param name="output_dim">Dimension of the dense embedding.</param>
  213. /// <param name="embeddings_initializer">Initializer for the embeddings matrix (see keras.initializers).</param>
  214. /// <param name="mask_zero"></param>
  215. /// <returns></returns>
  216. public Embedding Embedding(int input_dim,
  217. int output_dim,
  218. IInitializer embeddings_initializer = null,
  219. bool mask_zero = false,
  220. TensorShape input_shape = null,
  221. int input_length = -1)
  222. => new Embedding(new EmbeddingArgs
  223. {
  224. InputDim = input_dim,
  225. OutputDim = output_dim,
  226. MaskZero = mask_zero,
  227. InputShape = input_shape ?? input_length,
  228. InputLength = input_length,
  229. EmbeddingsInitializer = embeddings_initializer
  230. });
  231. public Flatten Flatten(string data_format = null)
  232. => new Flatten(new FlattenArgs
  233. {
  234. DataFormat = data_format
  235. });
  236. /// <summary>
  237. /// `Input()` is used to instantiate a Keras tensor.
  238. /// </summary>
  239. /// <param name="shape">A shape tuple not including the batch size.</param>
  240. /// <param name="name"></param>
  241. /// <param name="sparse"></param>
  242. /// <param name="ragged"></param>
  243. /// <returns></returns>
  244. public Tensors Input(TensorShape shape,
  245. string name = null,
  246. bool sparse = false,
  247. bool ragged = false)
  248. {
  249. var input_layer = new InputLayer(new InputLayerArgs
  250. {
  251. InputShape = shape,
  252. Name = name,
  253. Sparse = sparse,
  254. Ragged = ragged
  255. });
  256. return input_layer.InboundNodes[0].Outputs;
  257. }
  258. public MaxPooling2D MaxPooling2D(TensorShape pool_size = null,
  259. TensorShape strides = null,
  260. string padding = "valid")
  261. => new MaxPooling2D(new MaxPooling2DArgs
  262. {
  263. PoolSize = pool_size ?? (2, 2),
  264. Strides = strides,
  265. Padding = padding
  266. });
  267. /// <summary>
  268. /// Max pooling layer for 2D inputs (e.g. images).
  269. /// </summary>
  270. /// <param name="inputs">The tensor over which to pool. Must have rank 4.</param>
  271. /// <param name="pool_size"></param>
  272. /// <param name="strides"></param>
  273. /// <param name="padding"></param>
  274. /// <param name="data_format"></param>
  275. /// <param name="name"></param>
  276. /// <returns></returns>
  277. public Tensor max_pooling2d(Tensor inputs,
  278. int[] pool_size,
  279. int[] strides,
  280. string padding = "valid",
  281. string data_format = "channels_last",
  282. string name = null)
  283. {
  284. var layer = new MaxPooling2D(new MaxPooling2DArgs
  285. {
  286. PoolSize = pool_size,
  287. Strides = strides,
  288. Padding = padding,
  289. DataFormat = data_format,
  290. Name = name
  291. });
  292. return layer.Apply(inputs);
  293. }
  294. public Layer LSTM(int units,
  295. Activation activation = null,
  296. Activation recurrent_activation = null,
  297. bool use_bias = true,
  298. IInitializer kernel_initializer = null,
  299. IInitializer recurrent_initializer = null,
  300. IInitializer bias_initializer = null,
  301. bool unit_forget_bias = true,
  302. float dropout = 0f,
  303. float recurrent_dropout = 0f,
  304. int implementation = 2,
  305. bool return_sequences = false,
  306. bool return_state = false,
  307. bool go_backwards = false,
  308. bool stateful = false,
  309. bool time_major = false,
  310. bool unroll = false)
  311. => new LSTM(new LSTMArgs
  312. {
  313. Units = units,
  314. Activation = activation ?? keras.activations.Tanh,
  315. RecurrentActivation = recurrent_activation ?? keras.activations.Sigmoid,
  316. KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
  317. RecurrentInitializer = recurrent_initializer ?? tf.orthogonal_initializer,
  318. BiasInitializer = bias_initializer ?? tf.zeros_initializer,
  319. Dropout = dropout,
  320. RecurrentDropout = recurrent_dropout,
  321. Implementation = implementation,
  322. ReturnSequences = return_sequences,
  323. ReturnState = return_state,
  324. GoBackwards = go_backwards,
  325. Stateful = stateful,
  326. TimeMajor = time_major,
  327. Unroll = unroll
  328. });
  329. public Rescaling Rescaling(float scale,
  330. float offset = 0,
  331. TensorShape input_shape = null)
  332. => new Rescaling(new RescalingArgs
  333. {
  334. Scale = scale,
  335. Offset = offset,
  336. InputShape = input_shape
  337. });
  338. /// <summary>
  339. /// Zero-padding layer for 2D input (e.g. picture).
  340. /// </summary>
  341. /// <param name="padding"></param>
  342. /// <returns></returns>
  343. public ZeroPadding2D ZeroPadding2D(NDArray padding)
  344. => new ZeroPadding2D(new ZeroPadding2DArgs
  345. {
  346. Padding = padding
  347. });
  348. Activation GetActivationByName(string name)
  349. => name switch
  350. {
  351. "linear" => keras.activations.Linear,
  352. "relu" => keras.activations.Relu,
  353. "sigmoid" => keras.activations.Sigmoid,
  354. "tanh" => keras.activations.Tanh,
  355. _ => keras.activations.Linear
  356. };
  357. }
  358. }