
paddle_cost.py

#! /usr/bin/python
# -*- coding: utf-8 -*-

import paddle.nn.functional as F
import paddle as pd

__all__ = [
    'cross_entropy',
    'sigmoid_cross_entropy',
    'binary_cross_entropy',
    'mean_squared_error',
    'normalized_mean_square_error',
    'absolute_difference_error',
    'dice_coe',
    'dice_hard_coe',
    'iou_coe',
    'cross_entropy_seq',
    'cross_entropy_seq_with_mask',
    'cosine_similarity',
    'li_regularizer',
    'lo_regularizer',
    'maxnorm_regularizer',
    'maxnorm_o_regularizer',
    'maxnorm_i_regularizer',
]


def cross_entropy(output, target):
    """Softmax cross-entropy operation, returns the Paddle expression of cross-entropy for two distributions;
    it implements softmax internally. See ``paddle.nn.functional.cross_entropy``.

    Parameters
    ----------
    output : Tensor
        A batch of distribution with shape: [batch_size, num of classes].
    target : Tensor
        A batch of index with shape: [batch_size, ].

    Examples
    --------
    >>> import tensorlayer as tl
    >>> ce = tl.cost.cross_entropy(y_logits, y_labels)

    References
    -----------
    - About cross-entropy: `<https://en.wikipedia.org/wiki/Cross_entropy>`__.

    """
    return F.cross_entropy(input=output, label=target)


def sigmoid_cross_entropy(output, target):
    """Sigmoid cross-entropy operation, see ``paddle.fluid.layers.sigmoid_cross_entropy_with_logits``.

    Parameters
    ----------
    output : Tensor
        A batch of distribution with shape: [batch_size, num of classes].
    target : Tensor
        A batch of index with shape: [batch_size, ].
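
    Examples
    --------
    A minimal usage sketch; ``y_pred`` and ``y_true`` below are illustrative
    placeholders, not part of the API:

    >>> import tensorlayer as tl
    >>> loss = tl.cost.sigmoid_cross_entropy(y_pred, y_true)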
  55. """
  56. if output.shape[-1] == target.shape[-1]:
  57. pass
  58. else:
  59. depth = output.shape[-1]
  60. label = pd.fluid.layers.one_hot(target, depth=depth)
  61. out = pd.fluid.layers.sigmoid_cross_entropy_with_logits(x=output, label=label)
  62. out = pd.fluid.layers.reduce_mean(out)
  63. return out


def binary_cross_entropy(output, target, epsilon=1e-8):
    """Binary cross-entropy operation.

    Parameters
    ----------
    output : Tensor
        Tensor with type of `float32` or `float64`.
    target : Tensor
        The target distribution, format the same with `output`.
    epsilon : float
        A small value to avoid output to be zero.
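
    Examples
    --------
    A minimal usage sketch; ``y_pred`` (probabilities in [0, 1]) and ``y_true``
    (a same-shaped binary tensor) are illustrative placeholders:

    >>> import tensorlayer as tl
    >>> loss = tl.cost.binary_cross_entropy(y_pred, y_true)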

    References
    -----------
    - `ericjang-DRAW <https://github.com/ericjang/draw/blob/master/draw.py#L73>`__

    """
    if output.shape[-1] != target.shape[-1]:
        # Convert class indices to a one-hot distribution so shapes match.
        depth = output.shape[-1]
        target = pd.fluid.layers.one_hot(target, depth=depth)
    out = pd.fluid.layers.reduce_sum(
        -(target * pd.log(output + epsilon) + (1. - target) * pd.log(1. - output + epsilon))
    )
    return out


def mean_squared_error(output, target, is_mean=False, axis=-1, name="mean_squared_error"):
    """Return the Paddle expression of mean-square-error (L2) of two batches of data.

    Parameters
    ----------
    output : Tensor
        2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
    target : Tensor
        The target distribution, format the same with `output`.
    is_mean : boolean
        Whether to compute the mean or sum for each example.
        - If True, use ``reduction='mean'`` to compute the loss between one target and predicted data.
        - If False, use ``reduction='sum'`` (default).
    axis : int or list of int
        The dimensions to reduce.
    name : str
        An optional name to attach to this function.
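
    Examples
    --------
    A minimal usage sketch; ``y_pred`` and ``y_true`` are illustrative
    same-shaped float tensors:

    >>> import tensorlayer as tl
    >>> mse = tl.cost.mean_squared_error(y_pred, y_true, is_mean=True)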

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`__

    """
    if output.shape[-1] != target.shape[-1]:
        # Convert class indices to a one-hot distribution so shapes match.
        depth = output.shape[-1]
        target = pd.fluid.layers.one_hot(target, depth=depth)
    if is_mean:
        mse = F.mse_loss(input=output, label=target, reduction='mean')
    else:
        mse = F.mse_loss(input=output, label=target, reduction='sum')
    return mse


def normalized_mean_square_error(output, target, axis=-1, name="normalized_mean_squared_error_loss"):
    """Return the Paddle expression of normalized mean-square-error of two distributions.

    Parameters
    ----------
    output : Tensor
        2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
    target : Tensor
        The target distribution, format the same with `output`.
    axis : int or list of int
        The dimensions to reduce.
    name : str
        An optional name to attach to this function.
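
    Examples
    --------
    A minimal usage sketch; ``y_pred`` and ``y_true`` are illustrative
    same-shaped float tensors:

    >>> import tensorlayer as tl
    >>> nmse = tl.cost.normalized_mean_square_error(y_pred, y_true)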
  131. """
  132. if output.shape[-1] == target.shape[-1]:
  133. pass
  134. else:
  135. depth = output.shape[-1]
  136. target = pd.fluid.layers.one_hot(target, depth=depth)
  137. nmse_a = pd.sqrt(pd.fluid.layers.reduce_sum(pd.fluid.layers.square_error_cost(output, target), dim=axis))
  138. nmse_b = pd.sqrt(pd.fluid.layers.reduce_sum(pd.square(target), dim=axis))
  139. nmse = pd.fluid.layers.reduce_mean(nmse_a / nmse_b)
  140. return nmse


def absolute_difference_error(output, target, is_mean=False, axis=-1, name="absolute_difference_error_loss"):
    """Return the Paddle expression of absolute difference error (L1) of two batches of data.

    Parameters
    ----------
    output : Tensor
        2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
    target : Tensor
        The target distribution, format the same with `output`.
    is_mean : boolean
        Whether to compute the mean or sum for each example.
        - If True, compute the mean of the per-example loss.
        - If False, compute the sum (default).
    axis : int or list of int
        The dimensions to reduce.
    name : str
        An optional name to attach to this function.
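
    Examples
    --------
    A minimal usage sketch; ``y_pred`` and ``y_true`` are illustrative
    same-shaped float tensors:

    >>> import tensorlayer as tl
    >>> l1 = tl.cost.absolute_difference_error(y_pred, y_true, is_mean=True)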
  157. """
  158. if is_mean:
  159. loss = pd.fluid.layers.reduce_mean(pd.fluid.layers.reduce_mean(pd.abs(output - target), axis))
  160. else:
  161. loss = pd.fluid.layers.reduce_mean(pd.fluid.layers.reduce_sum(pd.abs(output - target), axis))
  162. return loss


def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5):
    """Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
    of two batches of data, usually used for binary image segmentation
    i.e. labels are binary. The coefficient ranges between 0 and 1, where 1 means a total match.

    Parameters
    -----------
    output : Tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : Tensor
        The target distribution, format the same with `output`.
    loss_type : str
        ``jaccard`` or ``sorensen``, default is ``jaccard``.
    axis : tuple of int
        All dimensions are reduced, default ``(1, 2, 3)``.
    smooth : float
        This small value will be added to the numerator and denominator.
        - If both output and target are empty, it makes sure dice is 1.
        - If either output or target is empty (all pixels are background), dice = ``smooth/(small_value + smooth)``; if smooth is very small, dice is close to 0 (even when the image values are below the threshold), so in this case a higher smooth yields a higher dice.

    Examples
    ---------
    >>> import tensorlayer as tl
    >>> outputs = tl.act.pixel_wise_softmax(outputs)
    >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)

    References
    -----------
    - `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__

    """
    axis = list(axis)
    inse = pd.fluid.layers.reduce_sum(output * target, dim=axis)
    if loss_type == 'jaccard':
        l = pd.fluid.layers.reduce_sum(output * output, dim=axis)
        r = pd.fluid.layers.reduce_sum(target * target, dim=axis)
    elif loss_type == 'sorensen':
        l = pd.fluid.layers.reduce_sum(output, dim=axis)
        r = pd.fluid.layers.reduce_sum(target, dim=axis)
    else:
        raise ValueError("Unknown loss_type")
    dice = (2. * inse + smooth) / (l + r + smooth)
    dice = pd.fluid.layers.reduce_mean(dice)
    return dice


def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity
    of two batches of data, usually used for binary image segmentation i.e. labels are binary.
    The coefficient ranges between 0 and 1, where 1 means a total match.

    Parameters
    -----------
    output : tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        The target distribution, format the same with `output`.
    threshold : float
        The threshold value to be true.
    axis : tuple of integer
        All dimensions are reduced, default ``(1,2,3)``.
    smooth : float
        This small value will be added to the numerator and denominator, see ``dice_coe``.
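
    Examples
    --------
    A minimal usage sketch; ``outputs`` and ``y_`` are illustrative same-shaped
    tensors of pixel scores/labels:

    >>> import tensorlayer as tl
    >>> hard_dice = tl.cost.dice_hard_coe(outputs, y_)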

    References
    -----------
    - `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__

    """
    # Binarize both tensors before computing the overlap.
    output = pd.cast(output > threshold, dtype='float32')
    target = pd.cast(target > threshold, dtype='float32')
    inse = pd.fluid.layers.reduce_sum(pd.multiply(output, target), dim=list(axis))
    l = pd.fluid.layers.reduce_sum(output, dim=list(axis))
    r = pd.fluid.layers.reduce_sum(target, dim=list(axis))
    hard_dice = (2. * inse + smooth) / (l + r + smooth)
    hard_dice = pd.fluid.layers.reduce_mean(hard_dice)
    return hard_dice


def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
    """Non-differentiable Intersection over Union (IoU) for comparing the
    similarity of two batches of data, usually used for evaluating binary image segmentation.
    The coefficient ranges between 0 and 1, where 1 means a total match.

    Parameters
    -----------
    output : tensor
        A batch of distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        The target distribution, format the same with `output`.
    threshold : float
        The threshold value to be true.
    axis : tuple of integer
        All dimensions are reduced, default ``(1,2,3)``.
    smooth : float
        This small value will be added to the numerator and denominator, see ``dice_coe``.

    Notes
    ------
    - IoU cannot be used as a training loss; people usually use the dice coefficient for training, and IoU and hard-dice for evaluation.
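
    Examples
    --------
    A minimal usage sketch; ``outputs`` and ``y_`` are illustrative same-shaped
    tensors of pixel scores/labels:

    >>> import tensorlayer as tl
    >>> iou = tl.cost.iou_coe(outputs, y_)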
  251. """
  252. pre = pd.cast(output > threshold, dtype='float32')
  253. truth = pd.cast(target > threshold, dtype='float32')
  254. inse = pd.fluid.layers.reduce_sum(pd.multiply(pre, truth), dim=axis) # AND
  255. union = pd.fluid.layers.reduce_sum(pd.cast(pd.add(pre, truth) >= 1, dtype='float32'), dim=axis) # OR
  256. batch_iou = (inse + smooth) / (union + smooth)
  257. iou = pd.fluid.layers.reduce_mean(batch_iou, name='iou_coe')
  258. return iou


def sequence_loss_by_example(
    logits, targets, weights, average_across_timesteps=True, softmax_loss_function=None, name=None
):
    """Weighted cross-entropy loss for a sequence of logits (per example). See the original TensorFlow code:
    <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py#L1057>

    Parameters
    ----------
    logits: List
        List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List
        List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List
        List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: Boolean
        If set, divide the returned cost by the total label weight.
    softmax_loss_function: None or Function
        Function (labels, logits) -> loss-batch to be used instead of the standard softmax (the default if this is None).
        **Note that to avoid confusion, it is required for the function to accept named arguments.**
    name: None or str
        Optional name for this operation, default: "sequence_loss_by_example".

    Returns
    -------
    1D batch-sized float Tensor: The log-perplexity for each sequence.

    Raises
    ------
    ValueError: If len(logits) is different from len(targets) or len(weights).

    """
    raise NotImplementedError("Not Implemented.")


def cross_entropy_seq(logits, target_seqs, batch_size=None):
    """Returns the expression of cross-entropy of two sequences, implementing
    softmax internally. Normally used for fixed-length RNN outputs, see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm.py>`__.

    Parameters
    ----------
    logits : Tensor
        2D tensor with shape of `[batch_size * n_steps, n_classes]`.
    target_seqs : Tensor
        The target sequence, 2D tensor `[batch_size, n_steps]`; if the number of steps is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead.
    batch_size : None or int.
        Whether to divide the cost by batch size.
        - If integer, the returned cost will be divided by `batch_size`.
        - If None (default), the returned cost will not be divided by anything.

    Examples
    --------
    >>> import tensorlayer as tl
    >>> # see the `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm.py>`__ for more details
    >>> # outputs shape : (batch_size * n_steps, n_classes)
    >>> # targets shape : (batch_size, n_steps)
    >>> cost = tl.cost.cross_entropy_seq(outputs, targets)

    """
    raise NotImplementedError("Not Implemented.")


def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
    """Returns the expression of cross-entropy of two sequences, implementing
    softmax internally. Normally used for dynamic RNNs with synced sequence input and output.

    Parameters
    -----------
    logits : Tensor
        2D tensor with shape of [batch_size * ?, n_classes], where `?` means a dynamic number of steps for each example.
        - Can be obtained from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`.
    target_seqs : Tensor
        Tensor of int, like word IDs. [batch_size, ?], where `?` means a dynamic number of steps for each example.
    input_mask : Tensor
        The mask to compute the loss; it has the same size as `target_seqs`, normally 0 or 1.
    return_details : boolean
        Whether to return detailed losses.
        - If False (default), only returns the loss.
        - If True, returns the loss, losses, weights and targets (see source code).

    Examples
    --------
    >>> import tensorlayer as tl
    >>> import tensorflow as tf
    >>> import numpy as np
    >>> batch_size = 64
    >>> vocab_size = 10000
    >>> embedding_size = 256
    >>> ni = tl.layers.Input([batch_size, None], dtype=tf.int64)
    >>> net = tl.layers.Embedding(
    ...     vocabulary_size = vocab_size,
    ...     embedding_size = embedding_size,
    ...     name = 'seq_embedding')(ni)
    >>> net = tl.layers.RNN(
    ...     cell = tf.keras.layers.LSTMCell(units=embedding_size, dropout=0.1),
    ...     return_seq_2d = True,
    ...     name = 'dynamicrnn')(net)
    >>> net = tl.layers.Dense(n_units=vocab_size, name="output")(net)
    >>> model = tl.models.Model(inputs=ni, outputs=net)
    >>> input_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64)
    >>> target_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64)
    >>> input_mask = np.random.randint(0, 2, size=(batch_size, 10), dtype=np.int64)
    >>> outputs = model(input_seqs, is_train=True)
    >>> loss = tl.cost.cross_entropy_seq_with_mask(outputs, target_seqs, input_mask)

    """
    raise NotImplementedError("Not Implemented.")


def cosine_similarity(v1, v2):
    """Cosine similarity [-1, 1].

    Parameters
    ----------
    v1, v2 : Tensor
        Tensor with the same shape [batch_size, n_feature].
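
    Examples
    --------
    A minimal usage sketch; ``a`` and ``b`` are illustrative
    [batch_size, n_feature] tensors:

    >>> import tensorlayer as tl
    >>> sim = tl.cost.cosine_similarity(a, b)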

    References
    ----------
    - `Wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`__.

    """
    return pd.fluid.layers.reduce_sum(pd.multiply(v1, v2), 1) / \
        (pd.sqrt(pd.fluid.layers.reduce_sum(pd.multiply(v1, v1), 1)) *
         pd.sqrt(pd.fluid.layers.reduce_sum(pd.multiply(v2, v2), 1)))


# Regularization Functions


def li_regularizer(scale, scope=None):
    """Li regularization removes the neurons of the previous layer. The `i` represents `inputs`.
    Returns a function that can be used to apply group li regularization to weights.
    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.

    Parameters
    ----------
    scale : float
        A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: str
        An optional scope name for this function.

    Returns
    --------
    A function with signature `li(weights, name=None)` that applies Li regularization.

    Raises
    ------
    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.

    """
    raise NotImplementedError("Not Implemented.")


def lo_regularizer(scale):
    """Lo regularization removes the neurons of the current layer. The `o` represents `outputs`.
    Returns a function that can be used to apply group lo regularization to weights.
    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.

    Parameters
    ----------
    scale : float
        A scalar multiplier `Tensor`. 0.0 disables the regularizer.

    Returns
    -------
    A function with signature `lo(weights, name=None)` that applies Lo regularization.

    Raises
    ------
    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.

    """
    raise NotImplementedError("Not Implemented.")


def maxnorm_regularizer(scale=1.0):
    """Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.
    More about max-norm, see `wiki-max norm <https://en.wikipedia.org/wiki/Matrix_norm#Max_norm>`_.
    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.

    Parameters
    ----------
    scale : float
        A scalar multiplier `Tensor`. 0.0 disables the regularizer.

    Returns
    ---------
    A function with signature `mn(weights, name=None)` that applies max-norm regularization.

    Raises
    --------
    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.

    """
    raise NotImplementedError("Not Implemented.")


def maxnorm_o_regularizer(scale):
    """Max-norm output regularization removes the neurons of the current layer.
    Returns a function that can be used to apply max-norm regularization to each column of the weight matrix.
    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.

    Parameters
    ----------
    scale : float
        A scalar multiplier `Tensor`. 0.0 disables the regularizer.

    Returns
    ---------
    A function with signature `mn_o(weights, name=None)` that applies max-norm output regularization.

    Raises
    ---------
    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.

    """
    raise NotImplementedError("Not Implemented.")


def maxnorm_i_regularizer(scale):
    """Max-norm input regularization removes the neurons of the previous layer.
    Returns a function that can be used to apply max-norm regularization to each row of the weight matrix.
    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.

    Parameters
    ----------
    scale : float
        A scalar multiplier `Tensor`. 0.0 disables the regularizer.

    Returns
    ---------
    A function with signature `mn_i(weights, name=None)` that applies max-norm input regularization.

    Raises
    ---------
    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.

    """
    raise NotImplementedError("Not Implemented.")


def huber_loss(
    output, target, is_mean=True, delta=1.0, dynamichuber=False, reverse=False, axis=-1, epsilon=0.00001, name=None
):
    """Huber loss operation, see `<https://en.wikipedia.org/wiki/Huber_loss>`__.
    Reverse Huber loss operation, see `<https://statweb.stanford.edu/~owen/reports/hhu.pdf>`__.
    Dynamic reverse Huber loss operation, see `<https://arxiv.org/pdf/1606.00373.pdf>`__.

    Parameters
    ----------
    output : Tensor
        A distribution with shape: [batch_size, ....], (any dimensions).
    target : Tensor
        The target distribution, format the same with `output`.
    is_mean : boolean
        Whether to compute the mean or sum for each example.
        - If True, compute the mean of the loss between one target and predicted data (default).
        - If False, compute the sum.
    delta: float
        The point where the Huber loss function changes from quadratic to linear.
    dynamichuber: boolean
        Whether to compute the coefficient c for each batch.
        - If True, c is 20% of the maximal per-batch error.
        - If False, c is delta.
    reverse: boolean
        Whether to compute the reverse Huber loss.
    axis : int or list of int
        The dimensions to reduce.
    epsilon: float
        Epsilon.
    name : string
        Name of this loss.

    """
    raise NotImplementedError("Not Implemented.")

TensorLayer 3.0 is a deep learning library that supports multiple deep learning frameworks as computation backends. Planned backends include TensorFlow, PyTorch, MindSpore, and Paddle.