You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_seq2seq_model.py 3.1 kB

4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest

# Silence TensorFlow's C++ logging (level 3 = errors only). This assignment
# must happen BEFORE `import tensorflow`, which is why it sits between the
# import statements rather than after them.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tqdm import tqdm
from sklearn.utils import shuffle
from tensorlayer.models.seq2seq import Seq2seq
from tests.utils import CustomTestCase
from tensorlayer.cost import cross_entropy_seq
  14. class Model_SEQ2SEQ_Test(CustomTestCase):
  15. @classmethod
  16. def setUpClass(cls):
  17. cls.batch_size = 16
  18. cls.vocab_size = 20
  19. cls.embedding_size = 32
  20. cls.dec_seq_length = 5
  21. cls.trainX = np.random.randint(20, size=(50, 6))
  22. cls.trainY = np.random.randint(20, size=(50, cls.dec_seq_length + 1))
  23. cls.trainY[:, 0] = 0 # start_token == 0
  24. # Parameters
  25. cls.src_len = len(cls.trainX)
  26. cls.tgt_len = len(cls.trainY)
  27. assert cls.src_len == cls.tgt_len
  28. cls.num_epochs = 100
  29. cls.n_step = cls.src_len // cls.batch_size
  30. @classmethod
  31. def tearDownClass(cls):
  32. pass
  33. def test_basic_simpleSeq2Seq(self):
  34. model_ = Seq2seq(
  35. decoder_seq_length=5,
  36. cell_enc=tf.keras.layers.GRUCell,
  37. cell_dec=tf.keras.layers.GRUCell,
  38. n_layer=3,
  39. n_units=128,
  40. embedding_layer=tl.layers.Embedding(vocabulary_size=self.vocab_size, embedding_size=self.embedding_size),
  41. )
  42. optimizer = tf.optimizers.Adam(learning_rate=0.001)
  43. for epoch in range(self.num_epochs):
  44. model_.train()
  45. trainX, trainY = shuffle(self.trainX, self.trainY)
  46. total_loss, n_iter = 0, 0
  47. for X, Y in tqdm(tl.iterate.minibatches(inputs=trainX, targets=trainY, batch_size=self.batch_size,
  48. shuffle=False), total=self.n_step,
  49. desc='Epoch[{}/{}]'.format(epoch + 1, self.num_epochs), leave=False):
  50. dec_seq = Y[:, :-1]
  51. target_seq = Y[:, 1:]
  52. with tf.GradientTape() as tape:
  53. ## compute outputs
  54. output = model_(inputs=[X, dec_seq])
  55. output = tf.reshape(output, [-1, self.vocab_size])
  56. loss = cross_entropy_seq(logits=output, target_seqs=target_seq)
  57. grad = tape.gradient(loss, model_.all_weights)
  58. optimizer.apply_gradients(zip(grad, model_.all_weights))
  59. total_loss += loss
  60. n_iter += 1
  61. model_.eval()
  62. test_sample = trainX[0:2, :].tolist()
  63. top_n = 1
  64. for i in range(top_n):
  65. prediction = model_([test_sample], seq_length=self.dec_seq_length, start_token=0, top_n=1)
  66. print("Prediction: >>>>> ", prediction, "\n Target: >>>>> ", trainY[0:2, 1:], "\n\n")
  67. # printing average loss after every epoch
  68. print('Epoch [{}/{}]: loss {:.4f}'.format(epoch + 1, self.num_epochs, total_loss / n_iter))
if __name__ == '__main__':
    # Allow running this test module directly: `python test_seq2seq_model.py`.
    unittest.main()

TensorLayer 3.0 是一款兼容多种深度学习框架为计算后端的深度学习库。计划兼容 TensorFlow、PyTorch、MindSpore、Paddle。