You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

run_compile.py 2.8 kB

4 years ago
1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374
  1. import tensorlayer as T
  2. from dragon.vm.tensorlayer.layers import Dense
  3. from dragon.vm.tensorlayer.models import Model
  4. import dragon.vm.tensorlayer as tl
  5. import dragon as dg
  6. import argparse
  7. import numpy as np
# Load MNIST flattened to 784-dim vectors; returns train/val/test splits
# as numpy arrays (images and integer labels).
X_train, y_train, X_val, y_val, X_test, y_test = T.files.load_mnist_dataset(shape=(-1, 784))
  9. class MLP(Model):
  10. def __init__(self):
  11. super(MLP, self).__init__()
  12. self.dense1 = Dense(n_units=800, act=tl.act.relu, in_channels=784)
  13. self.dense2 = Dense(n_units=800, act=tl.act.relu, in_channels=800)
  14. self.dense3 = Dense(n_units=10, act=tl.act.relu, in_channels=800)
  15. def forward(self, x):
  16. z = self.dense1(x)
  17. z = self.dense2(z)
  18. out = self.dense3(z)
  19. return out
class Classifier(object):
    """The base classifier class."""

    # TensorSpec for graph execution
    # NOTE(review): these specs describe 3x32x32 images (CIFAR-like) while the
    # script feeds flat 784-dim MNIST vectors — confirm they are unused here.
    image_spec = dg.Tensor([None, 3, 32, 32], 'float32')
    label_spec = dg.Tensor([None], 'int64')

    def __init__(self, optimizer):
        """Bundle an MLP network with the given dragon optimizer.

        Parameters
        ----------
        optimizer : a dragon optimizer; ``step`` uses it to apply gradients.
        """
        super(Classifier, self).__init__()
        self.net = MLP()
        self.optimizer = optimizer
        # Cache the trainable weights once; ``step`` differentiates w.r.t. these.
        self.params = self.net.trainable_weights

    def step(self, image, label):
        """Run one forward/backward/update step on a batch.

        Returns a ``(loss, accuracy, optimizer)`` tuple.
        """
        with dg.GradientTape() as tape:
            logit = self.net(image)
            # logit = dg.cast(logit, 'float64')
            # NOTE(review): argmax is non-differentiable and the result is cast
            # to int32, so no gradient can flow back to the network weights from
            # the squared-error loss below. The commented-out lines suggest a
            # cross-entropy loss on the raw logits was the earlier intent —
            # verify this experimental path is deliberate.
            logit = dg.cast(dg.math.argmax(logit, -1), 'int32')
            # label = dg.cast(label, 'float32')
            # print("logit :\n", logit, label)
            # loss = dg.losses.smooth_l1_loss([logit, label])
            # loss = tl.losses.sparse_softmax_crossentropy(logit, label)
            # Sum of squared differences between predicted and true class ids.
            loss = dg.math.sum(
                (logit - label) * (logit - label)
            )  # dg.losses.sparse_softmax_cross_entropy([logit, label])
            # Fraction of predictions equal to the labels, as a float scalar.
            accuracy = dg.math.mean(dg.math.equal([logit, label]).astype('float32'))
        grads = tape.gradient(loss, self.params)
        self.optimizer.apply_gradients(zip(self.params, grads))
        return loss, accuracy, self.optimizer
if __name__ == '__main__':
    # Run define-by-run (eager) rather than building a static graph.
    dg.autograph.set_execution('EAGER_MODE')

    # Define the model
    model = Classifier(dg.optimizers.SGD(base_lr=0.001, momentum=0.9, weight_decay=1e-4))

    # Main loop
    batch_size = 200
    for i in range(50):
        for X_batch, y_batch in T.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
            # Wrap the numpy batches as eager tensors without copying the data.
            image = dg.EagerTensor(X_batch, copy=False)
            label = dg.EagerTensor(y_batch, copy=False, dtype='float32')
            loss, accuracy, _ = model.step(image, label)
            # NOTE(review): indentation was lost in this copy of the file; the
            # logging guard is assumed to sit inside the batch loop (so matching
            # iterations log once per batch) — confirm against the original.
            if i % 20 == 0:
                dg.logging.info(
                    'Iteration %d, lr = %s, loss = %.5f, accuracy = %.3f' %
                    (i, str(model.optimizer.base_lr), loss, accuracy)
                )

TensorLayer 3.0 是一款支持多种深度学习框架作为计算后端的深度学习库，计划兼容 TensorFlow、PyTorch、MindSpore、Paddle。