Browse Source

Updates to Trainer/Tester/fastnlp

1. Tester has a parameter "print_every_step" to control printing. print_every_step == 0 means no printing.
2. Tester's evaluate returns (a list of) floats, rather than torch.cuda.tensor.
3. Trainer also has a parameter "print_every_step", with the same usage.
4. In training, validation steps are not shown.
5. Updates to code comments.
6. fastnlp.py is ready for CWS. test_fastNLP.py works.
tags/v0.1.0
FengZiYjun 6 years ago
parent
commit
ab55f25e20
8 changed files with 61 additions and 76 deletions
  1. +2
    -2
      fastNLP/core/preprocess.py
  2. +2
    -2
      fastNLP/core/tester.py
  3. +25
    -54
      fastNLP/core/trainer.py
  4. +13
    -4
      fastNLP/fastnlp.py
  5. BIN
      fastnlp-architecture.jpg
  6. +16
    -11
      reproduction/chinese_word_segment/run.py
  7. +2
    -2
      test/seq_labeling.py
  8. +1
    -1
      test/test_fastNLP.py

+ 2
- 2
fastNLP/core/preprocess.py View File

@@ -19,13 +19,13 @@ DEFAULT_WORD_TO_INDEX = {DEFAULT_PADDING_LABEL: 0, DEFAULT_UNKNOWN_LABEL: 1,
def save_pickle(obj, pickle_path, file_name):
with open(os.path.join(pickle_path, file_name), "wb") as f:
_pickle.dump(obj, f)
print("{} saved in {}.".format(file_name, pickle_path))
print("{} saved in {}".format(file_name, pickle_path))


def load_pickle(pickle_path, file_name):
with open(os.path.join(pickle_path, file_name), "rb") as f:
obj = _pickle.load(f)
print("{} loaded from {}.".format(file_name, pickle_path))
print("{} loaded from {}".format(file_name, pickle_path))
return obj




+ 2
- 2
fastNLP/core/tester.py View File

@@ -98,7 +98,7 @@ class BaseTester(object):

print_output = "[test step {}] {}".format(step, eval_results)
logger.info(print_output)
if step % self.print_every_step == 0:
if self.print_every_step > 0 and step % self.print_every_step == 0:
print(print_output)
step += 1

@@ -187,7 +187,7 @@ class SeqLabelTester(BaseTester):
# make sure "results" is in the same device as "truth"
results = results.to(truth)
accuracy = torch.sum(results == truth.view((-1,))).to(torch.float) / results.shape[0]
return [loss.data, accuracy.data]
return [float(loss), float(accuracy)]

def metrics(self):
batch_loss = np.mean([x[0] for x in self.eval_history])


+ 25
- 54
fastNLP/core/trainer.py View File

@@ -4,7 +4,6 @@ import os
import time
from datetime import timedelta

import numpy as np
import torch

from fastNLP.core.action import Action
@@ -47,7 +46,7 @@ class BaseTrainer(object):
Otherwise, error will raise.
"""
default_args = {"epochs": 3, "batch_size": 8, "validate": True, "use_cuda": True, "pickle_path": "./save/",
"save_best_dev": True, "model_name": "default_model_name.pkl",
"save_best_dev": True, "model_name": "default_model_name.pkl", "print_every_step": 1,
"loss": Loss(None),
"optimizer": Optimizer("Adam", lr=0.001, weight_decay=0)
}
@@ -86,6 +85,7 @@ class BaseTrainer(object):
self.save_best_dev = default_args["save_best_dev"]
self.use_cuda = default_args["use_cuda"]
self.model_name = default_args["model_name"]
self.print_every_step = default_args["print_every_step"]

self._model = None
self._loss_func = default_args["loss"].get() # return a pytorch loss function or None
@@ -93,48 +93,35 @@ class BaseTrainer(object):
self._optimizer_proto = default_args["optimizer"]

def train(self, network, train_data, dev_data=None):
"""General Training Steps
"""General Training Procedure
:param network: a model
:param train_data: three-level list, the training set.
:param dev_data: three-level list, the validation data (optional)

The method is framework independent.
Work by calling the following methods:
- prepare_input
- mode
- define_optimizer
- data_forward
- get_loss
- grad_backward
- update
Subclasses must implement these methods with a specific framework.
"""
# prepare model and data, transfer model to gpu if available
# transfer model to gpu if available
if torch.cuda.is_available() and self.use_cuda:
self._model = network.cuda()
# self._model is used to access model-specific loss
else:
self._model = network

# define tester over dev data
# define Tester over dev data
if self.validate:
default_valid_args = {"save_output": True, "validate_in_training": True, "save_dev_input": True,
"save_loss": True, "batch_size": self.batch_size, "pickle_path": self.pickle_path,
"use_cuda": self.use_cuda}
"use_cuda": self.use_cuda, "print_every_step": 0}
validator = self._create_validator(default_valid_args)
logger.info("validator defined as {}".format(str(validator)))

# optimizer and loss
self.define_optimizer()
logger.info("optimizer defined as {}".format(str(self._optimizer)))
self.define_loss()
logger.info("loss function defined as {}".format(str(self._loss_func)))

# main training epochs
n_samples = len(train_data)
n_batches = n_samples // self.batch_size
n_print = 1
# main training procedure
start = time.time()
logger.info("training epochs started")

for epoch in range(1, self.n_epochs + 1):
logger.info("training epoch {}".format(epoch))

@@ -144,23 +131,30 @@ class BaseTrainer(object):
data_iterator = iter(Batchifier(RandomSampler(train_data), self.batch_size, drop_last=False))
logger.info("prepared data iterator")

self._train_step(data_iterator, network, start=start, n_print=n_print, epoch=epoch)
# one forward and backward pass
self._train_step(data_iterator, network, start=start, n_print=self.print_every_step, epoch=epoch)

# validation
if self.validate:
logger.info("validation started")
validator.test(network, dev_data)

if self.save_best_dev and self.best_eval_result(validator):
self.save_model(network, self.model_name)
print("saved better model selected by dev")
logger.info("saved better model selected by dev")
print("Saved better model selected by validation.")
logger.info("Saved better model selected by validation.")

valid_results = validator.show_matrices()
print("[epoch {}] {}".format(epoch, valid_results))
logger.info("[epoch {}] {}".format(epoch, valid_results))

def _train_step(self, data_iterator, network, **kwargs):
"""Training process in one epoch."""
"""Training process in one epoch.
kwargs should contain:
- n_print: int, print training information every n steps.
- start: time.time(), the starting time of this step.
- epoch: int,
"""
step = 0
for batch_x, batch_y in self.make_batch(data_iterator):

@@ -287,10 +281,11 @@ class BaseTrainer(object):
raise NotImplementedError

def save_model(self, network, model_name):
"""
"""Save this model with such a name.
This method may be called multiple times by Trainer to overwritten a better model.

:param network: the PyTorch model
:param model_name: str
model_best_dev.pkl may be overwritten by a better model in future epochs.
"""
if model_name[-4:] != ".pkl":
model_name += ".pkl"
@@ -300,33 +295,9 @@ class BaseTrainer(object):
raise NotImplementedError


class ToyTrainer(BaseTrainer):
"""
An example to show the definition of Trainer.
"""

def __init__(self, training_args):
super(ToyTrainer, self).__init__(training_args)

def load_train_data(self, data_path):
data_train = _pickle.load(open(data_path + "/data_train.pkl", "rb"))
data_dev = _pickle.load(open(data_path + "/data_train.pkl", "rb"))
return data_train, data_dev, 0, 1

def data_forward(self, network, x):
return network(x)

def grad_backward(self, loss):
self._model.zero_grad()
loss.backward()

def get_loss(self, pred, truth):
return np.mean(np.square(pred - truth))


class SeqLabelTrainer(BaseTrainer):
"""
Trainer for Sequence Modeling
Trainer for Sequence Labeling

"""

@@ -384,7 +355,7 @@ class SeqLabelTrainer(BaseTrainer):


class ClassificationTrainer(BaseTrainer):
"""Trainer for classification."""
"""Trainer for text classification."""

def __init__(self, **train_args):
super(ClassificationTrainer, self).__init__(**train_args)


+ 13
- 4
fastNLP/fastnlp.py View File

@@ -1,4 +1,4 @@
# from fastNLP.core.predictor import SeqLabelInfer, ClassificationInfer
from fastNLP.core.predictor import SeqLabelInfer, ClassificationInfer
from fastNLP.core.preprocess import load_pickle
from fastNLP.loader.config_loader import ConfigLoader, ConfigSection
from fastNLP.loader.model_loader import ModelLoader
@@ -74,9 +74,11 @@ class FastNLP(object):
self._download(model_name, FastNLP_MODEL_COLLECTION[model_name]["url"])

model_class = self._get_model_class(FastNLP_MODEL_COLLECTION[model_name]["class"])
print("Restore model class {}".format(str(model_class)))

model_args = ConfigSection()
ConfigLoader.load_config(self.model_dir + config_file, {section_name: model_args})
print("Restore model hyper-parameters {}".format(str(model_args.data)))

# fetch dictionary size and number of labels from pickle files
word2index = load_pickle(self.model_dir, "word2id.pkl")
@@ -86,14 +88,16 @@ class FastNLP(object):

# Construct the model
model = model_class(model_args)
print("Model constructed.")

# To do: framework independent
ModelLoader.load_pytorch(model, self.model_dir + FastNLP_MODEL_COLLECTION[model_name]["pickle"])
print("Model weights loaded.")

self.model = model
self.infer_type = FastNLP_MODEL_COLLECTION[model_name]["type"]

print("Model loaded. ")
print("Inference ready.")

def run(self, raw_input):
"""
@@ -168,10 +172,15 @@ class FastNLP(object):
:param language: str, one of ('zh', 'en'), Chinese or English.
:return data: list of list of string, each string is a token.
"""
assert language in ("zh", "en")
data = []
delimiter = " " if language is "en" else ""
for sent in text:
tokens = sent.strip().split(delimiter)
if language == "en":
tokens = sent.strip().split()
elif language == "zh":
tokens = [char for char in sent]
else:
raise RuntimeError("Unknown language {}".format(language))
data.append(tokens)
return data



BIN
fastnlp-architecture.jpg View File

Before After
Width: 960  |  Height: 540  |  Size: 36 kB

+ 16
- 11
reproduction/chinese_word_segment/run.py View File

@@ -6,21 +6,21 @@ sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from fastNLP.loader.config_loader import ConfigLoader, ConfigSection
from fastNLP.core.trainer import SeqLabelTrainer
from fastNLP.loader.dataset_loader import TokenizeDatasetLoader, BaseLoader
from fastNLP.loader.preprocess import POSPreprocess, load_pickle
from fastNLP.core.preprocess import SeqLabelPreprocess, load_pickle
from fastNLP.saver.model_saver import ModelSaver
from fastNLP.loader.model_loader import ModelLoader
from fastNLP.core.tester import SeqLabelTester
from fastNLP.models.sequence_modeling import AdvSeqLabel
from fastNLP.core.inference import SeqLabelInfer
from fastNLP.core.predictor import SeqLabelInfer

# not in the file's dir
if len(os.path.dirname(__file__)) != 0:
os.chdir(os.path.dirname(__file__))
datadir = 'icwb2-data'
cfgfile = 'cws.cfg'
datadir = "/home/zyfeng/data/"
cfgfile = './cws.cfg'
data_name = "pku_training.utf8"

cws_data_path = os.path.join(datadir, "training/pku_training.utf8")
cws_data_path = os.path.join(datadir, "pku_training.utf8")
pickle_path = "save"
data_infer_path = os.path.join(datadir, "infer.utf8")

@@ -70,9 +70,10 @@ def train():
train_data = loader.load_pku()

# Preprocessor
p = POSPreprocess(train_data, pickle_path, train_dev_split=0.3)
train_args["vocab_size"] = p.vocab_size
train_args["num_classes"] = p.num_classes
preprocessor = SeqLabelPreprocess()
data_train, data_dev = preprocessor.run(train_data, pickle_path=pickle_path, train_dev_split=0.3)
train_args["vocab_size"] = preprocessor.vocab_size
train_args["num_classes"] = preprocessor.num_classes

# Trainer
trainer = SeqLabelTrainer(**train_args.data)
@@ -83,10 +84,11 @@ def train():
ModelLoader.load_pytorch(model, "./save/saved_model.pkl")
print('model parameter loaded!')
except Exception as e:
print("No saved model. Continue.")
pass
# Start training
trainer.train(model)
trainer.train(model, data_train, data_dev)
print("Training finished!")

# Saver
@@ -106,6 +108,9 @@ def test():
index2label = load_pickle(pickle_path, "id2class.pkl")
test_args["num_classes"] = len(index2label)

# load dev data
dev_data = load_pickle(pickle_path, "data_dev.pkl")

# Define the same model
model = AdvSeqLabel(test_args)

@@ -114,10 +119,10 @@ def test():
print("model loaded!")

# Tester
tester = SeqLabelTester(test_args)
tester = SeqLabelTester(**test_args.data)

# Start testing
tester.test(model)
tester.test(model, dev_data)

# print test results
print(tester.show_matrices())


+ 2
- 2
test/seq_labeling.py View File

@@ -123,7 +123,7 @@ def train_and_test():
tester = SeqLabelTester(save_output=False,
save_loss=False,
save_best_dev=False,
batch_size=8,
batch_size=4,
use_cuda=False,
pickle_path=pickle_path,
model_name="seq_label_in_test.pkl",
@@ -140,4 +140,4 @@ def train_and_test():

if __name__ == "__main__":
train_and_test()
infer()
# infer()

+ 1
- 1
test/test_fastNLP.py View File

@@ -3,7 +3,7 @@ import sys
sys.path.append("..")
from fastNLP.fastnlp import FastNLP

PATH_TO_CWS_PICKLE_FILES = "/home/zyfeng/data/save/"
PATH_TO_CWS_PICKLE_FILES = "/home/zyfeng/fastNLP/reproduction/chinese_word_segment/save/"

def word_seg():
nlp = FastNLP(model_dir=PATH_TO_CWS_PICKLE_FILES)


Loading…
Cancel
Save