|
|
@@ -1,39 +1,35 @@ |
|
|
|
import os |
|
|
|
import tempfile |
|
|
|
import unittest |
|
|
|
|
|
|
|
import numpy as np |
|
|
|
import torch |
|
|
|
import os |
|
|
|
import shutil |
|
|
|
|
|
|
|
from fastNLP.core.callback import EarlyStopCallback, GradientClipCallback, LRScheduler, ControlC, \ |
|
|
|
LRFinder, TensorboardCallback |
|
|
|
from fastNLP import AccuracyMetric |
|
|
|
from fastNLP import BCELoss |
|
|
|
from fastNLP import DataSet |
|
|
|
from fastNLP import Instance |
|
|
|
from fastNLP import BCELoss |
|
|
|
from fastNLP import AccuracyMetric |
|
|
|
from fastNLP import SGD |
|
|
|
from fastNLP import Trainer |
|
|
|
from fastNLP.models.base_model import NaiveClassifier |
|
|
|
from fastNLP.core.callback import EarlyStopError |
|
|
|
from fastNLP.core.callback import EarlyStopCallback, GradientClipCallback, LRScheduler, ControlC, \ |
|
|
|
LRFinder, TensorboardCallback |
|
|
|
from fastNLP.core.callback import EvaluateCallback, FitlogCallback, SaveModelCallback |
|
|
|
from fastNLP.core.callback import WarmupCallback |
|
|
|
import tempfile |
|
|
|
from fastNLP.models.base_model import NaiveClassifier |
|
|
|
|
|
|
|
|
|
|
|
def prepare_env(): |
|
|
|
def prepare_fake_dataset():
    """Build a toy binary-classification DataSet: 1000 points sampled from
    each of two 2-D Gaussians (means (-3,-3) and (3,3), identity covariance),
    labelled y=[0.0] and y=[1.0] respectively."""
    identity_cov = np.array([[1, 0], [0, 1]])
    cluster_a = np.random.multivariate_normal(np.array([-3, -3]), identity_cov, size=(1000,))
    cluster_b = np.random.multivariate_normal(np.array([3, 3]), identity_cov, size=(1000,))

    instances = [Instance(x=[float(point[0]), float(point[1])], y=[0.0]) for point in cluster_a]
    instances += [Instance(x=[float(point[0]), float(point[1])], y=[1.0]) for point in cluster_b]
    return DataSet(instances)
|
|
|
mean = np.array([-3, -3]) |
|
|
|
cov = np.array([[1, 0], [0, 1]]) |
|
|
|
class_A = np.random.multivariate_normal(mean, cov, size=(1000,)) |
|
|
|
|
|
|
|
mean = np.array([3, 3]) |
|
|
|
cov = np.array([[1, 0], [0, 1]]) |
|
|
|
class_B = np.random.multivariate_normal(mean, cov, size=(1000,)) |
|
|
|
|
|
|
|
data_set = DataSet([Instance(x=[float(item[0]), float(item[1])], y=[0.0]) for item in class_A] + |
|
|
|
[Instance(x=[float(item[0]), float(item[1])], y=[1.0]) for item in class_B]) |
|
|
|
|
|
|
|
data_set = prepare_fake_dataset() |
|
|
|
data_set.set_input("x") |
|
|
|
data_set.set_target("y") |
|
|
|
model = NaiveClassifier(2, 1) |
|
|
@@ -43,11 +39,11 @@ def prepare_env(): |
|
|
|
class TestCallback(unittest.TestCase): |
|
|
|
def setUp(self): |
|
|
|
self.tempdir = tempfile.mkdtemp() |
|
|
|
|
|
|
|
|
|
|
|
def tearDown(self):
    # Deliberately a no-op: the temp directory created in setUp is kept on
    # disk, presumably so failing runs can be inspected afterwards.
    # NOTE(review): this leaks one mkdtemp directory per test — re-enable the
    # rmtree below if cleanup is preferred over post-mortem inspection.
    pass
    # shutil.rmtree(self.tempdir)
|
|
|
|
|
|
|
|
|
|
|
def test_gradient_clip(self): |
|
|
|
data_set, model = prepare_env() |
|
|
|
trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"), |
|
|
@@ -100,7 +96,7 @@ class TestCallback(unittest.TestCase): |
|
|
|
path = os.path.join("./", 'tensorboard_logs_{}'.format(trainer.start_time)) |
|
|
|
if os.path.exists(path): |
|
|
|
shutil.rmtree(path) |
|
|
|
|
|
|
|
|
|
|
|
def test_readonly_property(self): |
|
|
|
from fastNLP.core.callback import Callback |
|
|
|
passed_epochs = [] |
|
|
@@ -123,19 +119,19 @@ class TestCallback(unittest.TestCase): |
|
|
|
check_code_level=2) |
|
|
|
trainer.train() |
|
|
|
assert passed_epochs == list(range(1, total_epochs + 1)) |
|
|
|
|
|
|
|
|
|
|
|
def test_evaluate_callback(self):
    """EvaluateCallback should run an auxiliary Tester while training proceeds."""
    dataset, classifier = prepare_env()
    from fastNLP import Tester

    extra_tester = Tester(data=dataset, model=classifier,
                          metrics=AccuracyMetric(pred="predict", target="y"))
    callback = EvaluateCallback(dataset, extra_tester)

    trainer = Trainer(dataset, classifier,
                      optimizer=SGD(lr=0.1),
                      loss=BCELoss(pred="predict", target="y"),
                      batch_size=32,
                      n_epochs=5,
                      print_every=50,
                      dev_data=dataset,
                      metrics=AccuracyMetric(pred="predict", target="y"),
                      use_tqdm=False,
                      callbacks=callback,
                      check_code_level=2)
    trainer.train()
|
|
|
|
|
|
|
|
|
|
|
def test_fitlog_callback(self): |
|
|
|
import fitlog |
|
|
|
fitlog.set_log_dir(self.tempdir) |
|
|
@@ -143,13 +139,13 @@ class TestCallback(unittest.TestCase): |
|
|
|
from fastNLP import Tester |
|
|
|
tester = Tester(data=data_set, model=model, metrics=AccuracyMetric(pred="predict", target="y")) |
|
|
|
fitlog_callback = FitlogCallback(data_set, tester) |
|
|
|
|
|
|
|
|
|
|
|
trainer = Trainer(data_set, model, optimizer=SGD(lr=0.1), loss=BCELoss(pred="predict", target="y"), |
|
|
|
batch_size=32, n_epochs=5, print_every=50, dev_data=data_set, |
|
|
|
metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True, |
|
|
|
callbacks=fitlog_callback, check_code_level=2) |
|
|
|
trainer.train() |
|
|
|
|
|
|
|
|
|
|
|
def test_save_model_callback(self): |
|
|
|
data_set, model = prepare_env() |
|
|
|
top = 3 |
|
|
@@ -159,10 +155,10 @@ class TestCallback(unittest.TestCase): |
|
|
|
metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True, |
|
|
|
callbacks=save_model_callback, check_code_level=2) |
|
|
|
trainer.train() |
|
|
|
|
|
|
|
|
|
|
|
timestamp = os.listdir(self.tempdir)[0] |
|
|
|
self.assertEqual(len(os.listdir(os.path.join(self.tempdir, timestamp))), top) |
|
|
|
|
|
|
|
|
|
|
|
def test_warmup_callback(self): |
|
|
|
data_set, model = prepare_env() |
|
|
|
warmup_callback = WarmupCallback() |
|
|
@@ -171,3 +167,41 @@ class TestCallback(unittest.TestCase): |
|
|
|
metrics=AccuracyMetric(pred="predict", target="y"), use_tqdm=True, |
|
|
|
callbacks=warmup_callback, check_code_level=2) |
|
|
|
trainer.train() |
|
|
|
|
|
|
|
|
|
|
|
def test_control_C():
    # Manual test for ControlC: press Control+C during each of the two
    # training runs. The first run uses ControlC(False), which should only
    # stop training and let the program continue; the second uses
    # ControlC(True), which should terminate the whole program — so if the
    # final "Test failed!" banner is printed, the test has failed.
    from fastNLP import ControlC, Callback
    import time

    banner_top = "\n\n\n\n\n*************************"
    banner_bottom = "*************************\n\n\n\n\n"

    class Wait(Callback):
        def on_epoch_end(self):
            # Give the human tester a window to hit Control+C.
            time.sleep(5)

    dataset, classifier = prepare_env()

    print(banner_top + "Test starts!" + banner_bottom)
    first_trainer = Trainer(dataset, classifier,
                            optimizer=SGD(lr=0.1),
                            loss=BCELoss(pred="predict", target="y"),
                            batch_size=32,
                            n_epochs=20,
                            dev_data=dataset,
                            metrics=AccuracyMetric(pred="predict", target="y"),
                            use_tqdm=True,
                            callbacks=[Wait(), ControlC(False)],
                            check_code_level=2)
    first_trainer.train()

    print(banner_top + "Program goes on ..." + banner_bottom)

    second_trainer = Trainer(dataset, classifier,
                             optimizer=SGD(lr=0.1),
                             loss=BCELoss(pred="predict", target="y"),
                             batch_size=32,
                             n_epochs=20,
                             dev_data=dataset,
                             metrics=AccuracyMetric(pred="predict", target="y"),
                             use_tqdm=True,
                             callbacks=[Wait(), ControlC(True)],
                             check_code_level=2)
    second_trainer.train()

    print(banner_top + "Test failed!" + banner_bottom)
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Manual entry point: test_control_C needs a human to press Control+C
    # during training, so it is run directly rather than via unittest.
    test_control_C()