diff --git a/fastNLP/core/__init__.py b/fastNLP/core/__init__.py
index 03f284d5..1003c824 100644
--- a/fastNLP/core/__init__.py
+++ b/fastNLP/core/__init__.py
@@ -7,4 +7,5 @@ from .sampler import SequentialSampler, BucketSampler, RandomSampler, BaseSample
 from .tester import Tester
 from .trainer import Trainer
 from .vocabulary import Vocabulary
-
+from .optimizer import Optimizer
+from .loss import Loss
diff --git a/fastNLP/core/tester.py b/fastNLP/core/tester.py
index 2a0d33e0..d6ef9c1e 100644
--- a/fastNLP/core/tester.py
+++ b/fastNLP/core/tester.py
@@ -39,7 +39,6 @@ class Tester(object):
 
         for req_key in required_args:
             if req_key not in kwargs:
-                logger.error("Tester lacks argument {}".format(req_key))
                 raise ValueError("Tester lacks argument {}".format(req_key))
 
         for key in default_args:
@@ -49,7 +48,6 @@ class Tester(object):
                 else:
                     msg = "Argument %s type mismatch: expected %s while get %s" % (
                         key, type(default_args[key]), type(kwargs[key]))
-                    logger.error(msg)
                     raise ValueError(msg)
             else:
                 # Tester doesn't care about extra arguments
@@ -85,8 +83,7 @@ class Tester(object):
             for k, v in batch_y.items():
                 truths[k].append(v)
         eval_results = self.evaluate(**output, **truths)
-        # print("[tester] {}".format(self.print_eval_results(eval_results)))
-        # logger.info("[tester] {}".format(self.print_eval_results(eval_results)))
+        print("[tester] {}".format(self.print_eval_results(eval_results)))
         self.mode(network, is_test=False)
         self.metrics = eval_results
         return eval_results
diff --git a/fastNLP/core/trainer.py b/fastNLP/core/trainer.py
index b879ad11..b4f11090 100644
--- a/fastNLP/core/trainer.py
+++ b/fastNLP/core/trainer.py
@@ -100,9 +100,9 @@ class Trainer(object):
             for name, param in self.model.named_parameters():
                 if param.requires_grad:
                     self._summary_writer.add_scalar(name + "_mean", param.mean(), global_step=self.step)
-                    self._summary_writer.add_scalar(name + "_std", param.std(), global_step=self.step)
-                    self._summary_writer.add_scalar(name + "_grad_sum", param.sum(), global_step=self.step)
-                if n_print > 0 and self.step % n_print == 0:
+                    # self._summary_writer.add_scalar(name + "_std", param.std(), global_step=self.step)
+                    # self._summary_writer.add_scalar(name + "_grad_sum", param.sum(), global_step=self.step)
+            if kwargs["n_print"] > 0 and self.step % kwargs["n_print"] == 0:
                 end = time.time()
                 diff = timedelta(seconds=round(end - kwargs["start"]))
                 print_output = "[epoch: {:>3} step: {:>4}] train loss: {:>4.6} time: {}".format(
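Note on the changes: the __init__.py hunk re-exports Optimizer and Loss at the package level, and the trainer.py hunk switches from a free variable n_print to kwargs["n_print"], so the printing interval (and the "start" timestamp used for elapsed time) must now arrive through **kwargs. A minimal import-level sketch of the first change, assuming only the patched package; Optimizer/Loss constructor signatures are not shown in this diff, so none are exercised:

    # Smoke test for the new package-level exports added above.
    # Only the import surface is checked; constructor arguments for
    # Optimizer and Loss are not part of this diff and are not assumed here.
    from fastNLP.core import Trainer, Tester, Optimizer, Loss

    print(Optimizer, Loss)  # both names now resolve directly from fastNLP.core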