fix log in trainer & tester

tags/v0.2.0
yunfan 6 years ago
parent commit 2fe39b7813
3 changed files with 6 additions and 8 deletions:

  1. fastNLP/core/__init__.py   +2  -1
  2. fastNLP/core/tester.py     +1  -4
  3. fastNLP/core/trainer.py    +3  -3

fastNLP/core/__init__.py  (+2, -1)

@@ -7,4 +7,5 @@ from .sampler import SequentialSampler, BucketSampler, RandomSampler, BaseSampler
 from .tester import Tester
 from .trainer import Trainer
 from .vocabulary import Vocabulary
-
+from .optimizer import Optimizer
+from .loss import Loss
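With this hunk applied, Optimizer and Loss are re-exported from the package's core namespace. A minimal sketch of what that enables; the module paths come from the diff itself, nothing else is assumed:

    # After this commit, both names resolve via fastNLP.core rather than
    # their defining submodules fastNLP.core.optimizer / fastNLP.core.loss.
    from fastNLP.core import Optimizer, Loss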

fastNLP/core/tester.py  (+1, -4)

@@ -39,7 +39,6 @@ class Tester(object):
 
         for req_key in required_args:
             if req_key not in kwargs:
-                logger.error("Tester lacks argument {}".format(req_key))
                 raise ValueError("Tester lacks argument {}".format(req_key))
 
         for key in default_args:
@@ -49,7 +48,6 @@ class Tester(object):
                 else:
                     msg = "Argument %s type mismatch: expected %s while get %s" % (
                         key, type(default_args[key]), type(kwargs[key]))
-                    logger.error(msg)
                     raise ValueError(msg)
             else:
                 # Tester doesn't care about extra arguments
@@ -85,8 +83,7 @@ class Tester(object):
             for k, v in batch_y.items():
                 truths[k].append(v)
         eval_results = self.evaluate(**output, **truths)
-        # print("[tester] {}".format(self.print_eval_results(eval_results)))
-        # logger.info("[tester] {}".format(self.print_eval_results(eval_results)))
+        print("[tester] {}".format(self.print_eval_results(eval_results)))
         self.mode(network, is_test=False)
         self.metrics = eval_results
         return eval_results
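Taken together, these three hunks leave Tester raising ValueError directly, with no preceding logger.error call, and printing evaluation results instead of routing them through a logger. A standalone, runnable sketch of the validation pattern that remains; the argument names and defaults below are illustrative, not Tester's actual signature:

    # Sketch of the post-commit pattern: a missing or mis-typed keyword
    # argument raises ValueError directly, without logging first.
    required_args = {"test_data"}
    default_args = {"batch_size": 8, "use_cuda": False}

    def check_args(**kwargs):
        for req_key in required_args:
            if req_key not in kwargs:
                raise ValueError("Tester lacks argument {}".format(req_key))
        for key, default in default_args.items():
            if key in kwargs and not isinstance(kwargs[key], type(default)):
                msg = "Argument %s type mismatch: expected %s while get %s" % (
                    key, type(default), type(kwargs[key]))
                raise ValueError(msg)

    check_args(test_data=[], batch_size=4)  # OK
    # check_args(batch_size=4)              # would raise: lacks test_data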


fastNLP/core/trainer.py  (+3, -3)

@@ -100,9 +100,9 @@
                 for name, param in self.model.named_parameters():
                     if param.requires_grad:
                         self._summary_writer.add_scalar(name + "_mean", param.mean(), global_step=self.step)
-                        self._summary_writer.add_scalar(name + "_std", param.std(), global_step=self.step)
-                        self._summary_writer.add_scalar(name + "_grad_sum", param.sum(), global_step=self.step)
-                if n_print > 0 and self.step % n_print == 0:
+                        # self._summary_writer.add_scalar(name + "_std", param.std(), global_step=self.step)
+                        # self._summary_writer.add_scalar(name + "_grad_sum", param.sum(), global_step=self.step)
+                if kwargs["n_print"] > 0 and self.step % kwargs["n_print"] == 0:
                     end = time.time()
                     diff = timedelta(seconds=round(end - kwargs["start"]))
                     print_output = "[epoch: {:>3} step: {:>4}] train loss: {:>4.6} time: {}".format(
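The fix here is twofold: two noisy add_scalar calls are commented out, and the progress check now reads n_print out of kwargs, where the bare name n_print was undefined in this scope. A runnable sketch of the corrected print path; the kwargs keys and format string come from the diff, while the epoch/step/loss values are made up:

    import time
    from datetime import timedelta

    kwargs = {"n_print": 1, "start": time.time()}
    epoch, step, loss = 3, 4, 0.1234

    # Progress is reported only every n_print steps, timed from the
    # "start" timestamp stashed in kwargs when training began.
    if kwargs["n_print"] > 0 and step % kwargs["n_print"] == 0:
        end = time.time()
        diff = timedelta(seconds=round(end - kwargs["start"]))
        print_output = "[epoch: {:>3} step: {:>4}] train loss: {:>4.6} time: {}".format(
            epoch, step, loss, diff)
        print(print_output)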

