
Merge branch 'master' of https://github.com/fastNLP/fastNLP into to_merge

# Conflicts:
#	fastNLP/core/metrics.py
#	fastNLP/core/predictor.py
tags/v0.1.0
FengZiYjun · 6 years ago · parent · commit 929a595c4c
6 changed files with 37 additions and 17 deletions
  1. +14 -0  .travis.yml
  2. +2  -0  README.md
  3. +0  -1  fastNLP/core/metrics.py
  4. +2  -2  fastNLP/core/predictor.py
  5. +6  -6  fastNLP/core/tester.py
  6. +13 -8  test/test_metrics.py

+14 -0  .travis.yml

@@ -0,0 +1,14 @@
+language: python
+python:
+- "3.6"
+# command to install dependencies
+install:
+- pip install --quiet -r requirements.txt
+- pip install pytest pytest-cov
+- pip install -U scikit-learn
+# command to run tests
+script:
+- pytest --cov=./
+
+after_success:
+- codecov
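For context, this new CI configuration installs the requirements plus pytest/pytest-cov, runs the test suite with coverage, and uploads the report to codecov. A rough local stand-in for the `script` step might look like the sketch below (a hypothetical helper, assuming pytest and pytest-cov are installed as in the `install` step above):

```
# run_tests.py -- hypothetical local equivalent of the Travis "script" step.
# Assumes pytest and pytest-cov are installed, as in the "install" step above.
import sys

import pytest

if __name__ == "__main__":
    # "--cov=./" mirrors the CI command; coverage data is written to .coverage
    sys.exit(pytest.main(["--cov=./"]))
```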

+2 -0  README.md

@@ -1,4 +1,6 @@
 # FastNLP
+[![Build Status](https://travis-ci.org/fastnlp/fastNLP.svg?branch=master)](https://travis-ci.org/fastnlp/fastNLP)
+[![codecov](https://codecov.io/gh/fastnlp/fastNLP/branch/master/graph/badge.svg)](https://codecov.io/gh/fastnlp/fastNLP)
 ```
 FastNLP
 ├── docs


+0 -1  fastNLP/core/metrics.py

@@ -1,5 +1,4 @@
-import warnings
 
 import numpy as np
 import torch
 


+2 -2  fastNLP/core/predictor.py

@@ -84,8 +84,8 @@ class Predictor(object):
         data_iterator = iter(Batchifier(SequentialSampler(data), self.batch_size, drop_last=False))
 
         for batch_x in self.make_batch(data_iterator, use_cuda=False):
-            prediction = self.data_forward(network, batch_x)
+            with torch.no_grad():
+                prediction = self.data_forward(network, batch_x)
 
             self.batch_output.append(prediction)
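The change above wraps the forward pass in `torch.no_grad()`, so prediction no longer records an autograd graph and uses less memory during inference. A minimal, self-contained sketch of the same pattern (the toy network below is illustrative, not part of fastNLP):

```
import torch
import torch.nn as nn

# Toy stand-in for the "network" argument; any nn.Module behaves the same way.
network = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 2))
batch_x = torch.randn(4, 10)

with torch.no_grad():
    prediction = network(batch_x)

# No gradient bookkeeping was recorded for this forward pass.
print(prediction.requires_grad)  # False
```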



+6 -6  fastNLP/core/tester.py

@@ -50,15 +50,16 @@ class BaseTester(object):
         step = 0
 
         for batch_x, batch_y in self.make_batch(iterator, dev_data):
-
-            prediction = self.data_forward(network, batch_x)
-
-            eval_results = self.evaluate(prediction, batch_y)
+            with torch.no_grad():
+                prediction = self.data_forward(network, batch_x)
+                eval_results = self.evaluate(prediction, batch_y)
 
             if self.save_output:
                 self.batch_output.append(prediction)
             if self.save_loss:
                 self.eval_history.append(eval_results)
             if step % n_print == 0:
                 print('[test step: {:>4}]'.format(step))
             step += 1
 
     def prepare_input(self, data_path):
@@ -157,7 +158,7 @@ class SeqLabelTester(BaseTester):
         results = torch.Tensor(prediction).view(-1,)
         # make sure "results" is in the same device as "truth"
         results = results.to(truth)
-        accuracy = torch.sum(results == truth.view((-1,))) / results.shape[0]
+        accuracy = torch.sum(results == truth.view((-1,))).to(torch.float) / results.shape[0]
         return [loss.data, accuracy.data]
 
     def metrics(self):
@@ -176,7 +177,6 @@ class SeqLabelTester(BaseTester):
     def make_batch(self, iterator, data):
         return Action.make_batch(iterator, use_cuda=self.use_cuda, output_length=True)
 
-
 class ClassificationTester(BaseTester):
     """Tester for classification."""


+13 -8  test/test_metrics.py

@@ -1,20 +1,24 @@
 import sys, os
 
 sys.path = [os.path.join(os.path.dirname(__file__), '..')] + sys.path
 
-from fastNLP.action import metrics
+from fastNLP.core import metrics
 from sklearn import metrics as skmetrics
 import unittest
 import numpy as np
 from numpy import random
 
 
 def generate_fake_label(low, high, size):
     return random.randint(low, high, size), random.randint(low, high, size)
 
 
 class TestMetrics(unittest.TestCase):
     delta = 1e-5
     # test for binary, multiclass, multilabel
     data_types = [((1000,), 2), ((1000,), 10), ((1000, 10), 2)]
     fake_data = [generate_fake_label(0, high, shape) for shape, high in data_types]
 
     def test_accuracy_score(self):
         for y_true, y_pred in self.fake_data:
             for normalize in [True, False]:
@@ -22,12 +26,12 @@ class TestMetrics(unittest.TestCase):
                     ans = skmetrics.accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight)
                     test = metrics.accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight)
                     self.assertAlmostEqual(test, ans, delta=self.delta)
 
     def test_recall_score(self):
         for y_true, y_pred in self.fake_data:
             # print(y_true.shape)
             labels = list(range(y_true.shape[1])) if len(y_true.shape) >= 2 else None
-            ans = skmetrics.recall_score(y_true, y_pred,labels=labels, average=None)
+            ans = skmetrics.recall_score(y_true, y_pred, labels=labels, average=None)
             test = metrics.recall_score(y_true, y_pred, labels=labels, average=None)
             ans = list(ans)
             if not isinstance(test, list):
@@ -45,7 +49,7 @@ class TestMetrics(unittest.TestCase):
         for y_true, y_pred in self.fake_data:
             # print(y_true.shape)
             labels = list(range(y_true.shape[1])) if len(y_true.shape) >= 2 else None
-            ans = skmetrics.precision_score(y_true, y_pred,labels=labels, average=None)
+            ans = skmetrics.precision_score(y_true, y_pred, labels=labels, average=None)
             test = metrics.precision_score(y_true, y_pred, labels=labels, average=None)
             ans, test = list(ans), list(test)
             for a, b in zip(test, ans):
@@ -56,12 +60,12 @@ class TestMetrics(unittest.TestCase):
             ans = skmetrics.precision_score(y_true, y_pred)
             test = metrics.precision_score(y_true, y_pred)
             self.assertAlmostEqual(ans, test, delta=self.delta)
 
     def test_precision_score(self):
         for y_true, y_pred in self.fake_data:
             # print(y_true.shape)
             labels = list(range(y_true.shape[1])) if len(y_true.shape) >= 2 else None
-            ans = skmetrics.precision_score(y_true, y_pred,labels=labels, average=None)
+            ans = skmetrics.precision_score(y_true, y_pred, labels=labels, average=None)
             test = metrics.precision_score(y_true, y_pred, labels=labels, average=None)
             ans, test = list(ans), list(test)
             for a, b in zip(test, ans):
@@ -77,7 +81,7 @@ class TestMetrics(unittest.TestCase):
         for y_true, y_pred in self.fake_data:
             # print(y_true.shape)
             labels = list(range(y_true.shape[1])) if len(y_true.shape) >= 2 else None
-            ans = skmetrics.f1_score(y_true, y_pred,labels=labels, average=None)
+            ans = skmetrics.f1_score(y_true, y_pred, labels=labels, average=None)
             test = metrics.f1_score(y_true, y_pred, labels=labels, average=None)
             ans, test = list(ans), list(test)
             for a, b in zip(test, ans):
@@ -89,5 +93,6 @@ class TestMetrics(unittest.TestCase):
             test = metrics.f1_score(y_true, y_pred)
             self.assertAlmostEqual(ans, test, delta=self.delta)
 
 
 if __name__ == '__main__':
-    unittest.main()
+    unittest.main()
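These tests check fastNLP's metric implementations against scikit-learn on randomly generated labels. A condensed sketch of that comparison pattern, using only NumPy and scikit-learn (the tolerance mirrors `delta = 1e-5` above):

```
import numpy as np
from numpy import random
from sklearn import metrics as skmetrics

# Fake binary labels, as generate_fake_label(0, 2, (1000,)) produces above.
y_true, y_pred = random.randint(0, 2, 1000), random.randint(0, 2, 1000)

reference = skmetrics.accuracy_score(y_true, y_pred)
naive = np.mean(y_true == y_pred)  # plain NumPy accuracy for the binary case

assert abs(reference - naive) < 1e-5
```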
