
Merge branch 'master' into more_code_comments

tags/v0.1.0
Coet (GitHub), 6 years ago
commit ffe7c26369
9 changed files with 152 additions and 56 deletions

  1. .github/ISSUE_TEMPLATE/bug-report-bug--.md (+41, -0)
  2. .github/ISSUE_TEMPLATE/bug_report.md (+35, -0)
  3. .github/ISSUE_TEMPLATE/custom-issue-template-------.md (+7, -0)
  4. .github/ISSUE_TEMPLATE/custom.md (+7, -0)
  5. .github/ISSUE_TEMPLATE/feature-request------.md (+17, -0)
  6. .github/ISSUE_TEMPLATE/feature_request.md (+17, -0)
  7. fastNLP/core/README.md (+0, -7)
  8. fastNLP/core/metrics.py (+3, -3)
  9. test/test_metrics.py (+25, -46)

.github/ISSUE_TEMPLATE/bug-report-bug--.md (+41, -0)

@@ -0,0 +1,41 @@
---
name: Bug report (Bug Report)
about: Create a report to help us improve (report a bug to help us improve)

---

**Describe the bug**
A clear and concise description of what the bug is.
Describe the bug clearly and concisely.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
Steps to reproduce the bug.

**Expected behavior**
A clear and concise description of what you expected to happen.
Describe clearly and concisely what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.
If possible, please provide screenshots.

**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
Relevant environment.

**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]

**Additional context**
Add any other context about the problem here.
Additional notes.

.github/ISSUE_TEMPLATE/bug_report.md (+35, -0)

@@ -0,0 +1,35 @@
---
name: Bug report
about: Create a report to help us improve

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]

**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]

**Additional context**
Add any other context about the problem here.

.github/ISSUE_TEMPLATE/custom-issue-template-------.md (+7, -0)

@@ -0,0 +1,7 @@
---
name: Custom issue template (general issue template)
about: Describe this issue template's purpose here. (Describe the issue here.)

---



.github/ISSUE_TEMPLATE/custom.md (+7, -0)

@@ -0,0 +1,7 @@
---
name: Custom issue template
about: Describe this issue template's purpose here.

---



.github/ISSUE_TEMPLATE/feature-request------.md (+17, -0)

@@ -0,0 +1,17 @@
---
name: Feature request (feature proposal)
about: Suggest an idea for this project (propose a new feature)

---

**Is your feature request related to a problem? Please describe.** (What is the problem?)
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like** (What is the proposed solution?)
A clear and concise description of what you want to happen.

**Describe alternatives you've considered** (Alternative solutions)
A clear and concise description of any alternative solutions or features you've considered.

**Additional context** (Remarks)
Add any other context or screenshots about the feature request here.

.github/ISSUE_TEMPLATE/feature_request.md (+17, -0)

@@ -0,0 +1,17 @@
---
name: Feature request
about: Suggest an idea for this project

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

fastNLP/core/README.md (+0, -7)

@@ -1,8 +1 @@
SpaCy "Doc"
https://github.com/explosion/spaCy/blob/75d2a05c2938f412f0fae44748374e4de19cc2be/spacy/tokens/doc.pyx#L80

SpaCy "Vocab"
https://github.com/explosion/spaCy/blob/75d2a05c2938f412f0fae44748374e4de19cc2be/spacy/vocab.pyx#L25

SpaCy "Token"
https://github.com/explosion/spaCy/blob/75d2a05c2938f412f0fae44748374e4de19cc2be/spacy/tokens/token.pyx#L27

fastNLP/core/metrics.py (+3, -3)

@@ -34,7 +34,7 @@ def _label_types(y):
"unknown"
"""
# never squeeze the first dimension
y = np.squeeze(y, list(range(1, len(y.shape))))
y = y.squeeze() if y.shape[0] > 1 else y.resize(1, -1)
shape = y.shape
if len(shape) < 1:
raise ValueError('cannot accept data: {}'.format(y))
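
The hunk above changes how _label_types flattens trailing singleton dimensions while preserving the first (sample) axis, per the "# never squeeze the first dimension" comment. A minimal sketch of that idea, using a hypothetical array that is not part of the commit:

import numpy as np

y = np.zeros((1, 1))                                 # one sample with a trailing singleton dimension
kept = np.squeeze(y, axis=tuple(range(1, y.ndim)))   # squeeze only the trailing axes
print(kept.shape)           # (1,) -- the sample axis survives
print(np.squeeze(y).shape)  # ()   -- a plain squeeze would drop it too
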
@@ -103,7 +103,7 @@ def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary'):
labels = list(y_labels)
else:
for i in labels:
if i not in y_labels:
if (i not in y_labels and y_type != 'multilabel') or (y_type == 'multilabel' and i >= y_true.shape[1]):
warnings.warn('label {} is not contained in data'.format(i), UserWarning)

if y_type in ['binary', 'multiclass']:
@@ -139,7 +139,7 @@ def precision_score(y_true, y_pred, labels=None, pos_label=1, average='binary'):
labels = list(y_labels)
else:
for i in labels:
if i not in y_labels:
if (i not in y_labels and y_type != 'multilabel') or (y_type == 'multilabel' and i >= y_true.shape[1]):
warnings.warn('label {} is not contained in data'.format(i), UserWarning)

if y_type in ['binary', 'multiclass']:
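
Both label-check hunks above extend the containment warning in recall_score and precision_score: for multilabel data the entries of labels index columns of y_true, so a label is only flagged when it falls outside the column range, while for binary/multiclass data it is flagged when the value never occurs in y_true. A standalone sketch of that condition (the check_labels helper and its arguments are illustrative, not part of fastNLP):

import warnings
import numpy as np

def check_labels(y_true, labels, y_type):
    # Mirror of the condition added in the hunks above.
    y_labels = set(np.unique(y_true))
    for i in labels:
        if (i not in y_labels and y_type != 'multilabel') or (y_type == 'multilabel' and i >= y_true.shape[1]):
            warnings.warn('label {} is not contained in data'.format(i), UserWarning)

check_labels(np.array([[0, 1], [1, 0]]), labels=[0, 1, 2], y_type='multilabel')  # warns only for label 2 (no column 2)
check_labels(np.array([0, 1, 1]), labels=[0, 2], y_type='multiclass')            # warns for label 2 (value never occurs)
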


test/test_metrics.py (+25, -46)

@@ -1,98 +1,77 @@
import sys, os

sys.path = [os.path.join(os.path.dirname(__file__), '..')] + sys.path

from fastNLP.core import metrics
from sklearn import metrics as skmetrics
# from sklearn import metrics as skmetrics
import unittest
import numpy as np
from numpy import random


def generate_fake_label(low, high, size):
return random.randint(low, high, size), random.randint(low, high, size)


class TestMetrics(unittest.TestCase):
delta = 1e-5
# test for binary, multiclass, multilabel
data_types = [((1000,), 2), ((1000,), 10), ((1000, 10), 2)]
fake_data = [generate_fake_label(0, high, shape) for shape, high in data_types]

def test_accuracy_score(self):
for y_true, y_pred in self.fake_data:
for normalize in [True, False]:
for sample_weight in [None, random.rand(y_true.shape[0])]:
ans = skmetrics.accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight)
test = metrics.accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight)
self.assertAlmostEqual(test, ans, delta=self.delta)

# ans = skmetrics.accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight)
# self.assertAlmostEqual(test, ans, delta=self.delta)
def test_recall_score(self):
for y_true, y_pred in self.fake_data:
# print(y_true.shape)
labels = list(range(y_true.shape[1])) if len(y_true.shape) >= 2 else None
ans = skmetrics.recall_score(y_true, y_pred, labels=labels, average=None)
test = metrics.recall_score(y_true, y_pred, labels=labels, average=None)
ans = list(ans)
if not isinstance(test, list):
test = list(test)
for a, b in zip(test, ans):
# print('{}, {}'.format(a, b))
self.assertAlmostEqual(a, b, delta=self.delta)
# ans = skmetrics.recall_score(y_true, y_pred,labels=labels, average=None)
# ans = list(ans)
# for a, b in zip(test, ans):
# # print('{}, {}'.format(a, b))
# self.assertAlmostEqual(a, b, delta=self.delta)
# test binary
y_true, y_pred = generate_fake_label(0, 2, 1000)
ans = skmetrics.recall_score(y_true, y_pred)
test = metrics.recall_score(y_true, y_pred)
self.assertAlmostEqual(ans, test, delta=self.delta)
# ans = skmetrics.recall_score(y_true, y_pred)
# self.assertAlmostEqual(ans, test, delta=self.delta)

def test_precision_score(self):
for y_true, y_pred in self.fake_data:
# print(y_true.shape)
labels = list(range(y_true.shape[1])) if len(y_true.shape) >= 2 else None
ans = skmetrics.precision_score(y_true, y_pred, labels=labels, average=None)
test = metrics.precision_score(y_true, y_pred, labels=labels, average=None)
ans, test = list(ans), list(test)
for a, b in zip(test, ans):
# print('{}, {}'.format(a, b))
self.assertAlmostEqual(a, b, delta=self.delta)
# ans = skmetrics.precision_score(y_true, y_pred,labels=labels, average=None)
# ans, test = list(ans), list(test)
# for a, b in zip(test, ans):
# # print('{}, {}'.format(a, b))
# self.assertAlmostEqual(a, b, delta=self.delta)
# test binary
y_true, y_pred = generate_fake_label(0, 2, 1000)
ans = skmetrics.precision_score(y_true, y_pred)
test = metrics.precision_score(y_true, y_pred)
self.assertAlmostEqual(ans, test, delta=self.delta)

def test_precision_score(self):
for y_true, y_pred in self.fake_data:
# print(y_true.shape)
labels = list(range(y_true.shape[1])) if len(y_true.shape) >= 2 else None
ans = skmetrics.precision_score(y_true, y_pred, labels=labels, average=None)
test = metrics.precision_score(y_true, y_pred, labels=labels, average=None)
ans, test = list(ans), list(test)
for a, b in zip(test, ans):
# print('{}, {}'.format(a, b))
self.assertAlmostEqual(a, b, delta=self.delta)
# test binary
y_true, y_pred = generate_fake_label(0, 2, 1000)
ans = skmetrics.precision_score(y_true, y_pred)
test = metrics.precision_score(y_true, y_pred)
self.assertAlmostEqual(ans, test, delta=self.delta)
# ans = skmetrics.precision_score(y_true, y_pred)
# self.assertAlmostEqual(ans, test, delta=self.delta)

def test_f1_score(self):
for y_true, y_pred in self.fake_data:
# print(y_true.shape)
labels = list(range(y_true.shape[1])) if len(y_true.shape) >= 2 else None
ans = skmetrics.f1_score(y_true, y_pred, labels=labels, average=None)
test = metrics.f1_score(y_true, y_pred, labels=labels, average=None)
ans, test = list(ans), list(test)
for a, b in zip(test, ans):
# print('{}, {}'.format(a, b))
self.assertAlmostEqual(a, b, delta=self.delta)
# ans = skmetrics.f1_score(y_true, y_pred,labels=labels, average=None)
# ans, test = list(ans), list(test)
# for a, b in zip(test, ans):
# # print('{}, {}'.format(a, b))
# self.assertAlmostEqual(a, b, delta=self.delta)
# test binary
y_true, y_pred = generate_fake_label(0, 2, 1000)
ans = skmetrics.f1_score(y_true, y_pred)
test = metrics.f1_score(y_true, y_pred)
self.assertAlmostEqual(ans, test, delta=self.delta)
# ans = skmetrics.f1_score(y_true, y_pred)
# self.assertAlmostEqual(ans, test, delta=self.delta)

if __name__ == '__main__':
unittest.main()
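
For a quick check outside the unittest harness, the same metric functions can be called directly. A minimal sketch mirroring the multiclass case in data_types (the random data and the printed values are illustrative; as in this test file, the repository root is assumed to be on sys.path):

import numpy as np
from fastNLP.core import metrics

# 1000 samples drawn from 10 classes, mirroring data_types[1] above.
y_true = np.random.randint(0, 10, size=(1000,))
y_pred = np.random.randint(0, 10, size=(1000,))

print(metrics.accuracy_score(y_true, y_pred))                 # fraction of exact matches
print(metrics.recall_score(y_true, y_pred, average=None))     # per-class recall
print(metrics.precision_score(y_true, y_pred, average=None))  # per-class precision
print(metrics.f1_score(y_true, y_pred, average=None))         # per-class F1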
