@@ -1,62 +0,0 @@
[test]
x = 1
y = 2
z = 3
# this is an example
input = [1,2,3]
text = "this is text"
doubles = 0.8
tt = 0.5
test = 105
str = "this is a str"
double = 0.5

[t]
x = "this is a test section"

[test-case-2]
double = 0.5
doubles = 0.8
tt = 0.5
test = 105
str = "this is a str"

[another-test]
doubles = 0.8
tt = 0.5
test = 105
str = "this is a str"
double = 0.5

[one-another-test]
doubles = 0.8
tt = 0.5
test = 105
str = "this is a str"
double = 0.5
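
For reference, a minimal sketch of how a config file like the one above is read back through the ConfigSection/ConfigLoader API exercised in the test below. The file path is only an assumed example location, and the note about typed values is inferred from the round-trip equality checks in test_case_1.

from fastNLP.io import ConfigSection, ConfigLoader

test_section = ConfigSection()
t_section = ConfigSection()
# load_config fills each requested ConfigSection in place, keyed by its [section] name;
# "test/data_for_tests/config" is an assumed path for a file with the contents above
ConfigLoader().load_config("test/data_for_tests/config", {"test": test_section, "t": t_section})
print(test_section["test"])  # 105, read back as a number rather than a string
print(t_section["x"])        # "this is a test section"
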
@@ -1,112 +0,0 @@
import os
import unittest

from fastNLP.io import ConfigSection, ConfigLoader, ConfigSaver


class TestConfigSaver(unittest.TestCase):
    def test_case_1(self):
        config_file_dir = "."
        config_file_name = "config"
        config_file_path = os.path.join(config_file_dir, config_file_name)
        tmp_config_file_path = os.path.join(config_file_dir, "tmp_config")

        # keep a copy of the original file so it can be restored afterwards
        with open(config_file_path, "r") as f:
            lines = f.readlines()

        standard_section = ConfigSection()
        t_section = ConfigSection()
        ConfigLoader().load_config(config_file_path, {"test": standard_section, "t": t_section})

        config_saver = ConfigSaver(config_file_path)

        section = ConfigSection()
        section["doubles"] = 0.8
        section["tt"] = 0.5
        section["test"] = 105
        section["str"] = "this is a str"

        # test_case_2_section is an alias of section, so the extra "double" key lands in both
        test_case_2_section = section
        test_case_2_section["double"] = 0.5

        for k in section.__dict__.keys():
            standard_section[k] = section[k]

        config_saver.save_config_file("test", section)
        config_saver.save_config_file("another-test", section)
        config_saver.save_config_file("one-another-test", section)
        config_saver.save_config_file("test-case-2", section)

        # reload every section and check that the saved values round-trip
        test_section = ConfigSection()
        at_section = ConfigSection()
        another_test_section = ConfigSection()
        one_another_test_section = ConfigSection()
        a_test_case_2_section = ConfigSection()
        ConfigLoader().load_config(config_file_path, {"test": test_section,
                                                      "another-test": another_test_section,
                                                      "t": at_section,
                                                      "one-another-test": one_another_test_section,
                                                      "test-case-2": a_test_case_2_section})

        assert test_section == standard_section
        assert at_section == t_section
        assert another_test_section == section
        assert one_another_test_section == section
        assert a_test_case_2_section == test_case_2_section

        config_saver.save_config_file("test", section)

        # restore the original config file
        with open(config_file_path, "w") as f:
            f.writelines(lines)

        # a line without "key = value" inside a section exercises the parser's error path
        with open(tmp_config_file_path, "w") as f:
            f.write('[test]\n')
            f.write('this is a faulty example\n')

        tmp_config_saver = ConfigSaver(tmp_config_file_path)
        try:
            tmp_config_saver._read_section()
        except Exception as e:
            pass
        os.remove(tmp_config_file_path)

        # a ConfigSaver pointed at a missing file exercises the other error path
        try:
            tmp_config_saver = ConfigSaver("file-NOT-exist")
        except Exception as e:
            pass
    def test_case_2(self):
        config = "[section_A]\n[section_B]\n"

        with open("./test.cfg", "w", encoding="utf-8") as f:
            f.write(config)
        saver = ConfigSaver("./test.cfg")

        section = ConfigSection()
        section["doubles"] = 0.8
        section["tt"] = [1, 2, 3]
        section["test"] = 105
        section["str"] = "this is a str"

        # save into a section that exists in the file but is still empty
        saver.save_config_file("section_A", section)

        os.remove("./test.cfg")
    def test_case_3(self):
        config = "[section_A]\ndoubles = 0.9\ntt = [1, 2, 3]\n[section_B]\n"

        with open("./test.cfg", "w", encoding="utf-8") as f:
            f.write(config)
        saver = ConfigSaver("./test.cfg")

        section = ConfigSection()
        section["doubles"] = 0.8
        section["tt"] = [1, 2, 3]
        section["test"] = 105
        section["str"] = "this is a str"

        # save into a section that already holds values, including a different "doubles"
        saver.save_config_file("section_A", section)

        os.remove("./test.cfg")
@@ -9,22 +9,22 @@ class TestDatasetLoader(unittest.TestCase):
        """
        Test the loader of the Conll2003 dataset
        """
        dataset_path = "test/data_for_tests/conll_2003_example.txt"
        loader = Conll2003Loader()
        dataset_2003 = loader.load(dataset_path)

    def test_PeopleDailyCorpusLoader(self):
        data_set = PeopleDailyCorpusLoader().load("test/data_for_tests/people_daily_raw.txt")

    def test_CSVLoader(self):
        ds = CSVLoader(sep='\t', headers=['words', 'label']) \
            .load('test/data_for_tests/tutorial_sample_dataset.csv')
        assert len(ds) > 0

    def test_SNLILoader(self):
        ds = SNLILoader().load('test/data_for_tests/sample_snli.jsonl')
        assert len(ds) == 3

    def test_JsonLoader(self):
        ds = JsonLoader().load('test/data_for_tests/sample_snli.jsonl')
        assert len(ds) == 3
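
Each of these loaders follows the same pattern: construct the loader, call load(path), and get back a dataset whose length is the number of loaded instances. A small hedged sketch with CSVLoader follows; the tab-separated file is hypothetical, and the fastNLP.io.dataset_loader import path is an assumption, since the test file's own imports fall outside this hunk.

from fastNLP.io.dataset_loader import CSVLoader

# "my_pairs.tsv" is a hypothetical file with one "words<TAB>label" line per example
ds = CSVLoader(sep='\t', headers=['words', 'label']).load('my_pairs.tsv')
print(len(ds))  # number of loaded instances
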
@@ -3,15 +3,13 @@ import numpy as np
from fastNLP import Vocabulary
from fastNLP.io import EmbedLoader


class TestEmbedLoader(unittest.TestCase):
    def test_load_with_vocab(self):
        vocab = Vocabulary()
        glove = "test/data_for_tests/glove.6B.50d_test.txt"
        word2vec = "test/data_for_tests/word2vec_test.txt"
        vocab.add_word('the')
        vocab.add_word('none')
        g_m = EmbedLoader.load_with_vocab(glove, vocab)
@@ -19,11 +17,11 @@ class TestEmbedLoader(unittest.TestCase):
        w_m = EmbedLoader.load_with_vocab(word2vec, vocab, normalize=True)
        self.assertEqual(w_m.shape, (4, 50))
        self.assertAlmostEqual(np.linalg.norm(w_m, axis=1).sum(), 4)

    def test_load_without_vocab(self):
        words = ['the', 'of', 'in', 'a', 'to', 'and']
        glove = "test/data_for_tests/glove.6B.50d_test.txt"
        word2vec = "test/data_for_tests/word2vec_test.txt"
        g_m, vocab = EmbedLoader.load_without_vocab(glove)
        self.assertEqual(g_m.shape, (8, 50))
        for word in words:
@@ -39,9 +37,10 @@ class TestEmbedLoader(unittest.TestCase):
        self.assertAlmostEqual(np.linalg.norm(w_m, axis=1).sum(), 7)
        for word in words:
            self.assertIn(word, vocab)

    def test_read_all_glove(self):
        pass
        # TODO
        # This does run, but the total word count comes out smaller than the number of lines,
        # presumably because the GloVe file contains duplicate words.
        # path = '/where/to/read/full/glove'
        # init_embed, vocab = EmbedLoader.load_without_vocab(path, error='strict')
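
A short sketch of the two EmbedLoader entry points exercised above, run against the same sample GloVe file; the shapes and the effect of normalize=True are taken from the assertions in these tests.

import numpy as np
from fastNLP import Vocabulary
from fastNLP.io import EmbedLoader

glove = "test/data_for_tests/glove.6B.50d_test.txt"

# 1) restrict the embedding matrix to an existing vocabulary
vocab = Vocabulary()
vocab.add_word('the')
vocab.add_word('none')
matrix = EmbedLoader.load_with_vocab(glove, vocab, normalize=True)
print(matrix.shape)                    # one 50-d row per vocabulary entry
print(np.linalg.norm(matrix, axis=1))  # rows are (roughly) unit norm with normalize=True

# 2) build both the matrix and a Vocabulary directly from the file
g_m, built_vocab = EmbedLoader.load_without_vocab(glove)
print(g_m.shape, 'the' in built_vocab)
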
@@ -2,55 +2,9 @@ import unittest
import torch

from fastNLP.modules.other_modules import GroupNorm, LayerNormalization, BiLinear, BiAffine
from fastNLP.modules.encoder.star_transformer import StarTransformer


class TestGroupNorm(unittest.TestCase):
    def test_case_1(self):
        gn = GroupNorm(num_features=1, num_groups=10, eps=1.5e-5)
        x = torch.randn((20, 50, 10))
        y = gn(x)


class TestLayerNormalization(unittest.TestCase):
    def test_case_1(self):
        ln = LayerNormalization(layer_size=5, eps=2e-3)
        x = torch.randn((20, 50, 5))
        y = ln(x)


class TestBiLinear(unittest.TestCase):
    def test_case_1(self):
        bl = BiLinear(n_left=5, n_right=5, n_out=10, bias=True)
        x_left = torch.randn((7, 10, 20, 5))
        x_right = torch.randn((7, 10, 20, 5))
        y = bl(x_left, x_right)
        print(bl)
        bl2 = BiLinear(n_left=15, n_right=15, n_out=10, bias=True)


class TestBiAffine(unittest.TestCase):
    def test_case_1(self):
        batch_size = 16
        encoder_length = 21
        decoder_length = 32
        layer = BiAffine(10, 10, 25, biaffine=True)
        decoder_input = torch.randn((batch_size, encoder_length, 10))
        encoder_input = torch.randn((batch_size, decoder_length, 10))
        y = layer(decoder_input, encoder_input)
        self.assertEqual(tuple(y.shape), (batch_size, 25, encoder_length, decoder_length))

    def test_case_2(self):
        batch_size = 16
        encoder_length = 21
        decoder_length = 32
        layer = BiAffine(10, 10, 25, biaffine=False)
        decoder_input = torch.randn((batch_size, encoder_length, 10))
        encoder_input = torch.randn((batch_size, decoder_length, 10))
        y = layer(decoder_input, encoder_input)
        self.assertEqual(tuple(y.shape), (batch_size, 25, encoder_length, 1))


class TestStarTransformer(unittest.TestCase):
    def test_1(self):
        model = StarTransformer(num_layers=6, hidden_size=100, num_head=8, head_dim=20, max_len=100)
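
The constructor call above is all of this test that falls inside the hunk. Purely as a sketch, and under the assumption (not confirmed by anything shown here) that StarTransformer.forward takes a [batch, length, hidden_size] float tensor plus a [batch, length] mask and returns per-token node states together with a relay state, the encoder might be exercised like this:

import torch
from fastNLP.modules.encoder.star_transformer import StarTransformer

model = StarTransformer(num_layers=6, hidden_size=100, num_head=8, head_dim=20, max_len=100)

x = torch.rand(2, 7, 100)                   # assumed input: [batch, length, hidden_size]
mask = torch.ones(2, 7, dtype=torch.uint8)  # assumed mask: 1 marks real tokens
nodes, relay = model(x, mask)               # assumed return shapes: [2, 7, 100] and [2, 100]
print(nodes.shape, relay.shape)
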