|
@@ -9,27 +9,22 @@ import csv |
from typing import Union, Dict

from reproduction.utils import check_dataloader_paths, get_tokenizer


class SSTLoader(DataSetLoader):
    URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'
    DATA_DIR = 'sst/'

    """
    Alias: :class:`fastNLP.io.SSTLoader` :class:`fastNLP.io.dataset_loader.SSTLoader`

    Reads the SST dataset. The resulting DataSet contains the following fields::

        words: list(str), the text to be classified
        target: str, the label of the text

    Data source: https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip

    :param subtree: whether to expand every subtree into a sample of its own, enlarging the dataset. Default: ``False``
    :param fine_grained: whether to use the five-class SST-5 label set; if ``False``, the binary SST-2 set is used. Default: ``False``
    """

    def __init__(self, subtree=False, fine_grained=False):
        self.subtree = subtree
|
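        # tag_v maps the integer SST tree labels 0-4 to readable tags; when
        # fine_grained is False, the five classes are collapsed to the binary
        # SST-2 scheme (see the class docstring).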
        tag_v = {'0': 'very negative', '1': 'negative', '2': 'neutral',
                 '3': 'positive', '4': 'very positive'}
        if not fine_grained:
|
@@ -39,7 +34,6 @@ class SSTLoader(DataSetLoader): |
    def _load(self, path):
        """

        :param str path: the path of the file that holds the data
        :return: an object of type :class:`~fastNLP.DataSet`
        """
@@ -54,6 +48,7 @@ class SSTLoader(DataSetLoader): |
            ds.append(Instance(words=words, target=tag))
        return ds

    @staticmethod
    def _get_one(data, subtree):
        tree = Tree.fromstring(data)
@@ -61,6 +56,7 @@ class SSTLoader(DataSetLoader): |
            return [(t.leaves(), t.label()) for t in tree.subtrees()]
        return [(tree.leaves(), tree.label())]
|
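        # Illustration (hypothetical input string, not taken from the data):
        # for data = "(3 (2 effective) (2 film))", subtree=True yields one
        # (leaves, label) pair per constituent:
        #   [(['effective', 'film'], '3'), (['effective'], '2'), (['film'], '2')]
        # while subtree=False yields only [(['effective', 'film'], '3')].
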
    def process(self,
                paths,
                train_ds: Iterable[str] = None,
@@ -88,25 +84,30 @@ class SSTLoader(DataSetLoader): |
            target_name: tgt_vocab
        }

        if src_embed_op is not None:
            src_embed_op.vocab = src_vocab
            init_emb = EmbedLoader.load_with_vocab(**src_embed_op)
            info.embeddings[input_name] = init_emb

        for name, dataset in info.datasets.items():
            dataset.set_input(input_name)
            dataset.set_target(target_name)

        return info

|
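# A minimal usage sketch for SSTLoader (illustrative only; the file paths and
# the dict form of `paths` are assumptions, not taken from this diff):
#
#   loader = SSTLoader(subtree=False, fine_grained=False)
#   train_ds = loader._load('sst/train.txt')  # DataSet with 'words'/'target'
#   info = loader.process({'train': 'sst/train.txt', 'dev': 'sst/dev.txt'})
#   # info.datasets, info.vocabs and (optionally) info.embeddings are ready.
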
class sst2Loader(DataSetLoader):
    '''
    Data source "SST": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8'
    '''

    def __init__(self):
        super(sst2Loader, self).__init__()
        self.tokenizer = get_tokenizer()

    def _load(self, path: str) -> DataSet:
        ds = DataSet()
        all_count = 0
@@ -122,6 +123,8 @@ class sst2Loader(DataSetLoader): |
        print("all count:", all_count)
        return ds

    def process(self,
                paths: Union[str, Dict[str, str]],
                src_vocab_opt: VocabularyOption = None,
@@ -153,7 +156,6 @@ class sst2Loader(DataSetLoader): |
|
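        # Optionally add a character-level view of the input: wordtochar
        # splits each word into its characters and stores the result in a new
        # 'chars' field alongside 'words'.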
        if char_level_op:
            for dataset in datasets.values():
                dataset.apply_field(wordtochar, field_name="words", new_field_name='chars')
|
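        # Build the source vocabulary from the training split only, then use
        # it to index every split.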
        src_vocab = Vocabulary() if src_vocab_opt is None else Vocabulary(**src_vocab_opt)
        src_vocab.from_dataset(datasets['train'], field_name='words')
        src_vocab.index_dataset(*datasets.values(), field_name='words')
@@ -171,21 +173,26 @@ class sst2Loader(DataSetLoader): |
        info.datasets = datasets
|
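        # Optionally load pretrained word vectors restricted to src_vocab.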
        if src_embed_opt is not None:
            embed = EmbedLoader.load_with_vocab(**src_embed_opt, vocab=src_vocab)
            info.embeddings['words'] = embed

        for name, dataset in info.datasets.items():
            dataset.set_input("words")
            dataset.set_target("target")

        return info


if __name__ == "__main__":
    datapath = {"train": "/remote-home/ygwang/workspace/GLUE/SST-2/train.tsv",
                "dev": "/remote-home/ygwang/workspace/GLUE/SST-2/dev.tsv"}
    datainfo = sst2Loader().process(datapath, char_level_op=True)
    # print(datainfo.datasets["train"])
|
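    # Report the average number of characters per training example.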
    len_count = 0
    for instance in datainfo.datasets["train"]:
        len_count += len(instance["chars"])

    ave_len = len_count / len(datainfo.datasets["train"])
    print(ave_len)