From c7463cf0907b49f20007b3fd8d487d9ee84051e1 Mon Sep 17 00:00:00 2001 From: wyg <1505116161@qq.com> Date: Sat, 6 Jul 2019 16:45:37 +0800 Subject: [PATCH 1/4] [verify] yelpdataloader --- reproduction/text_classification/data/yelpLoader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reproduction/text_classification/data/yelpLoader.py b/reproduction/text_classification/data/yelpLoader.py index 9e1e1c6b..90a80301 100644 --- a/reproduction/text_classification/data/yelpLoader.py +++ b/reproduction/text_classification/data/yelpLoader.py @@ -128,7 +128,7 @@ class yelpLoader(DataSetLoader): all_count+=1 if len(row)==2: target=self.tag_v[row[0]+".0"] - words=clean_str(row[1],self.lower) + words=clean_str(row[1],self.tokenizer,self.lower) if len(words)!=0: ds.append(Instance(words=words,target=target)) real_count += 1 From d05aca6da62ecf1d7f41bbeeb03ea470a40ca165 Mon Sep 17 00:00:00 2001 From: lyhuang18 <42239874+lyhuang18@users.noreply.github.com> Date: Sun, 7 Jul 2019 08:21:51 +0800 Subject: [PATCH 2/4] TC/LSTM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit LSTM、AWDLSTM和LSTM+self attention三个模型 --- .../text_classification/data/IMDBLoader.py | 80 +++++++++++++ .../text_classification/data/MTL16Loader.py | 6 +- .../text_classification/data/SSTLoader.py | 99 ++++++++++++++++ .../text_classification/data/yelpLoader.py | 111 ++++++++++-------- .../text_classification/model/awd_lstm.py | 31 +++++ .../model/awdlstm_module.py | 86 ++++++++++++++ .../text_classification/model/lstm.py | 30 +++++ .../model/lstm_self_attention.py | 35 ++++++ .../text_classification/model/weight_drop.py | 99 ++++++++++++++++ .../text_classification/results_LSTM.xlsx | Bin 0 -> 9944 bytes .../text_classification/train_awdlstm.py | 102 ++++++++++++++++ .../text_classification/train_lstm.py | 99 ++++++++++++++++ .../text_classification/train_lstm_att.py | 101 ++++++++++++++++ 13 files changed, 827 insertions(+), 52 deletions(-) create mode 100644 reproduction/text_classification/data/IMDBLoader.py create mode 100644 reproduction/text_classification/data/SSTLoader.py create mode 100644 reproduction/text_classification/model/awd_lstm.py create mode 100644 reproduction/text_classification/model/awdlstm_module.py create mode 100644 reproduction/text_classification/model/lstm.py create mode 100644 reproduction/text_classification/model/lstm_self_attention.py create mode 100644 reproduction/text_classification/model/weight_drop.py create mode 100644 reproduction/text_classification/results_LSTM.xlsx create mode 100644 reproduction/text_classification/train_awdlstm.py create mode 100644 reproduction/text_classification/train_lstm.py create mode 100644 reproduction/text_classification/train_lstm_att.py diff --git a/reproduction/text_classification/data/IMDBLoader.py b/reproduction/text_classification/data/IMDBLoader.py new file mode 100644 index 00000000..d591cdf8 --- /dev/null +++ b/reproduction/text_classification/data/IMDBLoader.py @@ -0,0 +1,80 @@ +from fastNLP.io.embed_loader import EmbeddingOption, EmbedLoader +from fastNLP.core.vocabulary import VocabularyOption +from fastNLP.io.base_loader import DataSetLoader, DataInfo +from typing import Union, Dict, List, Iterator +from fastNLP import DataSet +from fastNLP import Instance +from fastNLP import Vocabulary +from fastNLP import Const +# from reproduction.utils import check_dataloader_paths +from functools import partial + +class IMDBLoader(DataSetLoader): + """ + 读取IMDB数据集,DataSet包含以下fields: + + words: list(str), 
需要分类的文本 + target: str, 文本的标签 + + + """ + + def __init__(self): + super(IMDBLoader, self).__init__() + + def _load(self, path): + dataset = DataSet() + with open(path, 'r', encoding="utf-8") as f: + for line in f: + line = line.strip() + if not line: + continue + parts = line.split('\t') + target = parts[0] + words = parts[1].lower().split() + dataset.append(Instance(words=words, target=target)) + if len(dataset)==0: + raise RuntimeError(f"{path} has no valid data.") + + return dataset + + def process(self, + paths: Union[str, Dict[str, str]], + src_vocab_opt: VocabularyOption = None, + tgt_vocab_opt: VocabularyOption = None, + src_embed_opt: EmbeddingOption = None): + + # paths = check_dataloader_paths(paths) + datasets = {} + info = DataInfo() + for name, path in paths.items(): + dataset = self.load(path) + datasets[name] = dataset + + datasets["train"], datasets["dev"] = datasets["train"].split(0.1, shuffle=False) + + src_vocab = Vocabulary() if src_vocab_opt is None else Vocabulary(**src_vocab_opt) + src_vocab.from_dataset(datasets['train'], field_name='words') + src_vocab.index_dataset(*datasets.values(), field_name='words') + + tgt_vocab = Vocabulary(unknown=None, padding=None) \ + if tgt_vocab_opt is None else Vocabulary(**tgt_vocab_opt) + tgt_vocab.from_dataset(datasets['train'], field_name='target') + tgt_vocab.index_dataset(*datasets.values(), field_name='target') + + info.vocabs = { + "words": src_vocab, + "target": tgt_vocab + } + + info.datasets = datasets + + if src_embed_opt is not None: + embed = EmbedLoader.load_with_vocab(**src_embed_opt, vocab=src_vocab) + info.embeddings['words'] = embed + + for name, dataset in info.datasets.items(): + dataset.set_input("words") + dataset.set_target("target") + + return info diff --git a/reproduction/text_classification/data/MTL16Loader.py b/reproduction/text_classification/data/MTL16Loader.py index 1b3e6245..066b53b4 100644 --- a/reproduction/text_classification/data/MTL16Loader.py +++ b/reproduction/text_classification/data/MTL16Loader.py @@ -32,7 +32,7 @@ class MTL16Loader(DataSetLoader): continue parts = line.split('\t') target = parts[0] - words = parts[1].split() + words = parts[1].lower().split() dataset.append(Instance(words=words, target=target)) if len(dataset)==0: raise RuntimeError(f"{path} has no valid data.") @@ -72,4 +72,8 @@ class MTL16Loader(DataSetLoader): embed = EmbedLoader.load_with_vocab(**src_embed_opt, vocab=src_vocab) info.embeddings['words'] = embed + for name, dataset in info.datasets.items(): + dataset.set_input("words") + dataset.set_target("target") + return info diff --git a/reproduction/text_classification/data/SSTLoader.py b/reproduction/text_classification/data/SSTLoader.py new file mode 100644 index 00000000..b570994e --- /dev/null +++ b/reproduction/text_classification/data/SSTLoader.py @@ -0,0 +1,99 @@ +from typing import Iterable +from nltk import Tree +from fastNLP.io.base_loader import DataInfo, DataSetLoader +from fastNLP.core.vocabulary import VocabularyOption, Vocabulary +from fastNLP import DataSet +from fastNLP import Instance +from fastNLP.io.embed_loader import EmbeddingOption, EmbedLoader + + +class SSTLoader(DataSetLoader): + URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip' + DATA_DIR = 'sst/' + + """ + 别名::class:`fastNLP.io.SSTLoader` :class:`fastNLP.io.dataset_loader.SSTLoader` + + 读取SST数据集, DataSet包含fields:: + + words: list(str) 需要分类的文本 + target: str 文本的标签 + + 数据来源: https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip + + :param subtree: 是否将数据展开为子树,扩充数据量. 
Default: ``False`` + :param fine_grained: 是否使用SST-5标准,若 ``False`` , 使用SST-2。Default: ``False`` + """ + + def __init__(self, subtree=False, fine_grained=False): + self.subtree = subtree + + tag_v = {'0': 'very negative', '1': 'negative', '2': 'neutral', + '3': 'positive', '4': 'very positive'} + if not fine_grained: + tag_v['0'] = tag_v['1'] + tag_v['4'] = tag_v['3'] + self.tag_v = tag_v + + def _load(self, path): + """ + + :param str path: 存储数据的路径 + :return: 一个 :class:`~fastNLP.DataSet` 类型的对象 + """ + datalist = [] + with open(path, 'r', encoding='utf-8') as f: + datas = [] + for l in f: + datas.extend([(s, self.tag_v[t]) + for s, t in self._get_one(l, self.subtree)]) + ds = DataSet() + for words, tag in datas: + ds.append(Instance(words=words, target=tag)) + return ds + + @staticmethod + def _get_one(data, subtree): + tree = Tree.fromstring(data) + if subtree: + return [(t.leaves(), t.label()) for t in tree.subtrees()] + return [(tree.leaves(), tree.label())] + + def process(self, + paths, + train_ds: Iterable[str] = None, + src_vocab_op: VocabularyOption = None, + tgt_vocab_op: VocabularyOption = None, + src_embed_op: EmbeddingOption = None): + input_name, target_name = 'words', 'target' + src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op) + tgt_vocab = Vocabulary(unknown=None, padding=None) \ + if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op) + + info = DataInfo(datasets=self.load(paths)) + _train_ds = [info.datasets[name] + for name in train_ds] if train_ds else info.datasets.values() + src_vocab.from_dataset(*_train_ds, field_name=input_name) + tgt_vocab.from_dataset(*_train_ds, field_name=target_name) + src_vocab.index_dataset( + *info.datasets.values(), + field_name=input_name, new_field_name=input_name) + tgt_vocab.index_dataset( + *info.datasets.values(), + field_name=target_name, new_field_name=target_name) + info.vocabs = { + input_name: src_vocab, + target_name: tgt_vocab + } + + if src_embed_op is not None: + src_embed_op.vocab = src_vocab + init_emb = EmbedLoader.load_with_vocab(**src_embed_op) + info.embeddings[input_name] = init_emb + + for name, dataset in info.datasets.items(): + dataset.set_input(input_name) + dataset.set_target(target_name) + + return info + diff --git a/reproduction/text_classification/data/yelpLoader.py b/reproduction/text_classification/data/yelpLoader.py index c47d48fd..680b3488 100644 --- a/reproduction/text_classification/data/yelpLoader.py +++ b/reproduction/text_classification/data/yelpLoader.py @@ -1,68 +1,77 @@ -import ast -from fastNLP import DataSet, Instance, Vocabulary +from fastNLP.io.embed_loader import EmbeddingOption, EmbedLoader from fastNLP.core.vocabulary import VocabularyOption -from fastNLP.io import JsonLoader -from fastNLP.io.base_loader import DataInfo -from fastNLP.io.embed_loader import EmbeddingOption -from fastNLP.io.file_reader import _read_json -from typing import Union, Dict -from reproduction.Star_transformer.datasets import EmbedLoader -from reproduction.utils import check_dataloader_paths +from fastNLP.io.base_loader import DataSetLoader, DataInfo +from typing import Union, Dict, List, Iterator +from fastNLP import DataSet +from fastNLP import Instance +from fastNLP import Vocabulary +from fastNLP import Const +# from reproduction.utils import check_dataloader_paths +from functools import partial +import pandas as pd - -class yelpLoader(JsonLoader): - +class yelpLoader(DataSetLoader): """ - 读取Yelp数据集, DataSet包含fields: - - review_id: str, 22 character unique review id - user_id: str, 
22 character unique user id - business_id: str, 22 character business id - useful: int, number of useful votes received - funny: int, number of funny votes received - cool: int, number of cool votes received - date: str, date formatted YYYY-MM-DD + 读取IMDB数据集,DataSet包含以下fields: + words: list(str), 需要分类的文本 target: str, 文本的标签 - - 数据来源: https://www.yelp.com/dataset/download - - :param fine_grained: 是否使用SST-5标准,若 ``False`` , 使用SST-2。Default: ``False`` + + """ - - def __init__(self, fine_grained=False): + + def __init__(self): super(yelpLoader, self).__init__() - tag_v = {'1.0': 'very negative', '2.0': 'negative', '3.0': 'neutral', - '4.0': 'positive', '5.0': 'very positive'} - if not fine_grained: - tag_v['1.0'] = tag_v['2.0'] - tag_v['5.0'] = tag_v['4.0'] - self.fine_grained = fine_grained - self.tag_v = tag_v - + def _load(self, path): - ds = DataSet() - for idx, d in _read_json(path, fields=self.fields_list, dropna=self.dropna): - d = ast.literal_eval(d) - d["words"] = d.pop("text").split() - d["target"] = self.tag_v[str(d.pop("stars"))] - ds.append(Instance(**d)) - return ds + dataset = DataSet() + data = pd.read_csv(path, header=None, sep=",").values + for line in data: + target = str(line[0]) + words = str(line[1]).lower().split() + dataset.append(Instance(words=words, target=target)) + if len(dataset)==0: + raise RuntimeError(f"{path} has no valid data.") - def process(self, paths: Union[str, Dict[str, str]], vocab_opt: VocabularyOption = None, - embed_opt: EmbeddingOption = None): - paths = check_dataloader_paths(paths) + return dataset + + def process(self, + paths: Union[str, Dict[str, str]], + src_vocab_opt: VocabularyOption = None, + tgt_vocab_opt: VocabularyOption = None, + src_embed_opt: EmbeddingOption = None): + + # paths = check_dataloader_paths(paths) datasets = {} info = DataInfo() - vocab = Vocabulary(min_freq=2) if vocab_opt is None else Vocabulary(**vocab_opt) for name, path in paths.items(): dataset = self.load(path) datasets[name] = dataset - vocab.from_dataset(dataset, field_name="words") - info.vocabs = vocab + + datasets["train"], datasets["dev"] = datasets["train"].split(0.1, shuffle=False) + + src_vocab = Vocabulary() if src_vocab_opt is None else Vocabulary(**src_vocab_opt) + src_vocab.from_dataset(datasets['train'], field_name='words') + src_vocab.index_dataset(*datasets.values(), field_name='words') + + tgt_vocab = Vocabulary(unknown=None, padding=None) \ + if tgt_vocab_opt is None else Vocabulary(**tgt_vocab_opt) + tgt_vocab.from_dataset(datasets['train'], field_name='target') + tgt_vocab.index_dataset(*datasets.values(), field_name='target') + + info.vocabs = { + "words": src_vocab, + "target": tgt_vocab + } + info.datasets = datasets - if embed_opt is not None: - embed = EmbedLoader.load_with_vocab(**embed_opt, vocab=vocab) + + if src_embed_opt is not None: + embed = EmbedLoader.load_with_vocab(**src_embed_opt, vocab=src_vocab) info.embeddings['words'] = embed - return info + for name, dataset in info.datasets.items(): + dataset.set_input("words") + dataset.set_target("target") + + return info diff --git a/reproduction/text_classification/model/awd_lstm.py b/reproduction/text_classification/model/awd_lstm.py new file mode 100644 index 00000000..0d8f711a --- /dev/null +++ b/reproduction/text_classification/model/awd_lstm.py @@ -0,0 +1,31 @@ +import torch +import torch.nn as nn +from fastNLP.core.const import Const as C +from .awdlstm_module import LSTM +from fastNLP.modules import encoder +from fastNLP.modules.decoder.mlp import MLP + + +class 
AWDLSTMSentiment(nn.Module): + def __init__(self, init_embed, + num_classes, + hidden_dim=256, + num_layers=1, + nfc=128, + wdrop=0.5): + super(AWDLSTMSentiment,self).__init__() + self.embed = encoder.Embedding(init_embed) + self.lstm = LSTM(input_size=self.embed.embedding_dim, hidden_size=hidden_dim, num_layers=num_layers, bidirectional=True, wdrop=wdrop) + self.mlp = MLP(size_layer=[hidden_dim* 2, nfc, num_classes]) + + def forward(self, words): + x_emb = self.embed(words) + output, _ = self.lstm(x_emb) + output = self.mlp(output[:,-1,:]) + return {C.OUTPUT: output} + + def predict(self, words): + output = self(words) + _, predict = output[C.OUTPUT].max(dim=1) + return {C.OUTPUT: predict} + diff --git a/reproduction/text_classification/model/awdlstm_module.py b/reproduction/text_classification/model/awdlstm_module.py new file mode 100644 index 00000000..87bfe730 --- /dev/null +++ b/reproduction/text_classification/model/awdlstm_module.py @@ -0,0 +1,86 @@ +""" +轻量封装的 Pytorch LSTM 模块. +可在 forward 时传入序列的长度, 自动对padding做合适的处理. +""" +__all__ = [ + "LSTM" +] + +import torch +import torch.nn as nn +import torch.nn.utils.rnn as rnn + +from fastNLP.modules.utils import initial_parameter +from torch import autograd +from .weight_drop import WeightDrop + + +class LSTM(nn.Module): + """ + 别名::class:`fastNLP.modules.LSTM` :class:`fastNLP.modules.encoder.lstm.LSTM` + + LSTM 模块, 轻量封装的Pytorch LSTM. 在提供seq_len的情况下,将自动使用pack_padded_sequence; 同时默认将forget gate的bias初始化 + 为1; 且可以应对DataParallel中LSTM的使用问题。 + + :param input_size: 输入 `x` 的特征维度 + :param hidden_size: 隐状态 `h` 的特征维度. + :param num_layers: rnn的层数. Default: 1 + :param dropout: 层间dropout概率. Default: 0 + :param bidirectional: 若为 ``True``, 使用双向的RNN. Default: ``False`` + :param batch_first: 若为 ``True``, 输入和输出 ``Tensor`` 形状为 + :(batch, seq, feature). Default: ``False`` + :param bias: 如果为 ``False``, 模型将不会使用bias. Default: ``True`` + """ + + def __init__(self, input_size, hidden_size=100, num_layers=1, dropout=0.0, batch_first=True, + bidirectional=False, bias=True, wdrop=0.5): + super(LSTM, self).__init__() + self.batch_first = batch_first + self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bias=bias, batch_first=batch_first, + dropout=dropout, bidirectional=bidirectional) + self.lstm = WeightDrop(self.lstm, ['weight_hh_l0'], dropout=wdrop) + self.init_param() + + def init_param(self): + for name, param in self.named_parameters(): + if 'bias' in name: + # based on https://github.com/pytorch/pytorch/issues/750#issuecomment-280671871 + param.data.fill_(0) + n = param.size(0) + start, end = n // 4, n // 2 + param.data[start:end].fill_(1) + else: + nn.init.xavier_uniform_(param) + + def forward(self, x, seq_len=None, h0=None, c0=None): + """ + + :param x: [batch, seq_len, input_size] 输入序列 + :param seq_len: [batch, ] 序列长度, 若为 ``None``, 所有输入看做一样长. Default: ``None`` + :param h0: [batch, hidden_size] 初始隐状态, 若为 ``None`` , 设为全0向量. Default: ``None`` + :param c0: [batch, hidden_size] 初始Cell状态, 若为 ``None`` , 设为全0向量. Default: ``None`` + :return (output, ht) 或 output: 若 ``get_hidden=True`` [batch, seq_len, hidden_size*num_direction] 输出序列 + 和 [batch, hidden_size*num_direction] 最后时刻隐状态. 
+ """ + batch_size, max_len, _ = x.size() + if h0 is not None and c0 is not None: + hx = (h0, c0) + else: + hx = None + if seq_len is not None and not isinstance(x, rnn.PackedSequence): + sort_lens, sort_idx = torch.sort(seq_len, dim=0, descending=True) + if self.batch_first: + x = x[sort_idx] + else: + x = x[:, sort_idx] + x = rnn.pack_padded_sequence(x, sort_lens, batch_first=self.batch_first) + output, hx = self.lstm(x, hx) # -> [N,L,C] + output, _ = rnn.pad_packed_sequence(output, batch_first=self.batch_first, total_length=max_len) + _, unsort_idx = torch.sort(sort_idx, dim=0, descending=False) + if self.batch_first: + output = output[unsort_idx] + else: + output = output[:, unsort_idx] + else: + output, hx = self.lstm(x, hx) + return output, hx diff --git a/reproduction/text_classification/model/lstm.py b/reproduction/text_classification/model/lstm.py new file mode 100644 index 00000000..388f3f1c --- /dev/null +++ b/reproduction/text_classification/model/lstm.py @@ -0,0 +1,30 @@ +import torch +import torch.nn as nn +from fastNLP.core.const import Const as C +from fastNLP.modules.encoder.lstm import LSTM +from fastNLP.modules import encoder +from fastNLP.modules.decoder.mlp import MLP + + +class BiLSTMSentiment(nn.Module): + def __init__(self, init_embed, + num_classes, + hidden_dim=256, + num_layers=1, + nfc=128): + super(BiLSTMSentiment,self).__init__() + self.embed = encoder.Embedding(init_embed) + self.lstm = LSTM(input_size=self.embed.embedding_dim, hidden_size=hidden_dim, num_layers=num_layers, bidirectional=True) + self.mlp = MLP(size_layer=[hidden_dim* 2, nfc, num_classes]) + + def forward(self, words): + x_emb = self.embed(words) + output, _ = self.lstm(x_emb) + output = self.mlp(output[:,-1,:]) + return {C.OUTPUT: output} + + def predict(self, words): + output = self(words) + _, predict = output[C.OUTPUT].max(dim=1) + return {C.OUTPUT: predict} + diff --git a/reproduction/text_classification/model/lstm_self_attention.py b/reproduction/text_classification/model/lstm_self_attention.py new file mode 100644 index 00000000..239635fe --- /dev/null +++ b/reproduction/text_classification/model/lstm_self_attention.py @@ -0,0 +1,35 @@ +import torch +import torch.nn as nn +from fastNLP.core.const import Const as C +from fastNLP.modules.encoder.lstm import LSTM +from fastNLP.modules import encoder +from fastNLP.modules.aggregator.attention import SelfAttention +from fastNLP.modules.decoder.mlp import MLP + + +class BiLSTM_SELF_ATTENTION(nn.Module): + def __init__(self, init_embed, + num_classes, + hidden_dim=256, + num_layers=1, + attention_unit=256, + attention_hops=1, + nfc=128): + super(BiLSTM_SELF_ATTENTION,self).__init__() + self.embed = encoder.Embedding(init_embed) + self.lstm = LSTM(input_size=self.embed.embedding_dim, hidden_size=hidden_dim, num_layers=num_layers, bidirectional=True) + self.attention = SelfAttention(input_size=hidden_dim * 2 , attention_unit=attention_unit, attention_hops=attention_hops) + self.mlp = MLP(size_layer=[hidden_dim* 2*attention_hops, nfc, num_classes]) + + def forward(self, words): + x_emb = self.embed(words) + output, _ = self.lstm(x_emb) + after_attention, penalty = self.attention(output,words) + after_attention =after_attention.view(after_attention.size(0),-1) + output = self.mlp(after_attention) + return {C.OUTPUT: output} + + def predict(self, words): + output = self(words) + _, predict = output[C.OUTPUT].max(dim=1) + return {C.OUTPUT: predict} diff --git a/reproduction/text_classification/model/weight_drop.py 
b/reproduction/text_classification/model/weight_drop.py new file mode 100644 index 00000000..60fda179 --- /dev/null +++ b/reproduction/text_classification/model/weight_drop.py @@ -0,0 +1,99 @@ +import torch +from torch.nn import Parameter +from functools import wraps + +class WeightDrop(torch.nn.Module): + def __init__(self, module, weights, dropout=0, variational=False): + super(WeightDrop, self).__init__() + self.module = module + self.weights = weights + self.dropout = dropout + self.variational = variational + self._setup() + + def widget_demagnetizer_y2k_edition(*args, **kwargs): + # We need to replace flatten_parameters with a nothing function + # It must be a function rather than a lambda as otherwise pickling explodes + # We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION! + # (╯°□°)╯︵ ┻━┻ + return + + def _setup(self): + # Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN + if issubclass(type(self.module), torch.nn.RNNBase): + self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition + + for name_w in self.weights: + print('Applying weight drop of {} to {}'.format(self.dropout, name_w)) + w = getattr(self.module, name_w) + del self.module._parameters[name_w] + self.module.register_parameter(name_w + '_raw', Parameter(w.data)) + + def _setweights(self): + for name_w in self.weights: + raw_w = getattr(self.module, name_w + '_raw') + w = None + if self.variational: + mask = torch.autograd.Variable(torch.ones(raw_w.size(0), 1)) + if raw_w.is_cuda: mask = mask.cuda() + mask = torch.nn.functional.dropout(mask, p=self.dropout, training=True) + w = mask.expand_as(raw_w) * raw_w + else: + w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training) + setattr(self.module, name_w, w) + + def forward(self, *args): + self._setweights() + return self.module.forward(*args) + +if __name__ == '__main__': + import torch + from weight_drop import WeightDrop + + # Input is (seq, batch, input) + x = torch.autograd.Variable(torch.randn(2, 1, 10)).cuda() + h0 = None + + ### + + print('Testing WeightDrop') + print('=-=-=-=-=-=-=-=-=-=') + + ### + + print('Testing WeightDrop with Linear') + + lin = WeightDrop(torch.nn.Linear(10, 10), ['weight'], dropout=0.9) + lin.cuda() + run1 = [x.sum() for x in lin(x).data] + run2 = [x.sum() for x in lin(x).data] + + print('All items should be different') + print('Run 1:', run1) + print('Run 2:', run2) + + assert run1[0] != run2[0] + assert run1[1] != run2[1] + + print('---') + + ### + + print('Testing WeightDrop with LSTM') + + wdrnn = WeightDrop(torch.nn.LSTM(10, 10), ['weight_hh_l0'], dropout=0.9) + wdrnn.cuda() + + run1 = [x.sum() for x in wdrnn(x, h0)[0].data] + run2 = [x.sum() for x in wdrnn(x, h0)[0].data] + + print('First timesteps should be equal, all others should differ') + print('Run 1:', run1) + print('Run 2:', run2) + + # First time step, not influenced by hidden to hidden weights, should be equal + assert run1[0] == run2[0] + # Second step should not + assert run1[1] != run2[1] + + print('---') diff --git a/reproduction/text_classification/results_LSTM.xlsx b/reproduction/text_classification/results_LSTM.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..0d7b841b12b43ee346c4d9db17032fade8c0b40a GIT binary patch literal 9944 zcmeHNWmFtlw{0APHPE=by9Rd+1a}G2xVu{jmf!?;OK^8dkYEYHgIj>0!ChY`GxIXT zSV;*Y zQ45hC)h0rUXS_C5!#Y0QjUTPKibgeJC$)<0upZ5^AYS<_jgGkJsx8Zv&Qq#q$Zw*` zoeza>WwWJ%3(7R3YGYq==is64@57JRBRpxEHDR%)xTm1T;jVkcxzTx~v?X2|E+RqQ 
z1PQ$n^g$U=UplHiOD?WgbOar72A-K_$h^IPjjKHQ9u*m7+2T{TU6#pqs8wA6h#mNPL-0RoLGHK<7#JPWQ*+wX2Q>GLWX>EHR^QZ{JjKr zKdAC?cIHr7VYf^m&0M)XE0>w&rXR*vB(D$iUp*}AF4R1Vaq6V!blR)fC=+R*NS^yN zxXfG2!Do0CfmneZiWoaqvrYiE-<{#7JVRs&KO$IK@V|Cd>^Mhl8g?_dN2eG!ntJzJ z?+Ms(D0D(v#33G}{KI=cl%78a?|(=?zlQKXg`bd^&d4q{kkmQMorI@P!j~5)5^B5@ zO&S|8CRelYbIGyipj)3>BJlh5b;sd#p7d+MVnTDYIHrjf=of3IxRzB1KC{g4#H#tW z=Rru!g0aTqEFEbuN=ot(eX-iu$ki`YsBZDP{d;DrqmhbZg}=?nMR>6dT)re6DJ!wI zJ2FD3V^yl(XIk^qdd@GTs&ls+?MEtTpa1T&?9p(D+~e41!g;cf`=!1g1)7NMc${U~ z_@TiT*?szWe1`P9qO7~gbaf4R^H&l80PPf>qSsmZ??nIUfh!<2ZOM z*;XG5ybnF%R(~2NSk5)BslYrjs%`V#P6NBhX+&c~Y=ND_8hh1658q9(-1E3nFX_vX zU|L2|jJEZg!#D3h?LMs6RE)K)PFXSn*}H@K&Ar1=OE0^M7J@_pP_V%NznybXa+E_v zPE!kTS`XQ8-3>5lgxm&+k$=lS3%cQxT96h~h+>EVQ4Gx-OqHD-99>vV9h|{`&K>_( zD1-!-Z@i*XHybGATy~!PNZ85W(yE;5HNXuH6H|=LJ?VB3aBS}}z!8s!=Tp|k!7PE9gsLatGrh1 zu_tp!x}#MxxStTOWuwTCm0X2#LYFo5JzySlfVlME(a3Lbf{_M^Mj(V{0skJ2#*U8v z4MvE|{@gO+JMHIw%oM_1p@r^6W!Te&ifXD9FnUi{0pz^3!2CE23c)=D16kl?V)$KD z_t`nu_aymsQAJ8~Y7SHZU#DbLzxvKim%T`NlR=AN zhZtmw5Rv(Dm9jcqSX2^*o)S6vktSs*{)XJt$XhP*nK;$UX}SU~`=&~U{FSktYj~og zaY2#QDk(?mL|S8qe)v9}m;2nz?RytvL)N&6+L14TWuuR*THxnw^uG=P`!I8cU z-b~8b&+p9q1V{SDr1B2%L>rv%M=VI9g0|NYoZf+0nHyk?UNUi<&?1}rnzmC6ItD~&&Pk?`}8~Drc$MFp$N&dEq;GyBexzt~#pCHQHgPGNb z#{Zm1`eh0Lpgj6%{C`XixxaI|7Yd?yZI-wznTA8$CVY}ATbCDCdiKl62=)n(g*NA DxVh?K literal 0 HcmV?d00001 diff --git a/reproduction/text_classification/train_awdlstm.py b/reproduction/text_classification/train_awdlstm.py new file mode 100644 index 00000000..ce3e52bc --- /dev/null +++ b/reproduction/text_classification/train_awdlstm.py @@ -0,0 +1,102 @@ +# 这个模型需要在pytorch=0.4下运行,weight_drop不支持1.0 + +# 首先需要加入以下的路径到环境变量,因为当前只对内部测试开放,所以需要手动申明一下路径 +import os +os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/' +os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches' + + +import torch.nn as nn + +from data.SSTLoader import SSTLoader +from data.IMDBLoader import IMDBLoader +from data.yelpLoader import yelpLoader +from fastNLP.modules.encoder.embedding import StaticEmbedding +from model.awd_lstm import AWDLSTMSentiment + +from fastNLP.core.const import Const as C +from fastNLP import CrossEntropyLoss, AccuracyMetric +from fastNLP import Trainer, Tester +from torch.optim import Adam +from fastNLP.io.model_io import ModelLoader, ModelSaver + +import argparse + + +class Config(): + train_epoch= 10 + lr=0.001 + + num_classes=2 + hidden_dim=256 + num_layers=1 + nfc=128 + wdrop=0.5 + + task_name = "IMDB" + datapath={"train":"IMDB_data/train.csv", "test":"IMDB_data/test.csv"} + load_model_path="./result_IMDB/best_BiLSTM_SELF_ATTENTION_acc_2019-07-07-04-16-51" + save_model_path="./result_IMDB_test/" +opt=Config + + +# load data +dataloaders = { + "IMDB":IMDBLoader(), + "YELP":yelpLoader(), + "SST-5":SSTLoader(subtree=True,fine_grained=True), + "SST-3":SSTLoader(subtree=True,fine_grained=False) +} + +if opt.task_name not in ["IMDB", "YELP", "SST-5", "SST-3"]: + raise ValueError("task name must in ['IMDB', 'YELP, 'SST-5', 'SST-3']") + +dataloader = dataloaders[opt.task_name] +datainfo=dataloader.process(opt.datapath) +# print(datainfo.datasets["train"]) +# print(datainfo) + + +# define model +vocab=datainfo.vocabs['words'] +embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-840b-300', requires_grad=True) +model=AWDLSTMSentiment(init_embed=embed, num_classes=opt.num_classes, hidden_dim=opt.hidden_dim, num_layers=opt.num_layers, nfc=opt.nfc, wdrop=opt.wdrop) + + +# define loss_function and metrics +loss=CrossEntropyLoss() 
+metrics=AccuracyMetric() +optimizer= Adam([param for param in model.parameters() if param.requires_grad==True], lr=opt.lr) + + +def train(datainfo, model, optimizer, loss, metrics, opt): + trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, + metrics=metrics, dev_data=datainfo.datasets['dev'], device=0, check_code_level=-1, + n_epochs=opt.train_epoch, save_path=opt.save_model_path) + trainer.train() + + +def test(datainfo, metrics, opt): + # load model + model = ModelLoader.load_pytorch_model(opt.load_model_path) + print("model loaded!") + + # Tester + tester = Tester(datainfo.datasets['test'], model, metrics, batch_size=4, device=0) + acc = tester.test() + print("acc=",acc) + + + +parser = argparse.ArgumentParser() +parser.add_argument('--mode', required=True, dest="mode",help='set the model\'s model') + + +args = parser.parse_args() +if args.mode == 'train': + train(datainfo, model, optimizer, loss, metrics, opt) +elif args.mode == 'test': + test(datainfo, metrics, opt) +else: + print('no mode specified for model!') + parser.print_help() diff --git a/reproduction/text_classification/train_lstm.py b/reproduction/text_classification/train_lstm.py new file mode 100644 index 00000000..b320e79c --- /dev/null +++ b/reproduction/text_classification/train_lstm.py @@ -0,0 +1,99 @@ +# 首先需要加入以下的路径到环境变量,因为当前只对内部测试开放,所以需要手动申明一下路径 +import os +os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/' +os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches' + + +import torch.nn as nn + +from data.SSTLoader import SSTLoader +from data.IMDBLoader import IMDBLoader +from data.yelpLoader import yelpLoader +from fastNLP.modules.encoder.embedding import StaticEmbedding +from model.lstm import BiLSTMSentiment + +from fastNLP.core.const import Const as C +from fastNLP import CrossEntropyLoss, AccuracyMetric +from fastNLP import Trainer, Tester +from torch.optim import Adam +from fastNLP.io.model_io import ModelLoader, ModelSaver + +import argparse + + +class Config(): + train_epoch= 10 + lr=0.001 + + num_classes=2 + hidden_dim=256 + num_layers=1 + nfc=128 + + task_name = "IMDB" + datapath={"train":"IMDB_data/train.csv", "test":"IMDB_data/test.csv"} + load_model_path="./result_IMDB/best_BiLSTM_SELF_ATTENTION_acc_2019-07-07-04-16-51" + save_model_path="./result_IMDB_test/" +opt=Config + + +# load data +dataloaders = { + "IMDB":IMDBLoader(), + "YELP":yelpLoader(), + "SST-5":SSTLoader(subtree=True,fine_grained=True), + "SST-3":SSTLoader(subtree=True,fine_grained=False) +} + +if opt.task_name not in ["IMDB", "YELP", "SST-5", "SST-3"]: + raise ValueError("task name must in ['IMDB', 'YELP, 'SST-5', 'SST-3']") + +dataloader = dataloaders[opt.task_name] +datainfo=dataloader.process(opt.datapath) +# print(datainfo.datasets["train"]) +# print(datainfo) + + +# define model +vocab=datainfo.vocabs['words'] +embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-840b-300', requires_grad=True) +model=BiLSTMSentiment(init_embed=embed, num_classes=opt.num_classes, hidden_dim=opt.hidden_dim, num_layers=opt.num_layers, nfc=opt.nfc) + + +# define loss_function and metrics +loss=CrossEntropyLoss() +metrics=AccuracyMetric() +optimizer= Adam([param for param in model.parameters() if param.requires_grad==True], lr=opt.lr) + + +def train(datainfo, model, optimizer, loss, metrics, opt): + trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, + metrics=metrics, dev_data=datainfo.datasets['dev'], device=0, check_code_level=-1, + 
n_epochs=opt.train_epoch, save_path=opt.save_model_path) + trainer.train() + + +def test(datainfo, metrics, opt): + # load model + model = ModelLoader.load_pytorch_model(opt.load_model_path) + print("model loaded!") + + # Tester + tester = Tester(datainfo.datasets['test'], model, metrics, batch_size=4, device=0) + acc = tester.test() + print("acc=",acc) + + + +parser = argparse.ArgumentParser() +parser.add_argument('--mode', required=True, dest="mode",help='set the model\'s model') + + +args = parser.parse_args() +if args.mode == 'train': + train(datainfo, model, optimizer, loss, metrics, opt) +elif args.mode == 'test': + test(datainfo, metrics, opt) +else: + print('no mode specified for model!') + parser.print_help() diff --git a/reproduction/text_classification/train_lstm_att.py b/reproduction/text_classification/train_lstm_att.py new file mode 100644 index 00000000..8db27d09 --- /dev/null +++ b/reproduction/text_classification/train_lstm_att.py @@ -0,0 +1,101 @@ +# 首先需要加入以下的路径到环境变量,因为当前只对内部测试开放,所以需要手动申明一下路径 +import os +os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/' +os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches' + + +import torch.nn as nn + +from data.SSTLoader import SSTLoader +from data.IMDBLoader import IMDBLoader +from data.yelpLoader import yelpLoader +from fastNLP.modules.encoder.embedding import StaticEmbedding +from model.lstm_self_attention import BiLSTM_SELF_ATTENTION + +from fastNLP.core.const import Const as C +from fastNLP import CrossEntropyLoss, AccuracyMetric +from fastNLP import Trainer, Tester +from torch.optim import Adam +from fastNLP.io.model_io import ModelLoader, ModelSaver + +import argparse + + +class Config(): + train_epoch= 10 + lr=0.001 + + num_classes=2 + hidden_dim=256 + num_layers=1 + attention_unit=256 + attention_hops=1 + nfc=128 + + task_name = "IMDB" + datapath={"train":"IMDB_data/train.csv", "test":"IMDB_data/test.csv"} + load_model_path="./result_IMDB/best_BiLSTM_SELF_ATTENTION_acc_2019-07-07-04-16-51" + save_model_path="./result_IMDB_test/" +opt=Config + + +# load data +dataloaders = { + "IMDB":IMDBLoader(), + "YELP":yelpLoader(), + "SST-5":SSTLoader(subtree=True,fine_grained=True), + "SST-3":SSTLoader(subtree=True,fine_grained=False) +} + +if opt.task_name not in ["IMDB", "YELP", "SST-5", "SST-3"]: + raise ValueError("task name must in ['IMDB', 'YELP, 'SST-5', 'SST-3']") + +dataloader = dataloaders[opt.task_name] +datainfo=dataloader.process(opt.datapath) +# print(datainfo.datasets["train"]) +# print(datainfo) + + +# define model +vocab=datainfo.vocabs['words'] +embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-840b-300', requires_grad=True) +model=BiLSTM_SELF_ATTENTION(init_embed=embed, num_classes=opt.num_classes, hidden_dim=opt.hidden_dim, num_layers=opt.num_layers, attention_unit=opt.attention_unit, attention_hops=opt.attention_hops, nfc=opt.nfc) + + +# define loss_function and metrics +loss=CrossEntropyLoss() +metrics=AccuracyMetric() +optimizer= Adam([param for param in model.parameters() if param.requires_grad==True], lr=opt.lr) + + +def train(datainfo, model, optimizer, loss, metrics, opt): + trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, + metrics=metrics, dev_data=datainfo.datasets['dev'], device=0, check_code_level=-1, + n_epochs=opt.train_epoch, save_path=opt.save_model_path) + trainer.train() + + +def test(datainfo, metrics, opt): + # load model + model = ModelLoader.load_pytorch_model(opt.load_model_path) + print("model loaded!") + + # 
Tester + tester = Tester(datainfo.datasets['test'], model, metrics, batch_size=4, device=0) + acc = tester.test() + print("acc=",acc) + + + +parser = argparse.ArgumentParser() +parser.add_argument('--mode', required=True, dest="mode",help='set the model\'s model') + + +args = parser.parse_args() +if args.mode == 'train': + train(datainfo, model, optimizer, loss, metrics, opt) +elif args.mode == 'test': + test(datainfo, metrics, opt) +else: + print('no mode specified for model!') + parser.print_help() From f6bba93696b8266d27a657eceace16653df3525f Mon Sep 17 00:00:00 2001 From: wyg <1505116161@qq.com> Date: Sun, 7 Jul 2019 14:50:36 +0800 Subject: [PATCH 3/4] [verify] yelpdataloader [add] HAN train_HAN --- .../text_classification/data/sstLoader.py | 106 +++++++++++++++-- .../text_classification/data/yelpLoader.py | 6 + reproduction/text_classification/model/HAN.py | 109 ++++++++++++++++++ reproduction/text_classification/train_HAN.py | 109 ++++++++++++++++++ .../text_classification/train_char_cnn.py | 7 +- 5 files changed, 324 insertions(+), 13 deletions(-) create mode 100644 reproduction/text_classification/model/HAN.py create mode 100644 reproduction/text_classification/train_HAN.py diff --git a/reproduction/text_classification/data/sstLoader.py b/reproduction/text_classification/data/sstLoader.py index bffb67fd..0d1b647c 100644 --- a/reproduction/text_classification/data/sstLoader.py +++ b/reproduction/text_classification/data/sstLoader.py @@ -1,13 +1,101 @@ -import csv from typing import Iterable -from fastNLP import DataSet, Instance, Vocabulary -from fastNLP.core.vocabulary import VocabularyOption -from fastNLP.io.base_loader import DataInfo,DataSetLoader -from fastNLP.io.embed_loader import EmbeddingOption -from fastNLP.io.file_reader import _read_json -from typing import Union, Dict -from reproduction.Star_transformer.datasets import EmbedLoader -from reproduction.utils import check_dataloader_paths +from nltk import Tree +from fastNLP.io.base_loader import DataInfo, DataSetLoader +from fastNLP.core.vocabulary import VocabularyOption, Vocabulary +from fastNLP import DataSet +from fastNLP import Instance +from fastNLP.io.embed_loader import EmbeddingOption, EmbedLoader + + +class SSTLoader(DataSetLoader): + URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip' + DATA_DIR = 'sst/' + + """ + 别名::class:`fastNLP.io.SSTLoader` :class:`fastNLP.io.dataset_loader.SSTLoader` + + 读取SST数据集, DataSet包含fields:: + + words: list(str) 需要分类的文本 + target: str 文本的标签 + + 数据来源: https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip + + :param subtree: 是否将数据展开为子树,扩充数据量. 
Default: ``False`` + :param fine_grained: 是否使用SST-5标准,若 ``False`` , 使用SST-2。Default: ``False`` + """ + + def __init__(self, subtree=False, fine_grained=False): + self.subtree = subtree + + tag_v = {'0': 'very negative', '1': 'negative', '2': 'neutral', + '3': 'positive', '4': 'very positive'} + if not fine_grained: + tag_v['0'] = tag_v['1'] + tag_v['4'] = tag_v['3'] + self.tag_v = tag_v + + def _load(self, path): + """ + + :param str path: 存储数据的路径 + :return: 一个 :class:`~fastNLP.DataSet` 类型的对象 + """ + datalist = [] + with open(path, 'r', encoding='utf-8') as f: + datas = [] + for l in f: + datas.extend([(s, self.tag_v[t]) + for s, t in self._get_one(l, self.subtree)]) + ds = DataSet() + for words, tag in datas: + ds.append(Instance(words=words, target=tag)) + return ds + + @staticmethod + def _get_one(data, subtree): + tree = Tree.fromstring(data) + if subtree: + return [(t.leaves(), t.label()) for t in tree.subtrees()] + return [(tree.leaves(), tree.label())] + + def process(self, + paths, + train_ds: Iterable[str] = None, + src_vocab_op: VocabularyOption = None, + tgt_vocab_op: VocabularyOption = None, + src_embed_op: EmbeddingOption = None): + input_name, target_name = 'words', 'target' + src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op) + tgt_vocab = Vocabulary(unknown=None, padding=None) \ + if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op) + + info = DataInfo(datasets=self.load(paths)) + _train_ds = [info.datasets[name] + for name in train_ds] if train_ds else info.datasets.values() + src_vocab.from_dataset(*_train_ds, field_name=input_name) + tgt_vocab.from_dataset(*_train_ds, field_name=target_name) + src_vocab.index_dataset( + *info.datasets.values(), + field_name=input_name, new_field_name=input_name) + tgt_vocab.index_dataset( + *info.datasets.values(), + field_name=target_name, new_field_name=target_name) + info.vocabs = { + input_name: src_vocab, + target_name: tgt_vocab + } + + if src_embed_op is not None: + src_embed_op.vocab = src_vocab + init_emb = EmbedLoader.load_with_vocab(**src_embed_op) + info.embeddings[input_name] = init_emb + + for name, dataset in info.datasets.items(): + dataset.set_input(input_name) + dataset.set_target(target_name) + + return info class sst2Loader(DataSetLoader): ''' diff --git a/reproduction/text_classification/data/yelpLoader.py b/reproduction/text_classification/data/yelpLoader.py index 0e65fb20..280e8be0 100644 --- a/reproduction/text_classification/data/yelpLoader.py +++ b/reproduction/text_classification/data/yelpLoader.py @@ -184,6 +184,12 @@ class yelpLoader(DataSetLoader): info.vocabs[target_name]=tgt_vocab + info.datasets['train'],info.datasets['dev']=info.datasets['train'].split(0.1, shuffle=False) + + for name, dataset in info.datasets.items(): + dataset.set_input("words") + dataset.set_target("target") + return info if __name__=="__main__": diff --git a/reproduction/text_classification/model/HAN.py b/reproduction/text_classification/model/HAN.py new file mode 100644 index 00000000..0902d1e4 --- /dev/null +++ b/reproduction/text_classification/model/HAN.py @@ -0,0 +1,109 @@ +import torch +import torch.nn as nn +from torch.autograd import Variable +from fastNLP.modules.utils import get_embeddings +from fastNLP.core import Const as C + + +def pack_sequence(tensor_seq, padding_value=0.0): + if len(tensor_seq) <= 0: + return + length = [v.size(0) for v in tensor_seq] + max_len = max(length) + size = [len(tensor_seq), max_len] + size.extend(list(tensor_seq[0].size()[1:])) + ans = 
torch.Tensor(*size).fill_(padding_value) + if tensor_seq[0].data.is_cuda: + ans = ans.cuda() + ans = Variable(ans) + for i, v in enumerate(tensor_seq): + ans[i, :length[i], :] = v + return ans + + +class HANCLS(nn.Module): + def __init__(self, init_embed, num_cls): + super(HANCLS, self).__init__() + + self.embed = get_embeddings(init_embed) + self.han = HAN(input_size=300, + output_size=num_cls, + word_hidden_size=50, word_num_layers=1, word_context_size=100, + sent_hidden_size=50, sent_num_layers=1, sent_context_size=100 + ) + + def forward(self, input_sents): + # input_sents [B, num_sents, seq-len] dtype long + # target + B, num_sents, seq_len = input_sents.size() + input_sents = input_sents.view(-1, seq_len) # flat + words_embed = self.embed(input_sents) # should be [B*num-sent, seqlen , word-dim] + words_embed = words_embed.view(B, num_sents, seq_len, -1) # recover # [B, num-sent, seqlen , word-dim] + out = self.han(words_embed) + + return {C.OUTPUT: out} + + def predict(self, input_sents): + x = self.forward(input_sents)[C.OUTPUT] + return {C.OUTPUT: torch.argmax(x, 1)} + + +class HAN(nn.Module): + def __init__(self, input_size, output_size, + word_hidden_size, word_num_layers, word_context_size, + sent_hidden_size, sent_num_layers, sent_context_size): + super(HAN, self).__init__() + + self.word_layer = AttentionNet(input_size, + word_hidden_size, + word_num_layers, + word_context_size) + self.sent_layer = AttentionNet(2 * word_hidden_size, + sent_hidden_size, + sent_num_layers, + sent_context_size) + self.output_layer = nn.Linear(2 * sent_hidden_size, output_size) + self.softmax = nn.LogSoftmax(dim=1) + + def forward(self, batch_doc): + # input is a sequence of matrix + doc_vec_list = [] + for doc in batch_doc: + sent_mat = self.word_layer(doc) # doc's dim (num_sent, seq_len, word_dim) + doc_vec_list.append(sent_mat) # sent_mat's dim (num_sent, vec_dim) + doc_vec = self.sent_layer(pack_sequence(doc_vec_list)) + output = self.softmax(self.output_layer(doc_vec)) + return output + + +class AttentionNet(nn.Module): + def __init__(self, input_size, gru_hidden_size, gru_num_layers, context_vec_size): + super(AttentionNet, self).__init__() + + self.input_size = input_size + self.gru_hidden_size = gru_hidden_size + self.gru_num_layers = gru_num_layers + self.context_vec_size = context_vec_size + + # Encoder + self.gru = nn.GRU(input_size=input_size, + hidden_size=gru_hidden_size, + num_layers=gru_num_layers, + batch_first=True, + bidirectional=True) + # Attention + self.fc = nn.Linear(2 * gru_hidden_size, context_vec_size) + self.tanh = nn.Tanh() + self.softmax = nn.Softmax(dim=1) + # context vector + self.context_vec = nn.Parameter(torch.Tensor(context_vec_size, 1)) + self.context_vec.data.uniform_(-0.1, 0.1) + + def forward(self, inputs): + # GRU part + h_t, hidden = self.gru(inputs) # inputs's dim (batch_size, seq_len, word_dim) + u = self.tanh(self.fc(h_t)) + # Attention part + alpha = self.softmax(torch.matmul(u, self.context_vec)) # u's dim (batch_size, seq_len, context_vec_size) + output = torch.bmm(torch.transpose(h_t, 1, 2), alpha) # alpha's dim (batch_size, seq_len, 1) + return torch.squeeze(output, dim=2) # output's dim (batch_size, 2*hidden_size, 1) diff --git a/reproduction/text_classification/train_HAN.py b/reproduction/text_classification/train_HAN.py new file mode 100644 index 00000000..b1135342 --- /dev/null +++ b/reproduction/text_classification/train_HAN.py @@ -0,0 +1,109 @@ +# 首先需要加入以下的路径到环境变量,因为当前只对内部测试开放,所以需要手动申明一下路径 + +import os +import sys +sys.path.append('../../') 
+os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/' +os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches' +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" + +from fastNLP.core.const import Const as C +from fastNLP.core import LRScheduler +import torch.nn as nn +from fastNLP.io.dataset_loader import SSTLoader +from reproduction.text_classification.data.yelpLoader import yelpLoader +from reproduction.text_classification.model.HAN import HANCLS +from fastNLP.modules.encoder.embedding import StaticEmbedding, CNNCharEmbedding, StackEmbedding +from fastNLP import CrossEntropyLoss, AccuracyMetric +from fastNLP.core.trainer import Trainer +from torch.optim import SGD +import torch.cuda +from torch.optim.lr_scheduler import CosineAnnealingLR + + +##hyper + +class Config(): + model_dir_or_name = "en-base-uncased" + embedding_grad = False, + train_epoch = 30 + batch_size = 100 + num_classes = 5 + task = "yelp" + #datadir = '/remote-home/lyli/fastNLP/yelp_polarity/' + datadir = '/remote-home/ygwang/yelp_polarity/' + datafile = {"train": "train.csv", "test": "test.csv"} + lr = 1e-3 + + def __init__(self): + self.datapath = {k: os.path.join(self.datadir, v) + for k, v in self.datafile.items()} + + +ops = Config() + +##1.task相关信息:利用dataloader载入dataInfo + +datainfo = yelpLoader(fine_grained=True).process(paths=ops.datapath, train_ds=['train']) +print(len(datainfo.datasets['train'])) +print(len(datainfo.datasets['test'])) + + +# post process +def make_sents(words): + sents = [words] + return sents + + +for dataset in datainfo.datasets.values(): + dataset.apply_field(make_sents, field_name='words', new_field_name='input_sents') + +datainfo = datainfo +datainfo.datasets['train'].set_input('input_sents') +datainfo.datasets['test'].set_input('input_sents') +datainfo.datasets['train'].set_target('target') +datainfo.datasets['test'].set_target('target') + +## 2.或直接复用fastNLP的模型 + +vocab = datainfo.vocabs['words'] +# embedding = StackEmbedding([StaticEmbedding(vocab), CNNCharEmbedding(vocab, 100)]) +embedding = StaticEmbedding(vocab) + +print(len(vocab)) +print(len(datainfo.vocabs['target'])) + +# model = DPCNN(init_embed=embedding, num_cls=ops.num_classes) +model = HANCLS(init_embed=embedding, num_cls=ops.num_classes) + +## 3. 
声明loss,metric,optimizer +loss = CrossEntropyLoss(pred=C.OUTPUT, target=C.TARGET) +metric = AccuracyMetric(pred=C.OUTPUT, target=C.TARGET) +optimizer = SGD([param for param in model.parameters() if param.requires_grad == True], + lr=ops.lr, momentum=0.9, weight_decay=0) + +callbacks = [] +callbacks.append(LRScheduler(CosineAnnealingLR(optimizer, 5))) + +device = 'cuda:0' if torch.cuda.is_available() else 'cpu' + +print(device) + +for ds in datainfo.datasets.values(): + ds.apply_field(len, C.INPUT, C.INPUT_LEN) + ds.set_input(C.INPUT, C.INPUT_LEN) + ds.set_target(C.TARGET) + + +## 4.定义train方法 +def train(model, datainfo, loss, metrics, optimizer, num_epochs=ops.train_epoch): + trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, + metrics=[metrics], dev_data=datainfo.datasets['test'], device=device, + check_code_level=-1, batch_size=ops.batch_size, callbacks=callbacks, + n_epochs=num_epochs) + + print(trainer.train()) + + +if __name__ == "__main__": + train(model, datainfo, loss, metric, optimizer) diff --git a/reproduction/text_classification/train_char_cnn.py b/reproduction/text_classification/train_char_cnn.py index c2c983a4..050527fe 100644 --- a/reproduction/text_classification/train_char_cnn.py +++ b/reproduction/text_classification/train_char_cnn.py @@ -7,7 +7,6 @@ import sys sys.path.append('../..') from fastNLP.core.const import Const as C import torch.nn as nn -from fastNLP.io.dataset_loader import SSTLoader from data.yelpLoader import yelpLoader from data.sstLoader import sst2Loader from data.IMDBLoader import IMDBLoader @@ -107,9 +106,9 @@ ops=Config ##1.task相关信息:利用dataloader载入dataInfo -dataloader=sst2Loader() -dataloader=IMDBLoader() -#dataloader=yelpLoader(fine_grained=True) +#dataloader=sst2Loader() +#dataloader=IMDBLoader() +dataloader=yelpLoader(fine_grained=True) datainfo=dataloader.process(ops.datapath,char_level_op=True) char_vocab=ops.char_cnn_config["alphabet"]["en"]["lower"]["alphabet"] ops.number_of_characters=len(char_vocab) From f369778ab33aa91447821db0de37b1bce4be4b62 Mon Sep 17 00:00:00 2001 From: wyg <1505116161@qq.com> Date: Sun, 7 Jul 2019 15:33:08 +0800 Subject: [PATCH 4/4] [verify] sstdataloader add sst2 [add] readme --- reproduction/text_classification/README.md | 22 +++++++++++++++++++ .../text_classification/data/sstLoader.py | 3 ++- 2 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 reproduction/text_classification/README.md diff --git a/reproduction/text_classification/README.md b/reproduction/text_classification/README.md new file mode 100644 index 00000000..b058fbb2 --- /dev/null +++ b/reproduction/text_classification/README.md @@ -0,0 +1,22 @@ +# text_classification任务模型复现 +这里使用fastNLP复现以下模型: +char_cnn :论文链接[Character-level Convolutional Networks for Text Classification](https://arxiv.org/pdf/1509.01626v3.pdf) +dpcnn:论文链接[Deep Pyramid Convolutional Neural Networks for TextCategorization](https://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf) +HAN:论文链接[Hierarchical Attention Networks for Document Classification](https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf) +#待补充 +awd_lstm: +lstm_self_attention(BCN?): +awd-sltm: + +# 数据集及复现结果汇总 + +使用fastNLP复现的结果vs论文汇报结果(/前为fastNLP实现,后面为论文报道,-表示论文没有在该数据集上列出结果) + +model name | yelp_p | sst-2|IMDB| +:---: | :---: | :---: | :---: +char_cnn | 93.80/95.12 | - |- | +dpcnn | 95.50/97.36 | - |- | +HAN |- | - |-| +BCN| - |- |-| +awd-lstm| - |- |-| + diff --git a/reproduction/text_classification/data/sstLoader.py b/reproduction/text_classification/data/sstLoader.py index 
0d1b647c..d8403b7a 100644 --- a/reproduction/text_classification/data/sstLoader.py +++ b/reproduction/text_classification/data/sstLoader.py @@ -5,7 +5,8 @@ from fastNLP.core.vocabulary import VocabularyOption, Vocabulary from fastNLP import DataSet from fastNLP import Instance from fastNLP.io.embed_loader import EmbeddingOption, EmbedLoader - +import csv +from typing import Union, Dict class SSTLoader(DataSetLoader): URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'
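
For reference, a minimal usage sketch of the IMDB loader added above. The import path assumes the snippet is run from reproduction/text_classification/ (as the new train_*.py scripts do), and the CSV locations mirror the IMDB_data/train.csv and IMDB_data/test.csv defaults in their Config; both are assumptions about the local layout rather than part of the patches:

# Minimal sketch, assuming the IMDB CSVs sit at the Config defaults used by the train scripts.
from data.IMDBLoader import IMDBLoader

loader = IMDBLoader()
datainfo = loader.process({"train": "IMDB_data/train.csv", "test": "IMDB_data/test.csv"})

# process() splits part of "train" off as "dev", builds the words/target vocabularies,
# indexes both fields in place and marks them as input/target on every dataset.
print(datainfo.datasets.keys())          # e.g. dict_keys(['train', 'test', 'dev'])
print(len(datainfo.vocabs["words"]))     # source vocabulary built from the train split
print(datainfo.datasets["dev"][0])       # an Instance with indexed 'words' and 'target'

The rewritten yelpLoader in this series exposes the same process() interface, so the same pattern applies to it.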