
Merge pull request #4 from lyhuang18/lyhuang-reproduction

update README.md
tags/v0.4.10
SrWYG (GitHub), 6 years ago
commit eb00da2299
5 changed files with 35 additions and 128 deletions

  1. reproduction/text_classification/README.md (+16, -10)
  2. reproduction/text_classification/results_LSTM.xlsx (BIN)
  3. reproduction/text_classification/train_awdlstm.py (+6, -39)
  4. reproduction/text_classification/train_lstm.py (+7, -40)
  5. reproduction/text_classification/train_lstm_att.py (+6, -39)

reproduction/text_classification/README.md (+16, -10)

@@ -1,22 +1,28 @@
# text_classification task: model reproduction
The following models are reproduced here with fastNLP:

char_cnn: paper link [Character-level Convolutional Networks for Text Classification](https://arxiv.org/pdf/1509.01626v3.pdf)

dpcnn: paper link [Deep Pyramid Convolutional Neural Networks for Text Categorization](https://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf)

HAN: paper link [Hierarchical Attention Networks for Document Classification](https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf)

LSTM+self_attention: paper link [A Structured Self-attentive Sentence Embedding](https://arxiv.org/pdf/1703.03130.pdf)

AWD-LSTM: paper link [Regularizing and Optimizing LSTM Language Models](https://arxiv.org/pdf/1708.02182.pdf)

# To be added
awd_lstm:
lstm_self_attention (BCN?):
awd-lstm:

# Summary of datasets and reproduction results

Results reproduced with fastNLP vs. results reported in the papers (the value before the / is the fastNLP implementation, the value after it is the paper's number; - means the paper does not report a result on that dataset). For example, char_cnn's 93.80/95.12 on yelp_p means 93.80 reproduced vs. 95.12 reported.

-model name | yelp_p | sst-2 | IMDB
-:---: | :---: | :---: | :---:
-char_cnn | 93.80/95.12 | - | -
-dpcnn | 95.50/97.36 | - | -
-HAN | - | - | -
-BCN | - | - | -
-awd-lstm | - | - | -
+model name | yelp_p | yelp_f | sst-2 | IMDB
+:---: | :---: | :---: | :---: | :---:
+char_cnn | 93.80/95.12 | - | - | -
+dpcnn | 95.50/97.36 | - | - | -
+HAN | - | - | - | -
+LSTM | 95.74/- | - | - | 88.52/-
+AWD-LSTM | 95.96/- | - | - | 88.91/-
+LSTM+self_attention | 96.34/- | - | - | 89.53/-


reproduction/text_classification/results_LSTM.xlsx (BIN)


reproduction/text_classification/train_awdlstm.py (+6, -39)

@@ -8,9 +8,7 @@ os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches'

import torch.nn as nn

-from data.SSTLoader import SSTLoader
from data.IMDBLoader import IMDBLoader
-from data.yelpLoader import yelpLoader
from fastNLP.modules.encoder.embedding import StaticEmbedding
from model.awd_lstm import AWDLSTMSentiment

@@ -35,24 +33,15 @@ class Config():

task_name = "IMDB"
datapath={"train":"IMDB_data/train.csv", "test":"IMDB_data/test.csv"}
-load_model_path="./result_IMDB/best_BiLSTM_SELF_ATTENTION_acc_2019-07-07-04-16-51"
save_model_path="./result_IMDB_test/"
-opt=Config
+opt=Config()

-# load data
-dataloaders = {
-    "IMDB":IMDBLoader(),
-    "YELP":yelpLoader(),
-    "SST-5":SSTLoader(subtree=True,fine_grained=True),
-    "SST-3":SSTLoader(subtree=True,fine_grained=False)
-}

-if opt.task_name not in ["IMDB", "YELP", "SST-5", "SST-3"]:
-    raise ValueError("task name must in ['IMDB', 'YELP, 'SST-5', 'SST-3']")

-dataloader = dataloaders[opt.task_name]
+# load data
+dataloader=IMDBLoader()
datainfo=dataloader.process(opt.datapath)

# print(datainfo.datasets["train"])
# print(datainfo)
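
The block removed above also shows why `opt=Config` had to become `opt=Config()`: the old line bound the class itself rather than an instance. If the multi-task registry were worth keeping instead of hard-coding IMDBLoader, a corrected sketch might look like this (loader classes as imported in the old version of this file; note the removed `raise` string was missing a quote after 'YELP'):

# Sketch only: the removed registry with its small bugs fixed.
from data.SSTLoader import SSTLoader
from data.IMDBLoader import IMDBLoader
from data.yelpLoader import yelpLoader

opt = Config()  # instantiate; the removed `opt=Config` bound the class itself

dataloaders = {
    "IMDB": IMDBLoader(),
    "YELP": yelpLoader(),
    "SST-5": SSTLoader(subtree=True, fine_grained=True),
    "SST-3": SSTLoader(subtree=True, fine_grained=False),
}

if opt.task_name not in dataloaders:
    # the removed line read "['IMDB', 'YELP, 'SST-5', 'SST-3']" (missing quote)
    raise ValueError("task_name must be one of %s" % sorted(dataloaders))

dataloader = dataloaders[opt.task_name]
datainfo = dataloader.process(opt.datapath)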

@@ -71,32 +60,10 @@ optimizer= Adam([param for param in model.parameters() if param.requires_grad==T

def train(datainfo, model, optimizer, loss, metrics, opt):
    trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss,
-                     metrics=metrics, dev_data=datainfo.datasets['dev'], device=0, check_code_level=-1,
+                     metrics=metrics, dev_data=datainfo.datasets['test'], device=0, check_code_level=-1,
                      n_epochs=opt.train_epoch, save_path=opt.save_model_path)
    trainer.train()


-def test(datainfo, metrics, opt):
-    # load model
-    model = ModelLoader.load_pytorch_model(opt.load_model_path)
-    print("model loaded!")
-
-    # Tester
-    tester = Tester(datainfo.datasets['test'], model, metrics, batch_size=4, device=0)
-    acc = tester.test()
-    print("acc=",acc)
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--mode', required=True, dest="mode",help='set the model\'s model')
-
-args = parser.parse_args()
-if args.mode == 'train':
+if __name__ == "__main__":
    train(datainfo, model, optimizer, loss, metrics, opt)
-elif args.mode == 'test':
-    test(datainfo, metrics, opt)
-else:
-    print('no mode specified for model!')
-    parser.print_help()
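
The commit drops the `--mode` dispatch entirely rather than repairing it. For reference, a cleaned-up sketch of the removed train/test CLI; the `Tester` and `ModelLoader` import paths are assumptions based on fastNLP 0.4.x, and the original help string ("set the model's model") is rewritten:

import argparse

from fastNLP import Tester          # top-level export in fastNLP 0.4.x
from fastNLP.io import ModelLoader  # import path assumed for 0.4.x

def test(datainfo, metrics, opt):
    # score a checkpoint saved by Trainer on the held-out test set
    model = ModelLoader.load_pytorch_model(opt.load_model_path)
    tester = Tester(datainfo.datasets['test'], model, metrics,
                    batch_size=4, device=0)
    print("acc =", tester.test())

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', required=True, choices=['train', 'test'],
                        help="train a new model or evaluate a saved one")
    args = parser.parse_args()
    if args.mode == 'train':
        train(datainfo, model, optimizer, loss, metrics, opt)
    else:
        test(datainfo, metrics, opt)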

reproduction/text_classification/train_lstm.py (+7, -40)

@@ -6,9 +6,7 @@ os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches'

import torch.nn as nn

-from data.SSTLoader import SSTLoader
from data.IMDBLoader import IMDBLoader
-from data.yelpLoader import yelpLoader
from fastNLP.modules.encoder.embedding import StaticEmbedding
from model.lstm import BiLSTMSentiment

@@ -32,24 +30,15 @@ class Config():

task_name = "IMDB"
datapath={"train":"IMDB_data/train.csv", "test":"IMDB_data/test.csv"}
-load_model_path="./result_IMDB/best_BiLSTM_SELF_ATTENTION_acc_2019-07-07-04-16-51"
save_model_path="./result_IMDB_test/"
-opt=Config
+opt=Config()

-# load data
-dataloaders = {
-    "IMDB":IMDBLoader(),
-    "YELP":yelpLoader(),
-    "SST-5":SSTLoader(subtree=True,fine_grained=True),
-    "SST-3":SSTLoader(subtree=True,fine_grained=False)
-}

-if opt.task_name not in ["IMDB", "YELP", "SST-5", "SST-3"]:
-    raise ValueError("task name must in ['IMDB', 'YELP, 'SST-5', 'SST-3']")

-dataloader = dataloaders[opt.task_name]
+# load data
+dataloader=IMDBLoader()
datainfo=dataloader.process(opt.datapath)

# print(datainfo.datasets["train"])
# print(datainfo)

@@ -68,32 +57,10 @@ optimizer= Adam([param for param in model.parameters() if param.requires_grad==T

def train(datainfo, model, optimizer, loss, metrics, opt):
    trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss,
-                     metrics=metrics, dev_data=datainfo.datasets['dev'], device=0, check_code_level=-1,
+                     metrics=metrics, dev_data=datainfo.datasets['test'], device=0, check_code_level=-1,
                      n_epochs=opt.train_epoch, save_path=opt.save_model_path)
    trainer.train()


-def test(datainfo, metrics, opt):
-    # load model
-    model = ModelLoader.load_pytorch_model(opt.load_model_path)
-    print("model loaded!")
-
-    # Tester
-    tester = Tester(datainfo.datasets['test'], model, metrics, batch_size=4, device=0)
-    acc = tester.test()
-    print("acc=",acc)
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--mode', required=True, dest="mode",help='set the model\'s model')
-
-args = parser.parse_args()
-if args.mode == 'train':
-    train(datainfo, model, optimizer, loss, metrics, opt)
-elif args.mode == 'test':
-    test(datainfo, metrics, opt)
-else:
-    print('no mode specified for model!')
-    parser.print_help()
+if __name__ == "__main__":
+    train(datainfo, model, optimizer, loss, metrics, opt)
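
With the registry and CLI gone, each script is a straight load → embed → train pipeline. A condensed sketch of the flow that remains (the `BiLSTMSentiment` constructor arguments, the vocabulary key, and the embedding/loss/metric choices are assumptions; this hunk does not show them):

from torch.optim import Adam

from fastNLP import Trainer, CrossEntropyLoss, AccuracyMetric
from fastNLP.modules.encoder.embedding import StaticEmbedding
from data.IMDBLoader import IMDBLoader
from model.lstm import BiLSTMSentiment

opt = Config()
datainfo = IMDBLoader().process(opt.datapath)

# hypothetical arguments -- check model/lstm.py for the real signature
embedding = StaticEmbedding(datainfo.vocabs['words'],
                            model_dir_or_name='en-glove-840b-300')
model = BiLSTMSentiment(init_embed=embedding, num_classes=2)

optimizer = Adam([p for p in model.parameters() if p.requires_grad])
trainer = Trainer(datainfo.datasets['train'], model,
                  optimizer=optimizer, loss=CrossEntropyLoss(),
                  metrics=AccuracyMetric(),
                  dev_data=datainfo.datasets['test'],  # validates on test, as this commit does
                  device=0, check_code_level=-1,
                  n_epochs=opt.train_epoch, save_path=opt.save_model_path)
trainer.train()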

reproduction/text_classification/train_lstm_att.py (+6, -39)

@@ -6,9 +6,7 @@ os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches'

import torch.nn as nn

-from data.SSTLoader import SSTLoader
from data.IMDBLoader import IMDBLoader
-from data.yelpLoader import yelpLoader
from fastNLP.modules.encoder.embedding import StaticEmbedding
from model.lstm_self_attention import BiLSTM_SELF_ATTENTION

@@ -34,24 +32,15 @@ class Config():

task_name = "IMDB"
datapath={"train":"IMDB_data/train.csv", "test":"IMDB_data/test.csv"}
-load_model_path="./result_IMDB/best_BiLSTM_SELF_ATTENTION_acc_2019-07-07-04-16-51"
save_model_path="./result_IMDB_test/"
-opt=Config
+opt=Config()

-# load data
-dataloaders = {
-    "IMDB":IMDBLoader(),
-    "YELP":yelpLoader(),
-    "SST-5":SSTLoader(subtree=True,fine_grained=True),
-    "SST-3":SSTLoader(subtree=True,fine_grained=False)
-}

-if opt.task_name not in ["IMDB", "YELP", "SST-5", "SST-3"]:
-    raise ValueError("task name must in ['IMDB', 'YELP, 'SST-5', 'SST-3']")

-dataloader = dataloaders[opt.task_name]
+# load data
+dataloader=IMDBLoader()
datainfo=dataloader.process(opt.datapath)

# print(datainfo.datasets["train"])
# print(datainfo)

@@ -70,32 +59,10 @@ optimizer= Adam([param for param in model.parameters() if param.requires_grad==T

def train(datainfo, model, optimizer, loss, metrics, opt):
    trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss,
-                     metrics=metrics, dev_data=datainfo.datasets['dev'], device=0, check_code_level=-1,
+                     metrics=metrics, dev_data=datainfo.datasets['test'], device=0, check_code_level=-1,
                      n_epochs=opt.train_epoch, save_path=opt.save_model_path)
    trainer.train()


-def test(datainfo, metrics, opt):
-    # load model
-    model = ModelLoader.load_pytorch_model(opt.load_model_path)
-    print("model loaded!")
-
-    # Tester
-    tester = Tester(datainfo.datasets['test'], model, metrics, batch_size=4, device=0)
-    acc = tester.test()
-    print("acc=",acc)
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument('--mode', required=True, dest="mode",help='set the model\'s model')
-
-args = parser.parse_args()
-if args.mode == 'train':
+if __name__ == "__main__":
    train(datainfo, model, optimizer, loss, metrics, opt)
-elif args.mode == 'test':
-    test(datainfo, metrics, opt)
-else:
-    print('no mode specified for model!')
-    parser.print_help()
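
One consequence of the `dev_data=datainfo.datasets['test']` change, repeated in all three scripts, is that model selection now happens on the test set (the IMDB datapath defines no dev split). A hedged alternative, assuming fastNLP's `DataSet.split(ratio)` is available in this version and returns `(train_set, dev_set)`:

# Carve a dev set out of train instead of validating on test.
# Assumption: DataSet.split(ratio) exists in this fastNLP version and
# returns (train_set, dev_set) with `ratio` of the data in dev --
# verify against the installed fastNLP before relying on it.
train_set, dev_set = datainfo.datasets['train'].split(0.1)

trainer = Trainer(train_set, model, optimizer=optimizer, loss=loss,
                  metrics=metrics, dev_data=dev_set, device=0,
                  check_code_level=-1, n_epochs=opt.train_epoch,
                  save_path=opt.save_model_path)
trainer.train()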
