
Merge pull request #5 from fastnlp/dev0.5.0

Dev0.5.0 delete dataset_loader and pull again
tags/v0.4.10
Danqing Wang (committed via GitHub), 5 years ago
commit 9fe06df4fe
100 changed files with 6271 additions and 3435 deletions
1. +16 -14 README.md
2. +88 -0 fastNLP/core/_parallel_utils.py
3. +26 -1 fastNLP/core/batch.py
4. +135 -6 fastNLP/core/callback.py
5. +33 -34 fastNLP/core/losses.py
6. +110 -0 fastNLP/core/optimizer.py
7. +12 -10 fastNLP/core/predictor.py
8. +6 -5 fastNLP/core/tester.py
9. +2 -2 fastNLP/core/trainer.py
10. +75 -23 fastNLP/core/utils.py
11. +9 -4 fastNLP/core/vocabulary.py
12. +19 -5 fastNLP/io/__init__.py
13. +3 -6 fastNLP/io/base_loader.py
14. +19 -0 fastNLP/io/data_loader/__init__.py
15. +430 -0 fastNLP/io/data_loader/matching.py
16. +24 -19 fastNLP/io/data_loader/sst.py
17. +1 -40 fastNLP/io/dataset_loader.py
18. +2 -2 fastNLP/io/file_reader.py
19. +69 -0 fastNLP/io/utils.py
20. +33 -37 fastNLP/models/bert.py
21. +3 -3 fastNLP/models/star_transformer.py
22. +4 -5 fastNLP/modules/aggregator/attention.py
23. +2 -1 fastNLP/modules/decoder/mlp.py
24. +188 -80 fastNLP/modules/encoder/_bert.py
25. +288 -170 fastNLP/modules/encoder/_elmo.py
26. +218 -85 fastNLP/modules/encoder/embedding.py
27. +8 -5 fastNLP/modules/encoder/star_transformer.py
28. +0 -110 reproduction/CNN-sentence_classification/.gitignore
29. +0 -77 reproduction/CNN-sentence_classification/README.md
30. +0 -136 reproduction/CNN-sentence_classification/dataset.py
31. +0 -42 reproduction/CNN-sentence_classification/model.py
32. +0 -92 reproduction/CNN-sentence_classification/train.py
33. +0 -21 reproduction/Char-aware_NLM/LICENSE
34. +0 -40 reproduction/Char-aware_NLM/README.md
35. +0 -9 reproduction/Char-aware_NLM/main.py
36. +0 -145 reproduction/Char-aware_NLM/model.py
37. +0 -117 reproduction/Char-aware_NLM/test.py
38. +0 -320 reproduction/Char-aware_NLM/test.txt
39. +0 -263 reproduction/Char-aware_NLM/train.py
40. +0 -360 reproduction/Char-aware_NLM/train.txt
41. +0 -82 reproduction/Char-aware_NLM/utilities.py
42. +0 -336 reproduction/Char-aware_NLM/valid.txt
43. +0 -36 reproduction/HAN-document_classification/README.md
44. +0 -45 reproduction/HAN-document_classification/evaluate.py
45. +0 -50 reproduction/HAN-document_classification/preprocess.py
46. +0 -171 reproduction/HAN-document_classification/train.py
47. +4 -2 reproduction/README.md
48. +1 -1 reproduction/Star_transformer/README.md
49. +10 -4 reproduction/Star_transformer/datasets.py
50. +2 -2 reproduction/Star_transformer/run.sh
51. +38 -21 reproduction/Star_transformer/train.py
52. +129 -0 reproduction/Summmarization/BertSum/callback.py
53. +157 -0 reproduction/Summmarization/BertSum/dataloader.py
54. +178 -0 reproduction/Summmarization/BertSum/metrics.py
55. +51 -0 reproduction/Summmarization/BertSum/model.py
56. +147 -0 reproduction/Summmarization/BertSum/train_BertSum.py
57. +24 -0 reproduction/Summmarization/BertSum/utils.py
58. +0 -0 reproduction/coreference_resolution/__init__.py
59. +0 -0 reproduction/coreference_resolution/data_load/__init__.py
60. +68 -0 reproduction/coreference_resolution/data_load/cr_loader.py
61. +0 -0 reproduction/coreference_resolution/model/__init__.py
62. +54 -0 reproduction/coreference_resolution/model/config.py
63. +163 -0 reproduction/coreference_resolution/model/metric.py
64. +576 -0 reproduction/coreference_resolution/model/model_re.py
65. +225 -0 reproduction/coreference_resolution/model/preprocess.py
66. +32 -0 reproduction/coreference_resolution/model/softmax_loss.py
67. +101 -0 reproduction/coreference_resolution/model/util.py
68. +49 -0 reproduction/coreference_resolution/readme.md
69. +0 -0 reproduction/coreference_resolution/test/__init__.py
70. +14 -0 reproduction/coreference_resolution/test/test_dataloader.py
71. +69 -0 reproduction/coreference_resolution/train.py
72. +24 -0 reproduction/coreference_resolution/valid.py
73. +0 -0 reproduction/joint_cws_parse/__init__.py
74. +0 -0 reproduction/joint_cws_parse/data/__init__.py
75. +284 -0 reproduction/joint_cws_parse/data/data_loader.py
76. +311 -0 reproduction/joint_cws_parse/models/CharParser.py
77. +0 -0 reproduction/joint_cws_parse/models/__init__.py
78. +65 -0 reproduction/joint_cws_parse/models/callbacks.py
79. +184 -0 reproduction/joint_cws_parse/models/metrics.py
80. +16 -0 reproduction/joint_cws_parse/readme.md
81. +124 -0 reproduction/joint_cws_parse/train.py
82. +100 -0 reproduction/matching/README.md
83. +52 -12 reproduction/matching/data/MatchingDataLoader.py
84. +102 -0 reproduction/matching/matching_bert.py
85. +105 -0 reproduction/matching/matching_cntn.py
86. +79 -23 reproduction/matching/matching_esim.py
87. +120 -0 reproduction/matching/model/cntn.py
88. +4 -2 reproduction/matching/model/esim.py
89. +13 -0 reproduction/seqence_labelling/ner/README.md
90. +0 -93 reproduction/seqence_labelling/ner/data/Conll2003Loader.py
91. +0 -152 reproduction/seqence_labelling/ner/data/OntoNoteLoader.py
92. +0 -49 reproduction/seqence_labelling/ner/data/utils.py
93. +142 -0 reproduction/seqence_labelling/ner/model/dilated_cnn.py
94. +99 -0 reproduction/seqence_labelling/ner/train_idcnn.py
95. +26 -0 reproduction/text_classification/README.md
96. +114 -0 reproduction/text_classification/data/IMDBLoader.py
97. +5 -1 reproduction/text_classification/data/MTL16Loader.py
98. +198 -0 reproduction/text_classification/data/sstloader.py
99. +160 -31 reproduction/text_classification/data/yelpLoader.py
100. +29 -33 reproduction/text_classification/model/HAN.py

+16 -14 README.md

@@ -6,13 +6,14 @@
![Hex.pm](https://img.shields.io/hexpm/l/plug.svg)
[![Documentation Status](https://readthedocs.org/projects/fastnlp/badge/?version=latest)](http://fastnlp.readthedocs.io/?badge=latest)

fastNLP 是一款轻量级的 NLP 处理套件。你既可以使用它快速地完成一个命名实体识别(NER)、中文分词或文本分类任务; 也可以使用他构建许多复杂的网络模型,进行科研。它具有如下的特性:
fastNLP 是一款轻量级的 NLP 处理套件。你既可以使用它快速地完成一个序列标注([NER](reproduction/seqence_labelling/ner/)、POS-Tagging等)、中文分词、文本分类、[Matching](reproduction/matching/)、指代消解、摘要等任务; 也可以使用它构建许多复杂的网络模型,进行科研。它具有如下的特性:

- 统一的Tabular式数据容器,让数据预处理过程简洁明了。内置多种数据集的DataSet Loader,省去预处理代码。
- 各种方便的NLP工具,例如预处理embedding加载; 中间数据cache等;
- 详尽的中文文档以供查阅;
- 统一的Tabular式数据容器,让数据预处理过程简洁明了。内置多种数据集的DataSet Loader,省去预处理代码;
- 多种训练、测试组件,例如训练器Trainer;测试器Tester;以及各种评测metrics等等;
- 各种方便的NLP工具,例如预处理embedding加载(包括EMLo和BERT); 中间数据cache等;
- 详尽的中文[文档](https://fastnlp.readthedocs.io/)、教程以供查阅;
- 提供诸多高级模块,例如Variational LSTM, Transformer, CRF等;
- 封装CNNText,Biaffine等模型可供直接使用;
- 在序列标注、中文分词、文本分类、Matching、指代消解、摘要等任务上封装了各种模型可供直接使用; [详细链接](reproduction/)
- 便捷且具有扩展性的训练器; 提供多种内置callback函数,方便实验记录、异常捕获等。


@@ -20,13 +21,14 @@ fastNLP 是一款轻量级的 NLP 处理套件。你既可以使用它快速地

fastNLP 依赖如下包:

+ numpy
+ torch>=0.4.0
+ tqdm
+ nltk
+ numpy>=1.14.2
+ torch>=1.0.0
+ tqdm>=4.28.1
+ nltk>=3.4.1
+ requests

其中torch的安装可能与操作系统及 CUDA 的版本相关,请参见 PyTorch 官网 。
在依赖包安装完成的情况,您可以在命令行执行如下指令完成安装
其中torch的安装可能与操作系统及 CUDA 的版本相关,请参见 [PyTorch 官网](https://pytorch.org/)
在依赖包安装完成,您可以在命令行执行如下指令完成安装

```shell
pip install fastNLP
@@ -77,8 +79,8 @@ fastNLP 在 modules 模块中内置了三种模块的诸多组件,可以帮助
fastNLP 为不同的 NLP 任务实现了许多完整的模型,它们都经过了训练和测试。

你可以在以下两个地方查看相关信息
- [介绍](reproduction/)
- [源码](fastNLP/models/)
- [模型介绍](reproduction/)
- [模型源码](fastNLP/models/)

## 项目结构

@@ -93,7 +95,7 @@ fastNLP的大致工作流程如上图所示,而项目结构如下:
</tr>
<tr>
<td><b> fastNLP.core </b></td>
<td> 实现了核心功能,包括数据处理组件、训练器、测器等 </td>
<td> 实现了核心功能,包括数据处理组件、训练器、测试器等 </td>
</tr>
<tr>
<td><b> fastNLP.models </b></td>


+88 -0 fastNLP/core/_parallel_utils.py

@@ -0,0 +1,88 @@

import threading
import torch
from torch.nn.parallel.parallel_apply import get_a_var

from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.replicate import replicate


def parallel_apply(modules, func_name, inputs, kwargs_tup=None, devices=None):
r"""Applies each `module` in :attr:`modules` in parallel on arguments
contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
on each of :attr:`devices`.

:attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
:attr:`devices` (if given) should all have same length. Moreover, each
element of :attr:`inputs` can either be a single object as the only argument
to a module, or a collection of positional arguments.
"""
assert len(modules) == len(inputs)
if kwargs_tup is not None:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)

lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()

def _worker(i, module, input, kwargs, device=None):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
output = getattr(module, func_name)(*input, **kwargs)
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e

if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, kwargs, device))
for i, (module, input, kwargs, device) in
enumerate(zip(modules, inputs, kwargs_tup, devices))]

for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])

outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs


def _data_parallel_wrapper(func_name, device_ids, output_device):
"""
这个函数是用于对需要多卡执行的函数的wrapper函数。参考的nn.DataParallel的forward函数

:param str, func_name: 对network中的这个函数进行多卡运行
:param device_ids: nn.DataParallel中的device_ids
:param output_device: nn.DataParallel中的output_device
:return:
"""
def wrapper(network, *inputs, **kwargs):
inputs, kwargs = scatter_kwargs(inputs, kwargs, device_ids, dim=0)
if len(device_ids) == 1:
return getattr(network, func_name)(*inputs[0], **kwargs[0])
replicas = replicate(network, device_ids[:len(inputs)])
outputs = parallel_apply(replicas, func_name, inputs, kwargs, device_ids[:len(replicas)])
return gather(outputs, output_device)
return wrapper
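
A minimal sketch of how this wrapper is meant to be used, mirroring the `Tester` change further down in this diff. `ToyModel`, the device ids and the batch are made-up placeholders, and the snippet assumes a machine with at least two GPUs:

```python
from functools import partial

import torch
import torch.nn as nn

from fastNLP.core._parallel_utils import _data_parallel_wrapper


class ToyModel(nn.Module):
    """Tiny stand-in model exposing the `predict` method the wrapper dispatches to."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 2)

    def predict(self, words):
        return {'pred': self.linear(words).argmax(dim=-1)}


if torch.cuda.device_count() >= 2:
    model = nn.DataParallel(ToyModel().cuda(), device_ids=[0, 1], output_device=0)
    # Same pattern as in the updated Tester: wrap the method name, then bind the real module.
    predict_parallel = partial(
        _data_parallel_wrapper('predict', model.device_ids, model.output_device),
        network=model.module)
    batch = {'words': torch.randn(16, 8, device='cuda:0')}
    # Inputs are scattered over the two GPUs, `predict` runs on each replica,
    # and the outputs are gathered back on output_device.
    outputs = predict_parallel(**batch)
```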

+26 -1 fastNLP/core/batch.py

@@ -3,7 +3,6 @@ batch 模块实现了 fastNLP 所需的 Batch 类。

"""
__all__ = [
"BatchIter",
"DataSetIter",
"TorchLoaderIter",
]
@@ -50,6 +49,7 @@ class DataSetGetter:
return len(self.dataset)

def collate_fn(self, batch: list):
# TODO 支持在DataSet中定义collate_fn,因为有时候可能需要不同的field之间融合,比如BERT的场景
batch_x = {n:[] for n in self.inputs.keys()}
batch_y = {n:[] for n in self.targets.keys()}
indices = []
@@ -136,6 +136,31 @@ class BatchIter:


class DataSetIter(BatchIter):
"""
别名::class:`fastNLP.DataSetIter` :class:`fastNLP.core.batch.DataSetIter`

DataSetIter 用于从 `DataSet` 中按一定的顺序, 依次按 ``batch_size`` 的大小将数据取出,
组成 `x` 和 `y`::

batch = DataSetIter(data_set, batch_size=16, sampler=SequentialSampler())
num_batch = len(batch)
for batch_x, batch_y in batch:
# do stuff ...

:param dataset: :class:`~fastNLP.DataSet` 对象, 数据集
:param int batch_size: 取出的batch大小
:param sampler: 规定使用的 :class:`~fastNLP.Sampler` 方式. 若为 ``None`` , 使用 :class:`~fastNLP.SequentialSampler`.

Default: ``None``
:param bool as_numpy: 若为 ``True`` , 输出batch为 numpy.array. 否则为 :class:`torch.Tensor`.

Default: ``False``
:param int num_workers: 使用多少个进程来预处理数据
:param bool pin_memory: 是否将产生的tensor使用pin memory, 可能会加快速度。
:param bool drop_last: 如果最后一个batch没有batch_size这么多sample,就扔掉最后一个
:param timeout:
:param worker_init_fn: 在每个worker启动时调用该函数,会传入一个值,该值是worker的index。
"""
def __init__(self, dataset, batch_size=1, sampler=None, as_numpy=False,
num_workers=0, pin_memory=False, drop_last=False,
timeout=0, worker_init_fn=None):


+135 -6 fastNLP/core/callback.py

@@ -66,6 +66,8 @@ import os

import torch
from copy import deepcopy
import sys
from .utils import _save_model

try:
from tensorboardX import SummaryWriter
@@ -113,7 +115,7 @@ class Callback(object):
@property
def n_steps(self):
"""Trainer一共会运行多少步"""
"""Trainer一共会采多少个batch。当Trainer中update_every设置为非1的值时,该值不等于update的次数"""
return self._trainer.n_steps
@property
@@ -181,7 +183,7 @@ class Callback(object):
:param dict batch_x: DataSet中被设置为input的field的batch。
:param dict batch_y: DataSet中被设置为target的field的batch。
:param list(int) indices: 这次采样使用到的indices,可以通过DataSet[indices]获取出这个batch采出的Instance,在一些
情况下可以帮助定位是哪个Sample导致了错误。仅在Trainer的prefetch为False时可用
情况下可以帮助定位是哪个Sample导致了错误。仅当num_workers=0时有效
:return:
"""
pass
@@ -399,10 +401,11 @@ class GradientClipCallback(Callback):
self.clip_value = clip_value
def on_backward_end(self):
if self.parameters is None:
self.clip_fun(self.model.parameters(), self.clip_value)
else:
self.clip_fun(self.parameters, self.clip_value)
if self.step%self.update_every==0:
if self.parameters is None:
self.clip_fun(self.model.parameters(), self.clip_value)
else:
self.clip_fun(self.parameters, self.clip_value)


class EarlyStopCallback(Callback):
@@ -736,6 +739,132 @@ class TensorboardCallback(Callback):
del self._summary_writer


class WarmupCallback(Callback):
"""
按一定的周期调节Learning rate的大小。

:param int,float warmup: 如果warmup为int,则在该step之前,learning rate根据schedule的策略变化; 如果warmup为float,
如0.1, 则前10%的step是按照schedule策略调整learning rate。
:param str schedule: 以哪种方式调整。linear: 前warmup的step上升到指定的learning rate(从Trainer中的optimizer处获取的), 后
warmup的step下降到0; constant前warmup的step上升到指定learning rate,后面的step保持learning rate.
"""
def __init__(self, warmup=0.1, schedule='constant'):
super().__init__()
self.warmup = max(warmup, 0.)

self.initial_lrs = [] # 存放param_group的learning rate
if schedule == 'constant':
self.get_lr = self._get_constant_lr
elif schedule == 'linear':
self.get_lr = self._get_linear_lr
else:
raise RuntimeError("Only support 'linear', 'constant'.")

def _get_constant_lr(self, progress):
if progress<self.warmup:
return progress/self.warmup
return 1

def _get_linear_lr(self, progress):
if progress<self.warmup:
return progress/self.warmup
return max((progress - 1.) / (self.warmup - 1.), 0.)

def on_train_begin(self):
self.t_steps = (len(self.trainer.train_data) // (self.batch_size*self.update_every) +
int(len(self.trainer.train_data) % (self.batch_size*self.update_every)!= 0)) * self.n_epochs
if self.warmup>1:
self.warmup = self.warmup/self.t_steps
self.t_steps = max(2, self.t_steps) # 不能小于2
# 获取param_group的初始learning rate
for group in self.optimizer.param_groups:
self.initial_lrs.append(group['lr'])

def on_backward_end(self):
if self.step%self.update_every==0:
progress = (self.step/self.update_every)/self.t_steps
for lr, group in zip(self.initial_lrs, self.optimizer.param_groups):
group['lr'] = lr * self.get_lr(progress)


class SaveModelCallback(Callback):
"""
由于Trainer在训练过程中只会保存最佳的模型, 该callback可实现多种方式的结果存储。
会根据训练开始的时间戳在save_dir下建立文件夹,再在文件夹下存放多个模型
-save_dir
-2019-07-03-15-06-36
-epoch:0_step:20_{metric_key}:{evaluate_performance}.pt # metric是给定的metric_key, evaluate_performance是性能
-epoch:1_step:40_{metric_key}:{evaluate_performance}.pt
-2019-07-03-15-10-00
-epoch:0_step:20_{metric_key}:{evaluate_performance}.pt # metric是给定的metric_key, evaluate_performance是性能
:param str save_dir: 将模型存放在哪个目录下,会在该目录下创建以时间戳命名的目录,并存放模型
:param int top: 保存dev表现top多少模型。-1为保存所有模型。
:param bool only_param: 是否只保存模型的权重。
:param save_on_exception: 发生exception时,是否保存一份发生exception的模型。模型名称为epoch:x_step:x_Exception:{exception_name}.
"""
def __init__(self, save_dir, top=3, only_param=False, save_on_exception=False):
super().__init__()

if not os.path.isdir(save_dir):
raise NotADirectoryError("{} is not a directory.".format(save_dir))
self.save_dir = save_dir
if top < 0:
self.top = sys.maxsize
else:
self.top = top
self._ordered_save_models = [] # List[Tuple], Tuple[0]是metric, Tuple[1]是path。metric是依次变好的,所以从头删

self.only_param = only_param
self.save_on_exception = save_on_exception

def on_train_begin(self):
self.save_dir = os.path.join(self.save_dir, self.trainer.start_time)

def on_valid_end(self, eval_result, metric_key, optimizer, is_better_eval):
metric_value = list(eval_result.values())[0][metric_key]
self._save_this_model(metric_value)

def _insert_into_ordered_save_models(self, pair):
# pair:(metric_value, model_name)
# 返回save的模型pair与删除的模型pair. pair中第一个元素是metric的值,第二个元素是模型的名称
index = -1
for _pair in self._ordered_save_models:
if _pair[0]>=pair[0] and self.trainer.increase_better:
break
if not self.trainer.increase_better and _pair[0]<=pair[0]:
break
index += 1
save_pair = None
if len(self._ordered_save_models)<self.top or (len(self._ordered_save_models)>=self.top and index!=-1):
save_pair = pair
self._ordered_save_models.insert(index+1, pair)
delete_pair = None
if len(self._ordered_save_models)>self.top:
delete_pair = self._ordered_save_models.pop(0)
return save_pair, delete_pair

def _save_this_model(self, metric_value):
name = "epoch:{}_step:{}_{}:{:.6f}.pt".format(self.epoch, self.step, self.trainer.metric_key, metric_value)
save_pair, delete_pair = self._insert_into_ordered_save_models((metric_value, name))
if save_pair:
try:
_save_model(self.model, model_name=name, save_dir=self.save_dir, only_param=self.only_param)
except Exception as e:
print(f"The following exception:{e} happens when save model to {self.save_dir}.")
if delete_pair:
try:
delete_model_path = os.path.join(self.save_dir, delete_pair[1])
if os.path.exists(delete_model_path):
os.remove(delete_model_path)
except Exception as e:
print(f"Fail to delete model {name} at {self.save_dir} caused by exception:{e}.")

def on_exception(self, exception):
if self.save_on_exception:
name = "epoch:{}_step:{}_Exception:{}.pt".format(self.epoch, self.step, exception.__class__.__name__)
_save_model(self.model, model_name=name, save_dir=self.save_dir, only_param=self.only_param)


class CallbackException(BaseException):
"""
当需要通过callback跳出训练的时候可以通过抛出CallbackException并在on_exception中捕获这个值。
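
A standalone sanity check of the two schedules implemented by the new `WarmupCallback` (no Trainer is needed, since `get_lr` only depends on training progress). In an actual run the callbacks would typically be handed to the Trainer's `callbacks` argument, e.g. together with `SaveModelCallback`:

```python
from fastNLP.core.callback import WarmupCallback

constant = WarmupCallback(warmup=0.1, schedule='constant')
linear = WarmupCallback(warmup=0.1, schedule='linear')

for progress in (0.0, 0.05, 0.1, 0.5, 1.0):
    # multiplier applied to each param group's initial learning rate at this point of training
    print(progress, constant.get_lr(progress), linear.get_lr(progress))
# constant: ramps 0 -> 1 over the first 10% of steps, then stays at 1
# linear:   ramps 0 -> 1 over the first 10% of steps, then decays linearly back to 0

# In training (illustrative only; the directory and arguments are placeholders):
# trainer = Trainer(..., callbacks=[WarmupCallback(warmup=0.1, schedule='linear'),
#                                   SaveModelCallback('checkpoints/', top=3)])
```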


+33 -34 fastNLP/core/losses.py

@@ -20,6 +20,7 @@ from collections import defaultdict
import torch
import torch.nn.functional as F

from ..core.const import Const
from .utils import _CheckError
from .utils import _CheckRes
from .utils import _build_args
@@ -28,6 +29,7 @@ from .utils import _check_function_or_method
from .utils import _get_func_signature
from .utils import seq_len_to_mask


class LossBase(object):
"""
所有loss的基类。如果想了解其中的原理,请查看源码。
@@ -95,22 +97,7 @@ class LossBase(object):
# if func_spect.varargs:
# raise NameError(f"Delete `*{func_spect.varargs}` in {get_func_signature(self.get_loss)}(Do not use "
# f"positional argument.).")
def _fast_param_map(self, pred_dict, target_dict):
"""Only used as inner function. When the pred_dict, target is unequivocal. Don't need users to pass key_map.
such as pred_dict has one element, target_dict has one element

:param pred_dict:
:param target_dict:
:return: dict, if dict is not {}, pass it to self.evaluate. Otherwise do mapping.
"""
fast_param = {}
if len(self._param_map) == 2 and len(pred_dict) == 1 and len(target_dict) == 1:
fast_param['pred'] = list(pred_dict.values())[0]
fast_param['target'] = list(target_dict.values())[0]
return fast_param
return fast_param
def __call__(self, pred_dict, target_dict, check=False):
"""
:param dict pred_dict: 模型的forward函数返回的dict
@@ -118,11 +105,7 @@ class LossBase(object):
:param Boolean check: 每一次执行映射函数的时候是否检查映射表,默认为不检查
:return:
"""
fast_param = self._fast_param_map(pred_dict, target_dict)
if fast_param:
loss = self.get_loss(**fast_param)
return loss

if not self._checked:
# 1. check consistence between signature and _param_map
func_spect = inspect.getfullargspec(self.get_loss)
@@ -212,7 +195,6 @@ class LossFunc(LossBase):
if not isinstance(key_map, dict):
raise RuntimeError(f"Loss error: key_map except a {type({})} but got a {type(key_map)}")
self._init_param_map(key_map, **kwargs)


class CrossEntropyLoss(LossBase):
@@ -226,6 +208,7 @@ class CrossEntropyLoss(LossBase):
:param seq_len: 句子的长度, 长度之外的token不会计算loss。。
:param padding_idx: padding的index,在计算loss时将忽略target中标号为padding_idx的内容, 可以通过该值代替
传入seq_len.
:param str reduction: 支持'mean','sum'和'none'.

Example::

@@ -233,21 +216,25 @@ class CrossEntropyLoss(LossBase):
"""
def __init__(self, pred=None, target=None, seq_len=None, padding_idx=-100):
def __init__(self, pred=None, target=None, seq_len=None, padding_idx=-100, reduction='mean'):
super(CrossEntropyLoss, self).__init__()
self._init_param_map(pred=pred, target=target, seq_len=seq_len)
self.padding_idx = padding_idx
assert reduction in ('mean', 'sum', 'none')
self.reduction = reduction
def get_loss(self, pred, target, seq_len=None):
if pred.dim()>2:
pred = pred.view(-1, pred.size(-1))
target = target.view(-1)
if pred.dim() > 2:
if pred.size(1) != target.size(1):
pred = pred.transpose(1, 2)
pred = pred.reshape(-1, pred.size(-1))
target = target.reshape(-1)
if seq_len is not None:
mask = seq_len_to_mask(seq_len).view(-1).eq(0)
mask = seq_len_to_mask(seq_len).reshape(-1).eq(0)
target = target.masked_fill(mask, self.padding_idx)

return F.cross_entropy(input=pred, target=target,
ignore_index=self.padding_idx)
ignore_index=self.padding_idx, reduction=self.reduction)


class L1Loss(LossBase):
@@ -258,15 +245,18 @@ class L1Loss(LossBase):
:param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred`
:param target: 参数映射表中 `target` 的映射关系,None表示映射关系为 `target` >`target`
:param str reduction: 支持'mean','sum'和'none'.
"""
def __init__(self, pred=None, target=None):
def __init__(self, pred=None, target=None, reduction='mean'):
super(L1Loss, self).__init__()
self._init_param_map(pred=pred, target=target)
assert reduction in ('mean', 'sum', 'none')
self.reduction = reduction
def get_loss(self, pred, target):
return F.l1_loss(input=pred, target=target)
return F.l1_loss(input=pred, target=target, reduction=self.reduction)


class BCELoss(LossBase):
@@ -277,14 +267,17 @@ class BCELoss(LossBase):
:param pred: 参数映射表中`pred`的映射关系,None表示映射关系为`pred`->`pred`
:param target: 参数映射表中`target`的映射关系,None表示映射关系为`target`->`target`
:param str reduction: 支持'mean','sum'和'none'.
"""
def __init__(self, pred=None, target=None):
def __init__(self, pred=None, target=None, reduction='mean'):
super(BCELoss, self).__init__()
self._init_param_map(pred=pred, target=target)
assert reduction in ('mean', 'sum', 'none')
self.reduction = reduction
def get_loss(self, pred, target):
return F.binary_cross_entropy(input=pred, target=target)
return F.binary_cross_entropy(input=pred, target=target, reduction=self.reduction)


class NLLLoss(LossBase):
@@ -295,14 +288,20 @@ class NLLLoss(LossBase):
:param pred: 参数映射表中`pred`的映射关系,None表示映射关系为`pred`->`pred`
:param target: 参数映射表中`target`的映射关系,None表示映射关系为`target`->`target`
:param ignore_idx: ignore的index,在计算loss时将忽略target中标号为ignore_idx的内容, 可以通过该值代替
传入seq_len.
:param str reduction: 支持'mean','sum'和'none'.
"""
def __init__(self, pred=None, target=None):
def __init__(self, pred=None, target=None, ignore_idx=-100, reduction='mean'):
super(NLLLoss, self).__init__()
self._init_param_map(pred=pred, target=target)
assert reduction in ('mean', 'sum', 'none')
self.reduction = reduction
self.ignore_idx = ignore_idx
def get_loss(self, pred, target):
return F.nll_loss(input=pred, target=target)
return F.nll_loss(input=pred, target=target, ignore_index=self.ignore_idx, reduction=self.reduction)


class LossInForward(LossBase):
@@ -314,7 +313,7 @@ class LossInForward(LossBase):
:param str loss_key: 在forward函数中loss的键名,默认为loss
"""
def __init__(self, loss_key='loss'):
def __init__(self, loss_key=Const.LOSS):
super().__init__()
if not isinstance(loss_key, str):
raise TypeError(f"Only str allowed for loss_key, got {type(loss_key)}.")
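
A small sketch of the new `reduction` argument; `get_loss` is called directly here just to make the effect visible, whereas in training the loss object is passed to the Trainer as before:

```python
import torch
from fastNLP.core.losses import CrossEntropyLoss

pred = torch.randn(4, 5)                 # (batch, num_classes)
target = torch.tensor([0, 1, 2, 3])      # (batch,)

loss_mean = CrossEntropyLoss(pred='pred', target='target')                  # default: 'mean'
loss_sum = CrossEntropyLoss(pred='pred', target='target', reduction='sum')

print(loss_mean.get_loss(pred, target))  # scalar, averaged over the batch
print(loss_sum.get_loss(pred, target))   # scalar, summed over the batch
```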


+110 -0 fastNLP/core/optimizer.py

@@ -9,6 +9,9 @@ __all__ = [
]

import torch
import math
import torch
from torch.optim.optimizer import Optimizer as TorchOptimizer


class Optimizer(object):
@@ -97,3 +100,110 @@ class Adam(Optimizer):
return torch.optim.Adam(self._get_require_grads_param(model_params), **self.settings)
else:
return torch.optim.Adam(self._get_require_grads_param(self.model_params), **self.settings)


class AdamW(TorchOptimizer):
r"""对AdamW的实现,该实现应该会在pytorch更高版本中出现,https://github.com/pytorch/pytorch/pull/21250。这里提前加入
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""

def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)

def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)

def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()

for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue

# Perform stepweight decay
p.data.mul_(1 - group['lr'] * group['weight_decay'])

# Perform optimization step
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']

state = self.state[p]

# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)

exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']

state['step'] += 1

# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])

bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

p.data.addcdiv_(-step_size, exp_avg, denom)

return loss
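
A minimal usage sketch of the backported `AdamW`; the decoupled weight decay is the `p.data.mul_(1 - lr * weight_decay)` step above, applied before the Adam update:

```python
import torch
import torch.nn as nn
from fastNLP.core.optimizer import AdamW

model = nn.Linear(10, 2)
optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)

x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
loss = nn.functional.cross_entropy(model(x), y)
loss.backward()
optimizer.step()        # Adam update with decoupled weight decay
optimizer.zero_grad()
```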

+12 -10 fastNLP/core/predictor.py

@@ -14,12 +14,12 @@ from .utils import _build_args, _move_dict_value_to_device, _get_model_device

class Predictor(object):
"""
An interface for predicting outputs based on trained models.
一个根据训练模型预测输出的预测器(Predictor)

It does not care about evaluations of the model, which is different from Tester.
This is a high-level model wrapper to be called by FastNLP.
This class does not share any operations with Trainer and Tester.
Currently, Predictor does not support GPU.
与测试器(Tester)不同的是,predictor不关心模型性能的评价指标,只做inference。
这是一个fastNLP调用的高级模型包装器。它与Trainer、Tester不共享任何操作。
:param torch.nn.Module network: 用来完成预测任务的模型
"""

def __init__(self, network):
@@ -30,18 +30,19 @@ class Predictor(object):
self.batch_size = 1
self.batch_output = []

def predict(self, data, seq_len_field_name=None):
"""Perform inference using the trained model.
def predict(self, data: DataSet, seq_len_field_name=None):
"""用已经训练好的模型进行inference.

:param data: a DataSet object.
:param str seq_len_field_name: field name indicating sequence lengths
:return: list of batch outputs
:param fastNLP.DataSet data: 待预测的数据集
:param str seq_len_field_name: 表示序列长度信息的field名字
:return: dict dict里面的内容为模型预测的结果
"""
if not isinstance(data, DataSet):
raise ValueError("Only Dataset class is allowed, not {}.".format(type(data)))
if seq_len_field_name is not None and seq_len_field_name not in data.field_arrays:
raise ValueError("Field name {} not found in DataSet {}.".format(seq_len_field_name, data))

prev_training = self.network.training
self.network.eval()
network_device = _get_model_device(self.network)
batch_output = defaultdict(list)
@@ -74,4 +75,5 @@ class Predictor(object):
else:
batch_output[key].append(value)

self.network.train(prev_training)
return batch_output
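
A minimal sketch of how the Predictor is used; `ToyModel` and the `words` field are made up for illustration:

```python
import torch.nn as nn
from fastNLP import DataSet
from fastNLP.core.predictor import Predictor


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(10, 4)
        self.fc = nn.Linear(4, 2)

    def forward(self, words):
        # fields marked as input arrive as keyword arguments with the same names
        return {'pred': self.fc(self.embed(words).mean(dim=1))}


ds = DataSet({'words': [[1, 2, 3], [4, 5, 6]]})
ds.set_input('words')

predictor = Predictor(ToyModel())
result = predictor.predict(ds)   # dict of per-batch outputs, e.g. {'pred': [...]};
                                 # the model's train/eval mode is restored afterwards (change above)
```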

+6 -5 fastNLP/core/tester.py

@@ -32,8 +32,6 @@ Tester在验证进行之前会调用model.eval()提示当前进入了evaluation


"""
import warnings

import torch
import torch.nn as nn

@@ -48,7 +46,8 @@ from .utils import _move_dict_value_to_device
from .utils import _get_func_signature
from .utils import _get_model_device
from .utils import _move_model_to_device
from .utils import _data_parallel_wrapper
from ._parallel_utils import _data_parallel_wrapper
from functools import partial

__all__ = [
"Tester"
@@ -111,8 +110,10 @@ class Tester(object):
(isinstance(self._model, nn.DataParallel) and hasattr(self._model.module, 'predict') and
callable(self._model.module.predict)):
if isinstance(self._model, nn.DataParallel):
self._predict_func_wrapper = _data_parallel_wrapper(self._model.module.predict, self._model.device_ids,
self._model.output_device)
self._predict_func_wrapper = partial(_data_parallel_wrapper('predict',
self._model.device_ids,
self._model.output_device),
network=self._model.module)
self._predict_func = self._model.module.predict
else:
self._predict_func = self._model.predict


+2 -2 fastNLP/core/trainer.py

@@ -454,7 +454,7 @@ class Trainer(object):

if check_code_level > -1 and isinstance(self.data_iterator, DataSetIter):
_check_code(dataset=train_data, model=model, losser=losser, metrics=metrics, dev_data=dev_data,
metric_key=metric_key, check_level=check_code_level,
metric_key=self.metric_key, check_level=check_code_level,
batch_size=min(batch_size, DEFAULT_CHECK_BATCH_SIZE))
# _check_code 是 fastNLP 帮助你检查代码是否正确的方法 。如果你在错误栈中看到这行注释,请认真检查你的代码
self.model = _move_model_to_device(model, device=device)
@@ -473,7 +473,7 @@ class Trainer(object):
self.best_dev_step = None
self.best_dev_perf = None
self.n_steps = (len(self.train_data) // self.batch_size + int(
len(self.train_data) % self.batch_size != 0)) * self.n_epochs
len(self.train_data) % self.batch_size != 0)) * int(drop_last==0) * self.n_epochs

if isinstance(optimizer, torch.optim.Optimizer):
self.optimizer = optimizer


+75 -23 fastNLP/core/utils.py

@@ -16,9 +16,7 @@ from collections import Counter, namedtuple
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from typing import List

_CheckRes = namedtuple('_CheckRes', ['missing', 'unused', 'duplicated', 'required', 'all_needed',
'varargs'])
@@ -165,6 +163,30 @@ def cache_results(_cache_fp, _refresh=False, _verbose=1):
return wrapper_

def _save_model(model, model_name, save_dir, only_param=False):
""" 存储不含有显卡信息的state_dict或model
:param model:
:param model_name:
:param save_dir: 保存的directory
:param only_param:
:return:
"""
model_path = os.path.join(save_dir, model_name)
if not os.path.isdir(save_dir):
os.makedirs(save_dir, exist_ok=True)
if isinstance(model, nn.DataParallel):
model = model.module
if only_param:
state_dict = model.state_dict()
for key in state_dict:
state_dict[key] = state_dict[key].cpu()
torch.save(state_dict, model_path)
else:
_model_device = _get_model_device(model)
model.cpu()
torch.save(model, model_path)
model.to(_model_device)


# def save_pickle(obj, pickle_path, file_name):
# """Save an object into a pickle file.
@@ -279,25 +301,6 @@ def _move_model_to_device(model, device):
model = model.to(device)
return model

def _data_parallel_wrapper(func, device_ids, output_device):
"""
这个函数是用于对需要多卡执行的函数的wrapper函数。参考的nn.DataParallel的forward函数

:param func: callable
:param device_ids: nn.DataParallel中的device_ids
:param inputs:
:param kwargs:
:return:
"""
def wrapper(*inputs, **kwargs):
inputs, kwargs = scatter_kwargs(inputs, kwargs, device_ids, dim=0)
if len(device_ids) == 1:
return func(*inputs[0], **kwargs[0])
replicas = replicate(func, device_ids[:len(inputs)])
outputs = parallel_apply(replicas, inputs, kwargs)
return gather(outputs, output_device)
return wrapper


def _get_model_device(model):
"""
@@ -306,7 +309,7 @@ def _get_model_device(model):
:param model: nn.Module
:return: torch.device,None 如果返回值为None,说明这个模型没有任何参数。
"""
# TODO 这个函数存在一定的风险,因为同一个模型可能存在某些parameter不在显卡中,比如BertEmbedding
# TODO 这个函数存在一定的风险,因为同一个模型可能存在某些parameter不在显卡中,比如BertEmbedding. 或者跨显卡
assert isinstance(model, nn.Module)
parameters = list(model.parameters())
@@ -733,3 +736,52 @@ class _pseudo_tqdm:
def __exit__(self, exc_type, exc_val, exc_tb):
del self

def iob2(tags:List[str])->List[str]:
"""
检查数据是否是合法的IOB数据,如果是IOB1会被自动转换为IOB2。两者的差异见
https://datascience.stackexchange.com/questions/37824/difference-between-iob-and-iob2-format

:param tags: 需要转换的tags, 需要为大写的BIO标签。
"""
for i, tag in enumerate(tags):
if tag == "O":
continue
split = tag.split("-")
if len(split) != 2 or split[0] not in ["I", "B"]:
raise TypeError("The encoding schema is not a valid IOB type.")
if split[0] == "B":
continue
elif i == 0 or tags[i - 1] == "O": # conversion IOB1 to IOB2
tags[i] = "B" + tag[1:]
elif tags[i - 1][1:] == tag[1:]:
continue
else: # conversion IOB1 to IOB2
tags[i] = "B" + tag[1:]
return tags

def iob2bioes(tags:List[str])->List[str]:
"""
将iob的tag转换为bioes编码
:param tags: List[str]. 编码需要是大写的。
:return:
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
else:
split = tag.split('-')[0]
if split == 'B':
if i+1!=len(tags) and tags[i+1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('B-', 'S-'))
elif split == 'I':
if i + 1<len(tags) and tags[i+1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('I-', 'E-'))
else:
raise TypeError("Invalid IOB format.")
return new_tags
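
A worked example of the two tag-conversion helpers added here (note that `iob2` rewrites the list in place and returns it):

```python
from fastNLP.core.utils import iob2, iob2bioes

# IOB1 allows an entity to start with I-; iob2() rewrites such starts to B-.
print(iob2(['I-ORG', 'I-ORG', 'O', 'I-PER']))
# -> ['B-ORG', 'I-ORG', 'O', 'B-PER']

# BIOES additionally marks entity ends (E-) and single-token entities (S-).
print(iob2bioes(['B-ORG', 'I-ORG', 'O', 'B-PER']))
# -> ['B-ORG', 'E-ORG', 'O', 'S-PER']
```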

+9 -4 fastNLP/core/vocabulary.py

@@ -117,6 +117,8 @@ class Vocabulary(object):

:param str word: 新词
"""
if word in self._no_create_word:
self._no_create_word.pop(word)
self.add(word)
@_check_build_status
@@ -126,6 +128,9 @@ class Vocabulary(object):

:param list[str] word_lst: 词的序列
"""
for word in word_lst:
if word in self._no_create_word:
self._no_create_word.pop(word)
self.update(word_lst)
def build_vocab(self):
@@ -136,10 +141,10 @@ class Vocabulary(object):
"""
if self.word2idx is None:
self.word2idx = {}
if self.padding is not None:
self.word2idx[self.padding] = len(self.word2idx)
if self.unknown is not None:
self.word2idx[self.unknown] = len(self.word2idx)
if self.padding is not None:
self.word2idx[self.padding] = len(self.word2idx)
if self.unknown is not None:
self.word2idx[self.unknown] = len(self.word2idx)
max_size = min(self.max_size, len(self.word_count)) if self.max_size else None
words = self.word_count.most_common(max_size)


+19 -5 fastNLP/io/__init__.py

@@ -11,21 +11,35 @@
"""
__all__ = [
'EmbedLoader',

'DataInfo',
'DataSetLoader',

'CSVLoader',
'JsonLoader',
'ConllLoader',
'SNLILoader',
'SSTLoader',
'PeopleDailyCorpusLoader',
'Conll2003Loader',
'ModelLoader',
'ModelSaver',

'SSTLoader',

'MatchingLoader',
'SNLILoader',
'MNLILoader',
'QNLILoader',
'QuoraLoader',
'RTELoader',
]

from .embed_loader import EmbedLoader
from .dataset_loader import DataSetLoader, CSVLoader, JsonLoader, ConllLoader, \
SNLILoader, SSTLoader, PeopleDailyCorpusLoader, Conll2003Loader
from .base_loader import DataInfo, DataSetLoader
from .dataset_loader import CSVLoader, JsonLoader, ConllLoader, \
PeopleDailyCorpusLoader, Conll2003Loader
from .model_io import ModelLoader, ModelSaver

from .data_loader.sst import SSTLoader
from .data_loader.matching import MatchingLoader, SNLILoader, \
MNLILoader, QNLILoader, QuoraLoader, RTELoader

+3 -6 fastNLP/io/base_loader.py

@@ -10,6 +10,7 @@ from typing import Union, Dict
import os
from ..core.dataset import DataSet


class BaseLoader(object):
"""
各个 Loader 的基类,提供了 API 的参考。
@@ -55,8 +56,6 @@ class BaseLoader(object):
return obj




def _download_from_url(url, path):
try:
from tqdm.auto import tqdm
@@ -115,13 +114,11 @@ class DataInfo:
经过处理的数据信息,包括一系列数据集(比如:分开的训练集、验证集和测试集)及它们所用的词表和词嵌入。

:param vocabs: 从名称(字符串)到 :class:`~fastNLP.Vocabulary` 类型的dict
:param embeddings: 从名称(字符串)到一系列 embedding 的dict,参考 :class:`~fastNLP.io.EmbedLoader`
:param datasets: 从名称(字符串)到 :class:`~fastNLP.DataSet` 类型的dict
"""

def __init__(self, vocabs: dict = None, embeddings: dict = None, datasets: dict = None):
def __init__(self, vocabs: dict = None, datasets: dict = None):
self.vocabs = vocabs or {}
self.embeddings = embeddings or {}
self.datasets = datasets or {}

def __repr__(self):
@@ -133,6 +130,7 @@ class DataInfo:
_str += '\t{} has {} entries.\n'.format(name, len(vocab))
return _str


class DataSetLoader:
"""
别名::class:`fastNLP.io.DataSetLoader` :class:`fastNLP.io.dataset_loader.DataSetLoader`
@@ -213,7 +211,6 @@ class DataSetLoader:
返回的 :class:`DataInfo` 对象有如下属性:

- vocabs: 由从数据集中获取的词表组成的字典,每个词表
- embeddings: (可选) 数据集对应的词嵌入
- datasets: 一个dict,包含一系列 :class:`~fastNLP.DataSet` 类型的对象。其中 field 的命名参考 :mod:`~fastNLP.core.const`

:param paths: 原始数据读取的路径


+19 -0 fastNLP/io/data_loader/__init__.py

@@ -0,0 +1,19 @@
"""
用于读数据集的模块, 具体包括:

这些模块的使用方法如下:
"""
__all__ = [
'SSTLoader',

'MatchingLoader',
'SNLILoader',
'MNLILoader',
'QNLILoader',
'QuoraLoader',
'RTELoader',
]

from .sst import SSTLoader
from .matching import MatchingLoader, SNLILoader, \
MNLILoader, QNLILoader, QuoraLoader, RTELoader

+430 -0 fastNLP/io/data_loader/matching.py

@@ -0,0 +1,430 @@
import os

from typing import Union, Dict

from ...core.const import Const
from ...core.vocabulary import Vocabulary
from ..base_loader import DataInfo, DataSetLoader
from ..dataset_loader import JsonLoader, CSVLoader
from ..file_utils import _get_base_url, cached_path, PRETRAINED_BERT_MODEL_DIR
from ...modules.encoder._bert import BertTokenizer


class MatchingLoader(DataSetLoader):
"""
别名::class:`fastNLP.io.MatchingLoader` :class:`fastNLP.io.dataset_loader.MatchingLoader`

读取Matching任务的数据集

:param dict paths: key是数据集名称(如train、dev、test),value是对应的文件名
"""

def __init__(self, paths: dict=None):
self.paths = paths

def _load(self, path):
"""
:param str path: 待读取数据集的路径名
:return: fastNLP.DataSet ds: 返回一个DataSet对象,里面必须包含3个field:其中两个分别为两个句子
的原始字符串文本,第三个为标签
"""
raise NotImplementedError

def process(self, paths: Union[str, Dict[str, str]], dataset_name: str=None,
to_lower=False, seq_len_type: str=None, bert_tokenizer: str=None,
cut_text: int = None, get_index=True, auto_pad_length: int=None,
auto_pad_token: str='<pad>', set_input: Union[list, str, bool]=True,
set_target: Union[list, str, bool] = True, concat: Union[str, list, bool]=None, ) -> DataInfo:
"""
:param paths: str或者Dict[str, str]。如果是str,则为数据集所在的文件夹或者是全路径文件名:如果是文件夹,
则会从self.paths里面找对应的数据集名称与文件名。如果是Dict,则为数据集名称(如train、dev、test)和
对应的全路径文件名。
:param str dataset_name: 如果在paths里传入的是一个数据集的全路径文件名,那么可以用dataset_name来定义
这个数据集的名字,如果不定义则默认为train。
:param bool to_lower: 是否将文本自动转为小写。默认值为False。
:param str seq_len_type: 提供的seq_len类型,支持 ``seq_len`` :提供一个数字作为句子长度; ``mask`` :
提供一个0/1的mask矩阵作为句子长度; ``bert`` :提供segment_type_id(第一个句子为0,第二个句子为1)和
attention mask矩阵(0/1的mask矩阵)。默认值为None,即不提供seq_len
:param str bert_tokenizer: bert tokenizer所使用的词表所在的文件夹路径
:param int cut_text: 将长于cut_text的内容截掉。默认为None,即不截。
:param bool get_index: 是否需要根据词表将文本转为index
:param int auto_pad_length: 是否需要将文本自动pad到一定长度(超过这个长度的文本将会被截掉),默认为不会自动pad
:param str auto_pad_token: 自动pad的内容
:param set_input: 如果为True,则会自动将相关的field(名字里含有Const.INPUT的)设置为input,如果为False
则不会将任何field设置为input。如果传入str或者List[str],则会根据传入的内容将相对应的field设置为input,
于此同时其他field不会被设置为input。默认值为True。
:param set_target: set_target将控制哪些field可以被设置为target,用法与set_input一致。默认值为True。
:param concat: 是否需要将两个句子拼接起来。如果为False则不会拼接。如果为True则会在两个句子之间插入一个<sep>。
如果传入一个长度为4的list,则分别表示插在第一句开始前、第一句结束后、第二句开始前、第二句结束后的标识符。如果
传入字符串 ``bert`` ,则会采用bert的拼接方式,等价于['[CLS]', '[SEP]', '', '[SEP]'].
:return:
"""
if isinstance(set_input, str):
set_input = [set_input]
if isinstance(set_target, str):
set_target = [set_target]
if isinstance(set_input, bool):
auto_set_input = set_input
else:
auto_set_input = False
if isinstance(set_target, bool):
auto_set_target = set_target
else:
auto_set_target = False
if isinstance(paths, str):
if os.path.isdir(paths):
path = {n: os.path.join(paths, self.paths[n]) for n in self.paths.keys()}
else:
path = {dataset_name if dataset_name is not None else 'train': paths}
else:
path = paths

data_info = DataInfo()
for data_name in path.keys():
data_info.datasets[data_name] = self._load(path[data_name])

for data_name, data_set in data_info.datasets.items():
if auto_set_input:
data_set.set_input(Const.INPUTS(0), Const.INPUTS(1))
if auto_set_target:
if Const.TARGET in data_set.get_field_names():
data_set.set_target(Const.TARGET)

if to_lower:
for data_name, data_set in data_info.datasets.items():
data_set.apply(lambda x: [w.lower() for w in x[Const.INPUTS(0)]], new_field_name=Const.INPUTS(0),
is_input=auto_set_input)
data_set.apply(lambda x: [w.lower() for w in x[Const.INPUTS(1)]], new_field_name=Const.INPUTS(1),
is_input=auto_set_input)

if bert_tokenizer is not None:
if bert_tokenizer.lower() in PRETRAINED_BERT_MODEL_DIR:
PRETRAIN_URL = _get_base_url('bert')
model_name = PRETRAINED_BERT_MODEL_DIR[bert_tokenizer]
model_url = PRETRAIN_URL + model_name
model_dir = cached_path(model_url)
# 检查是否存在
elif os.path.isdir(bert_tokenizer):
model_dir = bert_tokenizer
else:
raise ValueError(f"Cannot recognize BERT tokenizer from {bert_tokenizer}.")

words_vocab = Vocabulary(padding='[PAD]', unknown='[UNK]')
with open(os.path.join(model_dir, 'vocab.txt'), 'r') as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
words_vocab.add_word_lst(lines)
words_vocab.build_vocab()

tokenizer = BertTokenizer.from_pretrained(model_dir)

for data_name, data_set in data_info.datasets.items():
for fields in data_set.get_field_names():
if Const.INPUT in fields:
data_set.apply(lambda x: tokenizer.tokenize(' '.join(x[fields])), new_field_name=fields,
is_input=auto_set_input)

if isinstance(concat, bool):
concat = 'default' if concat else None
if concat is not None:
if isinstance(concat, str):
CONCAT_MAP = {'bert': ['[CLS]', '[SEP]', '', '[SEP]'],
'default': ['', '<sep>', '', '']}
if concat.lower() in CONCAT_MAP:
concat = CONCAT_MAP[concat]
else:
concat = 4 * [concat]
assert len(concat) == 4, \
f'Please choose a list with 4 symbols which at the beginning of first sentence ' \
f'the end of first sentence, the begin of second sentence, and the end of second' \
f'sentence. Your input is {concat}'

for data_name, data_set in data_info.datasets.items():
data_set.apply(lambda x: [concat[0]] + x[Const.INPUTS(0)] + [concat[1]] + [concat[2]] +
x[Const.INPUTS(1)] + [concat[3]], new_field_name=Const.INPUT)
data_set.apply(lambda x: [w for w in x[Const.INPUT] if len(w) > 0], new_field_name=Const.INPUT,
is_input=auto_set_input)

if seq_len_type is not None:
if seq_len_type == 'seq_len': #
for data_name, data_set in data_info.datasets.items():
for fields in data_set.get_field_names():
if Const.INPUT in fields:
data_set.apply(lambda x: len(x[fields]),
new_field_name=fields.replace(Const.INPUT, Const.INPUT_LEN),
is_input=auto_set_input)
elif seq_len_type == 'mask':
for data_name, data_set in data_info.datasets.items():
for fields in data_set.get_field_names():
if Const.INPUT in fields:
data_set.apply(lambda x: [1] * len(x[fields]),
new_field_name=fields.replace(Const.INPUT, Const.INPUT_LEN),
is_input=auto_set_input)
elif seq_len_type == 'bert':
for data_name, data_set in data_info.datasets.items():
if Const.INPUT not in data_set.get_field_names():
raise KeyError(f'Field ``{Const.INPUT}`` not in {data_name} data set: '
f'got {data_set.get_field_names()}')
data_set.apply(lambda x: [0] * (len(x[Const.INPUTS(0)]) + 2) + [1] * (len(x[Const.INPUTS(1)]) + 1),
new_field_name=Const.INPUT_LENS(0), is_input=auto_set_input)
data_set.apply(lambda x: [1] * len(x[Const.INPUT_LENS(0)]),
new_field_name=Const.INPUT_LENS(1), is_input=auto_set_input)

if auto_pad_length is not None:
cut_text = min(auto_pad_length, cut_text if cut_text is not None else auto_pad_length)

if cut_text is not None:
for data_name, data_set in data_info.datasets.items():
for fields in data_set.get_field_names():
if (Const.INPUT in fields) or ((Const.INPUT_LEN in fields) and (seq_len_type != 'seq_len')):
data_set.apply(lambda x: x[fields][: cut_text], new_field_name=fields,
is_input=auto_set_input)

data_set_list = [d for n, d in data_info.datasets.items()]
assert len(data_set_list) > 0, f'There are NO data sets in data info!'

if bert_tokenizer is None:
words_vocab = Vocabulary(padding=auto_pad_token)
words_vocab = words_vocab.from_dataset(*[d for n, d in data_info.datasets.items() if 'train' in n],
field_name=[n for n in data_set_list[0].get_field_names()
if (Const.INPUT in n)],
no_create_entry_dataset=[d for n, d in data_info.datasets.items()
if 'train' not in n])
target_vocab = Vocabulary(padding=None, unknown=None)
target_vocab = target_vocab.from_dataset(*[d for n, d in data_info.datasets.items() if 'train' in n],
field_name=Const.TARGET)
data_info.vocabs = {Const.INPUT: words_vocab, Const.TARGET: target_vocab}

if get_index:
for data_name, data_set in data_info.datasets.items():
for fields in data_set.get_field_names():
if Const.INPUT in fields:
data_set.apply(lambda x: [words_vocab.to_index(w) for w in x[fields]], new_field_name=fields,
is_input=auto_set_input)

if Const.TARGET in data_set.get_field_names():
data_set.apply(lambda x: target_vocab.to_index(x[Const.TARGET]), new_field_name=Const.TARGET,
is_input=auto_set_input, is_target=auto_set_target)

if auto_pad_length is not None:
if seq_len_type == 'seq_len':
raise RuntimeError(f'the sequence will be padded with the length {auto_pad_length}, '
f'so the seq_len_type cannot be `{seq_len_type}`!')
for data_name, data_set in data_info.datasets.items():
for fields in data_set.get_field_names():
if Const.INPUT in fields:
data_set.apply(lambda x: x[fields] + [words_vocab.to_index(words_vocab.padding)] *
(auto_pad_length - len(x[fields])), new_field_name=fields,
is_input=auto_set_input)
elif (Const.INPUT_LEN in fields) and (seq_len_type != 'seq_len'):
data_set.apply(lambda x: x[fields] + [0] * (auto_pad_length - len(x[fields])),
new_field_name=fields, is_input=auto_set_input)

for data_name, data_set in data_info.datasets.items():
if isinstance(set_input, list):
data_set.set_input(*[inputs for inputs in set_input if inputs in data_set.get_field_names()])
if isinstance(set_target, list):
data_set.set_target(*[target for target in set_target if target in data_set.get_field_names()])

return data_info


class SNLILoader(MatchingLoader, JsonLoader):
"""
别名::class:`fastNLP.io.SNLILoader` :class:`fastNLP.io.dataset_loader.SNLILoader`

读取SNLI数据集,读取的DataSet包含fields::

words1: list(str),第一句文本, premise
words2: list(str), 第二句文本, hypothesis
target: str, 真实标签

数据来源: https://nlp.stanford.edu/projects/snli/snli_1.0.zip
"""

def __init__(self, paths: dict=None):
fields = {
'sentence1_binary_parse': Const.INPUTS(0),
'sentence2_binary_parse': Const.INPUTS(1),
'gold_label': Const.TARGET,
}
paths = paths if paths is not None else {
'train': 'snli_1.0_train.jsonl',
'dev': 'snli_1.0_dev.jsonl',
'test': 'snli_1.0_test.jsonl'}
MatchingLoader.__init__(self, paths=paths)
JsonLoader.__init__(self, fields=fields)

def _load(self, path):
ds = JsonLoader._load(self, path)

parentheses_table = str.maketrans({'(': None, ')': None})

ds.apply(lambda ins: ins[Const.INPUTS(0)].translate(parentheses_table).strip().split(),
new_field_name=Const.INPUTS(0))
ds.apply(lambda ins: ins[Const.INPUTS(1)].translate(parentheses_table).strip().split(),
new_field_name=Const.INPUTS(1))
ds.drop(lambda x: x[Const.TARGET] == '-')
return ds


class RTELoader(MatchingLoader, CSVLoader):
"""
别名::class:`fastNLP.io.RTELoader` :class:`fastNLP.io.dataset_loader.RTELoader`

读取RTE数据集,读取的DataSet包含fields::

words1: list(str),第一句文本, premise
words2: list(str), 第二句文本, hypothesis
target: str, 真实标签

数据来源:
"""

def __init__(self, paths: dict=None):
paths = paths if paths is not None else {
'train': 'train.tsv',
'dev': 'dev.tsv',
'test': 'test.tsv' # test set has no label
}
MatchingLoader.__init__(self, paths=paths)
self.fields = {
'sentence1': Const.INPUTS(0),
'sentence2': Const.INPUTS(1),
'label': Const.TARGET,
}
CSVLoader.__init__(self, sep='\t')

def _load(self, path):
ds = CSVLoader._load(self, path)

for k, v in self.fields.items():
if v in ds.get_field_names():
ds.rename_field(k, v)
for fields in ds.get_all_fields():
if Const.INPUT in fields:
ds.apply(lambda x: x[fields].strip().split(), new_field_name=fields)

return ds


class QNLILoader(MatchingLoader, CSVLoader):
"""
别名::class:`fastNLP.io.QNLILoader` :class:`fastNLP.io.dataset_loader.QNLILoader`

读取QNLI数据集,读取的DataSet包含fields::

words1: list(str),第一句文本, premise
words2: list(str), 第二句文本, hypothesis
target: str, 真实标签

数据来源:
"""

def __init__(self, paths: dict=None):
paths = paths if paths is not None else {
'train': 'train.tsv',
'dev': 'dev.tsv',
'test': 'test.tsv' # test set has no label
}
MatchingLoader.__init__(self, paths=paths)
self.fields = {
'question': Const.INPUTS(0),
'sentence': Const.INPUTS(1),
'label': Const.TARGET,
}
CSVLoader.__init__(self, sep='\t')

def _load(self, path):
ds = CSVLoader._load(self, path)

for k, v in self.fields.items():
if v in ds.get_field_names():
ds.rename_field(k, v)
for fields in ds.get_all_fields():
if Const.INPUT in fields:
ds.apply(lambda x: x[fields].strip().split(), new_field_name=fields)

return ds


class MNLILoader(MatchingLoader, CSVLoader):
"""
别名::class:`fastNLP.io.MNLILoader` :class:`fastNLP.io.dataset_loader.MNLILoader`

读取MNLI数据集,读取的DataSet包含fields::

words1: list(str),第一句文本, premise
words2: list(str), 第二句文本, hypothesis
target: str, 真实标签

数据来源:
"""

def __init__(self, paths: dict=None):
paths = paths if paths is not None else {
'train': 'train.tsv',
'dev_matched': 'dev_matched.tsv',
'dev_mismatched': 'dev_mismatched.tsv',
'test_matched': 'test_matched.tsv',
'test_mismatched': 'test_mismatched.tsv',
# 'test_0.9_matched': 'multinli_0.9_test_matched_unlabeled.txt',
# 'test_0.9_mismatched': 'multinli_0.9_test_mismatched_unlabeled.txt',

# test_0.9_mathed与mismatched是MNLI0.9版本的(数据来源:kaggle)
}
MatchingLoader.__init__(self, paths=paths)
CSVLoader.__init__(self, sep='\t')
self.fields = {
'sentence1_binary_parse': Const.INPUTS(0),
'sentence2_binary_parse': Const.INPUTS(1),
'gold_label': Const.TARGET,
}

def _load(self, path):
ds = CSVLoader._load(self, path)

for k, v in self.fields.items():
if k in ds.get_field_names():
ds.rename_field(k, v)

if Const.TARGET in ds.get_field_names():
if ds[0][Const.TARGET] == 'hidden':
ds.delete_field(Const.TARGET)

parentheses_table = str.maketrans({'(': None, ')': None})

ds.apply(lambda ins: ins[Const.INPUTS(0)].translate(parentheses_table).strip().split(),
new_field_name=Const.INPUTS(0))
ds.apply(lambda ins: ins[Const.INPUTS(1)].translate(parentheses_table).strip().split(),
new_field_name=Const.INPUTS(1))
if Const.TARGET in ds.get_field_names():
ds.drop(lambda x: x[Const.TARGET] == '-')
return ds


class QuoraLoader(MatchingLoader, CSVLoader):
"""
别名::class:`fastNLP.io.QuoraLoader` :class:`fastNLP.io.dataset_loader.QuoraLoader`

读取Quora数据集,读取的DataSet包含fields::

words1: list(str),第一句文本, premise
words2: list(str), 第二句文本, hypothesis
target: str, 真实标签

数据来源:
"""

def __init__(self, paths: dict=None):
paths = paths if paths is not None else {
'train': 'train.tsv',
'dev': 'dev.tsv',
'test': 'test.tsv',
}
MatchingLoader.__init__(self, paths=paths)
CSVLoader.__init__(self, sep='\t', headers=(Const.TARGET, Const.INPUTS(0), Const.INPUTS(1), 'pairID'))

def _load(self, path):
ds = CSVLoader._load(self, path)
return ds
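
A usage sketch for the new matching loaders; the data directory is a placeholder, and the three file names are the `SNLILoader` defaults shown above:

```python
from fastNLP.core.const import Const
from fastNLP.io.data_loader.matching import SNLILoader

data_info = SNLILoader().process(
    paths='/path/to/snli_1.0/',   # placeholder folder holding snli_1.0_{train,dev,test}.jsonl
    to_lower=True,
    seq_len_type='seq_len',       # add a sequence-length field for each input sentence
    get_index=True)               # index tokens with the vocabulary built on the train set

train_set = data_info.datasets['train']
words_vocab = data_info.vocabs[Const.INPUT]   # shared vocabulary for words1/words2
```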

+24 -19 fastNLP/io/data_loader/sst.py

@@ -1,10 +1,11 @@
from typing import Iterable
from nltk import Tree
import spacy
from ..base_loader import DataInfo, DataSetLoader
from ...core.vocabulary import VocabularyOption, Vocabulary
from ...core.dataset import DataSet
from ...core.instance import Instance
from ..embed_loader import EmbeddingOption, EmbedLoader
from ..utils import check_dataloader_paths, get_tokenizer


class SSTLoader(DataSetLoader):
@@ -34,6 +35,7 @@ class SSTLoader(DataSetLoader):
tag_v['0'] = tag_v['1']
tag_v['4'] = tag_v['3']
self.tag_v = tag_v
self.tokenizer = get_tokenizer()

def _load(self, path):
"""
@@ -52,29 +54,37 @@ class SSTLoader(DataSetLoader):
ds.append(Instance(words=words, target=tag))
return ds

@staticmethod
def _get_one(data, subtree):
def _get_one(self, data, subtree):
tree = Tree.fromstring(data)
if subtree:
return [(t.leaves(), t.label()) for t in tree.subtrees()]
return [(tree.leaves(), tree.label())]
return [([x.text for x in self.tokenizer(' '.join(t.leaves()))], t.label()) for t in tree.subtrees() ]
return [([x.text for x in self.tokenizer(' '.join(tree.leaves()))], tree.label())]

def process(self,
paths,
train_ds: Iterable[str] = None,
paths, train_subtree=True,
src_vocab_op: VocabularyOption = None,
tgt_vocab_op: VocabularyOption = None,
src_embed_op: EmbeddingOption = None):
tgt_vocab_op: VocabularyOption = None,):
paths = check_dataloader_paths(paths)
input_name, target_name = 'words', 'target'
src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op)
tgt_vocab = Vocabulary(unknown=None, padding=None) \
if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op)

info = DataInfo(datasets=self.load(paths))
_train_ds = [info.datasets[name]
for name in train_ds] if train_ds else info.datasets.values()
src_vocab.from_dataset(*_train_ds, field_name=input_name)
tgt_vocab.from_dataset(*_train_ds, field_name=target_name)
info = DataInfo()
origin_subtree = self.subtree
self.subtree = train_subtree
info.datasets['train'] = self._load(paths['train'])
self.subtree = origin_subtree
for n, p in paths.items():
if n != 'train':
info.datasets[n] = self._load(p)

src_vocab.from_dataset(
info.datasets['train'],
field_name=input_name,
no_create_entry_dataset=[ds for n, ds in info.datasets.items() if n != 'train'])
tgt_vocab.from_dataset(info.datasets['train'], field_name=target_name)

src_vocab.index_dataset(
*info.datasets.values(),
field_name=input_name, new_field_name=input_name)
@@ -86,10 +96,5 @@ class SSTLoader(DataSetLoader):
target_name: tgt_vocab
}

if src_embed_op is not None:
src_embed_op.vocab = src_vocab
init_emb = EmbedLoader.load_with_vocab(**src_embed_op)
info.embeddings[input_name] = init_emb

return info


+1 -40 fastNLP/io/dataset_loader.py

@@ -16,8 +16,6 @@ __all__ = [
'CSVLoader',
'JsonLoader',
'ConllLoader',
'SNLILoader',
'SSTLoader',
'PeopleDailyCorpusLoader',
'Conll2003Loader',
]
@@ -30,7 +28,6 @@ from ..core.dataset import DataSet
from ..core.instance import Instance
from .file_reader import _read_csv, _read_json, _read_conll
from .base_loader import DataSetLoader, DataInfo
from .data_loader.sst import SSTLoader
from ..core.const import Const
from ..modules.encoder._bert import BertTokenizer

@@ -111,7 +108,7 @@ class PeopleDailyCorpusLoader(DataSetLoader):
else:
instance = Instance(words=sent_words)
data_set.append(instance)
data_set.apply(lambda ins: len(ins["words"]), new_field_name="seq_len")
data_set.apply(lambda ins: len(ins[Const.INPUT]), new_field_name=Const.INPUT_LEN)
return data_set


@@ -249,42 +246,6 @@ class JsonLoader(DataSetLoader):
return ds


class SNLILoader(JsonLoader):
"""
Alias: :class:`fastNLP.io.SNLILoader` :class:`fastNLP.io.dataset_loader.SNLILoader`

Reads the SNLI dataset. The loaded DataSet contains the fields::

words1: list(str), the first sentence (premise)
words2: list(str), the second sentence (hypothesis)
target: str, the gold label

Data source: https://nlp.stanford.edu/projects/snli/snli_1.0.zip
"""

def __init__(self):
fields = {
'sentence1_parse': Const.INPUTS(0),
'sentence2_parse': Const.INPUTS(1),
'gold_label': Const.TARGET,
}
super(SNLILoader, self).__init__(fields=fields)

def _load(self, path):
ds = super(SNLILoader, self)._load(path)

def parse_tree(x):
t = Tree.fromstring(x)
return t.leaves()

ds.apply(lambda ins: parse_tree(
ins[Const.INPUTS(0)]), new_field_name=Const.INPUTS(0))
ds.apply(lambda ins: parse_tree(
ins[Const.INPUTS(1)]), new_field_name=Const.INPUTS(1))
ds.drop(lambda x: x[Const.TARGET] == '-')
return ds


class CSVLoader(DataSetLoader):
"""
别名::class:`fastNLP.io.CSVLoader` :class:`fastNLP.io.dataset_loader.CSVLoader`


+ 2 - 2 fastNLP/io/file_reader.py

@@ -104,7 +104,7 @@ def _read_conll(path, encoding='utf-8', indexes=None, dropna=True):
except Exception as e:
if dropna:
continue
raise ValueError('invalid instance at line: {}'.format(line_idx))
raise ValueError('invalid instance ends at line: {}'.format(line_idx))
elif line.startswith('#'):
continue
else:
@@ -117,5 +117,5 @@ def _read_conll(path, encoding='utf-8', indexes=None, dropna=True):
except Exception as e:
if dropna:
return
print('invalid instance at line: {}'.format(line_idx))
print('invalid instance ends at line: {}'.format(line_idx))
raise e

+ 69 - 0 fastNLP/io/utils.py

@@ -0,0 +1,69 @@
import os

from typing import Union, Dict


def check_dataloader_paths(paths:Union[str, Dict[str, str]])->Dict[str, str]:
"""
Check that the file paths passed to a dataloader are valid. For valid input, a dict containing at least the key 'train' is returned, for example
{
    'train': '/some/path/to/', # always present; the vocabulary should be built on this split, the other files only need to be processed and indexed.
    'test': 'xxx' # may or may not be present
    ...
}
If paths is invalid, the corresponding error is raised directly.

:param paths: the paths. Either a single file path (treated as the train file); a directory, in which case files whose
    names contain 'train', 'dev' or 'test' are looked up; or a dict whose keys are user-defined split names and whose values are file paths.
:return:
"""
if isinstance(paths, str):
if os.path.isfile(paths):
return {'train': paths}
elif os.path.isdir(paths):
filenames = os.listdir(paths)
files = {}
for filename in filenames:
path_pair = None
if 'train' in filename:
path_pair = ('train', filename)
if 'dev' in filename:
if path_pair:
raise Exception("File:{} in {} contains bot `{}` and `dev`.".format(filename, paths, path_pair[0]))
path_pair = ('dev', filename)
if 'test' in filename:
if path_pair:
raise Exception("File:{} in {} contains bot `{}` and `test`.".format(filename, paths, path_pair[0]))
path_pair = ('test', filename)
if path_pair:
files[path_pair[0]] = os.path.join(paths, path_pair[1])
return files
else:
raise FileNotFoundError(f"{paths} is not a valid file path.")

elif isinstance(paths, dict):
if paths:
if 'train' not in paths:
raise KeyError("You have to include `train` in your dict.")
for key, value in paths.items():
if isinstance(key, str) and isinstance(value, str):
if not os.path.isfile(value):
raise TypeError(f"{value} is not a valid file.")
else:
raise TypeError("All keys and values in paths should be str.")
return paths
else:
raise ValueError("Empty paths is not allowed.")
else:
raise TypeError(f"paths only supports str and dict. not {type(paths)}.")

def get_tokenizer():
try:
import spacy
spacy.prefer_gpu()
en = spacy.load('en')
print('use spacy tokenizer')
return lambda x: [w.text for w in en.tokenizer(x)]
except Exception as e:
print('use raw tokenizer')
return lambda x: x.split()
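
A small sketch of how the two new helpers behave, with hypothetical paths: `check_dataloader_paths` normalizes whatever the caller passes into a dict keyed by split name, and `get_tokenizer` falls back to whitespace splitting when spaCy is not available::

    from fastNLP.io.utils import check_dataloader_paths, get_tokenizer

    check_dataloader_paths('/data/sst/train.txt')
    # -> {'train': '/data/sst/train.txt'}          (a single file is treated as the train split)

    check_dataloader_paths('/data/sst/')
    # -> {'train': ..., 'dev': ..., 'test': ...}   (directory scanned for filenames containing train/dev/test)

    check_dataloader_paths({'train': '/data/sst/train.txt', 'dev': '/data/sst/dev.txt'})
    # -> returned unchanged after validation ('train' must be present, every value must be an existing file)

    tokenize = get_tokenizer()                     # spaCy tokenizer if installed, otherwise str.split
    tokenize('A quick usage example.')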

+ 33 - 37 fastNLP/models/bert.py

@@ -8,35 +8,7 @@ from torch import nn
from .base_model import BaseModel
from ..core.const import Const
from ..modules.encoder import BertModel


class BertConfig:

def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
from ..modules.encoder._bert import BertConfig


class BertForSequenceClassification(BaseModel):
@@ -84,11 +56,17 @@ class BertForSequenceClassification(BaseModel):
self.bert = BertModel.from_pretrained(bert_dir)
else:
if config is None:
config = BertConfig()
self.bert = BertModel(**config.__dict__)
config = BertConfig(30522)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)

@classmethod
def from_pretrained(cls, num_labels, pretrained_model_dir):
config = BertConfig(pretrained_model_dir)
model = cls(num_labels=num_labels, config=config, bert_dir=pretrained_model_dir)
return model

def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
@@ -151,11 +129,17 @@ class BertForMultipleChoice(BaseModel):
self.bert = BertModel.from_pretrained(bert_dir)
else:
if config is None:
config = BertConfig()
self.bert = BertModel(**config.__dict__)
config = BertConfig(30522)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)

@classmethod
def from_pretrained(cls, num_choices, pretrained_model_dir):
config = BertConfig(pretrained_model_dir)
model = cls(num_choices=num_choices, config=config, bert_dir=pretrained_model_dir)
return model

def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
@@ -224,11 +208,17 @@ class BertForTokenClassification(BaseModel):
self.bert = BertModel.from_pretrained(bert_dir)
else:
if config is None:
config = BertConfig()
self.bert = BertModel(**config.__dict__)
config = BertConfig(30522)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)

@classmethod
def from_pretrained(cls, num_labels, pretrained_model_dir):
config = BertConfig(pretrained_model_dir)
model = cls(num_labels=num_labels, config=config, bert_dir=pretrained_model_dir)
return model

def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
@@ -302,12 +292,18 @@ class BertForQuestionAnswering(BaseModel):
self.bert = BertModel.from_pretrained(bert_dir)
else:
if config is None:
config = BertConfig()
self.bert = BertModel(**config.__dict__)
config = BertConfig(30522)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)

@classmethod
def from_pretrained(cls, pretrained_model_dir):
config = BertConfig(pretrained_model_dir)
model = cls(config=config, bert_dir=pretrained_model_dir)
return model

def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
logits = self.qa_outputs(sequence_output)
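
A hedged sketch of the new `from_pretrained` entry points added above; the directory is hypothetical and is expected to contain `bert_config.json` plus a single `*.bin` weight file::

    import torch
    from fastNLP.models.bert import BertForSequenceClassification

    model = BertForSequenceClassification.from_pretrained(
        num_labels=2, pretrained_model_dir='/path/to/bert-base-uncased')   # hypothetical directory

    input_ids = torch.randint(0, 100, (2, 16))     # toy batch: 2 sequences of 16 word pieces
    attention_mask = torch.ones_like(input_ids)
    output = model(input_ids, attention_mask=attention_mask)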


+ 3 - 3 fastNLP/models/star_transformer.py

@@ -46,7 +46,7 @@ class StarTransEnc(nn.Module):
super(StarTransEnc, self).__init__()
self.embedding = get_embeddings(init_embed)
emb_dim = self.embedding.embedding_dim
self.emb_fc = nn.Linear(emb_dim, hidden_size)
#self.emb_fc = nn.Linear(emb_dim, hidden_size)
self.emb_drop = nn.Dropout(emb_dropout)
self.encoder = StarTransformer(hidden_size=hidden_size,
num_layers=num_layers,
@@ -65,7 +65,7 @@ class StarTransEnc(nn.Module):
[batch, hidden] 全局 relay 节点, 详见论文
"""
x = self.embedding(x)
x = self.emb_fc(self.emb_drop(x))
#x = self.emb_fc(self.emb_drop(x))
nodes, relay = self.encoder(x, mask)
return nodes, relay

@@ -205,7 +205,7 @@ class STSeqCls(nn.Module):
max_len=max_len,
emb_dropout=emb_dropout,
dropout=dropout)
self.cls = _Cls(hidden_size, num_cls, cls_hidden_size)
self.cls = _Cls(hidden_size, num_cls, cls_hidden_size, dropout=dropout)
def forward(self, words, seq_len):
"""


+ 4 - 5 fastNLP/modules/aggregator/attention.py

@@ -19,7 +19,7 @@ class DotAttention(nn.Module):
TODO: fill in the documentation.
"""
def __init__(self, key_size, value_size, dropout=0):
def __init__(self, key_size, value_size, dropout=0.0):
super(DotAttention, self).__init__()
self.key_size = key_size
self.value_size = value_size
@@ -37,7 +37,7 @@ class DotAttention(nn.Module):
"""
output = torch.matmul(Q, K.transpose(1, 2)) / self.scale
if mask_out is not None:
output.masked_fill_(mask_out, -1e8)
output.masked_fill_(mask_out, -1e18)
output = self.softmax(output)
output = self.drop(output)
return torch.matmul(output, V)
@@ -67,9 +67,8 @@ class MultiHeadAttention(nn.Module):
self.k_in = nn.Linear(input_size, in_size)
self.v_in = nn.Linear(input_size, in_size)
# follow the paper, do not apply dropout within dot-product
self.attention = DotAttention(key_size=key_size, value_size=value_size, dropout=0)
self.attention = DotAttention(key_size=key_size, value_size=value_size, dropout=dropout)
self.out = nn.Linear(value_size * num_head, input_size)
self.drop = TimestepDropout(dropout)
self.reset_parameters()
def reset_parameters(self):
@@ -105,7 +104,7 @@ class MultiHeadAttention(nn.Module):
# concat all heads, do output linear
atte = atte.permute(1, 2, 0, 3).contiguous().view(batch, sq, -1)
output = self.drop(self.out(atte))
output = self.out(atte)
return output
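
A toy illustration of the masking convention in `DotAttention` above, assuming its forward takes Q, K, V and an optional `mask_out` as the body suggests: positions where `mask_out` is set are filled with -1e18 before the softmax and therefore receive essentially zero attention weight::

    import torch
    from fastNLP.modules.aggregator.attention import DotAttention

    attn = DotAttention(key_size=4, value_size=4, dropout=0.0)
    Q = torch.randn(1, 2, 4)                       # [batch, query_len, key_size]
    K = torch.randn(1, 3, 4)                       # [batch, key_len, key_size]
    V = torch.randn(1, 3, 4)                       # [batch, key_len, value_size]
    mask_out = torch.tensor([[[0, 0, 1],
                              [0, 0, 1]]], dtype=torch.uint8)   # hide the third key from both queries
    out = attn(Q, K, V, mask_out=mask_out)         # [1, 2, 4]; the masked key contributes ~nothing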




+ 2 - 1 fastNLP/modules/decoder/mlp.py

@@ -15,7 +15,8 @@ class MLP(nn.Module):
Multi-layer perceptron

:param List[int] size_layer: a list of ints that defines the layers of the MLP; each entry is the hidden size of that layer. The number of layers is len(size_layer) - 1
:param Union[str,func,List[str]] activation: a string or a list of strings/functions defining the activation of each hidden layer; supported strings are relu, tanh and sigmoid, default relu
:param Union[str,func,List[str]] activation: a string or a list of strings/functions defining the activation of each hidden layer; supported strings are relu, tanh and
sigmoid, default relu
:param Union[str,func] output_activation: a string or function defining the activation of the output layer; default None, meaning the output layer has no activation
:param str initial_method: parameter initialization method
:param float dropout: dropout probability, default 0

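A brief usage sketch matching the documented parameters (a sketch only; it assumes the constructor accepts the arguments by the names given above)::

    import torch
    from fastNLP.modules.decoder.mlp import MLP

    # Three Linear layers, 256 -> 128 -> 64 -> 10, relu on the hidden layers, no output activation.
    mlp = MLP([256, 128, 64, 10], activation='relu', dropout=0.1)
    x = torch.randn(32, 256)
    y = mlp(x)                                     # shape: [32, 10]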

+ 188 - 80 fastNLP/modules/encoder/_bert.py

@@ -2,7 +2,8 @@


"""
The code in this file draws heavily on https://github.com/huggingface/pytorch-pretrained-BERT
The code in this file draws heavily on (is largely copy-pasted from) https://github.com/huggingface/pytorch-pretrained-BERT; if you find
it useful, please also cite them.
"""


@@ -11,7 +12,6 @@ from ...core.vocabulary import Vocabulary
import collections

import unicodedata
from ...io.file_utils import _get_base_url, cached_path
import numpy as np
from itertools import chain
import copy
@@ -22,9 +22,106 @@ import os
import torch
from torch import nn
import glob
import sys

CONFIG_FILE = 'bert_config.json'
MODEL_WEIGHTS = 'pytorch_model.bin'


class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12):
"""Constructs BertConfig.

Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")

@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config

@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))

def __repr__(self):
return str(self.to_json_string())

def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output

def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())


def gelu(x):
@@ -40,6 +137,8 @@ ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}

class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
@@ -53,16 +152,18 @@ class BertLayerNorm(nn.Module):


class BertEmbeddings(nn.Module):
def __init__(self, vocab_size, hidden_size, max_position_embeddings, type_vocab_size, hidden_dropout_prob):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(vocab_size, hidden_size)
self.position_embeddings = nn.Embedding(max_position_embeddings, hidden_size)
self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
self.dropout = nn.Dropout(hidden_dropout_prob)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)

def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
@@ -82,21 +183,21 @@ class BertEmbeddings(nn.Module):


class BertSelfAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if hidden_size % num_attention_heads != 0:
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size

self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)

self.dropout = nn.Dropout(attention_probs_dropout_prob)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
@@ -133,11 +234,11 @@ class BertSelfAttention(nn.Module):


class BertSelfOutput(nn.Module):
def __init__(self, hidden_size, hidden_dropout_prob):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
self.dropout = nn.Dropout(hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)

def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
@@ -147,10 +248,10 @@ class BertSelfOutput(nn.Module):


class BertAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob)
self.output = BertSelfOutput(hidden_size, hidden_dropout_prob)
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)

def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
@@ -159,11 +260,13 @@ class BertAttention(nn.Module):


class BertIntermediate(nn.Module):
def __init__(self, hidden_size, intermediate_size, hidden_act):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(hidden_size, intermediate_size)
self.intermediate_act_fn = ACT2FN[hidden_act] \
if isinstance(hidden_act, str) else hidden_act
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act

def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
@@ -172,11 +275,11 @@ class BertIntermediate(nn.Module):


class BertOutput(nn.Module):
def __init__(self, hidden_size, intermediate_size, hidden_dropout_prob):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(intermediate_size, hidden_size)
self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
self.dropout = nn.Dropout(hidden_dropout_prob)
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)

def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
@@ -186,13 +289,11 @@ class BertOutput(nn.Module):


class BertLayer(nn.Module):
def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob,
intermediate_size, hidden_act):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob,
hidden_dropout_prob)
self.intermediate = BertIntermediate(hidden_size, intermediate_size, hidden_act)
self.output = BertOutput(hidden_size, intermediate_size, hidden_dropout_prob)
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)

def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
@@ -202,13 +303,10 @@ class BertLayer(nn.Module):


class BertEncoder(nn.Module):
def __init__(self, num_hidden_layers, hidden_size, num_attention_heads, attention_probs_dropout_prob,
hidden_dropout_prob,
intermediate_size, hidden_act):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob,
intermediate_size, hidden_act)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_hidden_layers)])
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])

def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
@@ -222,9 +320,9 @@ class BertEncoder(nn.Module):


class BertPooler(nn.Module):
def __init__(self, hidden_size):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()

def forward(self, hidden_states):
@@ -242,13 +340,19 @@ class BertModel(nn.Module):
If you want to use pretrained weights, download them from the URLs below.
sources::

'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin"


Build a BERT model from pretrained weights::
@@ -272,34 +376,30 @@ class BertModel(nn.Module):
:param int initializer_range: the range (stddev) used to initialize the weights, default 0.02
"""

def __init__(self, vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
def __init__(self, config, *inputs, **kwargs):
super(BertModel, self).__init__()
self.hidden_size = hidden_size
self.embeddings = BertEmbeddings(vocab_size, hidden_size, max_position_embeddings,
type_vocab_size, hidden_dropout_prob)
self.encoder = BertEncoder(num_hidden_layers, hidden_size, num_attention_heads,
attention_probs_dropout_prob, hidden_dropout_prob, intermediate_size,
hidden_act)
self.pooler = BertPooler(hidden_size)
self.initializer_range = initializer_range

if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
super(BertModel, self).__init__()
self.config = config
self.hidden_size = self.config.hidden_size
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)

def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.initializer_range)
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@@ -338,14 +438,19 @@ class BertModel(nn.Module):
return encoded_layers, pooled_output

@classmethod
def from_pretrained(cls, pretrained_model_dir, state_dict=None, *inputs, **kwargs):
def from_pretrained(cls, pretrained_model_dir, *inputs, **kwargs):
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
# Load config
config_file = os.path.join(pretrained_model_dir, CONFIG_FILE)
config = json.load(open(config_file, "r"))
# config = BertConfig.from_json_file(config_file)
config = BertConfig.from_json_file(config_file)
# logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(*inputs, **config, **kwargs)
model = cls(config, *inputs, **kwargs)
if state_dict is None:
files = glob.glob(os.path.join(pretrained_model_dir, '*.bin'))
if len(files)==0:
@@ -353,7 +458,7 @@ class BertModel(nn.Module):
elif len(files)>1:
raise FileExistsError(f"There are multiple *.bin files in {pretrained_model_dir}")
weights_path = files[0]
state_dict = torch.load(weights_path)
state_dict = torch.load(weights_path, map_location='cpu')

old_keys = []
new_keys = []
@@ -464,6 +569,7 @@ class WordpieceTokenizer(object):
output_tokens.extend(sub_tokens)
return output_tokens


def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
@@ -594,6 +700,7 @@ class BasicTokenizer(object):
output.append(char)
return "".join(output)


def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
@@ -840,6 +947,7 @@ class _WordBertModel(nn.Module):
word_pieces_i = list(chain(*self.word_to_wordpieces[word_indexes[i]]))
word_pieces[i, 1:len(word_pieces_i)+1] = torch.LongTensor(word_pieces_i)
attn_masks[i, :len(word_pieces_i)+2].fill_(1)
# TODO truncate the part that exceeds the maximum length.
# 2. get the hidden states and pool them according to word_pieces
# all_outputs: [batch_size x max_len x hidden_size, batch_size x max_len x hidden_size, ...]
bert_outputs, _ = self.encoder(word_pieces, token_type_ids=None, attention_mask=attn_masks,

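A hedged sketch of the configuration-driven construction introduced above: a `BertConfig` is built from an int vocabulary size or a JSON file, and `BertModel.from_pretrained` now reads `bert_config.json` plus a single `*.bin` file from a directory (paths below are hypothetical)::

    from fastNLP.modules.encoder._bert import BertConfig, BertModel

    config = BertConfig(30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
    model = BertModel(config)

    config.to_json_file('/tmp/bert_config.json')   # round-trip the config through JSON
    same_config = BertConfig.from_json_file('/tmp/bert_config.json')

    pretrained = BertModel.from_pretrained('/path/to/bert-base-uncased')   # hypothetical directory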

+ 288 - 170 fastNLP/modules/encoder/_elmo.py

@@ -1,12 +1,13 @@

"""
The code in this file draws heavily on https://github.com/HIT-SCIR/ELMoForManyLangs/tree/master/elmoformanylangs
The code in this file draws heavily on allenNLP
"""


from typing import Optional, Tuple, List, Callable

import os

import h5py
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
@@ -16,7 +17,6 @@ import json

from ..utils import get_dropout_mask
import codecs
from torch import autograd

class LstmCellWithProjection(torch.nn.Module):
"""
@@ -58,6 +58,7 @@ class LstmCellWithProjection(torch.nn.Module):
respectively. The first dimension is 1 in order to match the Pytorch
API for returning stacked LSTM states.
"""

def __init__(self,
input_size: int,
hidden_size: int,
@@ -129,13 +130,13 @@ class LstmCellWithProjection(torch.nn.Module):
# We have to use this '.data.new().fill_' pattern to create tensors with the correct
# type - forward has no knowledge of whether these are torch.Tensors or torch.cuda.Tensors.
output_accumulator = inputs.data.new(batch_size,
total_timesteps,
self.hidden_size).fill_(0)
total_timesteps,
self.hidden_size).fill_(0)
if initial_state is None:
full_batch_previous_memory = inputs.data.new(batch_size,
self.cell_size).fill_(0)
self.cell_size).fill_(0)
full_batch_previous_state = inputs.data.new(batch_size,
self.hidden_size).fill_(0)
self.hidden_size).fill_(0)
else:
full_batch_previous_state = initial_state[0].squeeze(0)
full_batch_previous_memory = initial_state[1].squeeze(0)
@@ -169,7 +170,7 @@ class LstmCellWithProjection(torch.nn.Module):
# Second conditional: Does the next shortest sequence beyond the current batch
# index require computation use this timestep?
while current_length_index < (len(batch_lengths) - 1) and \
batch_lengths[current_length_index + 1] > index:
batch_lengths[current_length_index + 1] > index:
current_length_index += 1

# Actually get the slices of the batch which we
@@ -256,7 +257,7 @@ class LstmbiLm(nn.Module):
inputs = inputs[sort_idx]
inputs = nn.utils.rnn.pack_padded_sequence(inputs, sort_lens, batch_first=self.batch_first)
output, hx = self.encoder(inputs, None) # -> [N,L,C]
output, _ = nn.util.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
_, unsort_idx = torch.sort(sort_idx, dim=0, descending=False)
output = output[unsort_idx]
forward, backward = output.split(self.config['encoder']['dim'], 2)
@@ -316,13 +317,13 @@ class ElmobiLm(torch.nn.Module):
:param seq_len: batch_size
:return: torch.FloatTensor. num_layers x batch_size x max_len x hidden_size
"""
max_len = inputs.size(1)
sort_lens, sort_idx = torch.sort(seq_len, dim=0, descending=True)
inputs = inputs[sort_idx]
inputs = nn.utils.rnn.pack_padded_sequence(inputs, sort_lens, batch_first=True)
output, _ = self._lstm_forward(inputs, None)
_, unsort_idx = torch.sort(sort_idx, dim=0, descending=False)
output = output[:, unsort_idx]

return output

def _lstm_forward(self,
@@ -399,7 +400,7 @@ class ElmobiLm(torch.nn.Module):
torch.cat([forward_state[1], backward_state[1]], -1)))

stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs)
# Stack the hidden state and memory for each layer into 2 tensors of shape
# Stack the hidden state and memory for each layer into 2 tensors of shape
# (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)
# respectively.
final_hidden_states, final_memory_states = zip(*final_states)
@@ -408,6 +409,66 @@ class ElmobiLm(torch.nn.Module):
torch.cat(final_memory_states, 0))
return stacked_sequence_outputs, final_state_tuple

def load_weights(self, weight_file: str) -> None:
"""
Load the pre-trained weights from the file.
"""
requires_grad = False

with h5py.File(weight_file, 'r') as fin:
for i_layer, lstms in enumerate(
zip(self.forward_layers, self.backward_layers)
):
for j_direction, lstm in enumerate(lstms):
# lstm is an instance of LSTMCellWithProjection
cell_size = lstm.cell_size

dataset = fin['RNN_%s' % j_direction]['RNN']['MultiRNNCell']['Cell%s' % i_layer
]['LSTMCell']

# tensorflow packs together both W and U matrices into one matrix,
# but pytorch maintains individual matrices. In addition, tensorflow
# packs the gates as input, memory, forget, output but pytorch
# uses input, forget, memory, output. So we need to modify the weights.
tf_weights = numpy.transpose(dataset['W_0'][...])
torch_weights = tf_weights.copy()

# split the W from U matrices
input_size = lstm.input_size
input_weights = torch_weights[:, :input_size]
recurrent_weights = torch_weights[:, input_size:]
tf_input_weights = tf_weights[:, :input_size]
tf_recurrent_weights = tf_weights[:, input_size:]

# handle the different gate order convention
for torch_w, tf_w in [[input_weights, tf_input_weights],
[recurrent_weights, tf_recurrent_weights]]:
torch_w[(1 * cell_size):(2 * cell_size), :] = tf_w[(2 * cell_size):(3 * cell_size), :]
torch_w[(2 * cell_size):(3 * cell_size), :] = tf_w[(1 * cell_size):(2 * cell_size), :]

lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))
lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))
lstm.input_linearity.weight.requires_grad = requires_grad
lstm.state_linearity.weight.requires_grad = requires_grad

# the bias weights
tf_bias = dataset['B'][...]
# tensorflow adds 1.0 to forget gate bias instead of modifying the
# parameters...
tf_bias[(2 * cell_size):(3 * cell_size)] += 1
torch_bias = tf_bias.copy()
torch_bias[(1 * cell_size):(2 * cell_size)
] = tf_bias[(2 * cell_size):(3 * cell_size)]
torch_bias[(2 * cell_size):(3 * cell_size)
] = tf_bias[(1 * cell_size):(2 * cell_size)]
lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))
lstm.state_linearity.bias.requires_grad = requires_grad

# the projection weights
proj_weights = numpy.transpose(dataset['W_P_0'][...])
lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))
lstm.state_projection.weight.requires_grad = requires_grad


class LstmTokenEmbedder(nn.Module):
def __init__(self, config, word_emb_layer, char_emb_layer):
@@ -441,7 +502,7 @@ class LstmTokenEmbedder(nn.Module):
chars_emb = self.char_emb_layer(chars)
# TODO seq_len should be taken into account here
_, (chars_outputs, __) = self.char_lstm(chars_emb)
chars_outputs = chars_outputs.contiguous().view(-1, self.config['token_embedder']['char_dim'] * 2)
chars_outputs = chars_outputs.contiguous().view(-1, self.config['token_embedder']['embedding']['dim'] * 2)
embs.append(chars_outputs)

token_embedding = torch.cat(embs, dim=2)
@@ -450,79 +511,143 @@ class LstmTokenEmbedder(nn.Module):


class ConvTokenEmbedder(nn.Module):
def __init__(self, config, word_emb_layer, char_emb_layer):
def __init__(self, config, weight_file, word_emb_layer, char_emb_layer, char_vocab):
super(ConvTokenEmbedder, self).__init__()
self.config = config
self.weight_file = weight_file
self.word_emb_layer = word_emb_layer
self.char_emb_layer = char_emb_layer

self.output_dim = config['encoder']['projection_dim']
self.emb_dim = 0
if word_emb_layer is not None:
self.emb_dim += word_emb_layer.weight.size(1)

if char_emb_layer is not None:
self.convolutions = []
cnn_config = config['token_embedder']
filters = cnn_config['filters']
char_embed_dim = cnn_config['char_dim']

for i, (width, num) in enumerate(filters):
conv = torch.nn.Conv1d(
in_channels=char_embed_dim,
out_channels=num,
kernel_size=width,
bias=True
)
self.convolutions.append(conv)

self.convolutions = nn.ModuleList(self.convolutions)

self.n_filters = sum(f[1] for f in filters)
self.n_highway = cnn_config['n_highway']

self.highways = Highway(self.n_filters, self.n_highway, activation=torch.nn.functional.relu)
self.emb_dim += self.n_filters

self.projection = nn.Linear(self.emb_dim, self.output_dim, bias=True)
self._options = config
self.requires_grad = False
self._load_weights()
self._char_embedding_weights = char_emb_layer.weight.data

def _load_weights(self):
self._load_cnn_weights()
self._load_highway()
self._load_projection()

def _load_cnn_weights(self):
cnn_options = self._options['token_embedder']
filters = cnn_options['filters']
char_embed_dim = cnn_options['embedding']['dim']

convolutions = []
for i, (width, num) in enumerate(filters):
conv = torch.nn.Conv1d(
in_channels=char_embed_dim,
out_channels=num,
kernel_size=width,
bias=True
)
# load the weights
with h5py.File(self.weight_file, 'r') as fin:
weight = fin['CNN']['W_cnn_{}'.format(i)][...]
bias = fin['CNN']['b_cnn_{}'.format(i)][...]

w_reshaped = numpy.transpose(weight.squeeze(axis=0), axes=(2, 1, 0))
if w_reshaped.shape != tuple(conv.weight.data.shape):
raise ValueError("Invalid weight file")
conv.weight.data.copy_(torch.FloatTensor(w_reshaped))
conv.bias.data.copy_(torch.FloatTensor(bias))

conv.weight.requires_grad = self.requires_grad
conv.bias.requires_grad = self.requires_grad

convolutions.append(conv)
self.add_module('char_conv_{}'.format(i), conv)

self._convolutions = convolutions

def _load_highway(self):
# the highway layers have same dimensionality as the number of cnn filters
cnn_options = self._options['token_embedder']
filters = cnn_options['filters']
n_filters = sum(f[1] for f in filters)
n_highway = cnn_options['n_highway']

# create the layers, and load the weights
self._highways = Highway(n_filters, n_highway, activation=torch.nn.functional.relu)
for k in range(n_highway):
# The AllenNLP highway is one matrix multiplication with concatenation of
# transform and carry weights.
with h5py.File(self.weight_file, 'r') as fin:
# The weights are transposed due to multiplication order assumptions in tf
# vs pytorch (tf.matmul(X, W) vs pytorch.matmul(W, X))
w_transform = numpy.transpose(fin['CNN_high_{}'.format(k)]['W_transform'][...])
# -1.0 since AllenNLP is g * x + (1 - g) * f(x) but tf is (1 - g) * x + g * f(x)
w_carry = -1.0 * numpy.transpose(fin['CNN_high_{}'.format(k)]['W_carry'][...])
weight = numpy.concatenate([w_transform, w_carry], axis=0)
self._highways._layers[k].weight.data.copy_(torch.FloatTensor(weight))
self._highways._layers[k].weight.requires_grad = self.requires_grad

b_transform = fin['CNN_high_{}'.format(k)]['b_transform'][...]
b_carry = -1.0 * fin['CNN_high_{}'.format(k)]['b_carry'][...]
bias = numpy.concatenate([b_transform, b_carry], axis=0)
self._highways._layers[k].bias.data.copy_(torch.FloatTensor(bias))
self._highways._layers[k].bias.requires_grad = self.requires_grad

def _load_projection(self):
cnn_options = self._options['token_embedder']
filters = cnn_options['filters']
n_filters = sum(f[1] for f in filters)

self._projection = torch.nn.Linear(n_filters, self.output_dim, bias=True)
with h5py.File(self.weight_file, 'r') as fin:
weight = fin['CNN_proj']['W_proj'][...]
bias = fin['CNN_proj']['b_proj'][...]
self._projection.weight.data.copy_(torch.FloatTensor(numpy.transpose(weight)))
self._projection.bias.data.copy_(torch.FloatTensor(bias))

self._projection.weight.requires_grad = self.requires_grad
self._projection.bias.requires_grad = self.requires_grad

def forward(self, words, chars):
embs = []
if self.word_emb_layer is not None:
if hasattr(self, 'words_to_words'):
words = self.words_to_words[words]
word_emb = self.word_emb_layer(words)
embs.append(word_emb)
"""
:param words:
:param chars: Tensor Shape ``(batch_size, sequence_length, 50)``:
:return Tensor Shape ``(batch_size, sequence_length + 2, embedding_dim)`` :
"""
# the character id embedding
# (batch_size * sequence_length, max_chars_per_token, embed_dim)
# character_embedding = torch.nn.functional.embedding(
# chars.view(-1, max_chars_per_token),
# self._char_embedding_weights
# )
batch_size, sequence_length, max_char_len = chars.size()
character_embedding = self.char_emb_layer(chars).reshape(batch_size*sequence_length, max_char_len, -1)
# run convolutions
cnn_options = self._options['token_embedder']
if cnn_options['activation'] == 'tanh':
activation = torch.tanh
elif cnn_options['activation'] == 'relu':
activation = torch.nn.functional.relu
else:
raise Exception("Unknown activation")

if self.char_emb_layer is not None:
batch_size, seq_len, _ = chars.size()
chars = chars.view(batch_size * seq_len, -1)
character_embedding = self.char_emb_layer(chars)
character_embedding = torch.transpose(character_embedding, 1, 2)

cnn_config = self.config['token_embedder']
if cnn_config['activation'] == 'tanh':
activation = torch.nn.functional.tanh
elif cnn_config['activation'] == 'relu':
activation = torch.nn.functional.relu
else:
raise Exception("Unknown activation")
# (batch_size * sequence_length, embed_dim, max_chars_per_token)
character_embedding = torch.transpose(character_embedding, 1, 2)
convs = []
for i in range(len(self._convolutions)):
conv = getattr(self, 'char_conv_{}'.format(i))
convolved = conv(character_embedding)
# (batch_size * sequence_length, n_filters for this width)
convolved, _ = torch.max(convolved, dim=-1)
convolved = activation(convolved)
convs.append(convolved)

convs = []
for i in range(len(self.convolutions)):
convolved = self.convolutions[i](character_embedding)
# (batch_size * sequence_length, n_filters for this width)
convolved, _ = torch.max(convolved, dim=-1)
convolved = activation(convolved)
convs.append(convolved)
char_emb = torch.cat(convs, dim=-1)
char_emb = self.highways(char_emb)
# (batch_size * sequence_length, n_filters)
token_embedding = torch.cat(convs, dim=-1)

embs.append(char_emb.view(batch_size, -1, self.n_filters))
# apply the highway layers (batch_size * sequence_length, n_filters)
token_embedding = self._highways(token_embedding)

token_embedding = torch.cat(embs, dim=2)
# final projection (batch_size * sequence_length, embedding_dim)
token_embedding = self._projection(token_embedding)

return self.projection(token_embedding)
# reshape to (batch_size, sequence_length+2, embedding_dim)
return token_embedding.view(batch_size, sequence_length, -1)


class Highway(torch.nn.Module):
@@ -543,6 +668,7 @@ class Highway(torch.nn.Module):
activation : ``Callable[[torch.Tensor], torch.Tensor]``, optional (default=``torch.nn.functional.relu``)
The non-linearity to use in the highway layers.
"""

def __init__(self,
input_dim: int,
num_layers: int = 1,
@@ -573,6 +699,7 @@ class Highway(torch.nn.Module):
current_input = gate * linear_part + (1 - gate) * nonlinear_part
return current_input


class _ElmoModel(nn.Module):
"""
This Module does all the heavy lifting for ElmoEmbedding. Its responsibilities include
@@ -582,11 +709,32 @@ class _ElmoModel(nn.Module):
(4) maintaining a token embedding that allows word representations to be cached.

"""
def __init__(self, model_dir:str, vocab:Vocabulary=None, cache_word_reprs:bool=False):

def __init__(self, model_dir: str, vocab: Vocabulary = None, cache_word_reprs: bool = False):
super(_ElmoModel, self).__init__()
config = json.load(open(os.path.join(model_dir, 'structure_config.json'), 'r'))

dir = os.walk(model_dir)
config_file = None
weight_file = None
config_count = 0
weight_count = 0
for path, dir_list, file_list in dir:
for file_name in file_list:
if file_name.__contains__(".json"):
config_file = file_name
config_count += 1
elif file_name.__contains__(".hdf5"):
weight_file = file_name
weight_count += 1
if config_count > 1 or weight_count > 1:
raise Exception(f"Multiple config files(*.json) or weight files(*.hdf5) detected in {model_dir}.")
elif config_count == 0 or weight_count == 0:
raise Exception(f"No config file or weight file found in {model_dir}")

config = json.load(open(os.path.join(model_dir, config_file), 'r'))
self.weight_file = os.path.join(model_dir, weight_file)
self.config = config
self.requires_grad = False

OOV_TAG = '<oov>'
PAD_TAG = '<pad>'
@@ -595,48 +743,8 @@ class _ElmoModel(nn.Module):
BOW_TAG = '<bow>'
EOW_TAG = '<eow>'

# 将加载embedding放到这里
token_embedder_states = torch.load(os.path.join(model_dir, 'token_embedder.pkl'), map_location='cpu')

# For the model trained with word form word encoder.
if config['token_embedder']['word_dim'] > 0:
word_lexicon = {}
with codecs.open(os.path.join(model_dir, 'word.dic'), 'r', encoding='utf-8') as fpi:
for line in fpi:
tokens = line.strip().split('\t')
if len(tokens) == 1:
tokens.insert(0, '\u3000')
token, i = tokens
word_lexicon[token] = int(i)
# 做一些sanity check
for special_word in [PAD_TAG, OOV_TAG, BOS_TAG, EOS_TAG]:
assert special_word in word_lexicon, f"{special_word} not found in word.dic."
# 根据vocab调整word_embedding
pre_word_embedding = token_embedder_states.pop('word_emb_layer.embedding.weight')
word_emb_layer = nn.Embedding(len(vocab)+2, config['token_embedder']['word_dim']) #多增加两个是为了<bos>与<eos>
found_word_count = 0
for word, index in vocab:
if index == vocab.unknown_idx: # 因为fastNLP的unknow是<unk> 而在这里是<oov>所以ugly强制适配一下
index_in_pre = word_lexicon[OOV_TAG]
found_word_count += 1
elif index == vocab.padding_idx: # 需要pad对齐
index_in_pre = word_lexicon[PAD_TAG]
found_word_count += 1
elif word in word_lexicon:
index_in_pre = word_lexicon[word]
found_word_count += 1
else:
index_in_pre = word_lexicon[OOV_TAG]
word_emb_layer.weight.data[index] = pre_word_embedding[index_in_pre]
print(f"{found_word_count} out of {len(vocab)} words were found in pretrained elmo embedding.")
word_emb_layer.weight.data[-1] = pre_word_embedding[word_lexicon[EOS_TAG]]
word_emb_layer.weight.data[-2] = pre_word_embedding[word_lexicon[BOS_TAG]]
self.word_vocab = vocab
else:
word_emb_layer = None

# For the model trained with character-based word encoder.
if config['token_embedder']['char_dim'] > 0:
if config['token_embedder']['embedding']['dim'] > 0:
char_lexicon = {}
with codecs.open(os.path.join(model_dir, 'char.dic'), 'r', encoding='utf-8') as fpi:
for line in fpi:
@@ -645,22 +753,26 @@ class _ElmoModel(nn.Module):
tokens.insert(0, '\u3000')
token, i = tokens
char_lexicon[token] = int(i)

# 做一些sanity check
for special_word in [PAD_TAG, OOV_TAG, BOW_TAG, EOW_TAG]:
assert special_word in char_lexicon, f"{special_word} not found in char.dic."

# 从vocab中构建char_vocab
char_vocab = Vocabulary(unknown=OOV_TAG, padding=PAD_TAG)
# 需要保证<bow>与<eow>在里面
char_vocab.add_word(BOW_TAG)
char_vocab.add_word(EOW_TAG)
char_vocab.add_word_lst([BOW_TAG, EOW_TAG, BOS_TAG, EOS_TAG])
for word, index in vocab:
char_vocab.add_word_lst(list(word))
# 保证<eos>, <bos>也在
char_vocab.add_word_lst(list(BOS_TAG))
char_vocab.add_word_lst(list(EOS_TAG))
# 根据char_lexicon调整
char_emb_layer = nn.Embedding(len(char_vocab), int(config['token_embedder']['char_dim']))
pre_char_embedding = token_embedder_states.pop('char_emb_layer.embedding.weight')

self.bos_index, self.eos_index, self._pad_index = len(vocab), len(vocab)+1, vocab.padding_idx
# 根据char_lexicon调整, 多设置一位,是预留给word padding的(该位置的char表示为全0表示)
char_emb_layer = nn.Embedding(len(char_vocab)+1, int(config['token_embedder']['embedding']['dim']),
padding_idx=len(char_vocab))
with h5py.File(self.weight_file, 'r') as fin:
char_embed_weights = fin['char_embed'][...]
char_embed_weights = torch.from_numpy(char_embed_weights)
found_char_count = 0
for char, index in char_vocab: # 调整character embedding
if char in char_lexicon:
@@ -668,79 +780,84 @@ class _ElmoModel(nn.Module):
found_char_count += 1
else:
index_in_pre = char_lexicon[OOV_TAG]
char_emb_layer.weight.data[index] = pre_char_embedding[index_in_pre]
char_emb_layer.weight.data[index] = char_embed_weights[index_in_pre]

print(f"{found_char_count} out of {len(char_vocab)} characters were found in pretrained elmo embedding.")
# 生成words到chars的映射
if config['token_embedder']['name'].lower() == 'cnn':
max_chars = config['token_embedder']['max_characters_per_token']
elif config['token_embedder']['name'].lower() == 'lstm':
max_chars = max(map(lambda x: len(x[0]), vocab)) + 2 # 需要补充两个<bow>与<eow>
max_chars = max(map(lambda x: len(x[0]), vocab)) + 2 # 需要补充两个<bow>与<eow>
else:
raise ValueError('Unknown token_embedder: {0}'.format(config['token_embedder']['name']))
# 增加<bos>, <eos>所以加2.
self.words_to_chars_embedding = nn.Parameter(torch.full((len(vocab)+2, max_chars),
fill_value=char_vocab.to_index(PAD_TAG), dtype=torch.long),
fill_value=len(char_vocab),
dtype=torch.long),
requires_grad=False)
for word, index in vocab:
if len(word)+2>max_chars:
word = word[:max_chars-2]
if index==vocab.padding_idx: # 如果是pad的话,需要和给定的对齐
word = PAD_TAG
elif index==vocab.unknown_idx:
word = OOV_TAG
char_ids = [char_vocab.to_index(BOW_TAG)] + [char_vocab.to_index(c) for c in word] + [char_vocab.to_index(EOW_TAG)]
char_ids += [char_vocab.to_index(PAD_TAG)]*(max_chars-len(char_ids))
for word, index in list(iter(vocab)) + [(BOS_TAG, len(vocab)), (EOS_TAG, len(vocab)+1)]:
if len(word) + 2 > max_chars:
word = word[:max_chars - 2]
if index == self._pad_index:
continue
elif word == BOS_TAG or word == EOS_TAG:
char_ids = [char_vocab.to_index(BOW_TAG)] + [char_vocab.to_index(word)] + [
char_vocab.to_index(EOW_TAG)]
char_ids += [char_vocab.to_index(PAD_TAG)] * (max_chars - len(char_ids))
else:
char_ids = [char_vocab.to_index(BOW_TAG)] + [char_vocab.to_index(c) for c in word] + [
char_vocab.to_index(EOW_TAG)]
char_ids += [char_vocab.to_index(PAD_TAG)] * (max_chars - len(char_ids))
self.words_to_chars_embedding[index] = torch.LongTensor(char_ids)
for index, word in enumerate([BOS_TAG, EOS_TAG]): # 加上<eos>, <bos>
if len(word)+2>max_chars:
word = word[:max_chars-2]
char_ids = [char_vocab.to_index(BOW_TAG)] + [char_vocab.to_index(c) for c in word] + [char_vocab.to_index(EOW_TAG)]
char_ids += [char_vocab.to_index(PAD_TAG)]*(max_chars-len(char_ids))
self.words_to_chars_embedding[index+len(vocab)] = torch.LongTensor(char_ids)

self.char_vocab = char_vocab
else:
char_emb_layer = None

if config['token_embedder']['name'].lower() == 'cnn':
self.token_embedder = ConvTokenEmbedder(
config, word_emb_layer, char_emb_layer)
config, self.weight_file, None, char_emb_layer, self.char_vocab)
elif config['token_embedder']['name'].lower() == 'lstm':
self.token_embedder = LstmTokenEmbedder(
config, word_emb_layer, char_emb_layer)
self.token_embedder.load_state_dict(token_embedder_states, strict=False)
if config['token_embedder']['word_dim'] > 0 and vocab._no_create_word_length > 0: # 需要映射,使得来自于dev, test的idx指向unk
words_to_words = nn.Parameter(torch.arange(len(vocab)+2).long(), requires_grad=False)
config, None, char_emb_layer)

if config['token_embedder']['word_dim'] > 0 \
and vocab._no_create_word_length > 0: # 需要映射,使得来自于dev, test的idx指向unk
words_to_words = nn.Parameter(torch.arange(len(vocab) + 2).long(), requires_grad=False)
for word, idx in vocab:
if vocab._is_word_no_create_entry(word):
words_to_words[idx] = vocab.unknown_idx
setattr(self.token_embedder, 'words_to_words', words_to_words)
self.output_dim = config['encoder']['projection_dim']

# 暂时只考虑 elmo
if config['encoder']['name'].lower() == 'elmo':
self.encoder = ElmobiLm(config)
elif config['encoder']['name'].lower() == 'lstm':
self.encoder = LstmbiLm(config)
self.encoder.load_state_dict(torch.load(os.path.join(model_dir, 'encoder.pkl'),
map_location='cpu'))

self.bos_index = len(vocab)
self.eos_index = len(vocab) + 1
self._pad_index = vocab.padding_idx
self.encoder.load_weights(self.weight_file)

if cache_word_reprs:
if config['token_embedder']['char_dim']>0: # 只有在使用了chars的情况下有用
if config['token_embedder']['embedding']['dim'] > 0: # 只有在使用了chars的情况下有用
print("Start to generate cache word representations.")
batch_size = 320
num_batches = self.words_to_chars_embedding.size(0)//batch_size + \
int(self.words_to_chars_embedding.size(0)%batch_size!=0)
self.cached_word_embedding = nn.Embedding(self.words_to_chars_embedding.size(0),
# bos eos
word_size = self.words_to_chars_embedding.size(0)
num_batches = word_size // batch_size + \
int(word_size % batch_size != 0)

self.cached_word_embedding = nn.Embedding(word_size,
config['encoder']['projection_dim'])
with torch.no_grad():
for i in range(num_batches):
words = torch.arange(i*batch_size, min((i+1)*batch_size, self.words_to_chars_embedding.size(0))).long()
words = torch.arange(i * batch_size,
min((i + 1) * batch_size, word_size)).long()
chars = self.words_to_chars_embedding[words].unsqueeze(1) # batch_size x 1 x max_chars
word_reprs = self.token_embedder(words.unsqueeze(1), chars).detach() # batch_size x 1 x config['encoder']['projection_dim']
word_reprs = self.token_embedder(words.unsqueeze(1),
chars).detach() # batch_size x 1 x config['encoder']['projection_dim']
self.cached_word_embedding.weight.data[words] = word_reprs.squeeze(1)

print("Finish generating cached word representations. Going to delete the character encoder.")
del self.token_embedder, self.words_to_chars_embedding
else:
@@ -758,7 +875,7 @@ class _ElmoModel(nn.Module):
seq_len = words.ne(self._pad_index).sum(dim=-1)
expanded_words[:, 1:-1] = words
expanded_words[:, 0].fill_(self.bos_index)
expanded_words[torch.arange(batch_size).to(words), seq_len+1] = self.eos_index
expanded_words[torch.arange(batch_size).to(words), seq_len + 1] = self.eos_index
seq_len = seq_len + 2
if hasattr(self, 'cached_word_embedding'):
token_embedding = self.cached_word_embedding(expanded_words)
@@ -767,16 +884,18 @@ class _ElmoModel(nn.Module):
chars = self.words_to_chars_embedding[expanded_words]
else:
chars = None
token_embedding = self.token_embedder(expanded_words, chars)
token_embedding = self.token_embedder(expanded_words, chars) # batch_size x max_len x embed_dim

if self.config['encoder']['name'] == 'elmo':
encoder_output = self.encoder(token_embedding, seq_len)
if encoder_output.size(2) < max_len+2:
dummy_tensor = encoder_output.new_zeros(encoder_output.size(0), batch_size,
max_len + 2 - encoder_output.size(2), encoder_output.size(-1))
encoder_output = torch.cat([encoder_output, dummy_tensor], 2)
sz = encoder_output.size() # 2, batch_size, max_len, hidden_size
token_embedding = torch.cat([token_embedding, token_embedding], dim=2).view(1, sz[1], sz[2], sz[3])
encoder_output = torch.cat([token_embedding, encoder_output], dim=0)
if encoder_output.size(2) < max_len + 2:
num_layers, _, output_len, hidden_size = encoder_output.size()
dummy_tensor = encoder_output.new_zeros(num_layers, batch_size,
max_len + 2 - output_len, hidden_size)
encoder_output = torch.cat((encoder_output, dummy_tensor), 2)
sz = encoder_output.size() # 2, batch_size, max_len, hidden_size
token_embedding = torch.cat((token_embedding, token_embedding), dim=2).view(1, sz[1], sz[2], sz[3])
encoder_output = torch.cat((token_embedding, encoder_output), dim=0)
elif self.config['encoder']['name'] == 'lstm':
encoder_output = self.encoder(token_embedding, seq_len)
else:
@@ -784,5 +903,4 @@ class _ElmoModel(nn.Module):

# remove <eos>, <bos>. The removal here is not exact, but it should not affect the final result.
encoder_output = encoder_output[:, :, 1:-1]

return encoder_output
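
The trickiest step in `load_weights` above is the gate-order conversion: the TF checkpoint packs the LSTM gates as (input, memory, forget, output) while PyTorch expects (input, forget, memory, output), so the second and third row blocks are swapped. A tiny self-contained sketch of that swap on a dummy matrix::

    import numpy

    cell_size = 2
    # Dummy "TF-style" matrix: four gate blocks of cell_size rows, packed as input, memory, forget, output.
    tf_w = numpy.arange(4 * cell_size * 3, dtype=float).reshape(4 * cell_size, 3)

    torch_w = tf_w.copy()
    # Swap the memory and forget blocks to obtain input, forget, memory, output.
    torch_w[1 * cell_size:2 * cell_size, :] = tf_w[2 * cell_size:3 * cell_size, :]
    torch_w[2 * cell_size:3 * cell_size, :] = tf_w[1 * cell_size:2 * cell_size, :]

    assert (torch_w[1 * cell_size:2 * cell_size] == tf_w[2 * cell_size:3 * cell_size]).all()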

+ 218 - 85 fastNLP/modules/encoder/embedding.py

@@ -35,15 +35,15 @@ class Embedding(nn.Module):

Embedding component. The vocabulary size is available via self.num_embeddings and the embedding dimension via self.embedding_dim."""
def __init__(self, init_embed, dropout=0.0, dropout_word=0, unk_index=None):
def __init__(self, init_embed, word_dropout=0, dropout=0.0, unk_index=None):
"""

:param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray init_embed: the Embedding to build (pass a tuple(int, int):
the first int is the vocab size, the second int is embed_dim); a Tensor, Embedding or ndarray is used directly to initialize the Embedding;
a TokenEmbedding object may also be passed in
:param float word_dropout: probability of randomly setting a word to unk_index, so that the unk token gets enough training;
this also has a regularizing effect on the network.
:param float dropout: dropout applied to the output of the Embedding.
:param float dropout_word: probability of randomly setting a word to the unk index, so that the unk token gets enough training
:param int unk_index: index substituted when a word is dropped; not needed when init_embed is a TokenEmbedding.
:param int unk_index: index substituted when a word is dropped. fastNLP's Vocabulary uses unk_index=1 by default.
"""
super(Embedding, self).__init__()

@@ -52,21 +52,21 @@ class Embedding(nn.Module):
self.dropout = nn.Dropout(dropout)
if not isinstance(self.embed, TokenEmbedding):
self._embed_size = self.embed.weight.size(1)
if dropout_word>0 and not isinstance(unk_index, int):
if word_dropout>0 and not isinstance(unk_index, int):
raise ValueError("When drop word is set, you need to pass in the unk_index.")
else:
self._embed_size = self.embed.embed_size
unk_index = self.embed.get_word_vocab().unknown_idx
self.unk_index = unk_index
self.dropout_word = dropout_word
self.word_dropout = word_dropout

def forward(self, x):
"""
:param torch.LongTensor x: [batch, seq_len]
:return: torch.Tensor : [batch, seq_len, embed_dim]
"""
if self.dropout_word>0 and self.training:
mask = torch.ones_like(x).float() * self.dropout_word
if self.word_dropout>0 and self.training:
mask = torch.ones_like(x).float() * self.word_dropout
mask = torch.bernoulli(mask).byte() # the larger the dropout probability, the more positions become 1
x = x.masked_fill(mask, self.unk_index)
x = self.embed(x)
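
The word_dropout mechanism above is just a Bernoulli mask over token ids; a minimal self-contained sketch (unk_index=1 mirrors the fastNLP default mentioned in the docstring, the rest is illustrative):

```
import torch

def word_dropout(x, p, unk_index=1, training=True):
    # Replace each token id with unk_index with probability p (training only).
    if p > 0 and training:
        mask = torch.bernoulli(torch.full(x.shape, p)).bool()
        x = x.masked_fill(mask, unk_index)
    return x

words = torch.tensor([[4, 7, 9, 2]])
print(word_dropout(words, p=0.3))
```
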
@@ -117,11 +117,38 @@ class Embedding(nn.Module):


class TokenEmbedding(nn.Module):
def __init__(self, vocab):
def __init__(self, vocab, word_dropout=0.0, dropout=0.0):
super(TokenEmbedding, self).__init__()
assert vocab.padding_idx is not None, "Your vocabulary must have padding."
assert vocab.padding is not None, "Vocabulary must have a padding entry."
self._word_vocab = vocab
self._word_pad_index = vocab.padding_idx
if word_dropout>0:
assert vocab.unknown is not None, "Vocabulary must have an unknown entry when you want to drop words."
self.word_dropout = word_dropout
self._word_unk_index = vocab.unknown_idx
self.dropout_layer = nn.Dropout(dropout)

def drop_word(self, words):
"""
Randomly set entries of words to unknown_index according to the configured probability.

:param torch.LongTensor words: batch_size x max_len
:return:
"""
if self.word_dropout > 0 and self.training:
mask = torch.ones_like(words).float() * self.word_dropout
mask = torch.bernoulli(mask).byte() # the larger the dropout probability, the more positions become 1
words = words.masked_fill(mask, self._word_unk_index)
return words

def dropout(self, words):
"""
Apply dropout to the word representations after embedding.

:param torch.FloatTensor words: batch_size x max_len x embed_size
:return:
"""
return self.dropout_layer(words)

@property
def requires_grad(self):
@@ -163,6 +190,9 @@ class TokenEmbedding(nn.Module):
def size(self):
return torch.Size([self.num_embedding, self._embed_size])

@abstractmethod
def forward(self, *input):
raise NotImplementedError

class StaticEmbedding(TokenEmbedding):
"""
@@ -179,15 +209,17 @@ class StaticEmbedding(TokenEmbedding):
:param model_dir_or_name: there are two ways to refer to a pre-trained static embedding: pass the file name of the embedding,
or pass the embedding's registered name. Currently supported names include {`en` or `en-glove-840b-300` : glove.840B.300d,
`en-glove-6b-50` : glove.6B.50d, `en-word2vec-300` : GoogleNews-vectors-negative300}. In the second case the cache is checked
for the model and it is downloaded automatically if absent.
:param requires_grad: whether gradients are needed. Defaults to True.
:param init_method: how to initialize vectors that were not found. Any method from torch.nn.init.* can be used; it is called with a tensor object.
:param normalize: whether to normalize each vector so that its norm is 1.
:param bool requires_grad: whether gradients are needed. Defaults to True.
:param callable init_method: how to initialize vectors that were not found. Any method from torch.nn.init.* can be used; it is called with a tensor object.
:param bool lower: whether to lowercase the words in vocab before matching them against the pre-trained vocabulary. If your
vocabulary contains uppercase words, or uppercase words need their own vector representation, set lower to False.
:param float word_dropout: probability of replacing a word with unk, which both trains the unk vector and acts as a regularizer.
:param float dropout: probability of applying Dropout to the embedding output; 0.1 randomly zeroes 10% of the values.
:param bool normalize: whether to normalize each vector so that its norm is 1.
"""
def __init__(self, vocab: Vocabulary, model_dir_or_name: str='en', requires_grad: bool=True, init_method=None,
normalize=False):
super(StaticEmbedding, self).__init__(vocab)

# first define which static embeddings are available for download. This will probably need a dedicated server,
lower=False, dropout=0, word_dropout=0, normalize=False):
super(StaticEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout)

# resolve cache_path
if model_dir_or_name.lower() in PRETRAIN_STATIC_FILES:
@@ -202,18 +234,44 @@ class StaticEmbedding(TokenEmbedding):
raise ValueError(f"Cannot recognize {model_dir_or_name}.")

# load the embedding
embedding, hit_flags = self._load_with_vocab(model_path, vocab=vocab, init_method=init_method,
normalize=normalize)
if lower:
lowered_vocab = Vocabulary(padding=vocab.padding, unknown=vocab.unknown)
for word, index in vocab:
if not vocab._is_word_no_create_entry(word):
lowered_vocab.add_word(word.lower()) # first add the words that need their own entries
for word in vocab._no_create_word.keys(): # words that do not need their own entries
if word in vocab:
lowered_word = word.lower()
if lowered_word not in lowered_vocab.word_count:
lowered_vocab.add_word(lowered_word)
lowered_vocab._no_create_word[lowered_word] += 1
print(f"All words in vocab have been lowered. There are {len(vocab)} words, {len(lowered_vocab)} unique lowered "
f"words.")
embedding = self._load_with_vocab(model_path, vocab=lowered_vocab, init_method=init_method,
normalize=normalize)
# adapt the index mapping to the original vocab
if not hasattr(self, 'words_to_words'):
self.words_to_words = torch.arange(len(lowered_vocab, )).long()
if lowered_vocab.unknown:
unknown_idx = lowered_vocab.unknown_idx
else:
unknown_idx = embedding.size(0) - 1 # otherwise the last row serves as unknown
words_to_words = nn.Parameter(torch.full((len(vocab),), fill_value=unknown_idx).long(),
requires_grad=False)
for word, index in vocab:
if word not in lowered_vocab:
word = word.lower()
if lowered_vocab._is_word_no_create_entry(word): # words without their own entry already default to unknown
continue
words_to_words[index] = self.words_to_words[lowered_vocab.to_index(word)]
self.words_to_words = words_to_words
else:
embedding = self._load_with_vocab(model_path, vocab=vocab, init_method=init_method,
normalize=normalize)
self.embedding = nn.Embedding(num_embeddings=embedding.shape[0], embedding_dim=embedding.shape[1],
padding_idx=vocab.padding_idx,
max_norm=None, norm_type=2, scale_grad_by_freq=False,
sparse=False, _weight=embedding)
if vocab._no_create_word_length > 0: # a mapping is needed so that indices seen only in dev/test point to unk
words_to_words = nn.Parameter(torch.arange(len(vocab)).long(), requires_grad=False)
for word, idx in vocab:
if vocab._is_word_no_create_entry(word) and not hit_flags[idx]:
words_to_words[idx] = vocab.unknown_idx
self.words_to_words = words_to_words
self._embed_size = self.embedding.weight.size(1)
self.requires_grad = requires_grad
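
The lower=True branch above leaves the embedding matrix untouched and only builds an index remapping from the original cased vocabulary into the lowered one. A simplified sketch of that remapping, folding the two loops into one (it assumes the Vocabulary iteration and to_index behaviour visible in this hunk):

```
import torch
import torch.nn as nn

def build_words_to_words(vocab, lowered_vocab, unknown_idx):
    # Every cased index points at the row of its lowercased form, or at unknown.
    mapping = torch.full((len(vocab),), fill_value=unknown_idx).long()
    for word, index in vocab:
        lowered = word if word in lowered_vocab else word.lower()
        if lowered in lowered_vocab:
            mapping[index] = lowered_vocab.to_index(lowered)
    return nn.Parameter(mapping, requires_grad=False)
```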

@@ -268,10 +326,8 @@ class StaticEmbedding(TokenEmbedding):
else:
dim = len(parts) - 1
f.seek(0)
matrix = torch.zeros(len(vocab), dim)
if init_method is not None:
init_method(matrix)
hit_flags = np.zeros(len(vocab), dtype=bool)
matrix = {}
found_count = 0
for idx, line in enumerate(f, start_idx):
try:
parts = line.strip().split()
@@ -285,28 +341,49 @@ class StaticEmbedding(TokenEmbedding):
if word in vocab:
index = vocab.to_index(word)
matrix[index] = torch.from_numpy(np.fromstring(' '.join(nums), sep=' ', dtype=dtype, count=dim))
hit_flags[index] = True
found_count += 1
except Exception as e:
if error == 'ignore':
warnings.warn("Error occurred at the {} line.".format(idx))
else:
print("Error occurred at the {} line.".format(idx))
raise e
found_count = sum(hit_flags)
print("Found {} out of {} words in the pre-training embedding.".format(found_count, len(vocab)))
if init_method is None:
if len(vocab)-found_count>0 and found_count>0: # some words were not found
found_vecs = matrix[torch.LongTensor(hit_flags.astype(int)).byte()]
mean = found_vecs.mean(dim=0, keepdim=True)
std = found_vecs.std(dim=0, keepdim=True)
unfound_vec_num = np.sum(hit_flags==False)
unfound_vecs = torch.randn(unfound_vec_num, dim)*std + mean
matrix[torch.LongTensor(hit_flags.astype(int)).eq(0)] = unfound_vecs
for word, index in vocab:
if index not in matrix and not vocab._is_word_no_create_entry(word):
if vocab.unknown_idx in matrix: # if an unknown vector exists, initialize with it
matrix[index] = matrix[vocab.unknown_idx]
else:
matrix[index] = None

vectors = torch.zeros(len(matrix), dim)
if init_method:
init_method(vectors)
else:
nn.init.uniform_(vectors, -np.sqrt(3/dim), np.sqrt(3/dim))

if vocab._no_create_word_length>0:
if vocab.unknown is None: # create a dedicated unknown entry
unknown_idx = len(matrix)
vectors = torch.cat((vectors, torch.zeros(1, dim)), dim=0).contiguous()
else:
unknown_idx = vocab.unknown_idx
words_to_words = nn.Parameter(torch.full((len(vocab),), fill_value=unknown_idx).long(),
requires_grad=False)
for order, (index, vec) in enumerate(matrix.items()):
if vec is not None:
vectors[order] = vec
words_to_words[index] = order
self.words_to_words = words_to_words
else:
for index, vec in matrix.items():
if vec is not None:
vectors[index] = vec

if normalize:
matrix /= (torch.norm(matrix, dim=1, keepdim=True) + 1e-12)
vectors /= (torch.norm(vectors, dim=1, keepdim=True) + 1e-12)

return matrix, hit_flags
return vectors
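
When no init_method is given, the code above samples vectors for words missing from the pre-trained file uniformly from [-sqrt(3/dim), sqrt(3/dim)], which gives every dimension a variance of 1/dim. In isolation:

```
import math
import torch
import torch.nn as nn

dim = 300
vectors = torch.zeros(1000, dim)
bound = math.sqrt(3 / dim)
nn.init.uniform_(vectors, -bound, bound)  # per-dimension variance = bound**2 / 3 = 1/dim
```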

def forward(self, words):
"""
@@ -317,12 +394,15 @@ class StaticEmbedding(TokenEmbedding):
"""
if hasattr(self, 'words_to_words'):
words = self.words_to_words[words]
return self.embedding(words)
words = self.drop_word(words)
words = self.embedding(words)
words = self.dropout(words)
return words
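
Putting the new arguments together, a usage sketch (illustrative only; the `en-glove-6b-50` name follows the docstring above and would be fetched into the cache on first use):

```
import torch
from fastNLP import Vocabulary
from fastNLP.modules.encoder.embedding import StaticEmbedding

vocab = Vocabulary()
vocab.add_word_lst("The quick brown fox jumps over the lazy dog".split())

embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50',
                        lower=True, word_dropout=0.01, dropout=0.1)
words = torch.LongTensor([[vocab.to_index(w) for w in ["The", "fox"]]])
print(embed(words).size())  # torch.Size([1, 2, 50])
```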


class ContextualEmbedding(TokenEmbedding):
def __init__(self, vocab: Vocabulary):
super(ContextualEmbedding, self).__init__(vocab)
def __init__(self, vocab: Vocabulary, word_dropout:float=0.0, dropout:float=0.0):
super(ContextualEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout)

def add_sentence_cache(self, *datasets, batch_size=32, device='cpu', delete_weights: bool=True):
"""
@@ -425,19 +505,17 @@ class ElmoEmbedding(ContextualEmbedding):
:param model_dir_or_name: there are two ways to refer to a pre-trained ELMo embedding: pass the file name of the ELMo weights,
or pass an ELMo version name. Currently supported versions are {`en` : English ELMo, `cn` : Chinese ELMo}. In the second case
the cache is checked for the model and it is downloaded automatically if absent.
:param layers: str, which layers to return, separated by ','. '2' returns the second layer, '1,2' returns the last two layers.
The results of the chosen layers are concatenated in that order. Defaults to '2'.
:param requires_grad: bool, whether this layer needs gradients. Defaults to False.
The results of the chosen layers are concatenated in that order. Defaults to '2'. 'mix' combines the different layers with
learnable weights (whether the weights are trainable follows requires_grad; they are initialized to mean-pool the three layers,
and ElmoEmbedding.set_mix_weights_requires_grad() can be used to make only the mix weights trainable).
:param requires_grad: bool, whether this layer needs gradients. Defaults to False.
:param float word_dropout: probability of replacing a word with unk, which both trains the unk vector and acts as a regularizer.
:param float dropout: probability of applying Dropout to the embedding output; 0.1 randomly zeroes 10% of the values.
:param cache_word_reprs: whether to cache word representations; if True, an embedding is generated for every word at
initialization and the character encoder is deleted, after which the cached embeddings are used directly. Defaults to False.
"""
def __init__(self, vocab: Vocabulary, model_dir_or_name: str='en',
layers: str='2', requires_grad: bool=False, cache_word_reprs: bool=False):
super(ElmoEmbedding, self).__init__(vocab)
layers = list(map(int, layers.split(',')))
assert len(layers) > 0, "Must choose one output"
for layer in layers:
assert 0 <= layer <= 2, "Layer index should be in range [0, 2]."
self.layers = layers
def __init__(self, vocab: Vocabulary, model_dir_or_name: str='en', layers: str='2', requires_grad: bool=False,
word_dropout=0.0, dropout=0.0, cache_word_reprs: bool=False):
super(ElmoEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout)

# check whether model_dir_or_name exists locally and download it if needed
if model_dir_or_name.lower() in PRETRAINED_ELMO_MODEL_DIR:
@@ -451,8 +529,49 @@ class ElmoEmbedding(ContextualEmbedding):
else:
raise ValueError(f"Cannot recognize {model_dir_or_name}.")
self.model = _ElmoModel(model_dir, vocab, cache_word_reprs=cache_word_reprs)

if layers=='mix':
self.layer_weights = nn.Parameter(torch.zeros(self.model.config['encoder']['n_layers']+1),
requires_grad=requires_grad)
self.gamma = nn.Parameter(torch.ones(1), requires_grad=requires_grad)
self._get_outputs = self._get_mixed_outputs
self._embed_size = self.model.config['encoder']['projection_dim'] * 2
else:
layers = list(map(int, layers.split(',')))
assert len(layers) > 0, "Must choose one output"
for layer in layers:
assert 0 <= layer <= 2, "Layer index should be in range [0, 2]."
self.layers = layers
self._get_outputs = self._get_layer_outputs
self._embed_size = len(self.layers) * self.model.config['encoder']['projection_dim'] * 2

self.requires_grad = requires_grad
self._embed_size = len(self.layers) * self.model.config['encoder']['projection_dim'] * 2

def _get_mixed_outputs(self, outputs):
# outputs: num_layers x batch_size x max_len x hidden_size
# return: batch_size x max_len x hidden_size
weights = F.softmax(self.layer_weights+1/len(outputs), dim=0).to(outputs)
outputs = torch.einsum('l,lbij->bij', weights, outputs)
return self.gamma.to(outputs)*outputs

def set_mix_weights_requires_grad(self, flag=True):
"""
When ElmoEmbedding is initialized with layers='mix', this method controls whether the mix weights are trainable. If layers is
not 'mix', calling it has no effect.
:param bool flag: whether the weights that mix the different layers' representations are trainable.
:return:
"""
if hasattr(self, 'layer_weights'):
self.layer_weights.requires_grad = flag
self.gamma.requires_grad = flag

def _get_layer_outputs(self, outputs):
if len(self.layers) == 1:
outputs = outputs[self.layers[0]]
else:
outputs = torch.cat(tuple([*outputs[self.layers]]), dim=-1)

return outputs
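
A usage sketch of the new 'mix' mode (illustrative only; the 'en' weights are resolved from the cache or downloaded as described in the docstring):

```
from fastNLP import Vocabulary
from fastNLP.modules.encoder.embedding import ElmoEmbedding

vocab = Vocabulary()
vocab.add_word_lst("this is a test sentence".split())

elmo = ElmoEmbedding(vocab, model_dir_or_name='en', layers='mix', requires_grad=False)
elmo.set_mix_weights_requires_grad(True)  # keep the ELMo weights frozen, train only the mix weights and gamma
```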

def forward(self, words: torch.LongTensor):
"""
@@ -463,19 +582,18 @@ class ElmoEmbedding(ContextualEmbedding):
:param words: batch_size x max_len
:return: torch.FloatTensor. batch_size x max_len x (512*len(self.layers))
"""
words = self.drop_word(words)
outputs = self._get_sent_reprs(words)
if outputs is not None:
return outputs
return self.dropout(outputs)
outputs = self.model(words)
if len(self.layers) == 1:
outputs = outputs[self.layers[0]]
else:
outputs = torch.cat([*outputs[self.layers]], dim=-1)

return outputs
outputs = self._get_outputs(outputs)
return self.dropout(outputs)

def _delete_model_weights(self):
del self.layers, self.model
for name in ['layers', 'model', 'layer_weights', 'gamma']:
if hasattr(self, name):
delattr(self, name)

@property
def requires_grad(self):
@@ -516,13 +634,16 @@ class BertEmbedding(ContextualEmbedding):
:param str layers: the layers that make up the final representation. Separate layer indices with ','; negative indices count
from the last layer.
:param str pool_method: in BERT each word is represented by several word pieces; this controls how a word's representation is
computed from its word pieces. Supports ``last``, ``first``, ``avg``, ``max``.
:param float word_dropout: probability of replacing a word with unk, which both trains the unk vector and acts as a regularizer.
:param float dropout: probability of applying Dropout to the embedding output; 0.1 randomly zeroes 10% of the values.
:param bool include_cls_sep: BERT adds [CLS] and [SEP] around a sentence when computing its representation; this controls
whether these two positions are kept in the output. Keeping them makes the word embedding two tokens longer than the input and
may cause problems when used with :class:`StackEmbedding`.
:param bool requires_grad: whether gradients are needed.
"""
def __init__(self, vocab: Vocabulary, model_dir_or_name: str='en-base-uncased', layers: str='-1',
pool_method: str='first', include_cls_sep: bool=False, requires_grad: bool=False):
super(BertEmbedding, self).__init__(vocab)
pool_method: str='first', word_dropout=0, dropout=0, requires_grad: bool=False,
include_cls_sep: bool=False):
super(BertEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout)

# check whether model_dir_or_name exists locally and download it if needed
if model_dir_or_name.lower() in PRETRAINED_BERT_MODEL_DIR:
@@ -553,13 +674,14 @@ class BertEmbedding(ContextualEmbedding):
:param torch.LongTensor words: [batch_size, max_len]
:return: torch.FloatTensor. batch_size x max_len x (768*len(self.layers))
"""
words = self.drop_word(words)
outputs = self._get_sent_reprs(words)
if outputs is not None:
return outputs
return self.dropout(outputs)
outputs = self.model(words)
outputs = torch.cat([*outputs], dim=-1)

return outputs
return self.dropout(outputs)
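
The pool_method options reduce a word's word-piece vectors to a single vector; the reduction itself is implemented elsewhere, so the following is only a conceptual sketch of what the four supported names mean:

```
import torch

def pool_pieces(piece_reprs, method='first'):
    # piece_reprs: num_pieces x hidden_size, the word pieces of a single word
    if method == 'first':
        return piece_reprs[0]
    if method == 'last':
        return piece_reprs[-1]
    if method == 'avg':
        return piece_reprs.mean(dim=0)
    if method == 'max':
        return piece_reprs.max(dim=0)[0]
    raise ValueError("Unsupported pool_method: " + method)
```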

@property
def requires_grad(self):
@@ -601,8 +723,8 @@ class CNNCharEmbedding(TokenEmbedding):
"""
Alias: :class:`fastNLP.modules.CNNCharEmbedding` :class:`fastNLP.modules.encoder.embedding.CNNCharEmbedding`

Uses a CNN to generate character embeddings. The pipeline is embed(x) -> Dropout(x) -> CNN(x) -> activation(x) -> pool
-> fc. The outputs of filters with different kernel sizes are concatenated.
Uses a CNN to generate character embeddings. The pipeline is embed(x) -> Dropout(x) -> CNN(x) -> activation(x) -> pool -> fc -> Dropout.
The outputs of filters with different kernel sizes are concatenated.

Example::

@@ -612,23 +734,24 @@ class CNNCharEmbedding(TokenEmbedding):
:param vocab: the vocabulary
:param embed_size: size of this word embedding. Defaults to 50.
:param char_emb_size: size of the character embedding; characters are collected from vocab. Defaults to 50.
:param dropout: dropout probability
:param float word_dropout: probability of replacing a word with unk, which both trains the unk vector and acts as a regularizer.
:param float dropout: dropout probability
:param filter_nums: number of filters; the length must match kernels. Defaults to [40, 30, 20].
:param kernel_sizes: kernel sizes. Defaults to [5, 3, 1].
:param pool_method: pooling method used when merging character representations into one vector; supports 'avg', 'max'.
:param activation: activation applied after the CNN; supports 'relu', 'sigmoid', 'tanh' or a custom callable.
:param min_char_freq: minimum character frequency. Defaults to 2.
"""
def __init__(self, vocab: Vocabulary, embed_size: int=50, char_emb_size: int=50, dropout:float=0.5,
filter_nums: List[int]=(40, 30, 20), kernel_sizes: List[int]=(5, 3, 1), pool_method: str='max',
activation='relu', min_char_freq: int=2):
super(CNNCharEmbedding, self).__init__(vocab)
def __init__(self, vocab: Vocabulary, embed_size: int=50, char_emb_size: int=50, word_dropout:float=0,
dropout:float=0.5, filter_nums: List[int]=(40, 30, 20), kernel_sizes: List[int]=(5, 3, 1),
pool_method: str='max', activation='relu', min_char_freq: int=2):
super(CNNCharEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout)

for kernel in kernel_sizes:
assert kernel % 2 == 1, "Only odd kernel is allowed."

assert pool_method in ('max', 'avg')
self.dropout = nn.Dropout(dropout, inplace=True)
self.dropout = nn.Dropout(dropout)
self.pool_method = pool_method
# activation function
if isinstance(activation, str):
@@ -678,6 +801,7 @@ class CNNCharEmbedding(TokenEmbedding):
:param words: [batch_size, max_len]
:return: [batch_size, max_len, embed_size]
"""
words = self.drop_word(words)
batch_size, max_len = words.size()
chars = self.words_to_chars_embedding[words] # batch_size x max_len x max_word_len
word_lengths = self.word_lengths[words] # batch_size x max_len
@@ -700,7 +824,7 @@ class CNNCharEmbedding(TokenEmbedding):
conv_chars = conv_chars.masked_fill(chars_masks.unsqueeze(-1), 0)
chars = torch.sum(conv_chars, dim=-2)/chars_masks.eq(0).sum(dim=-1, keepdim=True).float()
chars = self.fc(chars)
return chars
return self.dropout(chars)
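
A usage sketch with the new word_dropout/dropout arguments (the values are arbitrary; defaults follow the docstring):

```
from fastNLP import Vocabulary
from fastNLP.modules.encoder.embedding import CNNCharEmbedding

vocab = Vocabulary()
vocab.add_word_lst("character level features help with rare words".split())

char_embed = CNNCharEmbedding(vocab, embed_size=50, char_emb_size=50,
                              filter_nums=[40, 30, 20], kernel_sizes=[5, 3, 1],
                              pool_method='max', word_dropout=0.01, dropout=0.5)
```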

@property
def requires_grad(self):
@@ -747,6 +871,7 @@ class LSTMCharEmbedding(TokenEmbedding):
:param vocab: the vocabulary
:param embed_size: size of the embedding. Defaults to 50.
:param char_emb_size: size of the character embedding. Defaults to 50.
:param float word_dropout: probability of replacing a word with unk, which both trains the unk vector and acts as a regularizer.
:param dropout: dropout probability
:param hidden_size: hidden size of the LSTM; when bidirectional, the hidden size is halved per direction. Defaults to 50.
:param pool_method: supports 'max', 'avg'
@@ -754,15 +879,16 @@ class LSTMCharEmbedding(TokenEmbedding):
:param min_char_freq: minimum character frequency. Defaults to 2.
:param bidirectional: whether to encode with a bidirectional LSTM. Defaults to True.
"""
def __init__(self, vocab: Vocabulary, embed_size: int=50, char_emb_size: int=50, dropout:float=0.5, hidden_size=50,
pool_method: str='max', activation='relu', min_char_freq: int=2, bidirectional=True):
def __init__(self, vocab: Vocabulary, embed_size: int=50, char_emb_size: int=50, word_dropout:float=0,
dropout:float=0.5, hidden_size=50,pool_method: str='max', activation='relu', min_char_freq: int=2,
bidirectional=True):
super(LSTMCharEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout)

assert hidden_size % 2 == 0, "Only an even hidden_size is allowed."

assert pool_method in ('max', 'avg')
self.pool_method = pool_method
self.dropout = nn.Dropout(dropout, inplace=True)
self.dropout = nn.Dropout(dropout)
# activation function
if isinstance(activation, str):
if activation.lower() == 'relu':
@@ -811,6 +937,7 @@ class LSTMCharEmbedding(TokenEmbedding):
:param words: [batch_size, max_len]
:return: [batch_size, max_len, embed_size]
"""
words = self.drop_word(words)
batch_size, max_len = words.size()
chars = self.words_to_chars_embedding[words] # batch_size x max_len x max_word_len
word_lengths = self.word_lengths[words] # batch_size x max_len
@@ -835,7 +962,7 @@ class LSTMCharEmbedding(TokenEmbedding):

chars = self.fc(chars)

return chars
return self.dropout(chars)

@property
def requires_grad(self):
@@ -874,17 +1001,21 @@ class StackEmbedding(TokenEmbedding):


:param embeds: a list of TokenEmbedding objects; every TokenEmbedding must use the same vocabulary
:param float word_dropout: probability of replacing a word with unk, which both trains the unk vector and acts as a regularizer.
The different embeddings are set to unknown at the same positions. If dropout is set here, the member embeddings should not set
their own dropout again.
:param float dropout: probability of applying Dropout to the embedding output; 0.1 randomly zeroes 10% of the values.

"""
def __init__(self, embeds: List[TokenEmbedding]):
def __init__(self, embeds: List[TokenEmbedding], word_dropout=0, dropout=0):
vocabs = []
for embed in embeds:
vocabs.append(embed.get_word_vocab())
if hasattr(embed, 'get_word_vocab'):
vocabs.append(embed.get_word_vocab())
_vocab = vocabs[0]
for vocab in vocabs[1:]:
assert vocab == _vocab, "All embeddings should use the same word vocabulary."
assert vocab == _vocab, "All embeddings in StackEmbedding should use the same word vocabulary."

super(StackEmbedding, self).__init__(_vocab)
super(StackEmbedding, self).__init__(_vocab, word_dropout=word_dropout, dropout=dropout)
assert isinstance(embeds, list)
for embed in embeds:
assert isinstance(embed, TokenEmbedding), "Only TokenEmbedding type is supported."
@@ -936,7 +1067,9 @@ class StackEmbedding(TokenEmbedding):
:return: the output shape depends on the embeddings that make up this stack embedding
"""
outputs = []
words = self.drop_word(words)
for embed in self.embeds:
outputs.append(embed(words))
return torch.cat(outputs, dim=-1)
outputs = self.dropout(torch.cat(outputs, dim=-1))
return outputs
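
A usage sketch of stacking a word-level and a character-level embedding (illustrative only; the glove name and dimensions follow the docstrings above):

```
from fastNLP import Vocabulary
from fastNLP.modules.encoder.embedding import StaticEmbedding, CNNCharEmbedding, StackEmbedding

vocab = Vocabulary()
vocab.add_word_lst("stack word and character embeddings".split())

word_embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50')
char_embed = CNNCharEmbedding(vocab, embed_size=30)
embed = StackEmbedding([word_embed, char_embed], word_dropout=0.01, dropout=0.3)
# embed(words) concatenates along the last dimension: 50 + 30 = 80 per token
```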


+ 8
- 5
fastNLP/modules/encoder/star_transformer.py

@@ -35,11 +35,13 @@ class StarTransformer(nn.Module):
self.iters = num_layers
self.norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(self.iters)])
self.emb_fc = nn.Conv2d(hidden_size, hidden_size, 1)
self.emb_drop = nn.Dropout(dropout)
self.ring_att = nn.ModuleList(
[_MSA1(hidden_size, nhead=num_head, head_dim=head_dim, dropout=dropout)
[_MSA1(hidden_size, nhead=num_head, head_dim=head_dim, dropout=0.0)
for _ in range(self.iters)])
self.star_att = nn.ModuleList(
[_MSA2(hidden_size, nhead=num_head, head_dim=head_dim, dropout=dropout)
[_MSA2(hidden_size, nhead=num_head, head_dim=head_dim, dropout=0.0)
for _ in range(self.iters)])
if max_len is not None:
@@ -66,18 +68,19 @@ class StarTransformer(nn.Module):
smask = torch.cat([torch.zeros(B, 1, ).byte().to(mask), mask], 1)
embs = data.permute(0, 2, 1)[:, :, :, None] # B H L 1
if self.pos_emb:
if self.pos_emb and False:
P = self.pos_emb(torch.arange(L, dtype=torch.long, device=embs.device) \
.view(1, L)).permute(0, 2, 1).contiguous()[:, :, :, None] # 1 H L 1
embs = embs + P
embs = norm_func(self.emb_drop, embs)
nodes = embs
relay = embs.mean(2, keepdim=True)
ex_mask = mask[:, None, :, None].expand(B, H, L, 1)
r_embs = embs.view(B, H, 1, L)
for i in range(self.iters):
ax = torch.cat([r_embs, relay.expand(B, H, 1, L)], 2)
nodes = nodes + F.leaky_relu(self.ring_att[i](norm_func(self.norm[i], nodes), ax=ax))
nodes = F.leaky_relu(self.ring_att[i](norm_func(self.norm[i], nodes), ax=ax))
#nodes = F.leaky_relu(self.ring_att[i](nodes, ax=ax))
relay = F.leaky_relu(self.star_att[i](relay, torch.cat([relay, nodes], 2), smask))
nodes = nodes.masked_fill_(ex_mask, 0)
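
A usage sketch of the encoder after this change (the constructor arguments mirror the ones visible in this diff; the (nodes, relay) return pair is an assumption about the implementation, which is not shown here):

```
import torch
from fastNLP.modules.encoder.star_transformer import StarTransformer

encoder = StarTransformer(hidden_size=300, num_layers=4, num_head=6,
                          head_dim=50, dropout=0.1, max_len=512)
x = torch.randn(2, 10, 300)      # batch x seq_len x hidden
mask = torch.ones(2, 10).byte()  # 1 marks real tokens
nodes, relay = encoder(x, mask)
```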


+ 0
- 110
reproduction/CNN-sentence_classification/.gitignore

@@ -1,110 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache

#custom
GoogleNews-vectors-negative300.bin/
GoogleNews-vectors-negative300.bin.gz
models/
*.swp

+ 0
- 77
reproduction/CNN-sentence_classification/README.md

@@ -1,77 +0,0 @@
## Introduction
This is the implementation of [Convolutional Neural Networks for Sentence Classification](https://arxiv.org/abs/1408.5882) paper in PyTorch.
* MRDataset, non-static model (word2vec trained by Mikolov et al. (2013) on 100 billion words of Google News)
* It can be run in both CPU and GPU
* The best accuracy is 82.61%, which is better than 81.5% in the paper
(by Jingyuan Liu @Fudan University; Email:(fdjingyuan@outlook.com) Welcome to discussion!)

## Requirement
* python 3.6
* pytorch > 0.1
* numpy
* gensim

## Run
STEP 1
install packages such as gensim (the other required packages are installed the same way)
```
pip install gensim
```

STEP 2
download the MR dataset and word2vec resources
* MRdataset: you can download the dataset in (https://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz)
* word2vec: you can download the file in (https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit)

Since this file is more than 1.5G, it is not included in the repository. After downloading it, remember to modify the path in the function def word_embeddings(path = './GoogleNews-vectors-negative300.bin/'):


STEP 3
train the model
```
python train.py
```
you will get the information printed in the screen, like
```
Epoch [1/20], Iter [100/192] Loss: 0.7008
Test Accuracy: 71.869159 %
Epoch [2/20], Iter [100/192] Loss: 0.5957
Test Accuracy: 75.700935 %
Epoch [3/20], Iter [100/192] Loss: 0.4934
Test Accuracy: 78.130841 %

......
Epoch [20/20], Iter [100/192] Loss: 0.0364
Test Accuracy: 81.495327 %
Best Accuracy: 82.616822 %
Best Model: models/cnn.pkl
```

## Hyperparameters
According to the paper and experiment, I set:

|Epoch|Kernel Size|dropout|learning rate|batch size|
|---|---|---|---|---|
|20|\(h,300,100\)|0.5|0.0001|50|

h = [3,4,5]
If the accuracy does not improve, the learning rate is multiplied by 0.8.

## Result
I just tried one dataset : MR. (Other 6 dataset in paper SST-1, SST-2, TREC, CR, MPQA)
There are four models in the paper: CNN-rand, CNN-static, CNN-non-static, CNN-multichannel.
I have tried CNN-non-static: a model with pre-trained vectors from word2vec.
All words, including the unknown ones that are randomly initialized, and the pretrained vectors are fine-tuned for each task
(this variant achieves almost the best performance and is the most difficult to implement among the four models).

|Dataset|Class Size|Best Result|Kim's Paper Result|
|---|---|---|---|
|MR|2|82.617%(CNN-non-static)|81.5%(CNN-nonstatic)|



## Reference
* [Convolutional Neural Networks for Sentence Classification](https://arxiv.org/abs/1408.5882)
* https://github.com/Shawn1993/cnn-text-classification-pytorch
* https://github.com/junwang4/CNN-sentence-classification-pytorch-2017/blob/master/utils.py


+ 0
- 136
reproduction/CNN-sentence_classification/dataset.py

@@ -1,136 +0,0 @@
import codecs
import random
import re

import gensim
import numpy as np
from gensim import corpora
from torch.utils.data import Dataset


def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip()


def pad_sentences(sentence, padding_word=" <PAD/>"):
sequence_length = 64
sent = sentence.split()
padded_sentence = sentence + padding_word * (sequence_length - len(sent))
return padded_sentence


# data loader
class MRDataset(Dataset):
def __init__(self):

# load positive and negative sentenses from files
with codecs.open("./rt-polaritydata/rt-polarity.pos", encoding='ISO-8859-1') as f:
positive_examples = list(f.readlines())
with codecs.open("./rt-polaritydata/rt-polarity.neg", encoding='ISO-8859-1') as f:
negative_examples = list(f.readlines())
# s.strip: clear "\n"; clear_str; pad
positive_examples = [pad_sentences(clean_str(s.strip())) for s in positive_examples]
negative_examples = [pad_sentences(clean_str(s.strip())) for s in negative_examples]
self.examples = positive_examples + negative_examples
self.sentences_texts = [sample.split() for sample in self.examples]

# word dictionary
dictionary = corpora.Dictionary(self.sentences_texts)
self.word2id_dict = dictionary.token2id # transform to dict, like {"human":0, "a":1,...}

# set lables: postive is 1; negative is 0
positive_labels = [1 for _ in positive_examples]
negative_labels = [0 for _ in negative_examples]
self.lables = positive_labels + negative_labels
examples_lables = list(zip(self.examples, self.lables))
random.shuffle(examples_lables)
self.MRDataset_frame = examples_lables

# transform word to id
self.MRDataset_wordid = \
[(
np.array([self.word2id_dict[word] for word in sent[0].split()], dtype=np.int64),
sent[1]
) for sent in self.MRDataset_frame]

def word_embeddings(self, path="./GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin"):
# establish from google
model = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)

print('Please wait ... (it could take a while to load the file : {})'.format(path))
word_dict = self.word2id_dict
embedding_weights = np.random.uniform(-0.25, 0.25, (len(self.word2id_dict), 300))

for word in word_dict:
word_id = word_dict[word]
if word in model.wv.vocab:
embedding_weights[word_id, :] = model[word]
return embedding_weights

def __len__(self):
return len(self.MRDataset_frame)

def __getitem__(self, idx):

sample = self.MRDataset_wordid[idx]
return sample

def getsent(self, idx):

sample = self.MRDataset_wordid[idx][0]
return sample

def getlabel(self, idx):

label = self.MRDataset_wordid[idx][1]
return label

def word2id(self):

return self.word2id_dict

def id2word(self):

id2word_dict = dict([val, key] for key, val in self.word2id_dict.items())
return id2word_dict


class train_set(Dataset):

def __init__(self, samples):
self.train_frame = samples

def __len__(self):
return len(self.train_frame)

def __getitem__(self, idx):
return self.train_frame[idx]


class test_set(Dataset):

def __init__(self, samples):
self.test_frame = samples

def __len__(self):
return len(self.test_frame)

def __getitem__(self, idx):
return self.test_frame[idx]

+ 0
- 42
reproduction/CNN-sentence_classification/model.py

@@ -1,42 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class CNN_text(nn.Module):
def __init__(self, kernel_h=[3, 4, 5], kernel_num=100, embed_num=1000, embed_dim=300, num_classes=2, dropout=0.5,
L2_constrain=3,
pretrained_embeddings=None):
super(CNN_text, self).__init__()

self.embedding = nn.Embedding(embed_num, embed_dim)
self.dropout = nn.Dropout(dropout)
if pretrained_embeddings is not None:
self.embedding.weight.data.copy_(torch.from_numpy(pretrained_embeddings))

# the network structure
# Conv2d: input- N,C,H,W output- (50,100,62,1)
self.conv1 = nn.ModuleList([nn.Conv2d(1, kernel_num, (K, embed_dim)) for K in kernel_h])
self.fc1 = nn.Linear(len(kernel_h) * kernel_num, num_classes)

def max_pooling(self, x):
x = F.relu(self.conv1(x)).squeeze(3) # N,C,L - (50,100,62)
x = F.max_pool1d(x, x.size(2)).squeeze(2)
# x.size(2)=62 squeeze: (50,100,1) -> (50,100)
return x

def forward(self, x):
x = self.embedding(x) # output: (N,H,W) = (50,64,300)
x = x.unsqueeze(1) # (N,C,H,W)
x = [F.relu(conv(x)).squeeze(3) for conv in self.conv1] # [N, C, H(50,100,62),(50,100,61),(50,100,60)]
x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [N,C(50,100),(50,100),(50,100)]
x = torch.cat(x, 1)
x = self.dropout(x)
x = self.fc1(x)
return x


if __name__ == '__main__':
model = CNN_text(kernel_h=[1, 2, 3, 4], embed_num=3, embed_dim=2)
x = torch.LongTensor([[1, 2, 1, 2, 0]])
print(model(x))

+ 0
- 92
reproduction/CNN-sentence_classification/train.py

@@ -1,92 +0,0 @@
import os

import torch
import torch.nn as nn
from torch.autograd import Variable

from . import dataset as dst
from .model import CNN_text

# Hyper Parameters
batch_size = 50
learning_rate = 0.0001
num_epochs = 20
cuda = True

# split Dataset
dataset = dst.MRDataset()
length = len(dataset)

train_dataset = dataset[:int(0.9 * length)]
test_dataset = dataset[int(0.9 * length):]

train_dataset = dst.train_set(train_dataset)
test_dataset = dst.test_set(test_dataset)

# Data Loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)

# cnn

cnn = CNN_text(embed_num=len(dataset.word2id()), pretrained_embeddings=dataset.word_embeddings())
if cuda:
cnn.cuda()

# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

# train and test
best_acc = None

for epoch in range(num_epochs):
# Train the Model
cnn.train()
for i, (sents, labels) in enumerate(train_loader):
sents = Variable(sents)
labels = Variable(labels)
if cuda:
sents = sents.cuda()
labels = labels.cuda()
optimizer.zero_grad()
outputs = cnn(sents)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()

if (i + 1) % 100 == 0:
print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
% (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.data[0]))

# Test the Model
cnn.eval()
correct = 0
total = 0
for sents, labels in test_loader:
sents = Variable(sents)
if cuda:
sents = sents.cuda()
labels = labels.cuda()
outputs = cnn(sents)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
acc = 100. * correct / total
print('Test Accuracy: %f %%' % (acc))

if best_acc is None or acc > best_acc:
best_acc = acc
if os.path.exists("models") is False:
os.makedirs("models")
torch.save(cnn.state_dict(), 'models/cnn.pkl')
else:
learning_rate = learning_rate * 0.8

print("Best Accuracy: %f %%" % best_acc)
print("Best Model: models/cnn.pkl")

+ 0
- 21
reproduction/Char-aware_NLM/LICENSE

@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2017

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

+ 0
- 40
reproduction/Char-aware_NLM/README.md

@@ -1,40 +0,0 @@

# PyTorch-Character-Aware-Neural-Language-Model

This is the PyTorch implementation of character-aware neural language model proposed in this [paper](https://arxiv.org/abs/1508.06615) by Yoon Kim.

## Requirements
The code is run and tested with **Python 3.5.2** and **PyTorch 0.3.1**.

## HyperParameters
| HyperParam | value |
| ------ | :-------|
| LSTM batch size | 20 |
| LSTM sequence length | 35 |
| LSTM hidden units | 300 |
| epochs | 35 |
| initial learning rate | 1.0 |
| character embedding dimension | 15 |

## Demo
Train the model with split train/valid/test data.

`python train.py`

The trained model will be saved in `cache/net.pkl`.
Test the model.

`python test.py`

Best result on test set:
PPl=127.2163
cross entropy loss=4.8459

## Acknowledgement
This implementation borrowed ideas from

https://github.com/jarfo/kchar

https://github.com/cronos123/Character-Aware-Neural-Language-Models



+ 0
- 9
reproduction/Char-aware_NLM/main.py

@@ -1,9 +0,0 @@
PICKLE = "./save/"


def train():
pass


if __name__ == "__main__":
train()

+ 0
- 145
reproduction/Char-aware_NLM/model.py

@@ -1,145 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class Highway(nn.Module):
"""Highway network"""

def __init__(self, input_size):
super(Highway, self).__init__()
self.fc1 = nn.Linear(input_size, input_size, bias=True)
self.fc2 = nn.Linear(input_size, input_size, bias=True)

def forward(self, x):
t = F.sigmoid(self.fc1(x))
return torch.mul(t, F.relu(self.fc2(x))) + torch.mul(1 - t, x)


class charLM(nn.Module):
"""CNN + highway network + LSTM
# Input:
4D tensor with shape [batch_size, in_channel, height, width]
# Output:
2D Tensor with shape [batch_size, vocab_size]
# Arguments:
char_emb_dim: the size of each character's embedding
word_emb_dim: the size of each word's embedding
vocab_size: num of unique words
num_char: num of characters
use_gpu: True or False
"""

def __init__(self, char_emb_dim, word_emb_dim,
vocab_size, num_char, use_gpu):
super(charLM, self).__init__()
self.char_emb_dim = char_emb_dim
self.word_emb_dim = word_emb_dim
self.vocab_size = vocab_size

# char attention layer
self.char_embed = nn.Embedding(num_char, char_emb_dim)

# convolutions of filters with different sizes
self.convolutions = []

# list of tuples: (the number of filter, width)
self.filter_num_width = [(25, 1), (50, 2), (75, 3), (100, 4), (125, 5), (150, 6)]

for out_channel, filter_width in self.filter_num_width:
self.convolutions.append(
nn.Conv2d(
1, # in_channel
out_channel, # out_channel
kernel_size=(char_emb_dim, filter_width), # (height, width)
bias=True
)
)

self.highway_input_dim = sum([x for x, y in self.filter_num_width])

self.batch_norm = nn.BatchNorm1d(self.highway_input_dim, affine=False)

# highway net
self.highway1 = Highway(self.highway_input_dim)
self.highway2 = Highway(self.highway_input_dim)

# LSTM
self.lstm_num_layers = 2

self.lstm = nn.LSTM(input_size=self.highway_input_dim,
hidden_size=self.word_emb_dim,
num_layers=self.lstm_num_layers,
bias=True,
dropout=0.5,
batch_first=True)

# output layer
self.dropout = nn.Dropout(p=0.5)
self.linear = nn.Linear(self.word_emb_dim, self.vocab_size)

if use_gpu is True:
for x in range(len(self.convolutions)):
self.convolutions[x] = self.convolutions[x].cuda()
self.highway1 = self.highway1.cuda()
self.highway2 = self.highway2.cuda()
self.lstm = self.lstm.cuda()
self.dropout = self.dropout.cuda()
self.char_embed = self.char_embed.cuda()
self.linear = self.linear.cuda()
self.batch_norm = self.batch_norm.cuda()

def forward(self, x, hidden):
# Input: Variable of Tensor with shape [num_seq, seq_len, max_word_len+2]
# Return: Variable of Tensor with shape [num_words, len(word_dict)]
lstm_batch_size = x.size()[0]
lstm_seq_len = x.size()[1]

x = x.contiguous().view(-1, x.size()[2])
# [num_seq*seq_len, max_word_len+2]

x = self.char_embed(x)
# [num_seq*seq_len, max_word_len+2, char_emb_dim]

x = torch.transpose(x.view(x.size()[0], 1, x.size()[1], -1), 2, 3)
# [num_seq*seq_len, 1, max_word_len+2, char_emb_dim]

x = self.conv_layers(x)
# [num_seq*seq_len, total_num_filters]

x = self.batch_norm(x)
# [num_seq*seq_len, total_num_filters]

x = self.highway1(x)
x = self.highway2(x)
# [num_seq*seq_len, total_num_filters]

x = x.contiguous().view(lstm_batch_size, lstm_seq_len, -1)
# [num_seq, seq_len, total_num_filters]

x, hidden = self.lstm(x, hidden)
# [seq_len, num_seq, hidden_size]

x = self.dropout(x)
# [seq_len, num_seq, hidden_size]

x = x.contiguous().view(lstm_batch_size * lstm_seq_len, -1)
# [num_seq*seq_len, hidden_size]

x = self.linear(x)
# [num_seq*seq_len, vocab_size]
return x, hidden

def conv_layers(self, x):
chosen_list = list()
for conv in self.convolutions:
feature_map = F.tanh(conv(x))
# (batch_size, out_channel, 1, max_word_len-width+1)
chosen = torch.max(feature_map, 3)[0]
# (batch_size, out_channel, 1)
chosen = chosen.squeeze()
# (batch_size, out_channel)
chosen_list.append(chosen)

# (batch_size, total_num_filers)
return torch.cat(chosen_list, 1)

+ 0
- 117
reproduction/Char-aware_NLM/test.py

@@ -1,117 +0,0 @@
import os
from collections import namedtuple

import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from utilities import *


def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)


def test(net, data, opt):
net.eval()

test_input = torch.from_numpy(data.test_input)
test_label = torch.from_numpy(data.test_label)

num_seq = test_input.size()[0] // opt.lstm_seq_len
test_input = test_input[:num_seq * opt.lstm_seq_len, :]
# [num_seq, seq_len, max_word_len+2]
test_input = test_input.view(-1, opt.lstm_seq_len, opt.max_word_len + 2)

criterion = nn.CrossEntropyLoss()

loss_list = []
num_hits = 0
total = 0
iterations = test_input.size()[0] // opt.lstm_batch_size
test_generator = batch_generator(test_input, opt.lstm_batch_size)
label_generator = batch_generator(test_label, opt.lstm_batch_size * opt.lstm_seq_len)

hidden = (to_var(torch.zeros(2, opt.lstm_batch_size, opt.word_embed_dim)),
to_var(torch.zeros(2, opt.lstm_batch_size, opt.word_embed_dim)))

add_loss = 0.0
for t in range(iterations):
batch_input = test_generator.__next__()
batch_label = label_generator.__next__()

net.zero_grad()
hidden = [state.detach() for state in hidden]
test_output, hidden = net(to_var(batch_input), hidden)

test_loss = criterion(test_output, to_var(batch_label)).data
loss_list.append(test_loss)
add_loss += test_loss

print("Test Loss={0:.4f}".format(float(add_loss) / iterations))
print("Test PPL={0:.4f}".format(float(np.exp(add_loss / iterations))))


#############################################################

if __name__ == "__main__":

word_embed_dim = 300
char_embedding_dim = 15

if os.path.exists("cache/prep.pt") is False:
print("Cannot find prep.pt")

objetcs = torch.load("cache/prep.pt")

word_dict = objetcs["word_dict"]
char_dict = objetcs["char_dict"]
reverse_word_dict = objetcs["reverse_word_dict"]
max_word_len = objetcs["max_word_len"]
num_words = len(word_dict)

print("word/char dictionary built. Start making inputs.")

if os.path.exists("cache/data_sets.pt") is False:

test_text = read_data("./test.txt")
test_set = np.array(text2vec(test_text, char_dict, max_word_len))

# Labels are next-word index in word_dict with the same length as inputs
test_label = np.array([word_dict[w] for w in test_text[1:]] + [word_dict[test_text[-1]]])

category = {"test": test_set, "tlabel": test_label}
torch.save(category, "cache/data_sets.pt")
else:
data_sets = torch.load("cache/data_sets.pt")
test_set = data_sets["test"]
test_label = data_sets["tlabel"]
train_set = data_sets["tdata"]
train_label = data_sets["trlabel"]

DataTuple = namedtuple("DataTuple", "test_input test_label train_input train_label ")
data = DataTuple(test_input=test_set,
test_label=test_label, train_label=train_label, train_input=train_set)

print("Loaded data sets. Start building network.")

USE_GPU = True
cnn_batch_size = 700
lstm_seq_len = 35
lstm_batch_size = 20

net = torch.load("cache/net.pkl")

Options = namedtuple("Options", ["cnn_batch_size", "lstm_seq_len",
"max_word_len", "lstm_batch_size", "word_embed_dim"])
opt = Options(cnn_batch_size=lstm_seq_len * lstm_batch_size,
lstm_seq_len=lstm_seq_len,
max_word_len=max_word_len,
lstm_batch_size=lstm_batch_size,
word_embed_dim=word_embed_dim)

print("Network built. Start testing.")

test(net, data, opt)

+ 0
- 320
reproduction/Char-aware_NLM/test.txt

@@ -1,320 +0,0 @@
no it was n't black monday
but while the new york stock exchange did n't fall apart friday as the dow jones industrial average plunged N points most of it in the final hour it barely managed to stay this side of chaos
some circuit breakers installed after the october N crash failed their first test traders say unable to cool the selling panic in both stocks and futures
the N stock specialist firms on the big board floor the buyers and sellers of last resort who were criticized after the N crash once again could n't handle the selling pressure
big investment banks refused to step up to the plate to support the beleaguered floor traders by buying big blocks of stock traders say
heavy selling of standard & poor 's 500-stock index futures in chicago <unk> beat stocks downward
seven big board stocks ual amr bankamerica walt disney capital cities\/abc philip morris and pacific telesis group stopped trading and never resumed
the <unk> has already begun
the equity market was <unk>
once again the specialists were not able to handle the imbalances on the floor of the new york stock exchange said christopher <unk> senior vice president at <unk> securities corp
<unk> james <unk> chairman of specialists henderson brothers inc. it is easy to say the specialist is n't doing his job
when the dollar is in a <unk> even central banks ca n't stop it
speculators are calling for a degree of liquidity that is not there in the market
many money managers and some traders had already left their offices early friday afternoon on a warm autumn day because the stock market was so quiet
then in a <unk> plunge the dow jones industrials in barely an hour surrendered about a third of their gains this year <unk> up a 190.58-point or N N loss on the day in <unk> trading volume
<unk> trading accelerated to N million shares a record for the big board
at the end of the day N million shares were traded
the dow jones industrials closed at N
the dow 's decline was second in point terms only to the <unk> black monday crash that occurred oct. N N
in percentage terms however the dow 's dive was the <unk> ever and the sharpest since the market fell N or N N a week after black monday
the dow fell N N on black monday
shares of ual the parent of united airlines were extremely active all day friday reacting to news and rumors about the proposed $ N billion buy-out of the airline by an <unk> group
wall street 's takeover-stock speculators or risk arbitragers had placed unusually large bets that a takeover would succeed and ual stock would rise
at N p.m. edt came the <unk> news the big board was <unk> trading in ual pending news
on the exchange floor as soon as ual stopped trading we <unk> for a panic said one top floor trader
several traders could be seen shaking their heads when the news <unk>
for weeks the market had been nervous about takeovers after campeau corp. 's cash crunch spurred concern about the prospects for future highly leveraged takeovers
and N minutes after the ual trading halt came news that the ual group could n't get financing for its bid
at this point the dow was down about N points
the market <unk>
arbitragers could n't dump their ual stock but they rid themselves of nearly every rumor stock they had
for example their selling caused trading halts to be declared in usair group which closed down N N to N N delta air lines which fell N N to N N and <unk> industries which sank N to N N
these stocks eventually reopened
but as panic spread speculators began to sell blue-chip stocks such as philip morris and international business machines to offset their losses
when trading was halted in philip morris the stock was trading at N down N N while ibm closed N N lower at N
selling <unk> because of waves of automatic stop-loss orders which are triggered by computer when prices fall to certain levels
most of the stock selling pressure came from wall street professionals including computer-guided program traders
traders said most of their major institutional investors on the other hand sat tight
now at N one of the market 's post-crash reforms took hold as the s&p N futures contract had plunged N points equivalent to around a <unk> drop in the dow industrials
under an agreement signed by the big board and the chicago mercantile exchange trading was temporarily halted in chicago
after the trading halt in the s&p N pit in chicago waves of selling continued to hit stocks themselves on the big board and specialists continued to <unk> prices down
as a result the link between the futures and stock markets <unk> apart
without the <unk> of stock-index futures the barometer of where traders think the overall stock market is headed many traders were afraid to trust stock prices quoted on the big board
the futures halt was even <unk> by big board floor traders
it <unk> things up said one major specialist
this confusion effectively halted one form of program trading stock index arbitrage that closely links the futures and stock markets and has been blamed by some for the market 's big swings
in a stock-index arbitrage sell program traders buy or sell big baskets of stocks and offset the trade in futures to lock in a price difference
when the airline information came through it <unk> every model we had for the marketplace said a managing director at one of the largest program-trading firms
we did n't even get a chance to do the programs we wanted to do
but stocks kept falling
the dow industrials were down N points at N p.m. before the <unk> halt
at N p.m. at the end of the cooling off period the average was down N points
meanwhile during the the s&p trading halt s&p futures sell orders began <unk> up while stocks in new york kept falling sharply
big board chairman john j. phelan said yesterday the circuit breaker worked well <unk>
i just think it 's <unk> at this point to get into a debate if index arbitrage would have helped or hurt things
under another post-crash system big board president richard <unk> mr. phelan was flying to <unk> as the market was falling was talking on an <unk> hot line to the other exchanges the securities and exchange commission and the federal reserve board
he <unk> out at a high-tech <unk> center on the floor of the big board where he could watch <unk> on prices and pending stock orders
at about N p.m. edt s&p futures resumed trading and for a brief time the futures and stock markets started to come back in line
buyers stepped in to the futures pit
but the <unk> of s&p futures sell orders weighed on the market and the link with stocks began to fray again
at about N the s&p market <unk> to still another limit of N points down and trading was locked again
futures traders say the s&p was <unk> that the dow could fall as much as N points
during this time small investors began ringing their brokers wondering whether another crash had begun
at prudential-bache securities inc. which is trying to cater to small investors some <unk> brokers thought this would be the final <unk>
that 's when george l. ball chairman of the prudential insurance co. of america unit took to the internal <unk> system to declare that the plunge was only mechanical
i have a <unk> that this particular decline today is something more <unk> about less
it would be my <unk> to advise clients not to sell to look for an opportunity to buy mr. ball told the brokers
at merrill lynch & co. the nation 's biggest brokerage firm a news release was prepared <unk> merrill lynch comments on market drop
the release cautioned that there are significant differences between the current environment and that of october N and that there are still attractive investment opportunities in the stock market
however jeffrey b. lane president of shearson lehman hutton inc. said that friday 's plunge is going to set back relations with customers because it <unk> the concern of volatility
and i think a lot of people will <unk> on program trading
it 's going to bring the debate right back to the <unk>
as the dow average ground to its final N loss friday the s&p pit stayed locked at its <unk> trading limit
jeffrey <unk> of program trader <unk> investment group said N s&p contracts were for sale on the close the equivalent of $ N million in stock
but there were no buyers
while friday 's debacle involved mainly professional traders rather than investors it left the market vulnerable to continued selling this morning traders said
stock-index futures contracts settled at much lower prices than indexes of the stock market itself
at those levels stocks are set up to be <unk> by index arbitragers who lock in profits by buying futures when futures prices fall and simultaneously sell off stocks
but nobody knows at what level the futures and stocks will open today
the <unk> between the stock and futures markets friday will undoubtedly cause renewed debate about whether wall street is properly prepared for another crash situation
the big board 's mr. <unk> said our <unk> performance was good
but the exchange will look at the performance of all specialists in all stocks
obviously we 'll take a close look at any situation in which we think the <unk> obligations were n't met he said
see related story fed ready to <unk> big funds wsj oct. N N
but specialists complain privately that just as in the N crash the <unk> firms big investment banks that support the market by trading big blocks of stock stayed on the sidelines during friday 's <unk>
mr. phelan said it will take another day or two to analyze who was buying and selling friday
concerning your sept. N page-one article on prince charles and the <unk> it 's a few hundred years since england has been a kingdom
it 's now the united kingdom of great britain and northern ireland <unk> <unk> northern ireland scotland and oh yes england too
just thought you 'd like to know
george <unk>
ports of call inc. reached agreements to sell its remaining seven aircraft to buyers that were n't disclosed
the agreements bring to a total of nine the number of planes the travel company has sold this year as part of a restructuring
the company said a portion of the $ N million realized from the sales will be used to repay its bank debt and other obligations resulting from the currently suspended <unk> operations
earlier the company announced it would sell its aging fleet of boeing co. <unk> because of increasing maintenance costs
a consortium of private investors operating as <unk> funding co. said it has made a $ N million cash bid for most of l.j. hooker corp. 's real-estate and <unk> holdings
the $ N million bid includes the assumption of an estimated $ N million in secured liabilities on those properties according to those making the bid
the group is led by jay <unk> chief executive officer of <unk> investment corp. in <unk> and a. boyd simpson chief executive of the atlanta-based simpson organization inc
mr. <unk> 's company specializes in commercial real-estate investment and claims to have $ N billion in assets mr. simpson is a developer and a former senior executive of l.j. hooker
the assets are good but they require more money and management than can be provided in l.j. hooker 's current situation said mr. simpson in an interview
hooker 's philosophy was to build and sell
we want to build and hold
l.j. hooker based in atlanta is operating with protection from its creditors under chapter N of the u.s. bankruptcy code
its parent company hooker corp. of sydney australia is currently being managed by a court-appointed provisional <unk>
sanford <unk> chief executive of l.j. hooker said yesterday in a statement that he has not yet seen the bid but that he would review it and bring it to the attention of the creditors committee
the $ N million bid is estimated by mr. simpson as representing N N of the value of all hooker real-estate holdings in the u.s.
not included in the bid are <unk> teller or b. altman & co. l.j. hooker 's department-store chains
the offer covers the massive N <unk> forest fair mall in cincinnati the N <unk> <unk> fashion mall in columbia s.c. and the N <unk> <unk> town center mall in <unk> <unk>
the <unk> mall opened sept. N with a <unk> 's <unk> as its <unk> the columbia mall is expected to open nov. N
other hooker properties included are a <unk> office tower in <unk> atlanta expected to be completed next february vacant land sites in florida and ohio l.j. hooker international the commercial real-estate brokerage company that once did business as merrill lynch commercial real estate plus other shopping centers
the consortium was put together by <unk> <unk> the london-based investment banking company that is a subsidiary of security pacific corp
we do n't anticipate any problems in raising the funding for the bid said <unk> campbell the head of mergers and acquisitions at <unk> <unk> in an interview
<unk> <unk> is acting as the consortium 's investment bankers
according to people familiar with the consortium the bid was <unk> project <unk> a reference to the film <unk> in which a <unk> played by actress <unk> <unk> is saved from a <unk> businessman by a police officer named john <unk>
l.j. hooker was a small <unk> company based in atlanta in N when mr. simpson was hired to push it into commercial development
the company grew modestly until N when a majority position in hooker corp. was acquired by australian developer george <unk> currently hooker 's chairman
mr. <unk> <unk> to launch an ambitious but <unk> $ N billion acquisition binge that included <unk> teller and b. altman & co. as well as majority positions in merksamer jewelers a sacramento chain <unk> inc. the <unk> retailer and <unk> inc. the southeast department-store chain
eventually mr. simpson and mr. <unk> had a falling out over the direction of the company and mr. simpson said he resigned in N
since then hooker corp. has sold its interest in the <unk> chain back to <unk> 's management and is currently attempting to sell the b. altman & co. chain
in addition robert <unk> chief executive of the <unk> chain is seeking funds to buy out the hooker interest in his company
the merksamer chain is currently being offered for sale by first boston corp
reached in <unk> mr. <unk> said that he believes the various hooker <unk> can become profitable with new management
these are n't mature assets but they have the potential to be so said mr. <unk>
managed properly and with a long-term outlook these can become investment-grade quality properties
canadian <unk> production totaled N metric tons in the week ended oct. N up N N from the preceding week 's total of N tons statistics canada a federal agency said
the week 's total was up N N from N tons a year earlier
the <unk> total was N tons up N N from N tons a year earlier
the treasury plans to raise $ N million in new cash thursday by selling about $ N billion of 52-week bills and <unk> $ N billion of maturing bills
the bills will be dated oct. N and will mature oct. N N
they will be available in minimum denominations of $ N
bids must be received by N p.m. edt thursday at the treasury or at federal reserve banks or branches
as small investors <unk> their mutual funds with phone calls over the weekend big fund managers said they have a strong defense against any wave of withdrawals cash
unlike the weekend before black monday the funds were n't <unk> with heavy withdrawal requests
and many fund managers have built up cash levels and say they will be buying stock this week
at fidelity investments the nation 's largest fund company telephone volume was up sharply but it was still at just half the level of the weekend preceding black monday in N
the boston firm said <unk> redemptions were running at less than one-third the level two years ago
as of yesterday afternoon the redemptions represented less than N N of the total cash position of about $ N billion of fidelity 's stock funds
two years ago there were massive redemption levels over the weekend and a lot of fear around said c. bruce <unk> who runs fidelity investments ' $ N billion <unk> fund
this feels more like a <unk> deal
people are n't <unk>
the test may come today
friday 's stock market sell-off came too late for many investors to act
some shareholders have held off until today because any fund exchanges made after friday 's close would take place at today 's closing prices
stock fund redemptions during the N debacle did n't begin to <unk> until after the market opened on black monday
but fund managers say they 're ready
many have raised cash levels which act as a buffer against steep market declines
mario <unk> for instance holds cash positions well above N N in several of his funds
windsor fund 's john <unk> and mutual series ' michael price said they had raised their cash levels to more than N N and N N respectively this year
even peter lynch manager of fidelity 's $ N billion <unk> fund the nation 's largest stock fund built up cash to N N or $ N million
one reason is that after two years of monthly net redemptions the fund posted net inflows of money from investors in august and september
i 've let the money build up mr. lynch said who added that he has had trouble finding stocks he likes
not all funds have raised cash levels of course
as a group stock funds held N N of assets in cash as of august the latest figures available from the investment company institute
that was modestly higher than the N N and N N levels in august and september of N
also persistent redemptions would force some fund managers to dump stocks to raise cash
but a strong level of investor withdrawals is much more unlikely this time around fund managers said
a major reason is that investors already have sharply scaled back their purchases of stock funds since black monday
<unk> sales have rebounded in recent months but monthly net purchases are still running at less than half N levels
there 's not nearly as much <unk> said john <unk> chairman of vanguard group inc. a big valley forge pa. fund company
many fund managers argue that now 's the time to buy
vincent <unk> manager of the $ N billion wellington fund added to his positions in bristol-myers squibb woolworth and dun & bradstreet friday
and today he 'll be looking to buy drug stocks like eli lilly pfizer and american home products whose dividend yields have been bolstered by stock declines
fidelity 's mr. lynch for his part snapped up southern co. shares friday after the stock got <unk>
if the market drops further today he said he 'll be buying blue chips such as bristol-myers and kellogg
if they <unk> stocks like that he said it presents an opportunity that is the kind of thing you dream about
major mutual-fund groups said phone calls were <unk> at twice the normal weekend pace yesterday
but most investors were seeking share prices and other information
trading volume was only modestly higher than normal
still fund groups are n't taking any chances
they hope to avoid the <unk> phone lines and other <unk> that <unk> some fund investors in october N
fidelity on saturday opened its N <unk> investor centers across the country
the centers normally are closed through the weekend
in addition east coast centers will open at N edt this morning instead of the normal N
t. rowe price associates inc. increased its staff of phone representatives to handle investor requests
the <unk> group noted that some investors moved money from stock funds to money-market funds
but most investors seemed to be in an information mode rather than in a transaction mode said steven <unk> a vice president
and vanguard among other groups said it was adding more phone representatives today to help investors get through
in an unusual move several funds moved to calm investors with <unk> on their <unk> phone lines
we view friday 's market decline as offering us a buying opportunity as long-term investors a recording at <unk> & co. funds said over the weekend
the <unk> group had a similar recording for investors
several fund managers expect a rough market this morning before prices stabilize
some early selling is likely to stem from investors and portfolio managers who want to lock in this year 's fat profits
stock funds have averaged a staggering gain of N N through september according to lipper analytical services inc
<unk> <unk> who runs shearson lehman hutton inc. 's $ N million sector analysis portfolio predicts the market will open down at least N points on technical factors and some panic selling
but she expects prices to rebound soon and is telling investors she expects the stock market wo n't decline more than N N to N N from recent highs
this is not a major crash she said
nevertheless ms. <unk> said she was <unk> with phone calls over the weekend from nervous shareholders
half of them are really scared and want to sell she said but i 'm trying to talk them out of it
she added if they all were bullish i 'd really be upset
the backdrop to friday 's slide was <unk> different from that of the october N crash fund managers argue
two years ago unlike today the dollar was weak interest rates were rising and the market was very <unk> they say
from the investors ' standpoint institutions and individuals learned a painful lesson by selling at the lows on black monday said stephen boesel manager of the $ N million t. rowe price growth and income fund
this time i do n't think we 'll get a panic reaction
newport corp. said it expects to report <unk> earnings of between N cents and N cents a share somewhat below analysts ' estimates of N cents to N cents
the maker of scientific instruments and laser parts said orders fell below expectations in recent months
a spokesman added that sales in the current quarter will about equal the <unk> quarter 's figure when newport reported net income of $ N million or N cents a share on $ N million in sales
<unk> from the strike by N machinists union members against boeing co. reached air carriers friday as america west airlines announced it will postpone its new service out of houston because of delays in receiving aircraft from the seattle jet maker
peter <unk> vice president for planning at the phoenix ariz. carrier said in an interview that the work <unk> at boeing now entering its 13th day has caused some turmoil in our scheduling and that more than N passengers who were booked to fly out of houston on america west would now be put on other airlines
mr. <unk> said boeing told america west that the N it was supposed to get this thursday would n't be delivered until nov. N the day after the airline had been planning to <unk> service at houston with four daily flights including three <unk> to phoenix and one <unk> to las vegas
now those routes are n't expected to begin until jan
boeing is also supposed to send to america west another N <unk> aircraft as well as a N by year 's end
those too are almost certain to arrive late
at this point no other america west flights including its new service at san antonio texas newark n.j. and <unk> calif. have been affected by the delays in boeing deliveries
nevertheless the company 's reaction <unk> the <unk> effect that a huge manufacturer such as boeing can have on other parts of the economy
it also is sure to help the machinists put added pressure on the company
i just do n't feel that the company can really stand or would want a prolonged <unk> tom baker president of machinists ' district N said in an interview yesterday
i do n't think their customers would like it very much
america west though is a smaller airline and therefore more affected by the delayed delivery of a single plane than many of its competitors would be
i figure that american and united probably have such a hard time counting all the planes in their fleets they might not miss one at all mr. <unk> said
indeed a random check friday did n't seem to indicate that the strike was having much of an effect on other airline operations
southwest airlines has a boeing N set for delivery at the end of this month and expects to have the plane on time
it 's so close to completion boeing 's told us there wo n't be a problem said a southwest spokesman
a spokesman for amr corp. said boeing has assured american airlines it will deliver a N on time later this month
american is preparing to take delivery of another N in early december and N more next year and is n't anticipating any changes in that timetable
in seattle a boeing spokesman explained that the company has been in constant communication with all of its customers and that it was impossible to predict what further disruptions might be triggered by the strike
meanwhile supervisors and <unk> employees have been trying to finish some N aircraft mostly N and N jumbo jets at the company 's <unk> wash. plant that were all but completed before the <unk>
as of friday four had been delivered and a fifth plane a N was supposed to be <unk> out over the weekend to air china
no date has yet been set to get back to the bargaining table
we want to make sure they know what they want before they come back said doug hammond the federal mediator who has been in contact with both sides since the strike began
the investment community for one has been anticipating a <unk> resolution
though boeing 's stock price was battered along with the rest of the market friday it actually has risen over the last two weeks on the strength of new orders
the market has taken two views that the labor situation will get settled in the short term and that things look very <unk> for boeing in the long term said howard <unk> an analyst at <unk> j. lawrence inc
boeing 's shares fell $ N friday to close at $ N in composite trading on the new york stock exchange
but mr. baker said he thinks the earliest a pact could be struck would be the end of this month <unk> that the company and union may resume negotiations as early as this week
still he said it 's possible that the strike could last considerably longer
i would n't expect an immediate resolution to anything
last week boeing chairman frank <unk> sent striking workers a letter saying that to my knowledge boeing 's offer represents the best overall three-year contract of any major u.s. industrial firm in recent history
but mr. baker called the letter and the company 's offer of a N N wage increase over the life of the pact plus bonuses very weak
he added that the company <unk> the union 's resolve and the workers ' <unk> with being forced to work many hours overtime
in separate developments talks have broken off between machinists representatives at lockheed corp. and the <unk> calif. aerospace company
the union is continuing to work through its expired contract however
it had planned a strike vote for next sunday but that has been pushed back indefinitely
united auto workers local N which represents N workers at boeing 's helicopter unit in delaware county pa. said it agreed to extend its contract on a <unk> basis with a <unk> notification to cancel while it continues bargaining
the accord expired yesterday
and boeing on friday said it received an order from <unk> <unk> for four model N <unk> <unk> valued at a total of about $ N million
the planes long range versions of the <unk> <unk> will be delivered with <unk> & <unk> <unk> engines
<unk> & <unk> is a unit of united technologies inc
<unk> <unk> is based in amsterdam
a boeing spokeswoman said a delivery date for the planes is still being worked out for a variety of reasons but not because of the strike
<unk> <unk> contributed to this article
<unk> ltd. said its utilities arm is considering building new electric power plants some valued at more than one billion canadian dollars us$ N million in great britain and elsewhere
<unk> <unk> <unk> 's senior vice president finance said its <unk> canadian utilities ltd. unit is reviewing <unk> projects in eastern canada and conventional electric power generating plants elsewhere including britain where the british government plans to allow limited competition in electrical generation from private-sector suppliers as part of its privatization program
the projects are big
they can be c$ N billion plus mr. <unk> said
but we would n't go into them alone and canadian utilities ' equity stake would be small he said
<unk> we 'd like to be the operator of the project and a modest equity investor
our long suit is our proven ability to operate power plants he said
mr. <unk> would n't offer <unk> regarding <unk> 's proposed british project but he said it would compete for customers with two huge british power generating companies that would be formed under the country 's plan to <unk> its massive water and electric utilities
britain 's government plans to raise about # N billion $ N billion from the sale of most of its giant water and electric utilities beginning next month
the planned electric utility sale scheduled for next year is alone expected to raise # N billion making it the world 's largest public offering
under terms of the plan independent <unk> would be able to compete for N N of customers until N and for another N N between N and N
canadian utilities had N revenue of c$ N billion mainly from its natural gas and electric utility businesses in alberta where the company serves about N customers
there seems to be a move around the world to <unk> the generation of electricity mr. <unk> said and canadian utilities hopes to capitalize on it
this is a real thrust on our utility side he said adding that canadian utilities is also <unk> projects in <unk> countries though he would be specific
canadian utilities is n't alone in exploring power generation opportunities in britain in anticipation of the privatization program
we 're certainly looking at some power generating projects in england said bruce <unk> vice president corporate strategy and corporate planning with enron corp. houston a big natural gas producer and pipeline operator
mr. <unk> said enron is considering building <unk> power plants in the u.k. capable of producing about N <unk> of power at a cost of about $ N million to $ N million
pse inc. said it expects to report third earnings of $ N million to $ N million or N cents to N cents a share
in the year-ago quarter the designer and operator of <unk> and waste heat recovery plants had net income of $ N or four cents a share on revenue of about $ N million
the company said the improvement is related to additional <unk> facilities that have been put into operation
<unk> <unk> flights are $ N to paris and $ N to london
in a centennial journal article oct. N the fares were reversed
diamond <unk> offshore partners said it had discovered gas offshore louisiana
the well <unk> at a rate of N million cubic feet of gas a day through a N <unk> opening at <unk> between N and N feet
diamond <unk> is the operator with a N N interest in the well
diamond <unk> offshore 's stock rose N cents friday to close at $ N in new york stock exchange composite trading
<unk> & broad home corp. said it formed a $ N million limited partnership subsidiary to buy land in california suitable for residential development
the partnership <unk> & broad land development venture limited partnership is a N joint venture with a trust created by institutional clients of <unk> advisory corp. a unit of <unk> financial corp. a real estate advisory management and development company with offices in chicago and beverly hills calif
<unk> & broad a home building company declined to identify the institutional investors
the land to be purchased by the joint venture has n't yet received <unk> and other approvals required for development and part of <unk> & broad 's job will be to obtain such approvals
the partnership runs the risk that it may not get the approvals for development but in return it can buy land at wholesale rather than retail prices which can result in sizable savings said bruce <unk> president and chief executive officer of <unk> & broad
there are really very few companies that have adequate capital to buy properties in a raw state for cash
typically developers option property and then once they get the administrative approvals they buy it said mr. <unk> adding that he believes the joint venture is the first of its kind
we usually operate in that conservative manner
by setting up the joint venture <unk> & broad can take the more aggressive approach of buying raw land while avoiding the negative <unk> to its own balance sheet mr. <unk> said
the company is putting up only N N of the capital although it is responsible for providing management planning and processing services to the joint venture
this is one of the best ways to assure a pipeline of land to fuel our growth at a minimum risk to our company mr. <unk> said
when the price of plastics took off in N quantum chemical corp. went along for the ride
the timing of quantum 's chief executive officer john <unk> <unk> appeared to be nothing less than inspired because he had just increased quantum 's reliance on plastics
the company <unk> much of the chemical industry as annual profit grew <unk> in two years
mr. <unk> said of the boom it 's going to last a whole lot longer than anybody thinks
but now prices have <unk> and quantum 's profit is <unk>
some securities analysts are looking for no better than break-even results from the company for the third quarter compared with year-earlier profit of $ N million or $ N a share on sales of $ N million
the stock having lost nearly a quarter of its value since sept. N closed at $ N share down $ N in new york stock exchange composite trading friday
to a degree quantum represents the new times that have arrived for producers of the so-called commodity plastics that <unk> modern life
having just passed through one of the most profitable periods in their history these producers now see their prices eroding
pricing cycles to be sure are nothing new for plastics producers
and the financial decline of some looks steep only in comparison with the <unk> period that is just behind them
we were all wonderful heroes last year says an executive at one of quantum 's competitors
now we 're at the bottom of the <unk>
at quantum which is based in new york the trouble is magnified by the company 's heavy <unk> on plastics
once known as national <unk> & chemical corp. the company <unk> the wine and spirits business and <unk> more of its resources into plastics after mr. <unk> took the chief executive 's job in N
mr. <unk> N years old declined to be interviewed for this article but he has consistently argued that over the long haul across both the <unk> and the <unk> of the plastics market quantum will <unk> through its new direction
quantum 's lot is mostly tied to polyethylene <unk> used to make garbage bags milk <unk> <unk> toys and meat packaging among other items
in the u.s. polyethylene market quantum has claimed the largest share about N N
but its competitors including dow chemical co. union carbide corp. and several oil giants have much broader business interests and so are better <unk> against price swings
when the price of polyethylene moves a mere penny a pound quantum 's annual profit <unk> by about N cents a share provided no other <unk> are changing
in recent months the price of polyethylene even more than that of other commodity plastics has taken a dive
benchmark grades which still sold for as much as N cents a pound last spring have skidded to between N cents and N cents
meanwhile the price of <unk> the chemical building block of polyethylene has n't dropped nearly so fast
that <unk> <unk> quantum badly because its own plants cover only about half of its <unk> needs
by many accounts an early hint of a price rout in the making came at the start of this year
china which had been putting in huge orders for polyethylene abruptly halted them
<unk> that excess polyethylene would soon be <unk> around the world other buyers then bet that prices had peaked and so began to draw down inventories rather than order new product
kenneth mitchell director of dow 's polyethylene business says producers were surprised to learn how much inventories had swelled throughout the distribution chain as prices <unk> up
people were even <unk> bags he says
now producers hope prices have hit bottom
they recently announced increases of a few cents a pound to take effect in the next several weeks
no one knows however whether the new posted prices will stick once producers and customers start to <unk>
one <unk> is george <unk> a <unk> analyst at oppenheimer & co. and a bear on plastics stocks
noting others ' estimates of when price increases can be sustained he remarks some say october
some say november
i say N
he argues that efforts to firm up prices will be undermined by producers ' plans to expand production capacity
a quick turnaround is crucial to quantum because its cash requirements remain heavy
the company is trying to carry out a three-year $ N billion <unk> program started this year
at the same time its annual payments on long-term debt will more than double from a year ago to about $ N million largely because of debt taken on to pay a $ <unk> special dividend earlier this year
quantum described the payout at the time as a way for it to share the <unk> with its holders because its stock price was n't reflecting the huge profit increases
some analysts saw the payment as an effort also to <unk> takeover speculation
whether a cash crunch might eventually force the company to cut its quarterly dividend raised N N to N cents a share only a year ago has become a topic of intense speculation on wall street since mr. <unk> <unk> dividend questions in a sept. N meeting with analysts
some viewed his response that company directors review the dividend regularly as nothing more than the standard line from executives

+ 0
- 263
reproduction/Char-aware_NLM/train.py

@@ -1,263 +0,0 @@
import os
from collections import namedtuple

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

from .model import charLM
from .test import test
from .utilities import *


def preprocess():
    # build the word/character vocabularies and cache them to disk
    word_dict, char_dict = create_word_char_dict("charlm.txt", "train.txt", "test.txt")
    num_words = len(word_dict)
    num_char = len(char_dict)
    char_dict["BOW"] = num_char + 1
    char_dict["EOW"] = num_char + 2
    char_dict["PAD"] = 0

    # dict of (int, string)
    reverse_word_dict = {value: key for key, value in word_dict.items()}
    max_word_len = max([len(word) for word in word_dict])

    objects = {
        "word_dict": word_dict,
        "char_dict": char_dict,
        "reverse_word_dict": reverse_word_dict,
        "max_word_len": max_word_len
    }

    torch.save(objects, "cache/prep.pt")
    print("Preprocess done.")


def to_var(x):
    # wrap a tensor in a Variable, moving it to the GPU when one is available
    if torch.cuda.is_available():
        x = x.cuda()
    return Variable(x)


def train(net, data, opt):
    """
    :param net: the pytorch model (charLM)
    :param data: DataTuple of numpy arrays
    :param opt: named tuple of training options

    1. set the random seed
    2. define local inputs
    3. training settings: learning rate, loss, etc.
    4. main epoch loop
    5. batchify
    6. validation
    7. save the model
    """
    torch.manual_seed(1024)

    train_input = torch.from_numpy(data.train_input)
    train_label = torch.from_numpy(data.train_label)
    valid_input = torch.from_numpy(data.valid_input)
    valid_label = torch.from_numpy(data.valid_label)

    # reshape to [num_seq, seq_len, max_word_len+2]
    num_seq = train_input.size()[0] // opt.lstm_seq_len
    train_input = train_input[:num_seq * opt.lstm_seq_len, :]
    train_input = train_input.view(-1, opt.lstm_seq_len, opt.max_word_len + 2)

    num_seq = valid_input.size()[0] // opt.lstm_seq_len
    valid_input = valid_input[:num_seq * opt.lstm_seq_len, :]
    valid_input = valid_input.view(-1, opt.lstm_seq_len, opt.max_word_len + 2)

    num_epoch = opt.epochs
    num_iter_per_epoch = train_input.size()[0] // opt.lstm_batch_size

    learning_rate = opt.init_lr
    old_PPL = 100000
    best_PPL = 100000

    # CrossEntropyLoss combines LogSoftmax and NLLLoss
    criterion = nn.CrossEntropyLoss()

    # initial LSTM hidden state (h_0, c_0); word_embed_dim == hidden size
    hidden = (to_var(torch.zeros(2, opt.lstm_batch_size, opt.word_embed_dim)),
              to_var(torch.zeros(2, opt.lstm_batch_size, opt.word_embed_dim)))

    for epoch in range(num_epoch):

        ################ Validation ####################
        net.eval()
        loss_batch = []
        PPL_batch = []
        iterations = valid_input.size()[0] // opt.lstm_batch_size

        valid_generator = batch_generator(valid_input, opt.lstm_batch_size)
        vlabel_generator = batch_generator(valid_label, opt.lstm_batch_size * opt.lstm_seq_len)

        for t in range(iterations):
            batch_input = valid_generator.__next__()
            batch_label = vlabel_generator.__next__()

            hidden = [state.detach() for state in hidden]
            valid_output, hidden = net(to_var(batch_input), hidden)

            length = valid_output.size()[0]

            # [num_sample-1, len(word_dict)] vs [num_sample-1]
            valid_loss = criterion(valid_output, to_var(batch_label))

            PPL = torch.exp(valid_loss.data)

            loss_batch.append(float(valid_loss))
            PPL_batch.append(float(PPL))

        PPL = np.mean(PPL_batch)
        print("[epoch {}] valid PPL={}".format(epoch, PPL))
        print("valid loss={}".format(np.mean(loss_batch)))
        print("PPL decrease={}".format(float(old_PPL - PPL)))

        # Preserve the best model
        if best_PPL > PPL:
            best_PPL = PPL
            torch.save(net.state_dict(), "cache/models.pt")
            torch.save(net, "cache/net.pkl")

        # Halve the learning rate when the validation PPL stops improving
        if float(old_PPL - PPL) <= 1.0:
            learning_rate /= 2
            print("halved lr:{}".format(learning_rate))

        old_PPL = PPL

        ##################################################
        #################### Training ####################
        net.train()
        optimizer = optim.SGD(net.parameters(),
                              lr=learning_rate,
                              momentum=0.85)

        # split along the first dim
        input_generator = batch_generator(train_input, opt.lstm_batch_size)
        label_generator = batch_generator(train_label, opt.lstm_batch_size * opt.lstm_seq_len)

        for t in range(num_iter_per_epoch):
            batch_input = input_generator.__next__()
            batch_label = label_generator.__next__()

            # detach the LSTM hidden state from the last batch
            hidden = [state.detach() for state in hidden]

            output, hidden = net(to_var(batch_input), hidden)
            # output: [num_word, vocab_size]

            loss = criterion(output, to_var(batch_label))

            net.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm(net.parameters(), 5, norm_type=2)
            optimizer.step()

            if (t + 1) % 100 == 0:
                print("[epoch {} step {}] train loss={}, Perplexity={}".format(
                    epoch + 1, t + 1, float(loss.data), float(np.exp(loss.data))))

    torch.save(net.state_dict(), "cache/models.pt")
    print("Training finished.")


################################################################

if __name__ == "__main__":

    word_embed_dim = 300
    char_embedding_dim = 15

    if os.path.exists("cache/prep.pt") is False:
        preprocess()

    objects = torch.load("cache/prep.pt")

    word_dict = objects["word_dict"]
    char_dict = objects["char_dict"]
    reverse_word_dict = objects["reverse_word_dict"]
    max_word_len = objects["max_word_len"]
    num_words = len(word_dict)

    print("word/char dictionary built. Start making inputs.")

    if os.path.exists("cache/data_sets.pt") is False:
        train_text = read_data("./train.txt")
        valid_text = read_data("./charlm.txt")
        test_text = read_data("./test.txt")

        train_set = np.array(text2vec(train_text, char_dict, max_word_len))
        valid_set = np.array(text2vec(valid_text, char_dict, max_word_len))
        test_set = np.array(text2vec(test_text, char_dict, max_word_len))

        # Labels are next-word indices in word_dict, with the same length as the inputs
        train_label = np.array([word_dict[w] for w in train_text[1:]] + [word_dict[train_text[-1]]])
        valid_label = np.array([word_dict[w] for w in valid_text[1:]] + [word_dict[valid_text[-1]]])
        test_label = np.array([word_dict[w] for w in test_text[1:]] + [word_dict[test_text[-1]]])

        category = {"tdata": train_set, "vdata": valid_set, "test": test_set,
                    "trlabel": train_label, "vlabel": valid_label, "tlabel": test_label}
        torch.save(category, "cache/data_sets.pt")
    else:
        data_sets = torch.load("cache/data_sets.pt")
        train_set = data_sets["tdata"]
        valid_set = data_sets["vdata"]
        test_set = data_sets["test"]
        train_label = data_sets["trlabel"]
        valid_label = data_sets["vlabel"]
        test_label = data_sets["tlabel"]

    DataTuple = namedtuple("DataTuple",
                           "train_input train_label valid_input valid_label test_input test_label")
    data = DataTuple(train_input=train_set,
                     train_label=train_label,
                     valid_input=valid_set,
                     valid_label=valid_label,
                     test_input=test_set,
                     test_label=test_label)

    print("Loaded data sets. Start building network.")

    USE_GPU = True
    cnn_batch_size = 700
    lstm_seq_len = 35
    lstm_batch_size = 20
    # cnn_batch_size == lstm_seq_len * lstm_batch_size

    net = charLM(char_embedding_dim,
                 word_embed_dim,
                 num_words,
                 len(char_dict),
                 use_gpu=USE_GPU)

    for param in net.parameters():
        nn.init.uniform(param.data, -0.05, 0.05)

    Options = namedtuple("Options", [
        "cnn_batch_size", "init_lr", "lstm_seq_len",
        "max_word_len", "lstm_batch_size", "epochs",
        "word_embed_dim"])
    opt = Options(cnn_batch_size=lstm_seq_len * lstm_batch_size,
                  init_lr=1.0,
                  lstm_seq_len=lstm_seq_len,
                  max_word_len=max_word_len,
                  lstm_batch_size=lstm_batch_size,
                  epochs=35,
                  word_embed_dim=word_embed_dim)

    print("Network built. Start training.")

    # Training can be stopped at any time with Ctrl+C
    try:
        train(net, data, opt)
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early')

    torch.save(net, "cache/net.pkl")
    print("save net")

    test(net, data, opt)
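
The training and validation loops above rely on a batch_generator helper pulled in through "from .utilities import *"; utilities.py is also deleted in this pull request but is not shown in this hunk. As a rough sketch only (the names and slicing here are assumptions, not the actual utilities.py implementation), the loops expect a contract along these lines:

import torch

def batch_generator(tensor, batch_size):
    # assumed contract: yield consecutive slices of size `batch_size`
    # along dim 0, in order, one slice per call to __next__()
    for start in range(0, tensor.size(0) - batch_size + 1, batch_size):
        yield tensor[start:start + batch_size]

# e.g. for valid_input of shape [num_seq, lstm_seq_len, max_word_len + 2],
# each yielded batch has shape [lstm_batch_size, lstm_seq_len, max_word_len + 2]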

+ 0
- 360
reproduction/Char-aware_NLM/train.txt

@@ -1,360 +0,0 @@
aer banknote berlitz calloway centrust cluett fromstein gitano guterman hydro-quebec ipo kia memotec mlx nahb punts rake regatta rubens sim snack-food ssangyong swapo wachter
pierre <unk> N years old will join the board as a nonexecutive director nov. N
mr. <unk> is chairman of <unk> n.v. the dutch publishing group
rudolph <unk> N years old and former chairman of consolidated gold fields plc was named a nonexecutive director of this british industrial conglomerate
a form of asbestos once used to make kent cigarette filters has caused a high percentage of cancer deaths among a group of workers exposed to it more than N years ago researchers reported
the asbestos fiber <unk> is unusually <unk> once it enters the <unk> with even brief exposures to it causing symptoms that show up decades later researchers said
<unk> inc. the unit of new york-based <unk> corp. that makes kent cigarettes stopped using <unk> in its <unk> cigarette filters in N
although preliminary findings were reported more than a year ago the latest results appear in today 's new england journal of medicine a forum likely to bring new attention to the problem
a <unk> <unk> said this is an old story
we 're talking about years ago before anyone heard of asbestos having any questionable properties
there is no asbestos in our products now
neither <unk> nor the researchers who studied the workers were aware of any research on smokers of the kent cigarettes
we have no useful information on whether users are at risk said james a. <unk> of boston 's <unk> cancer institute
dr. <unk> led a team of researchers from the national cancer institute and the medical schools of harvard university and boston university
the <unk> spokeswoman said asbestos was used in very modest amounts in making paper for the filters in the early 1950s and replaced with a different type of <unk> in N
from N to N N billion kent cigarettes with the filters were sold the company said
among N men who worked closely with the substance N have died more than three times the expected number
four of the five surviving workers have <unk> diseases including three with recently <unk> cancer
the total of N deaths from malignant <unk> lung cancer and <unk> was far higher than expected the researchers said
the <unk> rate is a striking finding among those of us who study <unk> diseases said dr. <unk>
the percentage of lung cancer deaths among the workers at the west <unk> mass. paper factory appears to be the highest for any asbestos workers studied in western industrialized countries he said
the plant which is owned by <unk> & <unk> co. was under contract with <unk> to make the cigarette filters
the finding probably will support those who argue that the u.s. should regulate the class of asbestos including <unk> more <unk> than the common kind of asbestos <unk> found in most schools and other buildings dr. <unk> said
the u.s. is one of the few industrialized nations that does n't have a higher standard of regulation for the smooth <unk> fibers such as <unk> that are classified as <unk> according to <unk> t. <unk> a professor of <unk> at the university of vermont college of medicine
more common <unk> fibers are <unk> and are more easily rejected by the body dr. <unk> explained
in july the environmental protection agency imposed a gradual ban on virtually all uses of asbestos
by N almost all remaining uses of <unk> asbestos will be outlawed
about N workers at a factory that made paper for the kent filters were exposed to asbestos in the 1950s
areas of the factory were particularly dusty where the <unk> was used
workers dumped large <unk> <unk> of the imported material into a huge <unk> poured in cotton and <unk> fibers and <unk> mixed the dry fibers in a process used to make filters
workers described clouds of blue dust that hung over parts of the factory even though <unk> fans <unk> the area
there 's no question that some of those workers and managers contracted <unk> diseases said <unk> phillips vice president of human resources for <unk> & <unk>
but you have to recognize that these events took place N years ago
it has no bearing on our work force today
yields on money-market mutual funds continued to slide amid signs that portfolio managers expect further declines in interest rates
the average seven-day compound yield of the N taxable funds tracked by <unk> 's money fund report eased a fraction of a percentage point to N N from N N for the week ended tuesday
compound yields assume reinvestment of dividends and that the current yield continues for a year
average maturity of the funds ' investments <unk> by a day to N days the longest since early august according to donoghue 's
longer maturities are thought to indicate declining interest rates because they permit portfolio managers to retain relatively higher rates for a longer period
shorter maturities are considered a sign of rising rates because portfolio managers can capture higher rates sooner
the average maturity for funds open only to institutions considered by some to be a stronger indicator because those managers watch the market closely reached a high point for the year N days
nevertheless said <unk> <unk> <unk> editor of money fund report yields may <unk> up again before they <unk> down because of recent rises in short-term interest rates
the yield on six-month treasury bills sold at monday 's auction for example rose to N N from N N
despite recent declines in yields investors continue to pour cash into money funds
assets of the N taxable funds grew by $ N billion during the latest week to $ N billion
typically money-fund yields beat comparable short-term investments because portfolio managers can vary maturities and go after the highest rates
the top money funds are currently yielding well over N N
dreyfus world-wide dollar the <unk> fund had a seven-day compound yield of N N during the latest week down from N N a week earlier
it invests heavily in dollar-denominated securities overseas and is currently <unk> management fees which boosts its yield
the average seven-day simple yield of the N funds was N N down from N N
the 30-day simple yield fell to an average N N from N N the 30-day compound yield slid to an average N N from N N
j.p. <unk> vice chairman of <unk> grace & co. which holds a N N interest in this <unk> company was elected a director
he succeeds <unk> d. <unk> formerly a <unk> grace vice chairman who resigned
<unk> grace holds three of grace energy 's seven board seats
pacific first financial corp. said shareholders approved its acquisition by royal <unk> ltd. of toronto for $ N a share or $ N million
the thrift holding company said it expects to obtain regulatory approval and complete the transaction by year-end
<unk> international inc. said its <unk> & <unk> unit completed the sale of its <unk> controls operations to <unk> s.p a. for $ N million
<unk> is an italian state-owned holding company with interests in the mechanical engineering industry
<unk> controls based in <unk> ohio makes computerized industrial controls systems
it employs N people and has annual revenue of about $ N million
the federal government suspended sales of u.s. savings bonds because congress has n't lifted the ceiling on government debt
until congress acts the government has n't any authority to issue new debt obligations of any kind the treasury said
the government 's borrowing authority dropped at midnight tuesday to $ N trillion from $ N trillion
legislation to lift the debt ceiling is <unk> in the fight over cutting capital-gains taxes
the house has voted to raise the ceiling to $ N trillion but the senate is n't expected to act until next week at the earliest
the treasury said the u.s. will default on nov. N if congress does n't act by then
clark j. <unk> was named senior vice president and general manager of this u.s. sales and marketing arm of japanese auto maker mazda motor corp
in the new position he will oversee mazda 's u.s. sales service parts and marketing operations
previously mr. <unk> N years old was general marketing manager of chrysler corp. 's chrysler division
he had been a sales and marketing executive with chrysler for N years
when it 's time for their <unk> <unk> the nation 's manufacturing <unk> typically jet off to the <unk> <unk> of resort towns like <unk> <unk> and hot springs
not this year
the national association of manufacturers settled on the <unk> capital of indianapolis for its fall board meeting
and the city decided to treat its guests more like royalty or rock stars than factory owners
the idea of course to prove to N corporate decision makers that the buckle on the <unk> belt is n't so <unk> after all that it 's a good place for a company to expand
on the receiving end of the message were officials from giants like du pont and <unk> along with lesser <unk> like <unk> steel and the valley queen <unk> factory
for <unk> the executives joined mayor william h. <unk> iii for an evening of the indianapolis <unk> <unk> and a guest <unk> victor <unk>
champagne and <unk> followed
the next morning with a police <unk> <unk> of executives and their wives <unk> to the indianapolis motor <unk> <unk> by traffic or red lights
the governor could n't make it so the <unk> governor welcomed the special guests
a buffet breakfast was held in the museum where food and drinks are banned to everyday visitors
then in the guests ' honor the <unk> <unk> out four drivers crews and even the official indianapolis N announcer for a <unk> exhibition race
after the race fortune N executives <unk> like <unk> over the cars and drivers
no <unk> the drivers pointed out they still had space on their machines for another sponsor 's name or two
back downtown the <unk> squeezed in a few meetings at the hotel before <unk> the buses again
this time it was for dinner and <unk> a block away
under the stars and <unk> of the <unk> indiana <unk> <unk> nine of the hottest chefs in town fed them indiana <unk> <unk> <unk> <unk> <unk> <unk> and <unk> <unk> with a <unk> <unk>
knowing a <unk> and free <unk> when they eat one the executives gave the chefs a standing <unk>
more than a few <unk> say the <unk> treatment <unk> them to return to a <unk> city for future meetings
but for now they 're looking forward to their winter meeting <unk> in february
south korea registered a trade deficit of $ N million in october reflecting the country 's economic <unk> according to government figures released wednesday
preliminary <unk> by the trade and industry ministry showed another trade deficit in october the fifth monthly setback this year casting a cloud on south korea 's <unk> economy
exports in october stood at $ N billion a mere N N increase from a year earlier while imports increased sharply to $ N billion up N N from last october
south korea 's economic boom which began in N stopped this year because of prolonged labor disputes trade conflicts and sluggish exports
government officials said exports at the end of the year would remain under a government target of $ N billion
despite the gloomy forecast south korea has recorded a trade surplus of $ N million so far this year
from january to october the nation 's accumulated exports increased N N from the same period last year to $ N billion
imports were at $ N billion up N N
newsweek trying to keep pace with rival time magazine announced new advertising rates for N and said it will introduce a new incentive plan for advertisers
the new ad plan from newsweek a unit of the washington post co. is the second incentive plan the magazine has offered advertisers in three years
plans that give advertisers discounts for maintaining or increasing ad spending have become permanent <unk> at the news <unk> and underscore the fierce competition between newsweek time warner inc. 's time magazine and <unk> b. <unk> 's u.s. news & world report
alan <unk> recently named newsweek president said newsweek 's ad rates would increase N N in january
a full <unk> page in newsweek will cost $ N
in mid-october time magazine lowered its guaranteed circulation rate base for N while not increasing ad page rates with a lower circulation base time 's ad rate will be effectively N N higher per subscriber a full page in time costs about $ N
u.s. news has yet to announce its N ad rates
newsweek said it will introduce the circulation credit plan which <unk> space credits to advertisers on renewal advertising
the magazine will reward with page bonuses advertisers who in N meet or exceed their N spending as long as they spent $ N in N and $ N in N
mr. <unk> said the plan is not an attempt to shore up a decline in ad pages in the first nine months of N newsweek 's ad pages totaled N a drop of N N from last year according to publishers information bureau
what matters is what advertisers are paying per page and in that department we are doing fine this fall said mr. <unk>
both newsweek and u.s. news have been gaining circulation in recent years without heavy use of electronic <unk> to subscribers such as telephones or watches
however none of the big three <unk> recorded circulation gains recently
according to audit bureau of <unk> time the largest <unk> had average circulation of N a decrease of N N
newsweek 's circulation for the first six months of N was N flat from the same period last year
u.s. news ' circulation in the same time was N down N N
new england electric system bowed out of the bidding for public service co. of new hampshire saying that the risks were too high and the potential <unk> too far in the future to justify a higher offer
the move leaves united illuminating co. and northeast utilities as the remaining outside bidders for ps of new hampshire which also has proposed an internal reorganization plan in chapter N bankruptcy proceedings under which it would remain an independent company
new england electric based in <unk> mass. had offered $ N billion to acquire ps of new hampshire well below the $ N billion value united illuminating places on its bid and the $ N billion northeast says its bid is worth
united illuminating is based in new haven conn. and northeast is based in hartford conn
ps of new hampshire <unk> n.h. values its internal reorganization plan at about $ N billion
john rowe president and chief executive officer of new england electric said the company 's return on equity could suffer if it made a higher bid and its forecasts related to ps of new hampshire such as growth in electricity demand and improved operating <unk> did n't come true
when we <unk> raising our bid the risks seemed substantial and persistent over the next five years and the rewards seemed a long way out
that got hard to take he added
mr. rowe also noted that political concerns also worried new england electric
no matter who owns ps of new hampshire after it emerges from bankruptcy proceedings its rates will be among the highest in the nation he said
that attracts attention
it was just another one of the risk factors that led to the company 's decision to withdraw from the bidding he added
wilbur ross jr. of rothschild inc. the financial adviser to the troubled company 's equity holders said the withdrawal of new england electric might speed up the reorganization process
the fact that new england proposed lower rate increases N N over seven years against around N N boosts proposed by the other two outside bidders complicated negotiations with state officials mr. ross asserted
now the field is less <unk> he added
separately the federal energy regulatory commission turned down for now a request by northeast seeking approval of its possible purchase of ps of new hampshire
northeast said it would <unk> its request and still hopes for an <unk> review by the ferc so that it could complete the purchase by next summer if its bid is the one approved by the bankruptcy court
ps of new hampshire shares closed yesterday at $ N off N cents in new york stock exchange composite trading
norman <unk> N years old and former president and chief operating officer of toys r us inc. and frederick <unk> jr. N chairman of <unk> banking corp. were elected directors of this consumer electronics and appliances retailing chain
they succeed daniel m. <unk> retired circuit city executive vice president and robert r. <unk> u.s. treasury undersecretary on the <unk> board
commonwealth edison co. was ordered to refund about $ N million to its current and former <unk> for illegal rates collected for cost overruns on a nuclear power plant
the refund was about $ N million more than previously ordered by the illinois commerce commission and trade groups said it may be the largest ever required of a state or local utility
state court judge richard curry ordered edison to make average refunds of about $ N to $ N each to edison customers who have received electric service since april N including about two million customers who have moved during that period
judge curry ordered the refunds to begin feb. N and said that he would n't <unk> any appeals or other attempts to block his order by commonwealth edison
the refund pool may not be held <unk> through another round of appeals judge curry said
commonwealth edison said it is already appealing the underlying commission order and is considering appealing judge curry 's order
the exact amount of the refund will be determined next year based on actual <unk> made until dec. N of this year
commonwealth edison said the ruling could force it to slash its N earnings by $ N a share
for N commonwealth edison reported earnings of $ N million or $ N a share
a commonwealth edison spokesman said that tracking down the two million customers whose addresses have changed during the past N N years would be an administrative nightmare
in new york stock exchange composite trading yesterday commonwealth edison closed at $ N down N cents
the $ N billion <unk> N plant near <unk> ill. was completed in N
in a disputed N ruling the commerce commission said commonwealth edison could raise its electricity rates by $ N million to pay for the plant
but state courts upheld a challenge by consumer groups to the commission 's rate increase and found the rates illegal
the illinois supreme court ordered the commission to audit commonwealth edison 's construction expenses and refund any <unk> expenses
the utility has been collecting for the plant 's construction cost from its N million customers subject to a refund since N
in august the commission ruled that between $ N million and $ N million of the plant 's construction cost was <unk> and should be <unk> plus interest
in his ruling judge curry added an additional $ N million to the commission 's calculations
last month judge curry set the interest rate on the refund at N N
commonwealth edison now faces an additional <unk> refund on its <unk> rate <unk> <unk> that the illinois appellate court has estimated at $ N million
and consumer groups hope that judge curry 's <unk> N order may set a precedent for a second nuclear rate case involving commonwealth edison 's <unk> N plant
commonwealth edison is seeking about $ N million in rate increases to pay for <unk> N
the commission is expected to rule on the <unk> N case by year end
last year commonwealth edison had to refund $ N million for poor performance of its <unk> i nuclear plant
japan 's domestic sales of cars trucks and buses in october rose N N from a year earlier to N units a record for the month the japan automobile dealers ' association said
the strong growth followed year-to-year increases of N N in august and N N in september
the monthly sales have been setting records every month since march
october sales compared with the previous month inched down N N
sales of passenger cars grew N N from a year earlier to N units
sales of medium-sized cars which benefited from price reductions arising from introduction of the consumption tax more than doubled to N units from N in october N
texas instruments japan ltd. a unit of texas instruments inc. said it opened a plant in south korea to manufacture control devices
the new plant located in <unk> about N miles from seoul will help meet increasing and diversifying demand for control products in south korea the company said
the plant will produce control devices used in motor vehicles and household appliances
the survival of spinoff cray computer corp. as a fledgling in the supercomputer business appears to depend heavily on the creativity and <unk> of its chairman and chief designer seymour cray
not only is development of the new company 's initial machine tied directly to mr. cray so is its balance sheet
documents filed with the securities and exchange commission on the pending spinoff disclosed that cray research inc. will withdraw the almost $ N million in financing it is providing the new firm if mr. cray leaves or if the <unk> project he heads is scrapped
the documents also said that although the <unk> mr. cray has been working on the project for more than six years the cray-3 machine is at least another year away from a fully operational prototype
moreover there have been no orders for the cray-3 so far though the company says it is talking with several prospects
while many of the risks were anticipated when <unk> cray research first announced the spinoff in may the <unk> it attached to the financing had n't been made public until yesterday
we did n't have much of a choice cray computer 's chief financial officer gregory <unk> said in an interview
the theory is that seymour is the chief designer of the cray-3 and without him it could not be completed
cray research did not want to fund a project that did not include seymour
the documents also said that cray computer anticipates <unk> perhaps another $ N million in financing beginning next september
but mr. <unk> called that a <unk> scenario
the filing on the details of the spinoff caused cray research stock to jump $ N yesterday to close at $ N in new york stock exchange composite trading
analysts noted yesterday that cray research 's decision to link its $ N million <unk> note to mr. cray 's presence will complicate a valuation of the new company
it has to be considered as an additional risk for the investor said gary p. <unk> of <unk> group inc. minneapolis
cray computer will be a concept stock he said
you either believe seymour can do it again or you do n't
besides the designer 's age other risk factors for mr. cray 's new company include the cray-3 's tricky <unk> chip technology
the sec documents describe those chips which are made of <unk> <unk> as being so fragile and minute they will require special <unk> handling equipment
in addition the cray-3 will contain N processors twice as many as the largest current supercomputer
cray computer also will face intense competition not only from cray research which has about N N of the world-wide supercomputer market and which is expected to roll out the <unk> machine a direct competitor with the cray-3 in N
the spinoff also will compete with international business machines corp. and japan 's big three hitachi ltd. nec corp. and fujitsu ltd
the new company said it believes there are fewer than N potential customers for <unk> priced between $ N million and $ N million presumably the cray-3 price range
under terms of the spinoff cray research stockholders are to receive one cray computer share for every two cray research shares they own in a distribution expected to occur in about two weeks
no price for the new shares has been set
instead the companies will leave it up to the marketplace to decide
cray computer has applied to trade on nasdaq
analysts calculate cray computer 's initial book value at about $ N a share
along with the note cray research is <unk> about $ N million in assets primarily those related to the cray-3 development which has been a drain on cray research 's earnings
<unk> balance sheets clearly show why cray research favored the spinoff
without the cray-3 research and development expenses the company would have been able to report a profit of $ N million for the first half of N rather than the $ N million it posted
on the other hand had it existed then cray computer would have incurred a $ N million loss
mr. cray who could n't be reached for comment will work for the new colorado springs colo. company as an independent contractor the arrangement he had with cray research
regarded as the father of the supercomputer mr. cray was paid $ N at cray research last year
at cray computer he will be paid $ N
besides messrs. cray and <unk> other senior management at the company includes neil <unk> N president and chief executive officer joseph m. <unk> N vice president engineering malcolm a. <unk> N vice president software and douglas r. <unk> N vice president hardware
all came from cray research
cray computer which currently employs N people said it expects a work force of N by the end of N
john r. stevens N years old was named senior executive vice president and chief operating officer both new positions
he will continue to report to donald <unk> president and chief executive officer
mr. stevens was executive vice president of this <unk> holding company
arthur a. hatch N was named executive vice president of the company
he was previously president of the company 's eastern edison co. unit
john d. <unk> N was named to succeed mr. hatch as president of eastern edison
previously he was vice president of eastern edison
robert p. <unk> N was named senior vice president of eastern utilities
he was previously vice president
the u.s. claiming some success in its trade <unk> removed south korea taiwan and saudi arabia from a list of countries it is closely watching for allegedly failing to honor u.s. patents <unk> and other <unk> rights
however five other countries china thailand india brazil and mexico will remain on that so-called priority watch list as a result of an interim review u.s. trade representative carla hills announced
under the new u.s. trade law those countries could face accelerated <unk> investigations and stiff trade sanctions if they do n't improve their protection of intellectual property by next spring
mrs. hills said many of the N countries that she placed under <unk> degrees of scrutiny have made genuine progress on this touchy issue
she said there is growing <unk> around the world that <unk> of <unk> rights <unk> all trading nations and particularly the creativity and <unk> of an <unk> country 's own citizens
u.s. trade negotiators argue that countries with inadequate <unk> for <unk> rights could be hurting themselves by discouraging their own scientists and authors and by <unk> u.s. high-technology firms from investing or marketing their best products there
mrs. hills <unk> south korea for creating an <unk> task force and special enforcement teams of police officers and prosecutors trained to pursue movie and book <unk>
seoul also has instituted effective <unk> procedures to aid these teams she said
taiwan has improved its standing with the u.s. by <unk> a <unk> copyright agreement <unk> its trademark law and introducing legislation to protect foreign movie producers from unauthorized <unk> of their films
that measure could <unk> taipei 's growing number of small <unk> <unk> to pay movie producers for showing their films
saudi arabia for its part has vowed to enact a copyright law compatible with international standards and to apply the law to computer software as well as to literary works mrs. hills said
these three countries are n't completely off the hook though
they will remain on a <unk> list that includes N other countries
those countries including japan italy canada greece and spain are still of some concern to the u.s. but are deemed to pose <unk> problems for american patent and copyright owners than those on the priority list
gary hoffman a washington lawyer specializing in <unk> cases said the threat of u.s. <unk> combined with a growing recognition that protecting intellectual property is in a country 's own interest prompted the improvements made by south korea taiwan and saudi arabia
what this tells us is that u.s. trade law is working he said
he said mexico could be one of the next countries to be removed from the priority list because of its efforts to craft a new patent law
mrs. hills said that the u.s. is still concerned about disturbing developments in turkey and continuing slow progress in malaysia
she did n't elaborate although earlier u.s. trade reports have complained of videocassette <unk> in malaysia and <unk> for u.s. pharmaceutical patents in turkey
the N trade act requires mrs. hills to issue another review of the performance of these countries by april N
so far mrs. hills has n't deemed any cases bad enough to merit an accelerated investigation under the so-called special N provision of the act
argentina said it will ask creditor banks to <unk> its foreign debt of $ N billion the <unk> in the developing world
the declaration by economy minister <unk> <unk> is believed to be the first time such an action has been called for by an <unk> official of such <unk>
the latin american nation has paid very little on its debt since early last year
argentina <unk> to reach a reduction of N N in the value of its external debt mr. <unk> said through his spokesman <unk> <unk>
mr. <unk> met in august with u.s. assistant treasury secretary david mulford
<unk> negotiator carlos <unk> was in washington and new york this week to meet with banks
mr. <unk> recently has said the government of president carlos <unk> who took office july N feels a significant reduction of principal and interest is the only way the debt problem may be solved
but he has not said before that the country wants half the debt <unk>
during its centennial year the wall street journal will report events of the past century that stand as milestones of american business history
three computers that changed the face of personal computing were launched in N
that year the apple ii commodore pet and tandy <unk> came to market
the computers were crude by today 's standards
apple ii owners for example had to use their television sets as screens and <unk> data on <unk>
but apple ii was a major advance from apple i which was built in a garage by stephen <unk> and steven jobs for <unk> such as the <unk> computer club
in addition the apple ii was an affordable $ N
crude as they were these early pcs triggered explosive product development in desktop models for the home and office
big mainframe computers for business had been around for years
but the new N pcs unlike earlier <unk> types such as the <unk> <unk> and <unk> had <unk> and could store about two pages of data in their memories
current pcs are more than N times faster and have memory capacity N times greater than their N counterparts
there were many pioneer pc <unk>
william gates and paul allen in N developed an early <unk> system for pcs and gates became an industry billionaire six years after ibm adapted one of these versions in N
alan f. <unk> currently chairman of seagate technology led the team that developed the disk drives for pcs
dennis <unk> and dale <unk> two atlanta engineers were <unk> of the internal <unk> that allow pcs to share data via the telephone
ibm the world leader in computers did n't offer its first pc until august N as many other companies entered the market
today pc shipments annually total some $ N billion world-wide
<unk> <unk> & co. an australian pharmaceuticals company said its <unk> inc. affiliate acquired <unk> inc. for $ N million
<unk> is a new <unk> pharmaceuticals concern that sells products under the <unk> label
<unk> said it owns N N of <unk> 's voting stock and has an agreement to acquire an additional N N
that stake together with its convertible preferred stock holdings gives <unk> the right to increase its interest to N N of <unk> 's voting stock
oil production from australia 's bass <unk> fields will be raised by N barrels a day to about N barrels with the launch of the <unk> field the first of five small fields scheduled to be brought into production before the end of N
esso australia ltd. a unit of new york-based exxon corp. and broken hill <unk> operate the fields in a joint venture
esso said the <unk> field started production tuesday
output will be gradually increased until it reaches about N barrels a day
the field has reserves of N million barrels
reserves for the five new fields total N million barrels
the <unk> and <unk> fields are expected to start producing early next year and the <unk> and <unk> fields later next year
esso said the fields were developed after the australian government decided in N to make the first N million barrels from new fields free of <unk> tax
<unk> <unk> corp. said it completed the $ N million sale of its southern optical subsidiary to a group led by the unit 's president thomas r. sloan and other managers
following the acquisition of <unk> <unk> by a buy-out group led by shearson lehman hutton earlier this year the maker of <unk> <unk> decided to <unk> itself of certain of its <unk> businesses
the sale of southern optical is a part of the program
the white house said president bush has approved duty-free treatment for imports of certain types of watches that are n't produced in significant quantities in the u.s. the virgin islands and other u.s. <unk>
the action came in response to a petition filed by <unk> inc. for changes in the u.s. <unk> system of preferences for imports from developing nations
previously watch imports were denied such duty-free treatment
<unk> had requested duty-free treatment for many types of watches covered by N different u.s. tariff <unk>
the white house said mr. bush decided to grant duty-free status for N categories but turned down such treatment for other types of watches because of the potential for material injury to watch producers located in the u.s. and the virgin islands
<unk> is a major u.s. producer and seller of watches including <unk> <unk> watches assembled in the philippines and other developing nations covered by the u.s. tariff preferences
u.s. trade officials said the philippines and thailand would be the main beneficiaries of the president 's action
imports of the types of watches that now will be eligible for duty-free treatment totaled about $ N million in N a relatively small share of the $ N billion in u.s. watch imports that year according to an aide to u.s. trade representative carla hills
magna international inc. 's chief financial officer james mcalpine resigned and its chairman frank <unk> is stepping in to help turn the <unk> manufacturer around the company said
mr. <unk> will direct an effort to reduce overhead and curb capital spending until a more satisfactory level of profit is achieved and maintained magna said
stephen <unk> currently vice president finance will succeed mr. mcalpine
an ambitious expansion has left magna with excess capacity and a heavy debt load as the automotive industry enters a downturn
the company has reported declines in operating profit in each of the past three years despite steady sales growth
magna recently cut its quarterly dividend in half and the company 's class a shares are <unk> far below their 52-week high of N canadian dollars us$ N
on the toronto stock exchange yesterday magna shares closed up N canadian cents to c$ N
mr. <unk> founder and controlling shareholder of magna resigned as chief executive officer last year to seek unsuccessfully a seat in canada 's parliament
analysts said mr. <unk> wants to resume a more influential role in running the company
they expect him to cut costs throughout the organization
the company said mr. <unk> will personally direct the restructuring <unk> by <unk> <unk> president and chief executive
neither they nor mr. mcalpine could be reached for comment
magna said mr. mcalpine resigned to pursue a consulting career with magna as one of his clients
lord <unk> <unk> chairman of english china <unk> plc was named a nonexecutive director of this british chemical company
japanese investors nearly <unk> bought up two new mortgage <unk> mutual funds totaling $ N million the u.s. federal national mortgage association said
the purchases show the strong interest of japanese investors in u.s. <unk> instruments fannie mae 's chairman david o. maxwell said at a news conference
he said more than N N of the funds were placed with japanese institutional investors
the rest went to investors from france and hong kong
earlier this year japanese investors snapped up a similar $ N million mortgage-backed securities mutual fund
that fund was put together by blackstone group a new york investment bank
the latest two funds were assembled jointly by goldman sachs & co. of the u.s. and japan 's daiwa securities co
the new seven-year funds one offering a fixed-rate return and the other with a floating-rate return linked to the london interbank offered rate offer two key advantages to japanese investors
first they are designed to eliminate the risk of prepayment mortgage-backed securities can be retired early if interest rates decline and such prepayment forces investors to <unk> their money at lower rates
second they channel monthly mortgage payments into semiannual payments reducing the administrative burden on investors
by addressing those problems mr. maxwell said the new funds have become extremely attractive to japanese and other investors outside the u.s.
such devices have boosted japanese investment in mortgage-backed securities to more than N N of the $ N billion in such instruments outstanding and their purchases are growing at a rapid rate
they also have become large purchasers of fannie mae 's corporate debt buying $ N billion in fannie mae bonds during the first nine months of the year or almost a <unk> of the total amount issued
james l. <unk> <unk> executive vice president was named a director of this oil concern expanding the board to N members
ltv corp. said a federal bankruptcy court judge agreed to extend until march N N the period in which the steel aerospace and energy products company has the exclusive right to file a reorganization plan
the company is operating under chapter N of the federal bankruptcy code giving it court protection from creditors ' lawsuits while it attempts to work out a plan to pay its debts
italian chemical giant montedison <unk> through its montedison acquisition n.v. indirect unit began its $ <unk> tender offer for all the common shares outstanding of erbamont n.v. a maker of pharmaceuticals incorporated in the netherlands
the offer advertised in today 's editions of the wall street journal is scheduled to expire at the end of november
montedison currently owns about N N of erbamont 's common shares outstanding
the offer is being launched <unk> to a previously announced agreement between the companies
japan 's reserves of gold convertible foreign currencies and special drawing rights fell by a hefty $ N billion in october to $ N billion the finance ministry said
the total marks the sixth consecutive monthly decline
the <unk> downturn reflects the intensity of bank of japan <unk> intervention since june when the u.s. currency temporarily surged above the N yen level
the announcement follows a sharper $ N billion decline in the country 's foreign reserves in september to $ N billion
pick a country any country
it 's the latest investment craze sweeping wall street a rash of new closed-end country funds those publicly traded portfolios that invest in stocks of a single foreign country
no fewer than N country funds have been launched or registered with regulators this year triple the level of all of N according to charles e. simon & co. a washington-based research firm
the turf recently has ranged from chile to <unk> to portugal
next week the philippine fund 's launch will be capped by a visit by philippine president <unk> aquino the first time a head of state has kicked off an issue at the big board here
the next province
anything 's possible how about the new guinea fund <unk> george foot a managing partner at <unk> management associates of <unk> mass
the recent explosion of country funds <unk> the closed-end fund mania of the 1920s mr. foot says when narrowly focused funds grew wildly popular
they fell into <unk> after the N crash
unlike traditional <unk> mutual funds most of these <unk> portfolios are the closed-end type issuing a fixed number of shares that trade publicly
the surge brings to nearly N the number of country funds that are or soon will be listed in new york or london
these funds now account for several billions of dollars in assets
people are looking to stake their claims now before the number of available nations runs out says michael porter an analyst at smith barney harris upham & co. new york
behind all the <unk> is some <unk> competition
as individual investors have turned away from the stock market over the years securities firms have scrambled to find new products that brokers find easy to sell
and the firms are stretching their <unk> far and wide to do it
financial planners often urge investors to diversify and to hold a <unk> of international securities
and many emerging markets have <unk> more mature markets such as the u.s. and japan
country funds offer an easy way to get a taste of foreign stocks without the hard research of seeking out individual companies
but it does n't take much to get burned
political and currency gyrations can <unk> the funds
another concern the funds ' share prices tend to swing more than the broader market
when the stock market dropped nearly N N oct. N for instance the mexico fund plunged about N N and the spain fund fell N N
and most country funds were clobbered more than most stocks after the N crash
what 's so wild about the funds ' frenzy right now is that many are trading at historically fat premiums to the value of their underlying portfolios
after trading at an average discount of more than N N in late N and part of last year country funds currently trade at an average premium of N N
the reason share prices of many of these funds this year have climbed much more sharply than the foreign stocks they hold
it 's probably worth paying a premium for funds that invest in markets that are partially closed to foreign investors such as south korea some specialists say
but some european funds recently have skyrocketed spain fund has surged to a startling N N premium
it has been targeted by japanese investors as a good long-term play tied to N 's european economic integration
and several new funds that are n't even fully invested yet have jumped to trade at big premiums
i 'm very alarmed to see these rich <unk> says smith barney 's mr. porter
the newly <unk> premiums reflect the increasingly global marketing of some country funds mr. porter suggests
unlike many u.s. investors those in asia or europe seeking <unk> exposure may be less <unk> to paying higher prices for country funds
there may be an international viewpoint cast on the funds listed here mr. porter says
nonetheless plenty of u.s. analysts and money managers are <unk> at the <unk> trading levels of some country funds
they argue that u.s. investors often can buy american depositary receipts on the big stocks in many funds these so-called adrs represent shares of foreign companies traded in the u.s.
that way investors can essentially buy the funds without paying the premium
for people who insist on jumping in now to buy the funds <unk> 's mr. foot says the only advice i have for these folks is that those who come to the party late had better be ready to leave quickly
the u.s. and soviet union are holding technical talks about possible repayment by moscow of $ N million in <unk> russian debts owed to the u.s. government the state department said

+ 0
- 82
reproduction/Char-aware_NLM/utilities.py

@@ -1,82 +0,0 @@
import re

import torch
import torch.nn.functional as F


def batch_generator(x, batch_size):
    # x: [num_words, in_channel, height, width]
    # Partition x into consecutive batches; any tail remainder is dropped.
    num_step = x.size()[0] // batch_size
    for t in range(num_step):
        yield x[t * batch_size:(t + 1) * batch_size]


def text2vec(words, char_dict, max_word_len):
    """Return a list of list of int: character ids per word, padded and wrapped in BOW/EOW markers."""
    word_vec = []
    for word in words:
        vec = [char_dict[ch] for ch in word]
        if len(vec) < max_word_len:
            vec += [char_dict["PAD"] for _ in range(max_word_len - len(vec))]
        vec = [char_dict["BOW"]] + vec + [char_dict["EOW"]]
        word_vec.append(vec)
    return word_vec


def seq2vec(input_words, char_embedding, char_embedding_dim, char_table):
    """Convert the input strings into character embeddings."""
    # input_words == list of string
    # char_embedding == torch.nn.Embedding
    # char_embedding_dim == int
    # char_table == list of unique chars
    # Returns: tensor of shape [len(input_words), char_embedding_dim, max_word_len + 2]
    max_word_len = max([len(word) for word in input_words])
    print("max_word_len={}".format(max_word_len))
    tensor_list = []

    start_column = torch.ones(char_embedding_dim, 1)
    end_column = torch.ones(char_embedding_dim, 1)

    for word in input_words:
        # convert the string to a [char_embedding_dim, len(word)] matrix of character embeddings
        # (char_embedding_lookup is expected to be defined elsewhere; it is not part of this module)
        word_encoding = char_embedding_lookup(word, char_embedding, char_table)
        # add start and end columns
        word_encoding = torch.cat([start_column, word_encoding, end_column], 1)
        # zero-pad the right columns up to max_word_len + 2
        word_encoding = F.pad(word_encoding, (0, max_word_len - word_encoding.size()[1] + 2)).data
        # add a leading batch dimension
        word_encoding = word_encoding.unsqueeze(0)

        tensor_list.append(word_encoding)

    return torch.cat(tensor_list, 0)


def read_data(file_name):
    # Return: list of lowercased tokens, with "<unk>" normalized to "unk"
    with open(file_name, 'r') as f:
        corpus = f.read().lower()
    corpus = re.sub(r"<unk>", "unk", corpus)
    return corpus.split()


def get_char_dict(vocabulary):
    # vocabulary == dict of (word, int)
    # Return: dict of (char, int), with ids starting from 1
    char_dict = dict()
    count = 1
    for word in vocabulary:
        for ch in word:
            if ch not in char_dict:
                char_dict[ch] = count
                count += 1
    return char_dict


def create_word_char_dict(*file_name):
    text = []
    for file in file_name:
        text += read_data(file)
    word_dict = {word: ix for ix, word in enumerate(set(text))}
    char_dict = get_char_dict(word_dict)
    return word_dict, char_dict
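
For reference, a minimal usage sketch of the removed helpers (not part of the deleted file): it assumes they are importable as a `utilities` module, that train.txt/valid.txt follow the PTB-style format shown in this diff, and that the batch size of 32 and the special-marker ids are purely illustrative.

# Hypothetical usage sketch; module name, file names, and sizes are assumptions.
import torch

from utilities import create_word_char_dict, read_data, text2vec

word_dict, char_dict = create_word_char_dict("train.txt", "valid.txt")
# Reserve ids for the special markers text2vec expects (illustrative choice).
for special in ("PAD", "BOW", "EOW"):
    char_dict.setdefault(special, len(char_dict) + 1)

words = read_data("train.txt")[:32]            # a small batch of tokens
max_word_len = max(len(w) for w in words)
char_ids = torch.tensor(text2vec(words, char_dict, max_word_len))
print(char_ids.shape)                          # torch.Size([32, max_word_len + 2])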

+ 0
- 336
reproduction/Char-aware_NLM/valid.txt

@@ -1,336 +0,0 @@
consumers may want to move their telephones a little closer to the tv set
<unk> <unk> watching abc 's monday night football can now vote during <unk> for the greatest play in N years from among four or five <unk> <unk>
two weeks ago viewers of several nbc <unk> consumer segments started calling a N number for advice on various <unk> issues
and the new syndicated reality show hard copy records viewers ' opinions for possible airing on the next day 's show
interactive telephone technology has taken a new leap in <unk> and television programmers are racing to exploit the possibilities
eventually viewers may grow <unk> with the technology and <unk> the cost
but right now programmers are figuring that viewers who are busy dialing up a range of services may put down their <unk> control <unk> and stay <unk>
we 've been spending a lot of time in los angeles talking to tv production people says mike parks president of call interactive which supplied technology for both abc sports and nbc 's consumer minutes
with the competitiveness of the television market these days everyone is looking for a way to get viewers more excited
one of the leaders behind the expanded use of N numbers is call interactive a joint venture of giants american express co. and american telephone & telegraph co
formed in august the venture <unk> at&t 's newly expanded N service with N <unk> computers in american express 's omaha neb. service center
other long-distance carriers have also begun marketing enhanced N service and special consultants are <unk> up to exploit the new tool
blair entertainment a new york firm that advises tv stations and sells ads for them has just formed a subsidiary N blair to apply the technology to television
the use of N toll numbers has been expanding rapidly in recent years
for a while <unk> <unk> lines and services that <unk> children to dial and <unk> movie or music information earned the service a somewhat <unk> image but new legal restrictions are aimed at trimming excesses
the cost of a N call is set by the <unk> abc sports for example with the cheapest starting at N cents
billing is included in a caller 's regular phone bill
from the fee the local phone company and the long-distance carrier extract their costs to carry the call passing the rest of the money to the <unk> which must cover advertising and other costs
in recent months the technology has become more flexible and able to handle much more volume
before callers of N numbers would just listen and not talk or they 'd vote yes or no by calling one of two numbers
people in the phone business call this technology N <unk>
now callers are led through complex <unk> of choices to retrieve information they want and the hardware can process N calls in N seconds
up to now N numbers have mainly been used on local tv stations and cable channels
<unk> used one to give away the house that rock star jon <unk> <unk> grew up in
for several years turner broadcasting system 's cable news network has invited viewers to respond <unk> to <unk> issues should the u.s. military intervene in panama but even the hottest <unk> on <unk> <unk> only about N calls
the newest uses of the <unk> technology demonstrate the growing variety of applications
capital cities\/abc inc. cbs inc. and general electric co. 's national broadcasting co. unit are expected to announce soon a joint campaign to raise awareness about <unk>
the subject will be written into the <unk> of prime-time shows and viewers will be given a N number to call
callers will be sent educational booklets and the call 's modest cost will be an immediate method of raising money
other network applications have very different goals
abc sports was looking for ways to lift <unk> <unk> ratings for monday night football
kurt <unk> abc sports 's marketing director says that now tens of thousands of fans call its N number each week to vote for the best <unk> return <unk> <unk> etc
profit from the calls goes to charity but abc sports also uses the calls as a sales tool after <unk> callers for voting frank <unk> offers a football <unk> for $ N and N N of callers stay on the line to order it
jackets may be sold next
meanwhile nbc sports recently began scores plus a <unk> 24-hour N line providing a complex array of scores analysis and fan news
a spokesman said its purpose is to bolster the impression that nbc sports is always there for people
nbc 's <unk> consumer minutes have increased advertiser spending during the day the network 's weakest period
each <unk> matches a sponsor and a topic on <unk> unilever n.v. 's <unk> bros. sponsors tips on diet and exercise followed by a <unk> <unk> bros. commercial
viewers can call a N number for additional advice which will be tailored to their needs based on the numbers they <unk> press one if you 're pregnant etc
if the caller stays on the line and leaves a name and address for the sponsor coupons and a newsletter will be <unk> and the sponsor will be able to gather a list of desirable potential customers
<unk> <unk> an <unk> vice president says nbc has been able to charge premium rates for this ad time
she would n't say what the premium is but it 's believed to be about N N above regular <unk> rates
we were able to get advertisers to use their promotion budget for this because they get a chance to do <unk> says ms. <unk>
and we were able to attract some new advertisers because this is something new
mr. parks of call interactive says tv executives are considering the use of N numbers for talk shows game shows news and opinion surveys
experts are predicting a big influx of new shows in N when a service called automatic number information will become widely available
this service <unk> each caller 's phone number and it can be used to generate instant mailing lists
hard copy the new syndicated tabloid show from paramount pictures will use its N number for additional purposes that include research says executive producer mark b. von s. <unk>
for a piece on local heroes of world war ii we can ask people to leave the name and number of anyone they know who won a <unk> he says
that 'll save us time and get people involved
but mr. <unk> sees much bigger changes ahead
these are just baby steps toward real interactive video which i believe will be the biggest thing yet to affect television he says
although it would be costly to shoot multiple versions tv programmers could let audiences vote on different <unk> for a movie
fox broadcasting <unk> with this concept last year when viewers of married with children voted on whether al should say i love you to <unk> on <unk> 's day
someday viewers may also choose different <unk> of news coverage
a <unk> by phone could let you decide i 'm interested in just the beginning of story no. N and i want story no. N in <unk> mr. <unk> says
you 'll start to see shows where viewers program the program
integrated resources inc. the troubled financial-services company that has been trying to sell its core companies to restructure debt said talks with a potential buyer ended
integrated did n't identify the party or say why the talks failed
last week another potential buyer <unk> financial group which had agreed in august to purchase most of integrated 's core companies for $ N million ended talks with integrated
integrated said that it would continue to pursue other alternatives to sell the five core companies and that a group of senior executives plans to make a proposal to purchase three of the companies integrated resources equity corp. resources trust co. and integrated resources asset management corp
a price was n't disclosed
integrated also said it expects to report a second-quarter loss wider than the earlier estimate of about $ N million
the company did n't disclose the new estimate but said the change was related to integrated 's failure to sell its core businesses as well as other events which it did n't detail that occurred after its announcement last week that it was in talks with the unidentified prospective buyer
meanwhile a number of top sales producers from integrated resources equity will meet this afternoon in chicago to discuss their options
the unit is a <unk> constructed group of about N independent brokers and financial planners who sell insurance annuities limited partnerships mutual funds and other investments for integrated and other firms
the sales force is viewed as a critical asset in integrated 's attempt to sell its core companies
<unk> cited concerns about how long integrated would be able to hold together the sales force as one reason its talks with integrated failed
in composite trading on the new york stock exchange yesterday integrated closed at $ N a share down N cents
integrated has been struggling to avoid a bankruptcy-law filing since june when it failed to make interest payments on nearly $ N billion of debt
integrated senior and junior creditors are owed a total of about $ N billion
an earthquake struck northern california killing more than N people
the violent temblor which lasted about N seconds and registered N on the richter scale also caused the collapse of a <unk> section of the san <unk> bay bridge and shook candlestick park
the tremor was centered near <unk> southeast of san francisco and was felt as far as N miles away
numerous injuries were reported
some buildings collapsed gas and water lines <unk> and fires <unk>
the quake which also caused damage in san jose and berkeley knocked out electricity and telephones <unk> roadways and disrupted subway service in the bay area
major injuries were n't reported at candlestick park where the third game of baseball 's world series was canceled and fans <unk> from the stadium
bush vowed to veto a bill allowing federal financing for abortions in cases of rape and incest saying tax dollars should n't be used to compound a violent act with the taking of an <unk> life
his pledge in a letter to democratic sen. byrd came ahead of an expected senate vote on spending legislation containing the provision
east germany 's politburo met amid speculation that the ruling body would oust hard-line leader honecker whose rule has been challenged by mass emigration and calls for democratic freedoms
meanwhile about N refugees flew to <unk> west germany from warsaw the first <unk> in east germany 's <unk> exodus
the world psychiatric association voted at an <unk> <unk> to <unk> <unk> the soviet union
moscow which left the group in N to avoid <unk> over allegations that political <unk> were being certified as <unk> could be suspended if the <unk> of <unk> against <unk> is discovered during a review within a year
nasa postponed the <unk> of the space shuttle atlantis because of rain near the site of the launch <unk> in <unk> <unk> fla
the flight was <unk> for today
the spacecraft 's five <unk> are to <unk> the <unk> galileo space probe on an <unk> mission to jupiter
senate democratic leaders said they had enough votes to defeat a proposed constitutional amendment to ban flag burning
the amendment is aimed at <unk> a supreme court ruling that threw out the conviction of a texas <unk> on grounds that his freedom of speech was violated
federal researchers said lung-cancer mortality rates for people under N years of age have begun to decline particularly for white males
the national cancer institute also projected that overall u.s. mortality rates from lung cancer should begin to drop in several years if cigarette smoking continues to <unk>
bush met with south korean president roh who indicated that seoul plans to further ease trade rules to ensure that its economy becomes as open as the other industrialized nations by the mid-1990s
bush assured roh that the u.s. would stand by its security commitments as long as there is a threat from communist north korea
the bush administration is seeking an understanding with congress to ease restrictions on u.s. involvement in foreign coups that might result in the death of a country 's leader
a white house spokesman said that while bush would n't alter a longstanding ban on such involvement there 's a <unk> needed on its interpretation
india 's gandhi called for parliamentary elections next month
the balloting considered a test for the prime minister and the ruling congress i party comes amid charges of <unk> leadership and government corruption
gandhi 's family has ruled independent india for all but five years of its <unk> history
the soviet union <unk> from a u.n. general assembly vote to reject israel 's credentials
it was the first time in seven years that moscow has n't joined efforts led by <unk> nations to <unk> israel from the world body and was viewed as a sign of improving <unk> ties
israel was <unk> by a vote of N with N <unk>
black activist walter sisulu said the african national congress would n't reject violence as a way to pressure the south african government into concessions that might lead to negotiations over apartheid
the <unk> sisulu was among eight black political activists freed sunday from prison
london has concluded that <unk> president <unk> was n't responsible for the execution of six british <unk> in world war ii although he probably was aware of the <unk>
the report by the defense ministry also rejected allegations that britain covered up evidence of <unk> 's activities as a german army officer
an international group approved a formal ban on ivory trade despite objections from southern african governments which threatened to find alternative channels for selling elephant <unk>
the move by the convention on trade in endangered <unk> meeting in switzerland places the elephant on the <unk> list
an <unk> in colombia killed a federal judge on a <unk> street
an <unk> caller to a local radio station said cocaine traffickers had <unk> the <unk> in <unk> for the <unk> of <unk> wanted on drug charges in the u.s.
<unk> leader <unk> met with egypt 's president <unk> and the two officials pledged to respect each other 's laws security and stability
they stopped short of <unk> diplomatic ties <unk> in N
the reconciliation talks in the <unk> desert town of <unk> followed a meeting monday in the egyptian resort of <unk> <unk>
<unk> group inc. revised its exchange offer for $ N million face amount of N N senior subordinated debt due N and extended the offer to oct. N from oct. N
the <unk> n.j. company said holders would receive for each $ N face amount $ N face amount of a new issue of secured senior subordinated notes convertible into common stock at an initial rate of $ N a share and N common shares
the new notes will bear interest at N N through july N N and thereafter at N N
under the original proposal the maker of specialty coatings and a developer of <unk> technologies offered $ N of notes due N N common shares and $ N in cash for each $ N face amount
completion of the exchange offer is subject to the tender of at least N N of the debt among other things
<unk> which said it does n't plan to further extend the offer said it received $ N face amount of debt under the original offer
the stock of ual corp. continued to be <unk> amid signs that british airways may <unk> at any <unk> <unk> of the aborted $ N billion buy-out of united airlines ' parent
ual stock plummeted a further $ N to $ N on volume of more than N million shares in new york stock exchange composite trading
the plunge followed a drop of $ N monday amid indications the takeover may take weeks to be revived
the stock has fallen $ N or N N in the three trading days since announcement of the collapse of the $ 300-a-share takeover jolted the entire stock market into its <unk> plunge ever
this is a total <unk> for takeover-stock traders one investment banker said
los angeles financier marvin davis who put united in play with a $ N billion bid two months ago last night <unk> both a ray of hope and an extra element of uncertainty by saying he remains interested in acquiring ual
but he dropped his earlier $ 300-a-share <unk> bid saying he must first explore bank financing
even as citicorp and chase manhattan corp. scrambled to line up bank financing for a revised version of the <unk> labor-management bid british airways a N N partner in the buying group indicated it wants to start from <unk>
its partners are united 's pilots who were to own N N and ual management at N N
adding <unk> to injury united 's <unk> machinists ' union which helped scuttle financing for the first bid yesterday asked ual chairman stephen wolf and other ual directors to resign
a similar demand was made by a group that represents some of united 's N <unk> employees
john <unk> machinists union general vice president attacked mr. wolf as greedy and irresponsible for pursuing the buy-out
although mr. wolf and john pope ual 's chief financial officer stood to <unk> $ N million for stock and options in the buy-out ual executives planned to reinvest only $ N million in the new company
the blue-collar machinists longtime rivals of the white-collar pilots say the <unk> would load the company with debt and weaken its finances
confusion about the two banks ' <unk> efforts to round up financing for a new bid that the ual board has n't even seen yet helped send ual stock <unk> downward
and rumors of forced selling by takeover-stock traders triggered a <unk> <unk> in the dow jones industrial average around N a.m. edt yesterday
yesterday 's selling began after a japanese news agency reported that japanese banks which balked at the first bid were ready to reject a revised version at around $ N a share or $ N billion
several reports as the day <unk> gave vague or <unk> indications about whether banks would sign up
citicorp for example said only that it had <unk> of interest of a transaction from both the borrowers and the banks but did n't have an agreement
late in the day mr. wolf issued a <unk> statement calling mr. <unk> 's blast divisive and <unk> for
but he gave few details on the progress toward a new bid saying only we are working toward a revised proposal for majority employee ownership
meanwhile in another sign that a new bid is n't imminent it was learned that the ual board held a telephone meeting monday to hear an update on the situation but that a formal board meeting is n't likely to be <unk> until early next week
in london british airways chairman lord king was quoted in the times as declaring he is not prepared to take my shareholders into a <unk> deal
observers said it appeared that british air was angered at the way the bid has <unk> into confusion as well as by the banks ' effort to round up financing for what one called a deal that is n't a deal
the effort to revive the bid was complicated by the <unk> nature of the <unk> buying group
the pilots were meeting outside chicago yesterday
but british air which was to have supplied $ N million out of $ N million in equity financing apparently was n't involved in the second proposal and could well reject it even if banks obtain financing
a group of united 's <unk> employees said in a statement the fact that wolf and other officers were going to line their pockets with literally millions of dollars while <unk> severe pay cuts on the <unk> employees of united is not only <unk> but <unk>
the machinists also asked for an investigation by the securities and exchange commission into possible <unk> violations in the original bid for ual by mr. davis as well as in the response by ual
last week just before the bank commitments were due the union asked the u.s. labor department to study whether the bid violated legal standards of fairness governing employee investment funds
in his statement mr. wolf said we continue to believe our approach is sound and that it is far better for all employees than the alternative of having an outsider own the company with employees paying for it just the same
mr. wolf has <unk> merger advice from a major wall street securities firm relying instead only on a takeover lawyer peter <unk> of <unk> <unk> slate <unk> & flom
the huge drop in ual stock prompted one takeover stock trader george <unk> managing partner of <unk> <unk> & co. to deny publicly rumors that his firm was going out of business
mr. <unk> said that despite losses on ual stock his firm 's health is excellent
the stock 's decline also has left the ual board in a <unk>
although it may not be legally obligated to sell the company if the buy-out group ca n't revive its bid it may have to explore alternatives if the buyers come back with a bid much lower than the group 's original $ 300-a-share proposal
at a meeting sept. N to consider the labor-management bid the board also was informed by its investment adviser first boston corp. of interest expressed by buy-out funds including kohlberg kravis roberts & co. and <unk> little & co. as well as by robert bass morgan stanley 's buy-out fund and pan am corp
the takeover-stock traders were hoping that mr. davis or one of the other interested parties might <unk> with the situation in disarray or that the board might consider a recapitalization
meanwhile japanese bankers said they were still <unk> about accepting citicorp 's latest proposal
macmillan inc. said it plans a public offering of N million shares of its berlitz international inc. unit at $ N to $ N a share
the offering for the language school unit was announced by robert maxwell chairman and chief executive officer of london-based maxwell communication corp. which owns macmillan
after the offering is completed macmillan will own about N N of the berlitz common stock outstanding
five million shares will be offered in the u.s. and N million additional shares will be offered in <unk> international offerings outside the u.s.
goldman sachs & co. will manage the offering
macmillan said berlitz intends to pay quarterly dividends on the stock
the company said it expects to pay the first dividend of N cents a share in the N first quarter
berlitz will borrow an amount equal to its expected net proceeds from the offerings plus $ N million in connection with a credit agreement with lenders
the total borrowing will be about $ N million the company said
proceeds from the borrowings under the credit agreement will be used to pay an $ N million cash dividend to macmillan and to lend the remainder of about $ N million to maxwell communications in connection with a <unk> note
proceeds from the offering will be used to repay borrowings under the short-term parts of a credit agreement
berlitz which is based in princeton n.j. provides language instruction and translation services through more than N language centers in N countries
in the past five years more than N N of its sales have been outside the u.s.
macmillan has owned berlitz since N
in the first six months of this year berlitz posted net income of $ N million on sales of $ N million compared with net income of $ N million on sales of $ N million
right away you notice the following things about a philip glass concert
it attracts people with funny hair or with no hair in front of me a girl with <unk> <unk> sat <unk> a boy who had <unk> his
whoever constitute the local left bank come out in force dressed in black along with a <unk> of <unk> who want to be on the cutting edge
people in glass houses tend to look <unk>
and if still <unk> at the evening 's end you notice something else the audience at first <unk> and <unk> by the music releases its <unk> feelings in collective <unk>
currently in the middle of a <unk> <unk> tour as a solo <unk> mr. glass has left behind his <unk> equipment and <unk> in favor of going it alone
he sits down at the piano and plays
and plays
either one likes it or one does n't
the typical glass audience which is more likely to be composed of music students than their teachers certainly does
the work though sounds like <unk> for <unk>
philip glass is the <unk> and his music the new clothes of the <unk>
his success is easy to understand
<unk> introducing and explaining his pieces mr. glass looks and sounds more like a <unk> <unk> describing his work than a classical <unk> playing a recital
the piano <unk> which have been labeled <unk> as <unk> <unk> <unk> cyclical <unk> and <unk> are <unk> <unk> therefore <unk> <unk> <unk> therefore <unk> and <unk> <unk> but <unk> therefore both pretty and <unk>
it is music for people who want to hear something different but do n't want to work especially hard at the task
it is <unk> listening for the now generation
mr. glass has <unk> the famous <unk> <unk> less is more
his more is always less
far from being <unk> the music <unk> <unk> us with apparent <unk> not so <unk> <unk> in the <unk> of N time <unk> <unk> and <unk> or <unk> <unk> <unk>
but the music has its <unk> and mr. glass has constructed his solo program around a move from the simple to the relatively complex
opening N from <unk> <unk> the audience to the glass technique never <unk> too far from the piano 's center mr. glass works in the two <unk> on either side of middle c and his fingers seldom leave the <unk>
there is a <unk> musical style here but not a particular performance style
the music is not especially <unk> indeed it 's hard to imagine a bad performance of it
nothing <unk> no <unk> no <unk> <unk> problems challenge the performer
we hear we may think inner voices but they all seem to be saying the same thing
with planet news music meant to <unk> <unk> of allen <unk> 's wichita <unk> <unk> mr. glass gets going
his hands sit <unk> apart on the <unk>
seventh <unk> make you feel as though he may break into a very slow <unk> <unk>
the <unk> <unk> but there is little <unk> even though his fingers begin to <unk> over more of the <unk>
contrasts predictably <unk> first the music is loud then it becomes soft then you realize it becomes <unk> again
the fourth <unk> play an <unk> from <unk> on the beach is like a <unk> but it does n't seem to move much beyond its <unk> ground in three blind mice
when mr. glass decides to get really fancy he <unk> his hands and hits a <unk> bass note with his right hand
he does this in at least three of his solo pieces
you might call it a <unk> or a <unk> <unk>
in mad rush which came from a commission to write a piece of <unk> length mr. glass <unk> and <unk> confessed that this was no problem for me an a <unk> with a b section several times before the piece ends <unk>
not only is the typical <unk> <unk> it is also often multiple in its context s
mad rush began its life as the <unk> to the <unk> lama 's first public address in the u.s. when mr. glass played it on the <unk> at new york 's <unk> of st. john the <unk>
later it was performed on radio <unk> in germany and then <unk> <unk> took it for one of her dance pieces
the point is that any piece can be used as background music for virtually anything
the evening ended with mr. glass 's <unk> another multiple work
parts N N and N come from the <unk> of <unk> morris 's <unk> film the thin blue line and the two other parts from <unk> music to two separate <unk> of the <unk> story of the same name
when used as background in this way the music has an appropriate <unk> as when a <unk> phrase a <unk> minor third <unk> the seemingly endless <unk> of reports interviews and <unk> of witnesses in the morris film
served up as a solo however the music lacks the <unk> provided by a context within another medium
<unk> of mr. glass may agree with the critic richard <unk> 's sense that the N music in twelve parts is as <unk> and <unk> as the <unk> <unk>
but while making the obvious point that both <unk> develop variations from themes this comparison <unk> the intensely <unk> nature of mr. glass 's music
its supposedly <unk> <unk> <unk> a <unk> that makes one <unk> for the <unk> of <unk> <unk> the <unk> radical <unk> of <unk> and <unk> and what in <unk> even seems like <unk> in <unk>
mr. <unk> is professor of english at southern <unk> university and editor of the southwest review
honeywell inc. said it hopes to complete shortly the first of two sales of shares in its japanese joint venture <unk> for about $ N million
the company would n't disclose the buyer of the initial N N stake
proceeds of the sale expected to be completed next week would be used to repurchase as many as N million shares of honeywell stock the company said
honeywell said it is negotiating the sale of a second stake in <unk> but indicated it intends to hold at least N N of the joint venture 's stock long term
a N N stake would allow honeywell to include <unk> earnings in its results
honeywell previously said it intended to reduce its holding in the japanese concern as part of a restructuring plan which also calls for a reduction of <unk> on weapons sales
yesterday a spokeswoman said the company was pleased with our progress in that regard and hopes to provide additional details soon
honeywell said its defense and marine systems group incurred delays in shipping some undisclosed contracts during the third quarter resulting in lower operating profit for that business
overall honeywell reported earnings of $ N million or $ N a share for the three months ended oct. N compared with a loss of $ N million or N cents a share a year earlier
the previous period 's results included a $ N million pretax charge related to <unk> contract costs and a $ N million pretax gain on real estate sales
sales for the latest quarter were flat at $ N billion
for the nine months honeywell reported earnings of $ N million or $ N a share compared with earnings of $ N million or $ N a share a year earlier
sales declined slightly to $ N billion
once again your editorial page <unk> the law to conform to your almost <unk> <unk>
in an <unk> of little <unk> to his central point about private enforcement suits by environmental groups michael s. <unk> <unk> your readers the clean water act is written upon the <unk> the <unk> rather that nothing but zero risk will do it <unk> a legal standard of zero <unk> <unk> environmental <unk> sept. N
this statement surely <unk> your editorial viewpoint that environmental protection is generally silly or excessive but it is simply wrong
the clean water act contains no legal standard of zero <unk>
it requires that <unk> of <unk> into the waters of the united states be authorized by permits that reflect the <unk> limitations developed under section N
whatever may be the problems with this system it <unk> reflects zero risk or zero <unk>
perhaps mr. <unk> was confused by congress 's <unk> statement of the national goal in section N which indeed calls for the elimination of <unk> by N no less
this <unk> statement was not taken seriously when enacted in N and should not now be confused with the <unk> provisions of the statute
thus you do the public a great <unk> when mr. <unk> suggests even <unk> that the clean water act prohibits the preparation of a <unk> and water your <unk> readers may be led to believe that nothing but chance or oversight protects them as they <unk> in the night with their <unk> and waters from the <unk> knock of the sierra club at their doors
robert j. <unk>
national geographic the <unk> u.s. magazine is attracting more readers than ever and offers the glossy <unk> pages that upscale advertisers love
so why did advertising pages plunge by almost N N and ad revenue by N N in the first half
to hear advertisers tell it the magazine just has n't kept up with the times
despite renewed interest by the public in such topics as the environment and the third world it has n't been able to shake its reputation as a magazine boys like to <unk> through in search of <unk> tribe women
worse it lagged behind competitors in offering <unk> <unk> from regional editions to discounts for frequent advertisers
but now the magazine is attempting to fight back with an ambitious plan including a revamped sales strategy and a surprisingly aggressive ad campaign
advertisers do n't think of the magazine first says joan <unk> who joined in april as national advertising director
what we want to do is take a more aggressive stance
people did n't believe we were in tune with the marketplace and in many ways we were n't
the <unk> magazine has never had to woo advertisers with quite so much <unk> before
it largely <unk> on its <unk> <unk> N million subscribers in the first half up from N million a year ago an average age of N for readers at the <unk> of their <unk> years loyalty to the tune of an N N average subscription renewal rate
the magazine had its best year yet in N when it <unk> its centennial and racked up a N N gain in ad pages to N
but this year when the <unk> surrounding its centennial died so too did some advertiser interest
the reason ad executives say is that the entire magazine business has been soft and national geographic has some <unk> that make it especially <unk> during a soft market
perhaps the biggest of those factors is its high ad prices $ N for a <unk> page vs. $ N for the <unk> a comparable publication with a far smaller circulation
when ad dollars are tight the high page cost is a major <unk> for advertisers who generally want to appear regularly in a publication or not at all
even though national geographic offers far more readers than does a magazine like <unk> the page costs you an arm and a leg to develop any frequency says harry glass new york media manager for bozell inc
to combat that problem national geographic like other magazines began offering regional editions allowing advertisers to appear in only a portion of its magazines for example ads can run only in the magazines sent to subscribers in the largest N markets
but the magazine was slower than its competitors to come up with its regional editions and until last year offered fewer of them than did competitors
time magazine for example has more than N separate editions going to different regions top management and other groups
another sticking point for advertisers was national geographic 's tradition of <unk> its ads together usually at the beginning or end of the magazine rather than spreading ads out among its articles as most magazines do
and national geographic 's <unk> size means extra production costs for advertisers
but ms. <unk> says the magazine is fighting back
it now offers N regional editions it very recently began running ads adjacent to articles and it has been <unk> up its sales force
and it just launched a promotional campaign to tell chief executives marketing directors and media executives just that
the centerpiece of the promotion is its new ad campaign into which the magazine will pour about $ N mostly in the next few weeks
the campaign created by <unk> group 's ddb needham agency takes advantage of the <unk> photography that national geographic is known for
in one ad a photo of the interior of the <unk> in paris is <unk> with the headline the only book more respected than <unk> does n't accept advertising
another ad pictures a tree <unk> magnified N times with the headline for impact far beyond your size consider our regional editions
ms. <unk> says she wants the campaign to help attract advertisers in N categories including corporate financial services consumer electronics insurance and food
her goal to top N ad pages in N up from about N this year
whether she can meet that ambitious goal is still far from certain
the ad campaign is meant to <unk> the thought of national geographic she says
we want it to be a <unk> kind of image
wcrs plans <unk> sale
wcrs group hopes to announce perhaps today an agreement to sell the majority of its ad unit to <unk> eurocom a european ad executive said
wcrs has been in discussions with eurocom for several months
however when negotiations <unk> down recently wcrs 's chief executive peter scott met in paris with another french firm <unk> <unk> <unk> <unk> or <unk>
according to the executive <unk> 's involvement prompted renewed <unk> in the <unk> talks and the two agencies were hoping to <unk> out details by today
executives of the two agencies could n't be reached last night
ad notes
new account procter & gamble co. cincinnati awarded the ad accounts for its line of professional <unk> <unk> <unk> and oil products to <unk> <unk> <unk> cincinnati
billings were n't disclosed
professional <unk> products are specially made for the <unk> industry
who 's news stephen <unk> N was named executive vice president deputy creative director at grey advertising new york
he was executive vice president director of broadcast production
the commodity futures trading commission plans to restrict dual trading on commodity exchanges a move almost certain to <unk> exchange officials and traders
the cftc said it will propose the restrictions after the release of a study that shows little economic benefit resulting from dual trading and cites problems associated with the practice
dual trading gives an exchange trader the right to trade both for his own account and for customers
the issue exploded this year after a federal bureau of investigation operation led to charges of widespread trading abuses at the chicago board of trade and chicago mercantile exchange
while not specifically mentioned in the fbi charges dual trading became a focus of attempts to tighten industry regulations
critics contend that traders were putting buying or selling for their own accounts ahead of other traders ' customer orders
traders are likely to oppose such restrictions because dual trading provides a way to make money in slower markets where there is a shortage of customer orders
the exchanges contend that dual trading improves liquidity in the markets because traders can buy or sell even when they do n't have a customer order in hand
the exchanges say liquidity becomes a severe problem for <unk> traded contracts such as those with a long time remaining before expiration
the cftc may take those arguments into account by allowing exceptions to its restrictions
the agency did n't cite specific situations where dual trading might be allowed but smaller exchanges or contracts that need additional liquidity are expected to be among them
wendy <unk> the agency 's chairman told the senate agriculture committee that she expects the study to be released within two weeks and the rule changes to be completed by <unk>
the study by the cftc 's division of economic analysis shows that a trade is a trade a member of the study team said
whether a trade is done on a dual or <unk> basis the member said does n't seem to have much economic impact
currently most traders on commodity exchanges specialize in trading either for customer accounts which makes them brokers or for their own accounts as <unk> <unk>
the tests indicate that dual and <unk> traders are similar in terms of the trade executions and liquidity they provide to the market mrs. <unk> told the senate panel
members of congress have proposed restricting dual trading in bills to <unk> cftc operations
the house 's bill would prohibit dual trading in markets with daily average volume of N contracts or more <unk> those considered too difficult to track without a sophisticated computer system
the senate bill would force the cftc to suspend dual trading if an exchange ca n't show that its oversight system can detect <unk> abuses
so far one test of restricting dual trading has worked well
the chicago merc banned dual trading in its standard & poor 's 500-stock index futures pit in N
under the rules traders decide before a session begins whether they will trade for their own account or for customers
traders who stand on the pit 's top step where most customer orders are executed ca n't trade for themselves
a merc spokesman said the plan has n't made much difference in liquidity in the pit
it 's too soon to tell but people do n't seem to be unhappy with it he said
he said he would n't comment on the cftc plan until the exchange has seen the full proposal
but at a meeting last week tom <unk> the board of trade 's president told commodity lawyers dual trading is definitely worth saving
it adds something to the market
japanese firms push <unk> car <unk>
japanese luxury-car makers are trying to set strict design standards for their dealerships
but some dealers are negotiating <unk> terms while others decline to deal at all
nissan motor co. 's infiniti division likes to insist that every dealer construct and <unk> a building in a japanese style
specifications include a <unk> <unk> <unk> at the center of each showroom and a <unk> bridge <unk> a stream that flows into the building from outside
infiniti has it down to the <unk> says jay <unk> a partner at <unk> power & associates an auto research firm
toyota motor corp. 's lexus division also provides specifications
but only two-thirds of lexus dealers are <unk> new buildings according to the lexus <unk>
some are even coming up with their own novel designs
in louisville ky. for example david peterson has built a lexus dealership with the showroom on the second floor
yet some dealers have turned down infiniti or lexus <unk> because they were unwilling or unable to meet the design requirements
lee seidman of cleveland says infiniti was a bear on <unk> but at least let him <unk> an existing building without the stream
mr. seidman says he turned down a lexus franchise in part because the building was <unk> but very expensive
to head off arguments infiniti offers dealers cash bonuses and <unk> construction loans
<unk> device 's <unk> plays back a lesson
products <unk> have to be first to be winners
that 's the lesson offered through one case study featured in a design exhibit
dictaphone corp. was caught off guard in N when its main competitor <unk> office products of japan introduced a <unk> <unk> recorder half the size of standard <unk> devices
blocked by patent protection from following suit dictaphone decided to go a step further and cut the <unk> in half again down to the length of a <unk>

+ 0
- 36
reproduction/HAN-document_classification/README.md View File

@@ -1,36 +0,0 @@
## Introduction
This is the implementation of [Hierarchical Attention Networks for Document Classification](https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf) paper in PyTorch.
* Dataset is 600k documents extracted from [Yelp 2018](https://www.yelp.com/dataset) customer reviews
* Use [NLTK](http://www.nltk.org/) and [Stanford CoreNLP](https://stanfordnlp.github.io/CoreNLP/) to tokenize documents and sentences
* Both CPU & GPU support
* The best accuracy is 71%, matching the performance reported in the paper

## Requirement
* python 3.6
* pytorch = 0.3.0
* numpy
* gensim
* nltk
* coreNLP

## Parameters
Following the paper and our experiments, the model parameters are set as follows:

|word embedding dimension|GRU hidden size|GRU layer|word/sentence context vector dimension|
|---|---|---|---|
|200|50|1|100|

And the training parameters:

|Epoch|learning rate|momentum|batch size|
|---|---|---|---|
|3|0.01|0.9|64|
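
These tables map directly onto the model and optimizer construction in `train.py` (included later in this change set). A minimal sketch, assuming `HAN` is importable from this folder's `model.py`, as `train.py` does via `from model import *`:
```
import torch
from model import HAN  # HAN is assumed to be defined in model.py of this reproduction

# Model parameters from the table above: 200-dim word embeddings, 1-layer GRUs with
# hidden size 50, 100-dim word/sentence context vectors, and 5 output classes (star ratings).
net = HAN(input_size=200, output_size=5,
          word_hidden_size=50, word_num_layers=1, word_context_size=100,
          sent_hidden_size=50, sent_num_layers=1, sent_context_size=100)

# Training parameters from the table above: SGD with learning rate 0.01 and momentum 0.9
# (the epoch count and batch size are passed to the train() function in train.py).
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
```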

## Run
1. Prepare the dataset. Download the [data set](https://www.yelp.com/dataset) and unzip the customer reviews into a single file. Use preprocess.py to transform this file into the dataset used as model input.
2. Train the model. Word embeddings for the training data are trained and saved to 'yelp.word2vec'. The model will be trained and auto-saved to 'models.dict'
```
python train.py
```
3. Test the model.
```
python evaluate.py
```

+ 0
- 45
reproduction/HAN-document_classification/evaluate.py View File

@@ -1,45 +0,0 @@
from model import *
from train import *


def evaluate(net, dataset, batch_size=64, use_cuda=False):
dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate, num_workers=0)
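# count of correctly classified documents, accumulated batch by batch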
count = 0
if use_cuda:
net.cuda()
for i, batch_samples in enumerate(dataloader):
x, y = batch_samples
doc_list = []
for sample in x:
doc = []
for sent_vec in sample:
if use_cuda:
sent_vec = sent_vec.cuda()
doc.append(Variable(sent_vec, volatile=True))
doc_list.append(pack_sequence(doc))
if use_cuda:
y = y.cuda()
predicts = net(doc_list)
p, idx = torch.max(predicts, dim=1)
idx = idx.data
count += torch.sum(torch.eq(idx, y))
return count


if __name__ == '__main__':
'''
Evaluate the performance of models
'''
from gensim.models import Word2Vec

embed_model = Word2Vec.load('yelp.word2vec')
embedding = Embedding_layer(embed_model.wv, embed_model.wv.vector_size)
del embed_model

net = HAN(input_size=200, output_size=5,
word_hidden_size=50, word_num_layers=1, word_context_size=100,
sent_hidden_size=50, sent_num_layers=1, sent_context_size=100)
net.load_state_dict(torch.load('models.dict'))
test_dataset = YelpDocSet('reviews', 199, 4, embedding)
correct = evaluate(net, test_dataset, use_cuda=True)
print('accuracy {}'.format(correct / len(test_dataset)))

+ 0
- 50
reproduction/HAN-document_classification/preprocess.py View File

@@ -1,50 +0,0 @@
'''
Tokenize the Yelp dataset's documents using Stanford CoreNLP
'''

import json
import os
import pickle

import nltk
from nltk.tokenize import stanford

input_filename = 'review.json'

# config for stanford core nlp
os.environ['JAVAHOME'] = 'D:\\java\\bin\\java.exe'
path_to_jar = 'E:\\College\\fudanNLP\\stanford-corenlp-full-2018-02-27\\stanford-corenlp-3.9.1.jar'
tokenizer = stanford.CoreNLPTokenizer()

in_dirname = 'review'
out_dirname = 'reviews'

f = open(input_filename, encoding='utf-8')
samples = []
j = 0
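# stream the raw JSON reviews and dump them into pickle chunks of 5000 (stars, text) pairs each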
for i, line in enumerate(f.readlines()):
review = json.loads(line)
samples.append((review['stars'], review['text']))
if (i + 1) % 5000 == 0:
print(i)
pickle.dump(samples, open(in_dirname + '/samples%d.pkl' % j, 'wb'))
j += 1
samples = []
pickle.dump(samples, open(in_dirname + '/samples%d.pkl' % j, 'wb'))
# samples = pickle.load(open(out_dirname + '/samples0.pkl', 'rb'))
# print(samples[0])


for fn in os.listdir(in_dirname):
print(fn)
processed = []
for stars, text in pickle.load(open(os.path.join(in_dirname, fn), 'rb')):
tokens = []
sents = nltk.tokenize.sent_tokenize(text)
for s in sents:
tokens.append(tokenizer.tokenize(s))
processed.append((stars, tokens))
# print(tokens)
if len(processed) % 100 == 0:
print(len(processed))
pickle.dump(processed, open(os.path.join(out_dirname, fn), 'wb'))

+ 0
- 171
reproduction/HAN-document_classification/train.py View File

@@ -1,171 +0,0 @@
import os
import pickle

import numpy as np
import torch
from model import *


class SentIter:
def __init__(self, dirname, count):
self.dirname = dirname
self.count = int(count)

def __iter__(self):
for f in os.listdir(self.dirname)[:self.count]:
with open(os.path.join(self.dirname, f), 'rb') as f:
for y, x in pickle.load(f):
for sent in x:
yield sent


def train_word_vec():
# load data
dirname = 'reviews'
sents = SentIter(dirname, 238)
# define models and train
model = models.Word2Vec(size=200, sg=0, workers=4, min_count=5)
model.build_vocab(sents)
model.train(sents, total_examples=model.corpus_count, epochs=10)
model.save('yelp.word2vec')
print(model.wv.similarity('woman', 'man'))
print(model.wv.similarity('nice', 'awful'))


class Embedding_layer:
def __init__(self, wv, vector_size):
self.wv = wv
self.vector_size = vector_size

def get_vec(self, w):
try:
v = self.wv[w]
except KeyError as e:
v = np.random.randn(self.vector_size)
return v


from torch.utils.data import DataLoader, Dataset


class YelpDocSet(Dataset):
def __init__(self, dirname, start_file, num_files, embedding):
self.dirname = dirname
self.num_files = num_files
self._files = os.listdir(dirname)[start_file:start_file + num_files]
self.embedding = embedding
self._cache = [(-1, None) for i in range(5)]
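# small cache of the 5 most recently used pickle files (each holds 5000 reviews), indexed by file_id % 5,
# so consecutive __getitem__ calls do not reload the same chunk from disk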

def get_doc(self, n):
file_id = n // 5000
idx = file_id % 5
if self._cache[idx][0] != file_id:
with open(os.path.join(self.dirname, self._files[file_id]), 'rb') as f:
self._cache[idx] = (file_id, pickle.load(f))
y, x = self._cache[idx][1][n % 5000]
sents = []
for s_list in x:
sents.append(' '.join(s_list))
x = '\n'.join(sents)
return x, y - 1

def __len__(self):
return len(self._files) * 5000

def __getitem__(self, n):
file_id = n // 5000
idx = file_id % 5
if self._cache[idx][0] != file_id:
print('load {} to {}'.format(file_id, idx))
with open(os.path.join(self.dirname, self._files[file_id]), 'rb') as f:
self._cache[idx] = (file_id, pickle.load(f))
y, x = self._cache[idx][1][n % 5000]
doc = []
for sent in x:
if len(sent) == 0:
continue
sent_vec = []
for word in sent:
vec = self.embedding.get_vec(word)
sent_vec.append(vec.tolist())
sent_vec = torch.Tensor(sent_vec)
doc.append(sent_vec)
if len(doc) == 0:
doc = [torch.zeros(1, 200)]
return doc, y - 1


def collate(iterable):
y_list = []
x_list = []
for x, y in iterable:
y_list.append(y)
x_list.append(x)
return x_list, torch.LongTensor(y_list)


def train(net, dataset, num_epoch, batch_size, print_size=10, use_cuda=False):
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
criterion = nn.NLLLoss()

dataloader = DataLoader(dataset,
batch_size=batch_size,
collate_fn=collate,
num_workers=0)
running_loss = 0.0

if use_cuda:
net.cuda()
print('start training')
for epoch in range(num_epoch):
for i, batch_samples in enumerate(dataloader):
x, y = batch_samples
doc_list = []
for sample in x:
doc = []
for sent_vec in sample:
if use_cuda:
sent_vec = sent_vec.cuda()
doc.append(Variable(sent_vec))
doc_list.append(pack_sequence(doc))
if use_cuda:
y = y.cuda()
y = Variable(y)
predict = net(doc_list)
loss = criterion(predict, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.data[0]
if i % print_size == print_size - 1:
print('{}, {}'.format(i + 1, running_loss / print_size))
running_loss = 0.0
torch.save(net.state_dict(), 'models.dict')
torch.save(net.state_dict(), 'models.dict')


if __name__ == '__main__':
'''
Train process
'''
from gensim.models import Word2Vec
from gensim import models

train_word_vec()

embed_model = Word2Vec.load('yelp.word2vec')
embedding = Embedding_layer(embed_model.wv, embed_model.wv.vector_size)
del embed_model
start_file = 0
dataset = YelpDocSet('reviews', start_file, 120 - start_file, embedding)
print('training data size {}'.format(len(dataset)))
net = HAN(input_size=200, output_size=5,
word_hidden_size=50, word_num_layers=1, word_context_size=100,
sent_hidden_size=50, sent_num_layers=1, sent_context_size=100)
try:
net.load_state_dict(torch.load('models.dict'))
print("last time trained models has loaded")
except Exception:
print("cannot load models, train the inital models")

train(net, dataset, num_epoch=5, batch_size=64, use_cuda=True)

+ 4
- 2
reproduction/README.md View File

@@ -3,6 +3,8 @@

The reproduced models include:
- [Star-Transformer](Star_transformer/)
- [Biaffine](https://github.com/fastnlp/fastNLP/blob/999a14381747068e9e6a7cc370037b320197db00/fastNLP/models/biaffine_parser.py#L239)
- [CNNText](https://github.com/fastnlp/fastNLP/blob/999a14381747068e9e6a7cc370037b320197db00/fastNLP/models/cnn_text_classification.py#L12)
- ...

# Task Reproduction
@@ -11,11 +13,11 @@


## Matching (Natural Language Inference / Sentence Matching)
- still in progress
- [Matching task reproduction](matching)


## Sequence Labeling
- still in progress
- [NER](seqence_labelling/ner)


## Coreference resolution


+ 1
- 1
reproduction/Star_transformer/README.md View File

@@ -6,7 +6,7 @@ paper: [Star-Transformer](https://arxiv.org/abs/1902.09113)
|Pos Tagging|CTB 9.0|-|ACC 92.31|
|Pos Tagging|CONLL 2012|-|ACC 96.51|
|Named Entity Recognition|CONLL 2012|-|F1 85.66|
|Text Classification|SST|-|49.18|
|Text Classification|SST|-|51.2|
|Natural Language Inference|SNLI|-|83.76|

## Usage


+ 10
- 4
reproduction/Star_transformer/datasets.py View File

@@ -2,7 +2,8 @@ import torch
import json
import os
from fastNLP import Vocabulary
from fastNLP.io.dataset_loader import ConllLoader, SSTLoader, SNLILoader
from fastNLP.io.dataset_loader import ConllLoader
from fastNLP.io.data_loader import SSTLoader, SNLILoader
from fastNLP.core import Const as C
import numpy as np

@@ -50,13 +51,15 @@ def load_sst(path, files):
for sub in [True, False, False]]
ds_list = [loader.load(os.path.join(path, fn))
for fn, loader in zip(files, loaders)]
word_v = Vocabulary(min_freq=2)
word_v = Vocabulary(min_freq=0)
tag_v = Vocabulary(unknown=None, padding=None)
for ds in ds_list:
ds.apply(lambda x: [w.lower()
for w in x['words']], new_field_name='words')
ds_list[0].drop(lambda x: len(x['words']) < 3)
#ds_list[0].drop(lambda x: len(x['words']) < 3)
update_v(word_v, ds_list[0], 'words')
update_v(word_v, ds_list[1], 'words')
update_v(word_v, ds_list[2], 'words')
ds_list[0].apply(lambda x: tag_v.add_word(
x['target']), new_field_name=None)

@@ -151,7 +154,10 @@ class EmbedLoader:
# some words from vocab are missing in pre-trained embedding
# we normally sample each dimension
vocab_embed = embedding_matrix[np.where(hit_flags)]
sampled_vectors = np.random.normal(vocab_embed.mean(axis=0), vocab_embed.std(axis=0),
#sampled_vectors = np.random.normal(vocab_embed.mean(axis=0), vocab_embed.std(axis=0),
# size=(len(vocab) - np.sum(hit_flags), emb_dim))
sampled_vectors = np.random.uniform(-0.01, 0.01,
size=(len(vocab) - np.sum(hit_flags), emb_dim))

embedding_matrix[np.where(1 - hit_flags)] = sampled_vectors
return embedding_matrix

+ 2
- 2
reproduction/Star_transformer/run.sh View File

@@ -1,5 +1,5 @@
#python -u train.py --task pos --ds conll --mode train --gpu 1 --lr 3e-4 --w_decay 2e-5 --lr_decay .95 --drop 0.3 --ep 25 --bsz 64 > conll_pos102.log 2>&1 &
#python -u train.py --task pos --ds ctb --mode train --gpu 1 --lr 3e-4 --w_decay 2e-5 --lr_decay .95 --drop 0.3 --ep 25 --bsz 64 > ctb_pos101.log 2>&1 &
#python -u train.py --task cls --ds sst --mode train --gpu 2 --lr 1e-4 --w_decay 1e-5 --lr_decay 0.9 --drop 0.5 --ep 50 --bsz 128 > sst_cls201.log &
python -u train.py --task cls --ds sst --mode train --gpu 0 --lr 1e-4 --w_decay 5e-5 --lr_decay 1.0 --drop 0.4 --ep 20 --bsz 64 > sst_cls.log &
#python -u train.py --task nli --ds snli --mode train --gpu 1 --lr 1e-4 --w_decay 1e-5 --lr_decay 0.9 --drop 0.4 --ep 120 --bsz 128 > snli_nli201.log &
python -u train.py --task ner --ds conll --mode train --gpu 0 --lr 1e-4 --w_decay 1e-5 --lr_decay 0.9 --drop 0.4 --ep 120 --bsz 64 > conll_ner201.log &
#python -u train.py --task ner --ds conll --mode train --gpu 0 --lr 1e-4 --w_decay 1e-5 --lr_decay 0.9 --drop 0.4 --ep 120 --bsz 64 > conll_ner201.log &

+ 38
- 21
reproduction/Star_transformer/train.py View File

@@ -1,4 +1,6 @@
from util import get_argparser, set_gpu, set_rng_seeds, add_model_args
seed = set_rng_seeds(15360)
print('RNG SEED {}'.format(seed))
from datasets import load_seqtag, load_sst, load_snli, EmbedLoader, MAX_LEN
import torch.nn as nn
import torch
@@ -7,8 +9,8 @@ import fastNLP as FN
from fastNLP.models.star_transformer import STSeqLabel, STSeqCls, STNLICls
from fastNLP.core.const import Const as C
import sys
sys.path.append('/remote-home/yfshao/workdir/dev_fastnlp/')
#sys.path.append('/remote-home/yfshao/workdir/dev_fastnlp/')
pre_dir = '/home/ec2-user/fast_data/'

g_model_select = {
'pos': STSeqLabel,
@@ -17,8 +19,8 @@ g_model_select = {
'nli': STNLICls,
}

g_emb_file_path = {'en': '/remote-home/yfshao/workdir/datasets/word_vector/glove.840B.300d.txt',
'zh': '/remote-home/yfshao/workdir/datasets/word_vector/cc.zh.300.vec'}
g_emb_file_path = {'en': pre_dir + 'glove.840B.300d.txt',
'zh': pre_dir + 'cc.zh.300.vec'}

g_args = None
g_model_cfg = None
@@ -53,7 +55,7 @@ def get_conll2012_ner():


def get_sst():
path = '/remote-home/yfshao/workdir/datasets/SST'
path = pre_dir + 'sst'
files = ['train.txt', 'dev.txt', 'test.txt']
return load_sst(path, files)

@@ -94,6 +96,7 @@ class MyCallback(FN.core.callback.Callback):
nn.utils.clip_grad.clip_grad_norm_(self.model.parameters(), 5.0)

def on_step_end(self):
return
warm_steps = 6000
# learning rate warm-up & decay
if self.step <= warm_steps:
@@ -108,12 +111,11 @@ class MyCallback(FN.core.callback.Callback):


def train():
seed = set_rng_seeds(1234)
print('RNG SEED {}'.format(seed))
print('loading data')
ds_list, word_v, tag_v = g_datasets['{}-{}'.format(
g_args.ds, g_args.task)]()
print(ds_list[0][:2])
print(len(ds_list[0]), len(ds_list[1]), len(ds_list[2]))
embed = load_pretrain_emb(word_v, lang='zh' if g_args.ds == 'ctb' else 'en')
g_model_cfg['num_cls'] = len(tag_v)
print(g_model_cfg)
@@ -123,11 +125,14 @@ def train():
def init_model(model):
for p in model.parameters():
if p.size(0) != len(word_v):
nn.init.normal_(p, 0.0, 0.05)
if len(p.size())<2:
nn.init.constant_(p, 0.0)
else:
nn.init.normal_(p, 0.0, 0.05)
init_model(model)
train_data = ds_list[0]
dev_data = ds_list[2]
test_data = ds_list[1]
dev_data = ds_list[1]
test_data = ds_list[2]
print(tag_v.word2idx)

if g_args.task in ['pos', 'ner']:
@@ -145,14 +150,26 @@ def train():
}
metric_key, metric = metrics[g_args.task]
device = 'cuda' if torch.cuda.is_available() else 'cpu'
ex_param = [x for x in model.parameters(
) if x.requires_grad and x.size(0) != len(word_v)]
optim_cfg = [{'params': model.enc.embedding.parameters(), 'lr': g_args.lr*0.1},
{'params': ex_param, 'lr': g_args.lr, 'weight_decay': g_args.w_decay}, ]
trainer = FN.Trainer(train_data=train_data, model=model, optimizer=torch.optim.Adam(optim_cfg), loss=loss,
batch_size=g_args.bsz, n_epochs=g_args.ep, print_every=10, dev_data=dev_data, metrics=metric,
metric_key=metric_key, validate_every=3000, save_path=g_args.log, use_tqdm=False,
device=device, callbacks=[MyCallback()])

params = [(x,y) for x,y in list(model.named_parameters()) if y.requires_grad and y.size(0) != len(word_v)]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
print([n for n,p in params])
optim_cfg = [
#{'params': model.enc.embedding.parameters(), 'lr': g_args.lr*0.1},
{'params': [p for n, p in params if not any(nd in n for nd in no_decay)], 'lr': g_args.lr, 'weight_decay': 1.0*g_args.w_decay},
{'params': [p for n, p in params if any(nd in n for nd in no_decay)], 'lr': g_args.lr, 'weight_decay': 0.0*g_args.w_decay}
]

print(model)
trainer = FN.Trainer(model=model, train_data=train_data, dev_data=dev_data,
loss=loss, metrics=metric, metric_key=metric_key,
optimizer=torch.optim.Adam(optim_cfg),
n_epochs=g_args.ep, batch_size=g_args.bsz, print_every=100, validate_every=1000,
device=device,
use_tqdm=False, prefetch=False,
save_path=g_args.log,
sampler=FN.BucketSampler(100, g_args.bsz, C.INPUT_LEN),
callbacks=[MyCallback()])

trainer.train()
tester = FN.Tester(data=test_data, model=model, metrics=metric,
@@ -195,12 +212,12 @@ def main():
'init_embed': (None, 300),
'num_cls': None,
'hidden_size': g_args.hidden,
'num_layers': 4,
'num_layers': 2,
'num_head': g_args.nhead,
'head_dim': g_args.hdim,
'max_len': MAX_LEN,
'cls_hidden_size': 600,
'emb_dropout': 0.3,
'cls_hidden_size': 200,
'emb_dropout': g_args.drop,
'dropout': g_args.drop,
}
run_select[g_args.mode.lower()]()


+ 129
- 0
reproduction/Summmarization/BertSum/callback.py View File

@@ -0,0 +1,129 @@
import os
import torch
import sys
from torch import nn

from fastNLP.core.callback import Callback
from fastNLP.core.utils import _get_model_device

class MyCallback(Callback):
def __init__(self, args):
super(MyCallback, self).__init__()
self.args = args
self.real_step = 0

def on_step_end(self):
if self.step % self.update_every == 0 and self.step > 0:
self.real_step += 1
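# inverse-square-root schedule: the learning rate grows linearly for the first warmup_steps updates,
# then decays proportionally to 1/sqrt(step); the whole schedule is scaled by max_lr * 100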
cur_lr = self.args.max_lr * 100 * min(self.real_step ** (-0.5), self.real_step * self.args.warmup_steps**(-1.5))
for param_group in self.optimizer.param_groups:
param_group['lr'] = cur_lr

if self.real_step % 1000 == 0:
self.pbar.write('Current learning rate is {:.8f}, real_step: {}'.format(cur_lr, self.real_step))
def on_epoch_end(self):
self.pbar.write('Epoch {} is done !!!'.format(self.epoch))

def _save_model(model, model_name, save_dir, only_param=False):
""" 存储不含有显卡信息的 state_dict 或 model
:param model:
:param model_name:
:param save_dir: 保存的 directory
:param only_param:
:return:
"""
model_path = os.path.join(save_dir, model_name)
if not os.path.isdir(save_dir):
os.makedirs(save_dir, exist_ok=True)
if isinstance(model, nn.DataParallel):
model = model.module
if only_param:
state_dict = model.state_dict()
for key in state_dict:
state_dict[key] = state_dict[key].cpu()
torch.save(state_dict, model_path)
else:
_model_device = _get_model_device(model)
model.cpu()
torch.save(model, model_path)
model.to(_model_device)

class SaveModelCallback(Callback):
"""
Since the Trainer only keeps the best model during training, this callback supports other ways of storing results.
A folder named after the timestamp at which training started is created under save_dir, and multiple models are stored inside it:
-save_dir
-2019-07-03-15-06-36
-epoch0step20{metric_key}{evaluate_performance}.pt # metric_key is the given metric key, evaluate_performance is the dev performance
-epoch1step40
-2019-07-03-15-10-00
-epoch:0step:20{metric_key}:{evaluate_performance}.pt # metric_key is the given metric key, evaluate_performance is the dev performance
:param str save_dir: directory in which to store models; a sub-directory named after the timestamp is created under it to hold them
:param int top: keep only the models with the top-N dev performance; -1 keeps every model
:param bool only_param: whether to save only the model weights
:param save_on_exception: whether to save a snapshot of the model when an exception occurs
"""
def __init__(self, save_dir, top=5, only_param=False, save_on_exception=False):
super().__init__()

if not os.path.isdir(save_dir):
raise IsADirectoryError("{} is not a directory.".format(save_dir))
self.save_dir = save_dir
if top < 0:
self.top = sys.maxsize
else:
self.top = top
self._ordered_save_models = [] # List[Tuple]: Tuple[0] is the metric value, Tuple[1] is the path. Metrics are stored in improving order, so deletion starts from the head.

self.only_param = only_param
self.save_on_exception = save_on_exception

def on_train_begin(self):
self.save_dir = os.path.join(self.save_dir, self.trainer.start_time)

def on_valid_end(self, eval_result, metric_key, optimizer, is_better_eval):
metric_value = list(eval_result.values())[0][metric_key]
self._save_this_model(metric_value)

def _insert_into_ordered_save_models(self, pair):
# pair:(metric_value, model_name)
# returns the pair to save and the pair to delete; in each pair, the first element is the metric value and the second is the model name
index = -1
for _pair in self._ordered_save_models:
if _pair[0]>=pair[0] and self.trainer.increase_better:
break
if not self.trainer.increase_better and _pair[0]<=pair[0]:
break
index += 1
save_pair = None
if len(self._ordered_save_models)<self.top or (len(self._ordered_save_models)>=self.top and index!=-1):
save_pair = pair
self._ordered_save_models.insert(index+1, pair)
delete_pair = None
if len(self._ordered_save_models)>self.top:
delete_pair = self._ordered_save_models.pop(0)
return save_pair, delete_pair

def _save_this_model(self, metric_value):
name = "epoch:{}_step:{}_{}:{:.6f}.pt".format(self.epoch, self.step, self.trainer.metric_key, metric_value)
save_pair, delete_pair = self._insert_into_ordered_save_models((metric_value, name))
if save_pair:
try:
_save_model(self.model, model_name=name, save_dir=self.save_dir, only_param=self.only_param)
except Exception as e:
print(f"The following exception:{e} happens when saves model to {self.save_dir}.")
if delete_pair:
try:
delete_model_path = os.path.join(self.save_dir, delete_pair[1])
if os.path.exists(delete_model_path):
os.remove(delete_model_path)
except Exception as e:
print(f"Fail to delete model {name} at {self.save_dir} caused by exception:{e}.")

def on_exception(self, exception):
if self.save_on_exception:
name = "epoch:{}_step:{}_Exception:{}.pt".format(self.epoch, self.step, exception.__class__.__name__)
_save_model(self.model, model_name=name, save_dir=self.save_dir, only_param=self.only_param)
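# Usage sketch (not part of the original file): both callbacks are passed to a fastNLP Trainer,
# e.g. Trainer(..., callbacks=[MyCallback(args), SaveModelCallback(args.save_path)]),
# as done in train_BertSum.py later in this change set.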



+ 157
- 0
reproduction/Summmarization/BertSum/dataloader.py View File

@@ -0,0 +1,157 @@
from time import time
from datetime import timedelta

from fastNLP.io.dataset_loader import JsonLoader
from fastNLP.modules.encoder._bert import BertTokenizer
from fastNLP.io.base_loader import DataInfo
from fastNLP.core.const import Const

class BertData(JsonLoader):
def __init__(self, max_nsents=60, max_ntokens=100, max_len=512):
fields = {'article': 'article',
'label': 'label'}
super(BertData, self).__init__(fields=fields)

self.max_nsents = max_nsents
self.max_ntokens = max_ntokens
self.max_len = max_len

self.tokenizer = BertTokenizer.from_pretrained('/path/to/uncased_L-12_H-768_A-12')
self.cls_id = self.tokenizer.vocab['[CLS]']
self.sep_id = self.tokenizer.vocab['[SEP]']
self.pad_id = self.tokenizer.vocab['[PAD]']

def _load(self, paths):
dataset = super(BertData, self)._load(paths)
return dataset

def process(self, paths):
def truncate_articles(instance, max_nsents=self.max_nsents, max_ntokens=self.max_ntokens):
article = [' '.join(sent.lower().split()[:max_ntokens]) for sent in instance['article']]
return article[:max_nsents]

def truncate_labels(instance):
label = list(filter(lambda x: x < len(instance['article']), instance['label']))
return label
def bert_tokenize(instance, tokenizer, max_len, pad_value):
article = instance['article']
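# join sentences with ' [SEP] [CLS] ' so that every sentence gets its own [CLS] token;
# the hidden state at each [CLS] position later serves as that sentence's representation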
article = ' [SEP] [CLS] '.join(article)
word_pieces = tokenizer.tokenize(article)[:(max_len - 2)]
word_pieces = ['[CLS]'] + word_pieces + ['[SEP]']
token_ids = tokenizer.convert_tokens_to_ids(word_pieces)
while len(token_ids) < max_len:
token_ids.append(pad_value)
assert len(token_ids) == max_len
return token_ids

def get_seg_id(instance, max_len, sep_id):
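# assign alternating segment ids (0 / 1) to consecutive sentences, using the [SEP] positions as boundaries,
# then pad with 0 up to max_len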
_segs = [-1] + [i for i, idx in enumerate(instance['article']) if idx == sep_id]
segs = [_segs[i] - _segs[i - 1] for i in range(1, len(_segs))]
segment_id = []
for i, length in enumerate(segs):
if i % 2 == 0:
segment_id += length * [0]
else:
segment_id += length * [1]
while len(segment_id) < max_len:
segment_id.append(0)
return segment_id
def get_cls_id(instance, cls_id):
classification_id = [i for i, idx in enumerate(instance['article']) if idx == cls_id]
return classification_id
def get_labels(instance):
labels = [0] * len(instance['cls_id'])
label_idx = list(filter(lambda x: x < len(instance['cls_id']), instance['label']))
for idx in label_idx:
labels[idx] = 1
return labels

datasets = {}
for name in paths:
datasets[name] = self._load(paths[name])
# remove empty samples
datasets[name].drop(lambda ins: len(ins['article']) == 0 or len(ins['label']) == 0)
# truncate articles
datasets[name].apply(lambda ins: truncate_articles(ins, self.max_nsents, self.max_ntokens), new_field_name='article')
# truncate labels
datasets[name].apply(truncate_labels, new_field_name='label')
# tokenize and convert tokens to id
datasets[name].apply(lambda ins: bert_tokenize(ins, self.tokenizer, self.max_len, self.pad_id), new_field_name='article')
# get segment id
datasets[name].apply(lambda ins: get_seg_id(ins, self.max_len, self.sep_id), new_field_name='segment_id')
# get classification id
datasets[name].apply(lambda ins: get_cls_id(ins, self.cls_id), new_field_name='cls_id')

# get label
datasets[name].apply(get_labels, new_field_name='label')
# rename filed
datasets[name].rename_field('article', Const.INPUTS(0))
datasets[name].rename_field('segment_id', Const.INPUTS(1))
datasets[name].rename_field('cls_id', Const.INPUTS(2))
datasets[name].rename_field('label', Const.TARGET)

# set input and target
datasets[name].set_input(Const.INPUTS(0), Const.INPUTS(1), Const.INPUTS(2))
datasets[name].set_target(Const.TARGET)
# set padding value
datasets[name].set_pad_val('article', 0)

return DataInfo(datasets=datasets)


class BertSumLoader(JsonLoader):
def __init__(self):
fields = {'article': 'article',
'segment_id': 'segment_id',
'cls_id': 'cls_id',
'label': Const.TARGET
}
super(BertSumLoader, self).__init__(fields=fields)

def _load(self, paths):
dataset = super(BertSumLoader, self)._load(paths)
return dataset

def process(self, paths):
def get_seq_len(instance):
return len(instance['article'])

print('Start loading datasets !!!')
start = time()

# load datasets
datasets = {}
for name in paths:
datasets[name] = self._load(paths[name])
datasets[name].apply(get_seq_len, new_field_name='seq_len')

# set input and target
datasets[name].set_input('article', 'segment_id', 'cls_id')
datasets[name].set_target(Const.TARGET)
# set padding value
datasets[name].set_pad_val('article', 0)
datasets[name].set_pad_val('segment_id', 0)
datasets[name].set_pad_val('cls_id', -1)
datasets[name].set_pad_val(Const.TARGET, 0)

print('Finished in {}'.format(timedelta(seconds=time()-start)))

return DataInfo(datasets=datasets)

+ 178
- 0
reproduction/Summmarization/BertSum/metrics.py View File

@@ -0,0 +1,178 @@
import numpy as np
import json
from os.path import join
import torch
import logging
import tempfile
import subprocess as sp
from datetime import timedelta
from time import time

from pyrouge import Rouge155
from pyrouge.utils import log

from fastNLP.core.losses import LossBase
from fastNLP.core.metrics import MetricBase

_ROUGE_PATH = '/path/to/RELEASE-1.5.5'

class MyBCELoss(LossBase):
def __init__(self, pred=None, target=None, mask=None):
super(MyBCELoss, self).__init__()
self._init_param_map(pred=pred, target=target, mask=mask)
self.loss_func = torch.nn.BCELoss(reduction='none')

def get_loss(self, pred, target, mask):
loss = self.loss_func(pred, target.float())
loss = (loss * mask.float()).sum()
return loss

class LossMetric(MetricBase):
def __init__(self, pred=None, target=None, mask=None):
super(LossMetric, self).__init__()
self._init_param_map(pred=pred, target=target, mask=mask)
self.loss_func = torch.nn.BCELoss(reduction='none')
self.avg_loss = 0.0
self.nsamples = 0

def evaluate(self, pred, target, mask):
batch_size = pred.size(0)
loss = self.loss_func(pred, target.float())
loss = (loss * mask.float()).sum()
self.avg_loss += loss
self.nsamples += batch_size

def get_metric(self, reset=True):
self.avg_loss = self.avg_loss / self.nsamples
eval_result = {'loss': self.avg_loss}
if reset:
self.avg_loss = 0
self.nsamples = 0
return eval_result
class RougeMetric(MetricBase):
def __init__(self, data_path, dec_path, ref_path, n_total, n_ext=3, ngram_block=3, pred=None, target=None, mask=None):
super(RougeMetric, self).__init__()
self._init_param_map(pred=pred, target=target, mask=mask)
self.data_path = data_path
self.dec_path = dec_path
self.ref_path = ref_path
self.n_total = n_total
self.n_ext = n_ext
self.ngram_block = ngram_block

self.cur_idx = 0
self.ext = []
self.start = time()

@staticmethod
def eval_rouge(dec_dir, ref_dir):
assert _ROUGE_PATH is not None
log.get_global_console_logger().setLevel(logging.WARNING)
dec_pattern = r'(\d+).dec'
ref_pattern = '#ID#.ref'
cmd = '-c 95 -r 1000 -n 2 -m'
with tempfile.TemporaryDirectory() as tmp_dir:
Rouge155.convert_summaries_to_rouge_format(
dec_dir, join(tmp_dir, 'dec'))
Rouge155.convert_summaries_to_rouge_format(
ref_dir, join(tmp_dir, 'ref'))
Rouge155.write_config_static(
join(tmp_dir, 'dec'), dec_pattern,
join(tmp_dir, 'ref'), ref_pattern,
join(tmp_dir, 'settings.xml'), system_id=1
)
cmd = (join(_ROUGE_PATH, 'ROUGE-1.5.5.pl')
+ ' -e {} '.format(join(_ROUGE_PATH, 'data'))
+ cmd
+ ' -a {}'.format(join(tmp_dir, 'settings.xml')))
output = sp.check_output(cmd.split(' '), universal_newlines=True)
R_1 = float(output.split('\n')[3].split(' ')[3])
R_2 = float(output.split('\n')[7].split(' ')[3])
R_L = float(output.split('\n')[11].split(' ')[3])
print(output)
return R_1, R_2, R_L
def evaluate(self, pred, target, mask):
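# adding the mask lifts every real sentence (score + 1) above the padded positions (score 0),
# so the argsort below ranks genuine sentences first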
pred = pred + mask.float()
pred = pred.cpu().data.numpy()
ext_ids = np.argsort(-pred, 1)
for sent_id in ext_ids:
self.ext.append(sent_id)
self.cur_idx += 1
print('{}/{} ({:.2f}%) decoded in {} seconds\r'.format(
self.cur_idx, self.n_total, self.cur_idx/self.n_total*100, timedelta(seconds=int(time()-self.start))
), end='')

def get_metric(self, use_ngram_block=True, reset=True):
def check_n_gram(sentence, n, dic):
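# n-gram blocking: a candidate sentence is rejected if it shares any n-gram with the sentences already selected (stored in dic)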
tokens = sentence.split(' ')
s_len = len(tokens)
for i in range(s_len):
if i + n > s_len:
break
if ' '.join(tokens[i: i + n]) in dic:
return False
return True # no n_gram overlap

# load original data
data = []
with open(self.data_path) as f:
for line in f:
cur_data = json.loads(line)
if 'text' in cur_data:
new_data = {}
new_data['article'] = cur_data['text']
new_data['abstract'] = cur_data['summary']
data.append(new_data)
else:
data.append(cur_data)
# write decode sentences and references
if use_ngram_block == True:
print('\nStart {}-gram blocking !!!'.format(self.ngram_block))
for i, ext_ids in enumerate(self.ext):
dec, ref = [], []
if use_ngram_block == False:
n_sent = min(len(data[i]['article']), self.n_ext)
for j in range(n_sent):
idx = ext_ids[j]
dec.append(data[i]['article'][idx])
else:
n_sent = len(ext_ids)
dic = {}
for j in range(n_sent):
sent = data[i]['article'][ext_ids[j]]
if check_n_gram(sent, self.ngram_block, dic) == True:
dec.append(sent)
# update dic
tokens = sent.split(' ')
s_len = len(tokens)
for k in range(s_len):
if k + self.ngram_block > s_len:
break
dic[' '.join(tokens[k: k + self.ngram_block])] = 1
if len(dec) >= self.n_ext:
break

for sent in data[i]['abstract']:
ref.append(sent)

with open(join(self.dec_path, '{}.dec'.format(i)), 'w') as f:
for sent in dec:
print(sent, file=f)
with open(join(self.ref_path, '{}.ref'.format(i)), 'w') as f:
for sent in ref:
print(sent, file=f)
print('\nStart evaluating ROUGE score !!!')
R_1, R_2, R_L = RougeMetric.eval_rouge(self.dec_path, self.ref_path)
eval_result = {'ROUGE-1': R_1, 'ROUGE-2': R_2, 'ROUGE-L':R_L}

if reset == True:
self.cur_idx = 0
self.ext = []
self.start = time()
return eval_result

+ 51
- 0
reproduction/Summmarization/BertSum/model.py View File

@@ -0,0 +1,51 @@
import torch
from torch import nn
from torch.nn import init

from fastNLP.modules.encoder._bert import BertModel


class Classifier(nn.Module):
def __init__(self, hidden_size):
super(Classifier, self).__init__()
self.linear = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()

def forward(self, inputs, mask_cls):
h = self.linear(inputs).squeeze(-1) # [batch_size, seq_len]
sent_scores = self.sigmoid(h) * mask_cls.float()
return sent_scores


class BertSum(nn.Module):
def __init__(self, hidden_size=768):
super(BertSum, self).__init__()
self.hidden_size = hidden_size

self.encoder = BertModel.from_pretrained('/path/to/uncased_L-12_H-768_A-12')
self.decoder = Classifier(self.hidden_size)

def forward(self, article, segment_id, cls_id):
# print(article.device)
# print(segment_id.device)
# print(cls_id.device)

input_mask = 1 - (article == 0)
mask_cls = 1 - (cls_id == -1)
assert input_mask.size() == article.size()
assert mask_cls.size() == cls_id.size()

bert_out = self.encoder(article, token_type_ids=segment_id, attention_mask=input_mask)
bert_out = bert_out[0][-1] # last layer

sent_emb = bert_out[torch.arange(bert_out.size(0)).unsqueeze(1), cls_id]
sent_emb = sent_emb * mask_cls.unsqueeze(-1).float()
assert sent_emb.size() == (article.size(0), cls_id.size(1), self.hidden_size) # [batch_size, seq_len, hidden_size]

sent_scores = self.decoder(sent_emb, mask_cls) # [batch_size, seq_len]
assert sent_scores.size() == (article.size(0), cls_id.size(1))

return {'pred': sent_scores, 'mask': mask_cls}

+ 147
- 0
reproduction/Summmarization/BertSum/train_BertSum.py View File

@@ -0,0 +1,147 @@
import sys
import argparse
import os
import json
import torch
from time import time
from datetime import timedelta
from os.path import join, exists
from torch.optim import Adam

from utils import get_data_path, get_rouge_path

from dataloader import BertSumLoader
from model import BertSum
from fastNLP.core.optimizer import AdamW
from metrics import MyBCELoss, LossMetric, RougeMetric
from fastNLP.core.sampler import BucketSampler
from callback import MyCallback, SaveModelCallback
from fastNLP.core.trainer import Trainer
from fastNLP.core.tester import Tester


def configure_training(args):
devices = [int(gpu) for gpu in args.gpus.split(',')]
params = {}
params['label_type'] = args.label_type
params['batch_size'] = args.batch_size
params['accum_count'] = args.accum_count
params['max_lr'] = args.max_lr
params['warmup_steps'] = args.warmup_steps
params['n_epochs'] = args.n_epochs
params['valid_steps'] = args.valid_steps
return devices, params

def train_model(args):
# check if the data_path and save_path exists
data_paths = get_data_path(args.mode, args.label_type)
for name in data_paths:
assert exists(data_paths[name])
if not exists(args.save_path):
os.makedirs(args.save_path)
# load summarization datasets
datasets = BertSumLoader().process(data_paths)
print('Information of dataset is:')
print(datasets)
train_set = datasets.datasets['train']
valid_set = datasets.datasets['val']
# configure training
devices, train_params = configure_training(args)
with open(join(args.save_path, 'params.json'), 'w') as f:
json.dump(train_params, f, indent=4)
print('Devices is:')
print(devices)

# configure model
model = BertSum()
optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0)
callbacks = [MyCallback(args), SaveModelCallback(args.save_path)]
criterion = MyBCELoss()
val_metric = [LossMetric()]
# sampler = BucketSampler(num_buckets=32, batch_size=args.batch_size)
trainer = Trainer(train_data=train_set, model=model, optimizer=optimizer,
loss=criterion, batch_size=args.batch_size, # sampler=sampler,
update_every=args.accum_count, n_epochs=args.n_epochs,
print_every=100, dev_data=valid_set, metrics=val_metric,
metric_key='-loss', validate_every=args.valid_steps,
save_path=args.save_path, device=devices, callbacks=callbacks)
print('Start training with the following hyper-parameters:')
print(train_params)
trainer.train()
def test_model(args):

models = os.listdir(args.save_path)
# load dataset
data_paths = get_data_path(args.mode, args.label_type)
datasets = BertSumLoader().process(data_paths)
print('Information of dataset is:')
print(datasets)
test_set = datasets.datasets['test']
# only need 1 gpu for testing
device = int(args.gpus)
args.batch_size = 1

for cur_model in models:
print('Current model is {}'.format(cur_model))

# load model
model = torch.load(join(args.save_path, cur_model))
# configure testing
original_path, dec_path, ref_path = get_rouge_path(args.label_type)
test_metric = RougeMetric(data_path=original_path, dec_path=dec_path,
ref_path=ref_path, n_total = len(test_set))
tester = Tester(data=test_set, model=model, metrics=[test_metric],
batch_size=args.batch_size, device=device)
tester.test()


if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='training/testing of BertSum(liu et al. 2019)'
)
parser.add_argument('--mode', required=True,
help='training or testing of BertSum', type=str)

parser.add_argument('--label_type', default='greedy',
help='greedy/limit', type=str)
parser.add_argument('--save_path', required=True,
help='root of the model', type=str)
# example for gpus input: '0,1,2,3'
parser.add_argument('--gpus', required=True,
help='available gpus for training(separated by commas)', type=str)
parser.add_argument('--batch_size', default=18,
help='the training batch size', type=int)
parser.add_argument('--accum_count', default=2,
help='number of updates steps to accumulate before performing a backward/update pass.', type=int)
parser.add_argument('--max_lr', default=2e-5,
help='max learning rate for warm up', type=float)
parser.add_argument('--warmup_steps', default=10000,
help='warm up steps for training', type=int)
parser.add_argument('--n_epochs', default=10,
help='total number of training epochs', type=int)
parser.add_argument('--valid_steps', default=1000,
help='number of update steps for checkpoint and validation', type=int)

args = parser.parse_args()
if args.mode == 'train':
print('Training process of BertSum !!!')
train_model(args)
else:
print('Testing process of BertSum !!!')
test_model(args)





+ 24
- 0
reproduction/Summmarization/BertSum/utils.py View File

@@ -0,0 +1,24 @@
import os
from os.path import exists

def get_data_path(mode, label_type):
paths = {}
if mode == 'train':
paths['train'] = 'data/' + label_type + '/bert.train.jsonl'
paths['val'] = 'data/' + label_type + '/bert.val.jsonl'
else:
paths['test'] = 'data/' + label_type + '/bert.test.jsonl'
return paths

def get_rouge_path(label_type):
if label_type == 'others':
data_path = 'data/' + label_type + '/bert.test.jsonl'
else:
data_path = 'data/' + label_type + '/test.jsonl'
dec_path = 'dec'
ref_path = 'ref'
if not exists(ref_path):
os.makedirs(ref_path)
if not exists(dec_path):
os.makedirs(dec_path)
return data_path, dec_path, ref_path

reproduction/CNN-sentence_classification/__init__.py → reproduction/coreference_resolution/__init__.py View File


reproduction/Char-aware_NLM/__init__.py → reproduction/coreference_resolution/data_load/__init__.py View File


+ 68
- 0
reproduction/coreference_resolution/data_load/cr_loader.py View File

@@ -0,0 +1,68 @@
from fastNLP.io.dataset_loader import JsonLoader,DataSet,Instance
from fastNLP.io.file_reader import _read_json
from fastNLP.core.vocabulary import Vocabulary
from fastNLP.io.base_loader import DataInfo
from reproduction.coreference_resolution.model.config import Config
import reproduction.coreference_resolution.model.preprocess as preprocess


class CRLoader(JsonLoader):
def __init__(self, fields=None, dropna=False):
super().__init__(fields, dropna)

def _load(self, path):
"""
Load the data
:param path:
:return:
"""
dataset = DataSet()
for idx, d in _read_json(path, fields=self.fields_list, dropna=self.dropna):
if self.fields:
ins = {self.fields[k]: v for k, v in d.items()}
else:
ins = d
dataset.append(Instance(**ins))
return dataset

def process(self, paths, **kwargs):
data_info = DataInfo()
for name in ['train', 'test', 'dev']:
data_info.datasets[name] = self.load(paths[name])

config = Config()
vocab = Vocabulary().from_dataset(*data_info.datasets.values(), field_name='sentences')
vocab.build_vocab()
word2id = vocab.word2idx

char_dict = preprocess.get_char_dict(config.char_path)
data_info.vocabs = vocab

genres = {g: i for i, g in enumerate(["bc", "bn", "mz", "nw", "pt", "tc", "wb"])}

for name, ds in data_info.datasets.items():
ds.apply(lambda x: preprocess.doc2numpy(x['sentences'], word2id, char_dict, max(config.filter),
config.max_sentences, is_train=name=='train')[0],
new_field_name='doc_np')
ds.apply(lambda x: preprocess.doc2numpy(x['sentences'], word2id, char_dict, max(config.filter),
config.max_sentences, is_train=name=='train')[1],
new_field_name='char_index')
ds.apply(lambda x: preprocess.doc2numpy(x['sentences'], word2id, char_dict, max(config.filter),
config.max_sentences, is_train=name=='train')[2],
new_field_name='seq_len')
ds.apply(lambda x: preprocess.speaker2numpy(x["speakers"], config.max_sentences, is_train=name=='train'),
new_field_name='speaker_ids_np')
ds.apply(lambda x: genres[x["doc_key"][:2]], new_field_name='genre')

ds.set_ignore_type('clusters')
ds.set_padder('clusters', None)
ds.set_input("sentences", "doc_np", "speaker_ids_np", "genre", "char_index", "seq_len")
ds.set_target("clusters")

# train_dev, test = self.ds.split(348 / (2802 + 343 + 348), shuffle=False)
# train, dev = train_dev.split(343 / (2802 + 343), shuffle=False)

return data_info




reproduction/HAN-document_classification/__init__.py → reproduction/coreference_resolution/model/__init__.py View File


+ 54
- 0
reproduction/coreference_resolution/model/config.py View File

@@ -0,0 +1,54 @@
class Config():
def __init__(self):
self.is_training = True
# path
self.glove = 'data/glove.840B.300d.txt.filtered'
self.turian = 'data/turian.50d.txt'
self.train_path = "data/train.english.jsonlines"
self.dev_path = "data/dev.english.jsonlines"
self.test_path = "data/test.english.jsonlines"
self.char_path = "data/char_vocab.english.txt"

self.cuda = "0"
self.max_word = 1500
self.epoch = 200

# config
# self.use_glove = True
# self.use_turian = True #No
self.use_elmo = False
self.use_CNN = True
self.model_heads = True #Yes
self.use_width = True # Yes
self.use_distance = True #Yes
self.use_metadata = True #Yes

self.mention_ratio = 0.4
self.max_sentences = 50
self.span_width = 10
self.feature_size = 20 # size of the span-width feature embedding
self.lr = 0.001
self.lr_decay = 1e-3
self.max_antecedents = 100 # this parameter is not used in mention detection
self.atten_hidden_size = 150
self.mention_hidden_size = 150
self.sa_hidden_size = 150

self.char_emb_size = 8
self.filter = [3,4,5]


# decay = 1e-5

def __str__(self):
d = self.__dict__
out = 'config==============\n'
for i in list(d):
out += i+":"
out += str(d[i])+"\n"
out+="config==============\n"
return out

if __name__=="__main__":
config = Config()
print(config)

+ 163
- 0
reproduction/coreference_resolution/model/metric.py View File

@@ -0,0 +1,163 @@
from fastNLP.core.metrics import MetricBase

import numpy as np

from collections import Counter
from sklearn.utils.linear_assignment_ import linear_assignment

"""
Mostly borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
"""



class CRMetric(MetricBase):
def __init__(self):
super().__init__()
self.evaluators = [Evaluator(m) for m in (muc, b_cubed, ceafe)]

# TODO rename to evaluate; adjust the inputs as well
def evaluate(self, predicted, mention_to_predicted,clusters):
for e in self.evaluators:
e.update(predicted,mention_to_predicted, clusters)

def get_f1(self):
return sum(e.get_f1() for e in self.evaluators) / len(self.evaluators)

def get_recall(self):
return sum(e.get_recall() for e in self.evaluators) / len(self.evaluators)

def get_precision(self):
return sum(e.get_precision() for e in self.evaluators) / len(self.evaluators)

# TODO this was originally get_prf
def get_metric(self,reset=False):
res = {"pre":self.get_precision(), "rec":self.get_recall(), "f":self.get_f1()}
self.evaluators = [Evaluator(m) for m in (muc, b_cubed, ceafe)]
return res






class Evaluator():
def __init__(self, metric, beta=1):
self.p_num = 0
self.p_den = 0
self.r_num = 0
self.r_den = 0
self.metric = metric
self.beta = beta

def update(self, predicted,mention_to_predicted,gold):
gold = gold[0].tolist()
gold = [tuple(tuple(m) for m in gc) for gc in gold]
mention_to_gold = {}
for gc in gold:
for mention in gc:
mention_to_gold[mention] = gc

if self.metric == ceafe:
pn, pd, rn, rd = self.metric(predicted, gold)
else:
pn, pd = self.metric(predicted, mention_to_gold)
rn, rd = self.metric(gold, mention_to_predicted)
self.p_num += pn
self.p_den += pd
self.r_num += rn
self.r_den += rd

def get_f1(self):
return f1(self.p_num, self.p_den, self.r_num, self.r_den, beta=self.beta)

def get_recall(self):
return 0 if self.r_num == 0 else self.r_num / float(self.r_den)

def get_precision(self):
return 0 if self.p_num == 0 else self.p_num / float(self.p_den)

def get_prf(self):
return self.get_precision(), self.get_recall(), self.get_f1()

def get_counts(self):
return self.p_num, self.p_den, self.r_num, self.r_den



def b_cubed(clusters, mention_to_gold):
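# B-cubed: each non-singleton cluster contributes its squared overlap with every gold cluster,
# divided by the cluster size; the numerator and denominator are returned separately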
num, dem = 0, 0

for c in clusters:
if len(c) == 1:
continue

gold_counts = Counter()
correct = 0
for m in c:
if m in mention_to_gold:
gold_counts[tuple(mention_to_gold[m])] += 1
for c2, count in gold_counts.items():
if len(c2) != 1:
correct += count * count

num += correct / float(len(c))
dem += len(c)

return num, dem


def muc(clusters, mention_to_gold):
tp, p = 0, 0
for c in clusters:
p += len(c) - 1
tp += len(c)
linked = set()
for m in c:
if m in mention_to_gold:
linked.add(mention_to_gold[m])
else:
tp -= 1
tp -= len(linked)
return tp, p


def phi4(c1, c2):
return 2 * len([m for m in c1 if m in c2]) / float(len(c1) + len(c2))


def ceafe(clusters, gold_clusters):
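# CEAF-e: find the optimal one-to-one alignment between predicted and gold clusters
# (Hungarian algorithm on the phi4 similarity) and score the aligned pairs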
clusters = [c for c in clusters if len(c) != 1]
scores = np.zeros((len(gold_clusters), len(clusters)))
for i in range(len(gold_clusters)):
for j in range(len(clusters)):
scores[i, j] = phi4(gold_clusters[i], clusters[j])
matching = linear_assignment(-scores)
similarity = sum(scores[matching[:, 0], matching[:, 1]])
return similarity, len(clusters), similarity, len(gold_clusters)


def lea(clusters, mention_to_gold):
num, dem = 0, 0

for c in clusters:
if len(c) == 1:
continue

common_links = 0
all_links = len(c) * (len(c) - 1) / 2.0
for i, m in enumerate(c):
if m in mention_to_gold:
for m2 in c[i + 1:]:
if m2 in mention_to_gold and mention_to_gold[m] == mention_to_gold[m2]:
common_links += 1

num += len(c) * common_links / float(all_links)
dem += len(c)

return num, dem

def f1(p_num, p_den, r_num, r_den, beta=1):
p = 0 if p_den == 0 else p_num / float(p_den)
r = 0 if r_den == 0 else r_num / float(r_den)
return 0 if p + r == 0 else (1 + beta * beta) * p * r / (beta * beta * p + r)

+ 576
- 0
reproduction/coreference_resolution/model/model_re.py View File

@@ -0,0 +1,576 @@
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F

from allennlp.commands.elmo import ElmoEmbedder
from fastNLP.models.base_model import BaseModel
from fastNLP.modules.encoder.variational_rnn import VarLSTM
from reproduction.coreference_resolution.model import preprocess
from fastNLP.io.embed_loader import EmbedLoader
import random

# set random seeds
torch.manual_seed(0) # cpu
torch.cuda.manual_seed(0) # gpu
np.random.seed(0) # numpy
random.seed(0)


class ffnn(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(ffnn, self).__init__()

self.f = nn.Sequential(
# number of layers
nn.Linear(input_size, hidden_size),
nn.ReLU(inplace=True),
nn.Dropout(p=0.2),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(inplace=True),
nn.Dropout(p=0.2),
nn.Linear(hidden_size, output_size)
)
self.reset_param()

def reset_param(self):
for name, param in self.named_parameters():
if param.dim() > 1:
nn.init.xavier_normal_(param)
# param.data = torch.tensor(np.random.randn(*param.shape)).float()
else:
nn.init.zeros_(param)

def forward(self, input):
return self.f(input).squeeze()


class Model(BaseModel):
def __init__(self, vocab, config):
word2id = vocab.word2idx
super(Model, self).__init__()
vocab_num = len(word2id)
self.word2id = word2id
self.config = config
self.char_dict = preprocess.get_char_dict('data/char_vocab.english.txt')
self.genres = {g: i for i, g in enumerate(["bc", "bn", "mz", "nw", "pt", "tc", "wb"])}
self.device = torch.device("cuda:" + config.cuda)

self.emb = nn.Embedding(vocab_num, 350)

emb1 = EmbedLoader().load_with_vocab(config.glove, vocab,normalize=False)
emb2 = EmbedLoader().load_with_vocab(config.turian, vocab ,normalize=False)
pre_emb = np.concatenate((emb1, emb2), axis=1)
pre_emb /= (np.linalg.norm(pre_emb, axis=1, keepdims=True) + 1e-12)

if pre_emb is not None:
self.emb.weight = nn.Parameter(torch.from_numpy(pre_emb).float())
for param in self.emb.parameters():
param.requires_grad = False
self.emb_dropout = nn.Dropout(inplace=True)


if config.use_elmo:
self.elmo = ElmoEmbedder(options_file='data/elmo/elmo_2x4096_512_2048cnn_2xhighway_options.json',
weight_file='data/elmo/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5',
cuda_device=int(config.cuda))
print("elmo load over.")
self.elmo_args = torch.randn((3), requires_grad=True).to(self.device)

self.char_emb = nn.Embedding(len(self.char_dict), config.char_emb_size)
self.conv1 = nn.Conv1d(config.char_emb_size, 50, 3)
self.conv2 = nn.Conv1d(config.char_emb_size, 50, 4)
self.conv3 = nn.Conv1d(config.char_emb_size, 50, 5)

self.feature_emb = nn.Embedding(config.span_width, config.feature_size)
self.feature_emb_dropout = nn.Dropout(p=0.2, inplace=True)

self.mention_distance_emb = nn.Embedding(10, config.feature_size)
self.distance_drop = nn.Dropout(p=0.2, inplace=True)

self.genre_emb = nn.Embedding(7, config.feature_size)
self.speaker_emb = nn.Embedding(2, config.feature_size)

self.bilstm = VarLSTM(input_size=350+150*config.use_CNN+config.use_elmo*1024,hidden_size=200,bidirectional=True,batch_first=True,hidden_dropout=0.2)
# self.bilstm = nn.LSTM(input_size=500, hidden_size=200, bidirectional=True, batch_first=True)
self.h0 = nn.init.orthogonal_(torch.empty(2, 1, 200)).to(self.device)
self.c0 = nn.init.orthogonal_(torch.empty(2, 1, 200)).to(self.device)
self.bilstm_drop = nn.Dropout(p=0.2, inplace=True)

self.atten = ffnn(input_size=400, hidden_size=config.atten_hidden_size, output_size=1)
self.mention_score = ffnn(input_size=1320, hidden_size=config.mention_hidden_size, output_size=1)
self.sa = ffnn(input_size=3980+40*config.use_metadata, hidden_size=config.sa_hidden_size, output_size=1)
self.mention_start_np = None
self.mention_end_np = None

def _reorder_lstm(self, word_emb, seq_lens):
sort_ind = sorted(range(len(seq_lens)), key=lambda i: seq_lens[i], reverse=True)
seq_lens_re = [seq_lens[i] for i in sort_ind]
emb_seq = self.reorder_sequence(word_emb, sort_ind, batch_first=True)
packed_seq = nn.utils.rnn.pack_padded_sequence(emb_seq, seq_lens_re, batch_first=True)

h0 = self.h0.repeat(1, len(seq_lens), 1)
c0 = self.c0.repeat(1, len(seq_lens), 1)
packed_out, final_states = self.bilstm(packed_seq, (h0, c0))

lstm_out, _ = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)
back_map = {ind: i for i, ind in enumerate(sort_ind)}
reorder_ind = [back_map[i] for i in range(len(seq_lens_re))]
lstm_out = self.reorder_sequence(lstm_out, reorder_ind, batch_first=True)
return lstm_out

def reorder_sequence(self, sequence_emb, order, batch_first=True):
"""
sequence_emb: [T, B, D] if not batch_first
order: list of indices specifying the new order along the batch dimension
"""
batch_dim = 0 if batch_first else 1
assert len(order) == sequence_emb.size()[batch_dim]

order = torch.LongTensor(order)
order = order.to(sequence_emb).long()

sorted_ = sequence_emb.index_select(index=order, dim=batch_dim)

del order
return sorted_

def flat_lstm(self, lstm_out, seq_lens):
batch = lstm_out.shape[0]
seq = lstm_out.shape[1]
dim = lstm_out.shape[2]
l = [j + i * seq for i, seq_len in enumerate(seq_lens) for j in range(seq_len)]
flatted = torch.index_select(lstm_out.view(batch * seq, dim), 0, torch.LongTensor(l).to(self.device))
return flatted

def potential_mention_index(self, word_index, max_sent_len):
# enumerate candidate mention spans within each sentence.
# word_index holds the sentence id of each token, e.g. sentence lengths [3,2] -> [0,0,0,1,1]
# [0,0,0,1,1] --> [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [3, 3], [3, 4], [4, 4]] (max width = 2)
potential_mention = []
for i in range(len(word_index)):
for j in range(i, i + max_sent_len):
if (j < len(word_index) and word_index[i] == word_index[j]):
potential_mention.append([i, j])
return potential_mention

def get_mention_start_end(self, seq_lens):
# convert sentence lengths to per-token sentence ids
# e.g. [3,2] --> [0,0,0,1,1]
word_index = [0] * sum(seq_lens)
sent_index = 0
index = 0
for length in seq_lens:
for l in range(length):
word_index[index] = sent_index
index += 1
sent_index += 1

# [0,0,0,1,1]-->[[0,0],[0,1],[0,2]....]
mention_id = self.potential_mention_index(word_index, self.config.span_width)
mention_start = np.array(mention_id, dtype=int)[:, 0]
mention_end = np.array(mention_id, dtype=int)[:, 1]
return mention_start, mention_end

def get_mention_emb(self, flatten_lstm, mention_start, mention_end):
mention_start_tensor = torch.from_numpy(mention_start).to(self.device)
mention_end_tensor = torch.from_numpy(mention_end).to(self.device)
emb_start = flatten_lstm.index_select(dim=0, index=mention_start_tensor) # [mention_num,embed]
emb_end = flatten_lstm.index_select(dim=0, index=mention_end_tensor) # [mention_num,embed]
return emb_start, emb_end

def get_mask(self, mention_start, mention_end):
# big mask for attention
mention_num = mention_start.shape[0]
mask = np.zeros((mention_num, self.config.span_width)) # [mention_num,span_width]
for i in range(mention_num):
start = mention_start[i]
end = mention_end[i]
# j runs over the span width (end - start + 1)
for j in range(end - start + 1):
mask[i][j] = 1
mask = torch.from_numpy(mask) # [mention_num,max_mention]
# 0-->-inf 1-->0
log_mask = torch.log(mask)
return log_mask

def get_mention_index(self, mention_start, max_mention):
# TODO: may need to be revised later
assert len(mention_start.shape) == 1
mention_start_tensor = torch.from_numpy(mention_start)
num_mention = mention_start_tensor.shape[0]
mention_index = mention_start_tensor.expand(max_mention, num_mention).transpose(0,
1) # [num_mention,max_mention]
assert mention_index.shape[0] == num_mention
assert mention_index.shape[1] == max_mention
range_add = torch.arange(0, max_mention).expand(num_mention, max_mention).long() # [num_mention,max_mention]
mention_index = mention_index + range_add
mention_index = torch.min(mention_index, torch.LongTensor([mention_start[-1]]).expand(num_mention, max_mention))
return mention_index.to(self.device)

def sort_mention(self, mention_start, mention_end, candidate_mention_emb, candidate_mention_score, seq_lens):
# sort by mention score, highest-scoring spans first
mention_score, mention_ids = torch.sort(candidate_mention_score, descending=True)
preserve_mention_num = int(self.config.mention_ratio * sum(seq_lens))
mention_ids = mention_ids[0:preserve_mention_num]
mention_score = mention_score[0:preserve_mention_num]

mention_start_tensor = torch.from_numpy(mention_start).to(self.device).index_select(dim=0,
index=mention_ids) # [lamda*word_num]
mention_end_tensor = torch.from_numpy(mention_end).to(self.device).index_select(dim=0,
index=mention_ids) # [lamda*word_num]
mention_emb = candidate_mention_emb.index_select(index=mention_ids, dim=0) # [lamda*word_num,emb]
assert mention_score.shape[0] == preserve_mention_num
assert mention_start_tensor.shape[0] == preserve_mention_num
assert mention_end_tensor.shape[0] == preserve_mention_num
assert mention_emb.shape[0] == preserve_mention_num
# TODO: overlapping/crossing spans are not handled

# re-sort by start position so that earlier mentions come first
# TODO: only start positions are considered here, not end positions
mention_start_tensor, temp_index = torch.sort(mention_start_tensor)
mention_end_tensor = mention_end_tensor.index_select(dim=0, index=temp_index)
mention_emb = mention_emb.index_select(dim=0, index=temp_index)
mention_score = mention_score.index_select(dim=0, index=temp_index)
return mention_start_tensor, mention_end_tensor, mention_score, mention_emb

def get_antecedents(self, mention_starts, max_antecedents):
num_mention = mention_starts.shape[0]
max_antecedents = min(max_antecedents, num_mention)
# for each mention, the indices of its candidate antecedent mentions
antecedents = np.zeros((num_mention, max_antecedents), dtype=int) # [num_mention,max_an]
# number of valid antecedents per mention
antecedents_len = [0] * num_mention
for i in range(num_mention):
ante_count = 0
for j in range(max(0, i - max_antecedents), i):
antecedents[i, ante_count] = j
ante_count += 1
# pad the remaining slots with 0
for j in range(ante_count, max_antecedents):
antecedents[i, j] = 0
antecedents_len[i] = ante_count
assert antecedents.shape[1] == max_antecedents
return antecedents, antecedents_len

def get_antecedents_score(self, span_represent, mention_score, antecedents, antecedents_len, mention_speakers_ids,
genre):
num_mention = mention_score.shape[0]
max_antecedent = antecedents.shape[1]

pair_emb = self.get_pair_emb(span_represent, antecedents, mention_speakers_ids, genre) # [span_num,max_ant,emb]
antecedent_scores = self.sa(pair_emb)
mask01 = self.sequence_mask(antecedents_len, max_antecedent)
maskinf = torch.log(mask01).to(self.device)
assert maskinf.shape[1] <= max_antecedent
assert antecedent_scores.shape[0] == num_mention
antecedent_scores = antecedent_scores + maskinf
antecedents = torch.from_numpy(antecedents).to(self.device)
mention_scoreij = mention_score.unsqueeze(1) + torch.gather(
mention_score.unsqueeze(0).expand(num_mention, num_mention), dim=1, index=antecedents)
antecedent_scores += mention_scoreij

antecedent_scores = torch.cat([torch.zeros([mention_score.shape[0], 1]).to(self.device), antecedent_scores],
1) # [num_mentions, max_ant + 1]
return antecedent_scores

##############################
def distance_bin(self, mention_distance):
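# bucket raw mention distances into bins: distances 1-4 each get their own bin,
# longer distances share coarser bins ([5,7], [8,15], [16,31], [32,63], [64,300]);
# distances outside all ranges stay in bin 0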
bins = torch.zeros(mention_distance.size()).byte().to(self.device)
rg = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 7], [8, 15], [16, 31], [32, 63], [64, 300]]
for t, k in enumerate(rg):
i, j = k[0], k[1]
b = torch.LongTensor([i]).unsqueeze(-1).expand(mention_distance.size()).to(self.device)
m1 = torch.ge(mention_distance, b)
e = torch.LongTensor([j]).unsqueeze(-1).expand(mention_distance.size()).to(self.device)
m2 = torch.le(mention_distance, e)
bins = bins + (t + 1) * (m1 & m2)
return bins.long()

def get_distance_emb(self, antecedents_tensor):
num_mention = antecedents_tensor.shape[0]
max_ant = antecedents_tensor.shape[1]

assert max_ant <= self.config.max_antecedents
source = torch.arange(0, num_mention).expand(max_ant, num_mention).transpose(0,1).to(self.device) # [num_mention,max_ant]
mention_distance = source - antecedents_tensor
mention_distance_bin = self.distance_bin(mention_distance)
distance_emb = self.mention_distance_emb(mention_distance_bin)
distance_emb = self.distance_drop(distance_emb)
return distance_emb

def get_pair_emb(self, span_emb, antecedents, mention_speakers_ids, genre):
emb_dim = span_emb.shape[1]
num_span = span_emb.shape[0]
max_ant = antecedents.shape[1]
assert span_emb.shape[0] == antecedents.shape[0]
antecedents = torch.from_numpy(antecedents).to(self.device)

# [num_span,max_ant,emb]
antecedent_emb = torch.gather(span_emb.unsqueeze(0).expand(num_span, num_span, emb_dim), dim=1,
index=antecedents.unsqueeze(2).expand(num_span, max_ant, emb_dim))
# [num_span,max_ant,emb]
target_emb_tiled = span_emb.expand((max_ant, num_span, emb_dim))
target_emb_tiled = target_emb_tiled.transpose(0, 1)

similarity_emb = antecedent_emb * target_emb_tiled

pair_emb_list = [target_emb_tiled, antecedent_emb, similarity_emb]

# get speakers and genre
if self.config.use_metadata:
antecedent_speaker_ids = mention_speakers_ids.unsqueeze(0).expand(num_span, num_span).gather(dim=1,
index=antecedents)
same_speaker = torch.eq(mention_speakers_ids.unsqueeze(1).expand(num_span, max_ant),
antecedent_speaker_ids) # [num_mention,max_ant]
speaker_embedding = self.speaker_emb(same_speaker.long().to(self.device)) # [mention_num.max_ant,emb]
genre_embedding = self.genre_emb(
torch.LongTensor([genre]).expand(num_span, max_ant).to(self.device)) # [mention_num,max_ant,emb]
pair_emb_list.append(speaker_embedding)
pair_emb_list.append(genre_embedding)

# get distance emb
if self.config.use_distance:
distance_emb = self.get_distance_emb(antecedents)
pair_emb_list.append(distance_emb)

pair_emb = torch.cat(pair_emb_list, 2)
return pair_emb

def sequence_mask(self, len_list, max_len):
x = np.zeros((len(len_list), max_len))
for i in range(len(len_list)):
l = len_list[i]
for j in range(l):
x[i][j] = 1
return torch.from_numpy(x).float()

def logsumexp(self, value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation

value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp(value - m))

return m + torch.log(sum_exp)

def softmax_loss(self, antecedent_scores, antecedent_labels):
antecedent_labels = torch.from_numpy(antecedent_labels * 1).to(self.device)
gold_scores = antecedent_scores + torch.log(antecedent_labels.float()) # [num_mentions, max_ant + 1]
marginalized_gold_scores = self.logsumexp(gold_scores, 1) # [num_mentions]
log_norm = self.logsumexp(antecedent_scores, 1) # [num_mentions]
return torch.sum(log_norm - marginalized_gold_scores) # [num_mentions]reduce_logsumexp

def get_predicted_antecedents(self, antecedents, antecedent_scores):
predicted_antecedents = []
for i, index in enumerate(np.argmax(antecedent_scores.detach(), axis=1) - 1):
if index < 0:
predicted_antecedents.append(-1)
else:
predicted_antecedents.append(antecedents[i, index])
return predicted_antecedents

def get_predicted_clusters(self, mention_starts, mention_ends, predicted_antecedents):
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index
predicted_antecedent = (int(mention_starts[predicted_index]), int(mention_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster

mention = (int(mention_starts[i]), int(mention_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster

predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = {m: predicted_clusters[i] for m, i in mention_to_predicted.items()}

return predicted_clusters, mention_to_predicted

def evaluate_coref(self, mention_starts, mention_ends, predicted_antecedents, gold_clusters, evaluator):
gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
mention_to_gold = {}
for gc in gold_clusters:
for mention in gc:
mention_to_gold[mention] = gc
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(mention_starts, mention_ends,
predicted_antecedents)
evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
return predicted_clusters


def forward(self, sentences, doc_np, speaker_ids_np, genre, char_index, seq_len):
"""
All inputs have already been converted to tensors by fastNLP.
:param sentences: the raw sentences, converted to a numpy array by fastNLP
:param doc_np: converted to a Tensor by fastNLP
:param speaker_ids_np: converted to a Tensor by fastNLP
:param genre: converted to a Tensor by fastNLP
:param char_index: converted to a Tensor by fastNLP
:param seq_len: converted to a Tensor by fastNLP
:return:
"""
# change for fastNLP
sentences = sentences[0].tolist()
doc_tensor = doc_np[0]
speakers_tensor = speaker_ids_np[0]
genre = genre[0].item()
char_index = char_index[0]
seq_len = seq_len[0].cpu().numpy()

# dtype/device handling: the manual numpy-to-tensor conversion below is no longer needed

# doc_tensor = torch.from_numpy(doc_np).to(self.device)
# speakers_tensor = torch.from_numpy(speaker_ids_np).to(self.device)
mention_emb_list = []

word_emb = self.emb(doc_tensor)
word_emb_list = [word_emb]
if self.config.use_CNN:
# [batch, length, char_length, char_dim]
char = self.char_emb(char_index)
char_size = char.size()
# first transform to [batch *length, char_length, char_dim]
# then transpose to [batch * length, char_dim, char_length]
char = char.view(char_size[0] * char_size[1], char_size[2], char_size[3]).transpose(1, 2)

# put into cnn [batch*length, char_filters, char_length]
# then put into maxpooling [batch * length, char_filters]
char_over_cnn, _ = self.conv1(char).max(dim=2)
# reshape to [batch, length, char_filters]
char_over_cnn = torch.tanh(char_over_cnn).view(char_size[0], char_size[1], -1)
word_emb_list.append(char_over_cnn)

char_over_cnn, _ = self.conv2(char).max(dim=2)
char_over_cnn = torch.tanh(char_over_cnn).view(char_size[0], char_size[1], -1)
word_emb_list.append(char_over_cnn)

char_over_cnn, _ = self.conv3(char).max(dim=2)
char_over_cnn = torch.tanh(char_over_cnn).view(char_size[0], char_size[1], -1)
word_emb_list.append(char_over_cnn)

# word_emb = torch.cat(word_emb_list, dim=2)

# use elmo or not
if self.config.use_elmo:
# if the document was actually truncated to 50 sentences
if doc_tensor.shape[0] == 50 and len(sentences) > 50:
sentences = sentences[0:50]
elmo_embedding, elmo_mask = self.elmo.batch_to_embeddings(sentences)
elmo_embedding = elmo_embedding.to(
self.device) # [sentence_num,max_sent_len,3,1024]--[sentence_num,max_sent,1024]
elmo_embedding = elmo_embedding[:, 0, :, :] * self.elmo_args[0] + elmo_embedding[:, 1, :, :] * \
self.elmo_args[1] + elmo_embedding[:, 2, :, :] * self.elmo_args[2]
word_emb_list.append(elmo_embedding)
# print(word_emb_list[0].shape)
# print(word_emb_list[1].shape)
# print(word_emb_list[2].shape)
# print(word_emb_list[3].shape)
# print(word_emb_list[4].shape)

word_emb = torch.cat(word_emb_list, dim=2)

word_emb = self.emb_dropout(word_emb)
# word_emb_elmo = self.emb_dropout(word_emb_elmo)
lstm_out = self._reorder_lstm(word_emb, seq_len)
flatten_lstm = self.flat_lstm(lstm_out, seq_len) # [word_num,emb]
flatten_lstm = self.bilstm_drop(flatten_lstm)
# TODO: this does not follow the paper exactly
flatten_word_emb = self.flat_lstm(word_emb, seq_len) # [word_num,emb]

mention_start, mention_end = self.get_mention_start_end(seq_len) # [mention_num]
self.mention_start_np = mention_start # [mention_num] np
self.mention_end_np = mention_end
mention_num = mention_start.shape[0]
emb_start, emb_end = self.get_mention_emb(flatten_lstm, mention_start, mention_end) # [mention_num,emb]

# list
mention_emb_list.append(emb_start)
mention_emb_list.append(emb_end)

if self.config.use_width:
mention_width_index = mention_end - mention_start
mention_width_tensor = torch.from_numpy(mention_width_index).to(self.device) # [mention_num]
mention_width_emb = self.feature_emb(mention_width_tensor)
mention_width_emb = self.feature_emb_dropout(mention_width_emb)
mention_emb_list.append(mention_width_emb)

if self.config.model_heads:
mention_index = self.get_mention_index(mention_start, self.config.span_width) # [mention_num,max_mention]
log_mask_tensor = self.get_mask(mention_start, mention_end).float().to(
self.device) # [mention_num,max_mention]
alpha = self.atten(flatten_lstm).to(self.device) # [word_num]

# attention weights over the tokens inside each span
mention_head_score = torch.gather(alpha.expand(mention_num, -1), 1,
mention_index).float().to(self.device) # [mention_num,max_mention]
mention_attention = F.softmax(mention_head_score + log_mask_tensor, dim=1) # [mention_num,max_mention]

# TODO: flattened lstm features
word_num = flatten_lstm.shape[0]
lstm_emb = flatten_lstm.shape[1]
emb_num = flatten_word_emb.shape[1]

# [num_mentions, max_mention_width, emb]
mention_text_emb = torch.gather(
flatten_word_emb.unsqueeze(1).expand(word_num, self.config.span_width, emb_num),
0, mention_index.unsqueeze(2).expand(mention_num, self.config.span_width,
emb_num))
# [mention_num,emb]
mention_head_emb = torch.sum(
mention_attention.unsqueeze(2).expand(mention_num, self.config.span_width, emb_num) * mention_text_emb,
dim=1)
mention_emb_list.append(mention_head_emb)

candidate_mention_emb = torch.cat(mention_emb_list, 1) # [candidate_mention_num,emb]
candidate_mention_score = self.mention_score(candidate_mention_emb) # [candidate_mention_num]

antecedent_scores, antecedents, mention_start_tensor, mention_end_tensor = (None, None, None, None)
mention_start_tensor, mention_end_tensor, mention_score, mention_emb = \
self.sort_mention(mention_start, mention_end, candidate_mention_emb, candidate_mention_score, seq_len)
mention_speakers_ids = speakers_tensor.index_select(dim=0, index=mention_start_tensor) # num_mention

antecedents, antecedents_len = self.get_antecedents(mention_start_tensor, self.config.max_antecedents)
antecedent_scores = self.get_antecedents_score(mention_emb, mention_score, antecedents, antecedents_len,
mention_speakers_ids, genre)

ans = {"candidate_mention_score": candidate_mention_score, "antecedent_scores": antecedent_scores,
"antecedents": antecedents, "mention_start_tensor": mention_start_tensor,
"mention_end_tensor": mention_end_tensor}

return ans

def predict(self, sentences, doc_np, speaker_ids_np, genre, char_index, seq_len):
ans = self(sentences,
doc_np,
speaker_ids_np,
genre,
char_index,
seq_len)

predicted_antecedents = self.get_predicted_antecedents(ans["antecedents"], ans["antecedent_scores"])
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(ans["mention_start_tensor"],
ans["mention_end_tensor"],
predicted_antecedents)

return {'predicted':predicted_clusters,"mention_to_predicted":mention_to_predicted}


if __name__ == '__main__':
pass

+ 225
- 0
reproduction/coreference_resolution/model/preprocess.py View File

@@ -0,0 +1,225 @@
import json
import numpy as np
from . import util
import collections

def load(path):
"""
load the file from jsonline
:param path:
:return: examples with many example(dict): {"clusters":[[[mention],[mention]],[another cluster]],
"doc_key":"str","speakers":[[,,,],[]...],"sentence":[[][]]}
"""
with open(path) as f:
train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
return train_examples

def get_vocab():
"""
Build the final vocabulary from all sentences (train, dev and test); called by main.
:return: word2id & id2word
"""
word2id = {'PAD':0,'UNK':1}
id2word = {0:'PAD',1:'UNK'}
index = 2
data = [load("../data/train.english.jsonlines"),load("../data/dev.english.jsonlines"),load("../data/test.english.jsonlines")]
for examples in data:
for example in examples:
for sent in example["sentences"]:
for word in sent:
if(word not in word2id):
word2id[word]=index
id2word[index] = word
index += 1
return word2id,id2word

def normalize(v):
norm = np.linalg.norm(v)
if norm > 0:
return v / norm
else:
return v

# load GloVe and Turian embeddings and build the embedding matrix
def get_emb(id2word,embedding_size):
glove_oov = 0
turian_oov = 0
both = 0
glove_emb_path = "../data/glove.840B.300d.txt.filtered"
turian_emb_path = "../data/turian.50d.txt"
word_num = len(id2word)
emb = np.zeros((word_num,embedding_size))
glove_emb_dict = util.load_embedding_dict(glove_emb_path,300,"txt")
turian_emb_dict = util.load_embedding_dict(turian_emb_path,50,"txt")
for i in range(word_num):
if id2word[i] in glove_emb_dict:
word_embedding = glove_emb_dict.get(id2word[i])
emb[i][0:300] = np.array(word_embedding)
else:
# print(id2word[i])
glove_oov += 1
if id2word[i] in turian_emb_dict:
word_embedding = turian_emb_dict.get(id2word[i])
emb[i][300:350] = np.array(word_embedding)
else:
# print(id2word[i])
turian_oov += 1
if id2word[i] not in glove_emb_dict and id2word[i] not in turian_emb_dict:
both += 1
emb[i] = normalize(emb[i])
print("embedding num:"+str(word_num))
print("glove num:"+str(glove_oov))
print("glove oov rate:"+str(glove_oov/word_num))
print("turian num:"+str(turian_oov))
print("turian oov rate:"+str(turian_oov/word_num))
print("both num:"+str(both))
return emb


def _doc2vec(doc,word2id,char_dict,max_filter,max_sentences,is_train):
max_len = 0
max_word_length = 0
docvex = []
length = []
if is_train:
sent_num = min(max_sentences,len(doc))
else:
sent_num = len(doc)

for i in range(sent_num):
sent = doc[i]
length.append(len(sent))
if (len(sent) > max_len):
max_len = len(sent)
sent_vec =[]
for j,word in enumerate(sent):
if len(word)>max_word_length:
max_word_length = len(word)
if word in word2id:
sent_vec.append(word2id[word])
else:
sent_vec.append(word2id["UNK"])
docvex.append(sent_vec)

char_index = np.zeros((sent_num, max_len, max_word_length),dtype=int)
for i in range(sent_num):
sent = doc[i]
for j,word in enumerate(sent):
char_index[i, j, :len(word)] = [char_dict[c] for c in word]

return docvex,char_index,length,max_len

# TODO: the interface has been changed; make sure every call site has been updated accordingly
def doc2numpy(doc,word2id,chardict,max_filter,max_sentences,is_train):
docvec, char_index, length, max_len = _doc2vec(doc,word2id,chardict,max_filter,max_sentences,is_train)
assert max(length) == max_len
assert char_index.shape[0]==len(length)
assert char_index.shape[1]==max_len
doc_np = np.zeros((len(docvec), max_len), int)
for i in range(len(docvec)):
for j in range(len(docvec[i])):
doc_np[i][j] = docvec[i][j]
return doc_np,char_index,length

# TODO: not yet tested
def speaker2numpy(speakers_raw,max_sentences,is_train):
if is_train and len(speakers_raw)> max_sentences:
speakers_raw = speakers_raw[0:max_sentences]
speakers = flatten(speakers_raw)
speaker_dict = {s: i for i, s in enumerate(set(speakers))}
speaker_ids = np.array([speaker_dict[s] for s in speakers])
return speaker_ids


def flat_cluster(clusters):
flatted = []
for cluster in clusters:
for item in cluster:
flatted.append(item)
return flatted

def get_right_mention(clusters,mention_start_np,mention_end_np):
flatted = flat_cluster(clusters)
cluster_num = len(flatted)
mention_num = mention_start_np.shape[0]
right_mention = np.zeros(mention_num,dtype=int)
for i in range(mention_num):
if [mention_start_np[i],mention_end_np[i]] in flatted:
right_mention[i]=1
return right_mention,cluster_num

def handle_cluster(clusters):
gold_mentions = sorted(tuple(m) for m in flatten(clusters))
gold_mention_map = {m: i for i, m in enumerate(gold_mentions)}
cluster_ids = np.zeros(len(gold_mentions), dtype=int)
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id
gold_starts, gold_ends = tensorize_mentions(gold_mentions)
return cluster_ids, gold_starts, gold_ends

# flatten a nested list
def flatten(l):
return [item for sublist in l for item in sublist]

# split mentions into separate start and end arrays
def tensorize_mentions(mentions):
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)

def get_char_dict(path):
vocab = ["<UNK>"]
with open(path) as f:
vocab.extend(c.strip() for c in f.readlines())
char_dict = collections.defaultdict(int)
char_dict.update({c: i for i, c in enumerate(vocab)})
return char_dict

def get_labels(clusters,mention_starts,mention_ends,max_antecedents):
cluster_ids, gold_starts, gold_ends = handle_cluster(clusters)
num_mention = mention_starts.shape[0]
num_gold = gold_starts.shape[0]
max_antecedents = min(max_antecedents, num_mention)
mention_indices = {}

for i in range(num_mention):
mention_indices[(mention_starts[i].detach().item(), mention_ends[i].detach().item())] = i
# record which mentions are correct: -1 means not a gold mention, a non-negative value is the id of the gold cluster the mention belongs to
mention_cluster_ids = [-1] * num_mention
# test
right_mention_count = 0
for i in range(num_gold):
right_mention = mention_indices.get((gold_starts[i], gold_ends[i]))
if right_mention is not None:
right_mention_count += 1
mention_cluster_ids[right_mention] = cluster_ids[i]

# whether mentions i and j belong to the same cluster
labels = np.zeros((num_mention, max_antecedents + 1), dtype=bool) # [num_mention,max_an+1]
for i in range(num_mention):
ante_count = 0
null_label = True
for j in range(max(0, i - max_antecedents), i):
if (mention_cluster_ids[i] >= 0 and mention_cluster_ids[i] == mention_cluster_ids[j]):
labels[i, ante_count + 1] = True
null_label = False
else:
labels[i, ante_count + 1] = False
ante_count += 1
for j in range(ante_count, max_antecedents):
labels[i, j + 1] = False
labels[i, 0] = null_label
return labels

# test===========================


if __name__=="__main__":
word2id,id2word = get_vocab()
get_emb(id2word,350)



+ 32
- 0
reproduction/coreference_resolution/model/softmax_loss.py View File

@@ -0,0 +1,32 @@
from fastNLP.core.losses import LossBase

from reproduction.coreference_resolution.model.preprocess import get_labels
from reproduction.coreference_resolution.model.config import Config
import torch


class SoftmaxLoss(LossBase):
"""
Cross-entropy loss over candidate antecedents.
Supports multiple gold antecedents per mention (multi-label).
"""

def __init__(self, antecedent_scores=None, clusters=None, mention_start_tensor=None, mention_end_tensor=None):
"""

:param antecedent_scores: scores over candidate antecedents, [num_mentions, max_ant + 1]
:param clusters: gold coreference clusters
:param mention_start_tensor: start indices of the predicted mentions
:param mention_end_tensor: end indices of the predicted mentions
"""
super().__init__()
self._init_param_map(antecedent_scores=antecedent_scores, clusters=clusters,
mention_start_tensor=mention_start_tensor, mention_end_tensor=mention_end_tensor)

def get_loss(self, antecedent_scores, clusters, mention_start_tensor, mention_end_tensor):
antecedent_labels = get_labels(clusters[0], mention_start_tensor, mention_end_tensor,
Config().max_antecedents)

antecedent_labels = torch.from_numpy(antecedent_labels*1).to(torch.device("cuda:" + Config().cuda))
gold_scores = antecedent_scores + torch.log(antecedent_labels.float()).to(torch.device("cuda:" + Config().cuda)) # [num_mentions, max_ant + 1]
marginalized_gold_scores = gold_scores.logsumexp(dim=1) # [num_mentions]
log_norm = antecedent_scores.logsumexp(dim=1) # [num_mentions]
return torch.sum(log_norm - marginalized_gold_scores)

+ 101
- 0
reproduction/coreference_resolution/model/util.py View File

@@ -0,0 +1,101 @@
import os
import errno
import collections
import torch
import numpy as np
import pyhocon



# flatten the list
def flatten(l):
return [item for sublist in l for item in sublist]


def get_config(filename):
return pyhocon.ConfigFactory.parse_file(filename)


# safely create a directory (no error if it already exists)
def mkdirs(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
return path


def load_char_dict(char_vocab_path):
vocab = ["<unk>"]
with open(char_vocab_path) as f:
vocab.extend(c.strip() for c in f.readlines())
char_dict = collections.defaultdict(int)
char_dict.update({c: i for i, c in enumerate(vocab)})
return char_dict

# load pretrained word embeddings from a text file into a dict
def load_embedding_dict(embedding_path, embedding_size, embedding_format):
print("Loading word embeddings from {}...".format(embedding_path))
default_embedding = np.zeros(embedding_size)
embedding_dict = collections.defaultdict(lambda: default_embedding)
skip_first = embedding_format == "vec"
with open(embedding_path) as f:
for i, line in enumerate(f.readlines()):
if skip_first and i == 0:
continue
splits = line.split()
assert len(splits) == embedding_size + 1
word = splits[0]
embedding = np.array([float(s) for s in splits[1:]])
embedding_dict[word] = embedding
print("Done loading word embeddings.")
return embedding_dict


# safe divide (returns 0 when the denominator is 0)
def maybe_divide(x, y):
return 0 if y == 0 else x / float(y)


def shape(x, dim):
# return the size of tensor x along dimension dim
return x.shape[dim]


def normalize(v):
norm = np.linalg.norm(v)
if norm > 0:
return v / norm
else:
return v


class RetrievalEvaluator(object):
def __init__(self):
self._num_correct = 0
self._num_gold = 0
self._num_predicted = 0

def update(self, gold_set, predicted_set):
self._num_correct += len(gold_set & predicted_set)
self._num_gold += len(gold_set)
self._num_predicted += len(predicted_set)

def recall(self):
return maybe_divide(self._num_correct, self._num_gold)

def precision(self):
return maybe_divide(self._num_correct, self._num_predicted)

def metrics(self):
recall = self.recall()
precision = self.precision()
f1 = maybe_divide(2 * recall * precision, precision + recall)
return recall, precision, f1



if __name__=="__main__":
print(load_char_dict("../data/char_vocab.english.txt"))
embedding_dict = load_embedding_dict("../data/glove.840B.300d.txt.filtered",300,"txt")
print("hello")

+ 49
- 0
reproduction/coreference_resolution/readme.md View File

@@ -0,0 +1,49 @@
# Coreference Resolution Reproduction
## Introduction
Coreference resolution is the task of finding all expressions in a text that refer to the same real-world entity.
It is an important step for many higher-level NLP tasks that involve natural language understanding,
such as document summarization, question answering and information extraction.
The implementation is mainly based on [End-to-End Coreference Resolution (Lee et al., 2017)](https://arxiv.org/pdf/1707.07045).


## Data acquisition and preprocessing
The paper achieved the then state-of-the-art result on the [OntoNotes 5.0](https://allennlp.org/models) dataset.
Due to licensing restrictions we cannot distribute the dataset; please obtain it yourself.
The raw dataset is in CoNLL format; see the official dataset page for details.

The code uses the preprocessing of the original author (Lee); see [this script](https://github.com/kentonl/e2e-coref/blob/e2e/setup_training.sh) for details.
The processed dataset is in JSON Lines format, one document per line, for example:
```
{
"clusters": [],
"doc_key": "nw",
"sentences": [["This", "is", "the", "first", "sentence", "."], ["This", "is", "the", "second", "."]],
"speakers": [["spk1", "spk1", "spk1", "spk1", "spk1", "spk1"], ["spk2", "spk2", "spk2", "spk2", "spk2"]]
}
```
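
Each line of the processed file is therefore one self-contained JSON document. Below is a minimal sketch of reading such a file (the helper name and path are illustrative; `model/preprocess.py::load` in this directory does essentially the same thing):

```python
import json

def load_jsonlines(path):
    # each line is one document: a dict with "clusters", "doc_key",
    # "sentences" and "speakers" as in the example above
    with open(path) as f:
        return [json.loads(line) for line in f]

examples = load_jsonlines("train.english.jsonlines")  # illustrative path
print(examples[0]["doc_key"], len(examples[0]["sentences"]))
```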

### Embedding downloads
[Turian embedding](https://lil.cs.washington.edu/coref/turian.50d.txt)

[GloVe embedding](https://nlp.stanford.edu/data/glove.840B.300d.zip)



## Running
```
# training
CUDA_VISIBLE_DEVICES=0 python train.py
# evaluation
CUDA_VISIBLE_DEVICES=0 python valid.py
```

## Results
The original authors report 67.2% F1 on the test set; the AllenNLP reproduction reports [63.0%](https://allennlp.org/models).
Note that AllenNLP trains without speaker information, without variational dropout, and with only 100 candidate antecedents instead of 250.

With the same hyperparameters and configuration as AllenNLP, this reproduction reaches an F1 of 63.6%.


## Issues
If you have any questions or feedback, please open an issue or contact me by email:
yexu_i@qq.com

+ 0
- 0
reproduction/coreference_resolution/test/__init__.py View File


+ 14
- 0
reproduction/coreference_resolution/test/test_dataloader.py View File

@@ -0,0 +1,14 @@
import unittest
from ..data_load.cr_loader import CRLoader

class Test_CRLoader(unittest.TestCase):
def test_cr_loader(self):
train_path = 'data/train.english.jsonlines.mini'
dev_path = 'data/dev.english.jsonlines.minid'
test_path = 'data/test.english.jsonlines'
cr = CRLoader()
data_info = cr.process({'train':train_path,'dev':dev_path,'test':test_path})

print(data_info.datasets['train'][0])
print(data_info.datasets['dev'][0])
print(data_info.datasets['test'][0])

+ 69
- 0
reproduction/coreference_resolution/train.py View File

@@ -0,0 +1,69 @@
import sys
sys.path.append('../..')

import torch
from torch.optim import Adam

from fastNLP.core.callback import Callback, GradientClipCallback
from fastNLP.core.trainer import Trainer

from reproduction.coreference_resolution.data_load.cr_loader import CRLoader
from reproduction.coreference_resolution.model.config import Config
from reproduction.coreference_resolution.model.model_re import Model
from reproduction.coreference_resolution.model.softmax_loss import SoftmaxLoss
from reproduction.coreference_resolution.model.metric import CRMetric
from fastNLP import SequentialSampler
from fastNLP import cache_results


# torch.backends.cudnn.benchmark = False
# torch.backends.cudnn.deterministic = True

class LRCallback(Callback):
def __init__(self, parameters, decay_rate=1e-3):
super().__init__()
self.paras = parameters
self.decay_rate = decay_rate

def on_step_end(self):
if self.step % 100 == 0:
for para in self.paras:
para['lr'] = para['lr'] * (1 - self.decay_rate)


if __name__ == "__main__":
config = Config()

print(config)

@cache_results('cache.pkl')
def cache():
cr_train_dev_test = CRLoader()

data_info = cr_train_dev_test.process({'train': config.train_path, 'dev': config.dev_path,
'test': config.test_path})
return data_info
data_info = cache()
print("数据集划分:\ntrain:", str(len(data_info.datasets["train"])),
"\ndev:" + str(len(data_info.datasets["dev"])) + "\ntest:" + str(len(data_info.datasets["test"])))
# print(data_info)
model = Model(data_info.vocabs, config)
print(model)

loss = SoftmaxLoss()

metric = CRMetric()

optim = Adam(model.parameters(), lr=config.lr)

lr_decay_callback = LRCallback(optim.param_groups, config.lr_decay)

trainer = Trainer(model=model, train_data=data_info.datasets["train"], dev_data=data_info.datasets["dev"],
loss=loss, metrics=metric, check_code_level=-1,sampler=None,
batch_size=1, device=torch.device("cuda:" + config.cuda), metric_key='f', n_epochs=config.epoch,
optimizer=optim,
save_path='/remote-home/xxliu/pycharm/fastNLP/fastNLP/reproduction/coreference_resolution/save',
callbacks=[lr_decay_callback, GradientClipCallback(clip_value=5)])
print()

trainer.train()

+ 24
- 0
reproduction/coreference_resolution/valid.py View File

@@ -0,0 +1,24 @@
import torch
from reproduction.coreference_resolution.model.config import Config
from reproduction.coreference_resolution.model.metric import CRMetric
from reproduction.coreference_resolution.data_load.cr_loader import CRLoader
from fastNLP import Tester
import argparse


if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path')
args = parser.parse_args()
cr_loader = CRLoader()
config = Config()
data_info = cr_loader.process({'train': config.train_path, 'dev': config.dev_path,
'test': config.test_path})
metric = CRMetric()
model = torch.load(args.path)
tester = Tester(data_info.datasets['test'], model, metric, batch_size=1, device="cuda:0")
tester.test()
print('test over')



+ 0
- 0
reproduction/joint_cws_parse/__init__.py View File


+ 0
- 0
reproduction/joint_cws_parse/data/__init__.py View File


+ 284
- 0
reproduction/joint_cws_parse/data/data_loader.py View File

@@ -0,0 +1,284 @@


from fastNLP.io.base_loader import DataSetLoader, DataInfo
from fastNLP.io.dataset_loader import ConllLoader
import numpy as np

from itertools import chain
from fastNLP import DataSet, Vocabulary
from functools import partial
import os
from typing import Union, Dict
from reproduction.utils import check_dataloader_paths


class CTBxJointLoader(DataSetLoader):
"""
The folder should contain the following files:
-train.conllx
-dev.conllx
-test.conllx
Each file looks like the following (blank lines separate sentences):
1 费孝通 _ NR NR _ 3 nsubjpass _ _
2 被 _ SB SB _ 3 pass _ _
3 授予 _ VV VV _ 0 root _ _
4 麦格赛赛 _ NR NR _ 5 nn _ _
5 奖 _ NN NN _ 3 dobj _ _

1 新华社 _ NR NR _ 7 dep _ _
2 马尼拉 _ NR NR _ 7 dep _ _
3 8月 _ NT NT _ 7 dep _ _
4 31日 _ NT NT _ 7 dep _ _
...

"""
def __init__(self):
self._loader = ConllLoader(headers=['words', 'pos_tags', 'heads', 'labels'], indexes=[1, 3, 6, 7])

def load(self, path:str):
"""
Given a file path, read the data into a DataSet with the following fields:
words: list[str]
pos_tags: list[str]
heads: list[int]
labels: list[str]

:param path:
:return:
"""
dataset = self._loader.load(path)
dataset.heads.int()
return dataset

def process(self, paths):
"""
:param paths:
:return:
Each DataSet contains the following fields:
chars:
bigrams:
trigrams:
pre_chars:
pre_bigrams:
pre_trigrams:
seg_targets:
seg_masks:
seq_lens:
char_labels:
char_heads:
gold_word_pairs:
seg_targets:
seg_masks:
char_labels:
char_heads:
pun_masks:
gold_label_word_pairs:
"""
paths = check_dataloader_paths(paths)
data = DataInfo()

for name, path in paths.items():
dataset = self.load(path)
data.datasets[name] = dataset

char_labels_vocab = Vocabulary(padding=None, unknown=None)

def process(dataset, char_label_vocab):
dataset.apply(add_word_lst, new_field_name='word_lst')
dataset.apply(lambda x: list(chain(*x['word_lst'])), new_field_name='chars')
dataset.apply(add_bigram, field_name='chars', new_field_name='bigrams')
dataset.apply(add_trigram, field_name='chars', new_field_name='trigrams')
dataset.apply(add_char_heads, new_field_name='char_heads')
dataset.apply(add_char_labels, new_field_name='char_labels')
dataset.apply(add_segs, new_field_name='seg_targets')
dataset.apply(add_mask, new_field_name='seg_masks')
dataset.add_seq_len('chars', new_field_name='seq_lens')
dataset.apply(add_pun_masks, new_field_name='pun_masks')
if len(char_label_vocab.word_count)==0:
char_label_vocab.from_dataset(dataset, field_name='char_labels')
char_label_vocab.index_dataset(dataset, field_name='char_labels')
new_dataset = add_root(dataset)
new_dataset.apply(add_word_pairs, new_field_name='gold_word_pairs', ignore_type=True)
global add_label_word_pairs
add_label_word_pairs = partial(add_label_word_pairs, label_vocab=char_label_vocab)
new_dataset.apply(add_label_word_pairs, new_field_name='gold_label_word_pairs', ignore_type=True)

new_dataset.set_pad_val('char_labels', -1)
new_dataset.set_pad_val('char_heads', -1)

return new_dataset

for name in list(paths.keys()):
dataset = data.datasets[name]
dataset = process(dataset, char_labels_vocab)
data.datasets[name] = dataset

data.vocabs['char_labels'] = char_labels_vocab

char_vocab = Vocabulary(min_freq=2).from_dataset(data.datasets['train'], field_name='chars')
bigram_vocab = Vocabulary(min_freq=5).from_dataset(data.datasets['train'], field_name='bigrams')
trigram_vocab = Vocabulary(min_freq=5).from_dataset(data.datasets['train'], field_name='trigrams')

for name in ['chars', 'bigrams', 'trigrams']:
vocab = Vocabulary().from_dataset(field_name=name, no_create_entry_dataset=list(data.datasets.values()))
vocab.index_dataset(*data.datasets.values(), field_name=name, new_field_name='pre_' + name)
data.vocabs['pre_{}'.format(name)] = vocab

for name, vocab in zip(['chars', 'bigrams', 'trigrams'],
[char_vocab, bigram_vocab, trigram_vocab]):
vocab.index_dataset(*data.datasets.values(), field_name=name, new_field_name=name)
data.vocabs[name] = vocab

for name, dataset in data.datasets.items():
dataset.set_input('chars', 'bigrams', 'trigrams', 'seq_lens', 'char_labels', 'char_heads', 'pre_chars',
'pre_bigrams', 'pre_trigrams')
dataset.set_target('gold_word_pairs', 'seq_lens', 'seg_targets', 'seg_masks', 'char_labels',
'char_heads',
'pun_masks', 'gold_label_word_pairs')

return data


def add_label_word_pairs(instance, label_vocab):
# List[List[((head_start, head_end], (dep_start, dep_end]), ...]]
word_end_indexes = np.array(list(map(len, instance['word_lst'])))
word_end_indexes = np.cumsum(word_end_indexes).tolist()
word_end_indexes.insert(0, 0)
word_pairs = []
labels = instance['labels']
pos_tags = instance['pos_tags']
for idx, head in enumerate(instance['heads']):
if pos_tags[idx]=='PU': # skip punctuation tokens
continue
label = label_vocab.to_index(labels[idx])
if head==0:
word_pairs.append((('root', label, (word_end_indexes[idx], word_end_indexes[idx+1]))))
else:
word_pairs.append(((word_end_indexes[head-1], word_end_indexes[head]), label,
(word_end_indexes[idx], word_end_indexes[idx + 1])))
return word_pairs

def add_word_pairs(instance):
# List[List[((head_start, head_end], (dep_start, dep_end]), ...]]
word_end_indexes = np.array(list(map(len, instance['word_lst'])))
word_end_indexes = np.cumsum(word_end_indexes).tolist()
word_end_indexes.insert(0, 0)
word_pairs = []
pos_tags = instance['pos_tags']
for idx, head in enumerate(instance['heads']):
if pos_tags[idx]=='PU': # skip punctuation tokens
continue
if head==0:
word_pairs.append((('root', (word_end_indexes[idx], word_end_indexes[idx+1]))))
else:
word_pairs.append(((word_end_indexes[head-1], word_end_indexes[head]),
(word_end_indexes[idx], word_end_indexes[idx + 1])))
return word_pairs

def add_root(dataset):
new_dataset = DataSet()
for sample in dataset:
chars = ['char_root'] + sample['chars']
bigrams = ['bigram_root'] + sample['bigrams']
trigrams = ['trigram_root'] + sample['trigrams']
seq_lens = sample['seq_lens']+1
char_labels = [0] + sample['char_labels']
char_heads = [0] + sample['char_heads']
sample['chars'] = chars
sample['bigrams'] = bigrams
sample['trigrams'] = trigrams
sample['seq_lens'] = seq_lens
sample['char_labels'] = char_labels
sample['char_heads'] = char_heads
new_dataset.append(sample)
return new_dataset

def add_pun_masks(instance):
tags = instance['pos_tags']
pun_masks = []
for word, tag in zip(instance['words'], tags):
if tag=='PU':
pun_masks.extend([1]*len(word))
else:
pun_masks.extend([0]*len(word))
return pun_masks

def add_word_lst(instance):
words = instance['words']
word_lst = [list(word) for word in words]
return word_lst

def add_bigram(instance):
chars = instance['chars']
length = len(chars)
chars = chars + ['<eos>']
bigrams = []
for i in range(length):
bigrams.append(''.join(chars[i:i + 2]))
return bigrams

def add_trigram(instance):
chars = instance['chars']
length = len(chars)
chars = chars + ['<eos>'] * 2
trigrams = []
for i in range(length):
trigrams.append(''.join(chars[i:i + 3]))
return trigrams

def add_char_heads(instance):
words = instance['word_lst']
heads = instance['heads']
char_heads = []
char_index = 1 # start from 1 because a root node occupies position 0
head_end_indexes = np.cumsum(list(map(len, words))).tolist() + [0] # root head is 0, so head-1 = -1 picks the appended 0
for word, head in zip(words, heads):
char_head = []
if len(word)>1:
char_head.append(char_index+1)
char_index += 1
for _ in range(len(word)-2):
char_index += 1
char_head.append(char_index)
char_index += 1
char_head.append(head_end_indexes[head-1])
char_heads.extend(char_head)
return char_heads

def add_char_labels(instance):
"""
Assign a label to every character in word_lst as follows.
For "复旦大学 位于" the segmentation tags are "B M M E B E"; inside a word the character-level dependencies are
"复(dep)->旦(head)", "旦(dep)->大(head)", ... with label 'APP', while the last character ("学") takes the
dependency label of the whole word "复旦大学".
:param instance:
:return:
"""
words = instance['word_lst']
labels = instance['labels']
char_labels = []
for word, label in zip(words, labels):
for _ in range(len(word)-1):
char_labels.append('APP')
char_labels.append(label)
return char_labels

# add seg_targets
def add_segs(instance):
words = instance['word_lst']
segs = [0]*len(instance['chars'])
index = 0
for word in words:
index = index + len(word) - 1
segs[index] = len(word)-1
index = index + 1
return segs

# add target_masks
def add_mask(instance):
words = instance['word_lst']
mask = []
for word in words:
mask.extend([0] * (len(word) - 1))
mask.append(1)
return mask

+ 311
- 0
reproduction/joint_cws_parse/models/CharParser.py View File

@@ -0,0 +1,311 @@



from fastNLP.models.biaffine_parser import BiaffineParser
from fastNLP.models.biaffine_parser import ArcBiaffine, LabelBilinear

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

from fastNLP.modules.dropout import TimestepDropout
from fastNLP.modules.encoder.variational_rnn import VarLSTM
from fastNLP import seq_len_to_mask
from fastNLP.modules import Embedding


def drop_input_independent(word_embeddings, dropout_emb):
batch_size, seq_length, _ = word_embeddings.size()
word_masks = word_embeddings.new(batch_size, seq_length).fill_(1 - dropout_emb)
word_masks = torch.bernoulli(word_masks)
word_masks = word_masks.unsqueeze(dim=2)
word_embeddings = word_embeddings * word_masks

return word_embeddings


class CharBiaffineParser(BiaffineParser):
def __init__(self, char_vocab_size,
emb_dim,
bigram_vocab_size,
trigram_vocab_size,
num_label,
rnn_layers=3,
rnn_hidden_size=800, # hidden size per direction
arc_mlp_size=500,
label_mlp_size=100,
dropout=0.3,
encoder='lstm',
use_greedy_infer=False,
app_index = 0,
pre_chars_embed=None,
pre_bigrams_embed=None,
pre_trigrams_embed=None):


super(BiaffineParser, self).__init__()
rnn_out_size = 2 * rnn_hidden_size
self.char_embed = Embedding((char_vocab_size, emb_dim))
self.bigram_embed = Embedding((bigram_vocab_size, emb_dim))
self.trigram_embed = Embedding((trigram_vocab_size, emb_dim))
if pre_chars_embed:
self.pre_char_embed = Embedding(pre_chars_embed)
self.pre_char_embed.requires_grad = False
if pre_bigrams_embed:
self.pre_bigram_embed = Embedding(pre_bigrams_embed)
self.pre_bigram_embed.requires_grad = False
if pre_trigrams_embed:
self.pre_trigram_embed = Embedding(pre_trigrams_embed)
self.pre_trigram_embed.requires_grad = False
self.timestep_drop = TimestepDropout(dropout)
self.encoder_name = encoder

if encoder == 'var-lstm':
self.encoder = VarLSTM(input_size=emb_dim*3,
hidden_size=rnn_hidden_size,
num_layers=rnn_layers,
bias=True,
batch_first=True,
input_dropout=dropout,
hidden_dropout=dropout,
bidirectional=True)
elif encoder == 'lstm':
self.encoder = nn.LSTM(input_size=emb_dim*3,
hidden_size=rnn_hidden_size,
num_layers=rnn_layers,
bias=True,
batch_first=True,
dropout=dropout,
bidirectional=True)

else:
raise ValueError('unsupported encoder type: {}'.format(encoder))

self.mlp = nn.Sequential(nn.Linear(rnn_out_size, arc_mlp_size * 2 + label_mlp_size * 2),
nn.LeakyReLU(0.1),
TimestepDropout(p=dropout),)
self.arc_mlp_size = arc_mlp_size
self.label_mlp_size = label_mlp_size
self.arc_predictor = ArcBiaffine(arc_mlp_size, bias=True)
self.label_predictor = LabelBilinear(label_mlp_size, label_mlp_size, num_label, bias=True)
self.use_greedy_infer = use_greedy_infer
self.reset_parameters()
self.dropout = dropout

self.app_index = app_index
self.num_label = num_label
if self.app_index != 0:
raise ValueError("现在app_index必须等于0")

def reset_parameters(self):
for name, m in self.named_modules():
if 'embed' in name:
pass
elif hasattr(m, 'reset_parameters') or hasattr(m, 'init_param'):
pass
else:
for p in m.parameters():
if len(p.size())>1:
nn.init.xavier_normal_(p, gain=0.1)
else:
nn.init.uniform_(p, -0.1, 0.1)

def forward(self, chars, bigrams, trigrams, seq_lens, gold_heads=None, pre_chars=None, pre_bigrams=None,
pre_trigrams=None):
"""
max_len includes the root token
:param chars: batch_size x max_len
:param ngrams: batch_size x max_len*ngram_per_char
:param seq_lens: batch_size
:param gold_heads: batch_size x max_len
:param pre_chars: batch_size x max_len
:param pre_ngrams: batch_size x max_len*ngram_per_char
:return dict: parsing results
arc_pred: [batch_size, seq_len, seq_len]
label_pred: [batch_size, seq_len, seq_len]
mask: [batch_size, seq_len]
head_pred: [batch_size, seq_len] if gold_heads is not provided, predicting the heads
"""
# prepare embeddings
batch_size, seq_len = chars.shape
# print('forward {} {}'.format(batch_size, seq_len))

# get sequence mask
mask = seq_len_to_mask(seq_lens).long()

chars = self.char_embed(chars) # [N,L] -> [N,L,C_0]
bigrams = self.bigram_embed(bigrams) # [N,L] -> [N,L,C_1]
trigrams = self.trigram_embed(trigrams)

if pre_chars is not None:
pre_chars = self.pre_char_embed(pre_chars)
# pre_chars = self.pre_char_fc(pre_chars)
chars = pre_chars + chars
if pre_bigrams is not None:
pre_bigrams = self.pre_bigram_embed(pre_bigrams)
# pre_bigrams = self.pre_bigram_fc(pre_bigrams)
bigrams = bigrams + pre_bigrams
if pre_trigrams is not None:
pre_trigrams = self.pre_trigram_embed(pre_trigrams)
# pre_trigrams = self.pre_trigram_fc(pre_trigrams)
trigrams = trigrams + pre_trigrams

x = torch.cat([chars, bigrams, trigrams], dim=2) # -> [N,L,C]

# encoder, extract features
if self.training:
x = drop_input_independent(x, self.dropout)
sort_lens, sort_idx = torch.sort(seq_lens, dim=0, descending=True)
x = x[sort_idx]
x = nn.utils.rnn.pack_padded_sequence(x, sort_lens, batch_first=True)
feat, _ = self.encoder(x) # -> [N,L,C]
feat, _ = nn.utils.rnn.pad_packed_sequence(feat, batch_first=True)
_, unsort_idx = torch.sort(sort_idx, dim=0, descending=False)
feat = feat[unsort_idx]
feat = self.timestep_drop(feat)

# for arc biaffine
# mlp, reduce dim
feat = self.mlp(feat)
arc_sz, label_sz = self.arc_mlp_size, self.label_mlp_size
arc_dep, arc_head = feat[:,:,:arc_sz], feat[:,:,arc_sz:2*arc_sz]
label_dep, label_head = feat[:,:,2*arc_sz:2*arc_sz+label_sz], feat[:,:,2*arc_sz+label_sz:]

# biaffine arc classifier
arc_pred = self.arc_predictor(arc_head, arc_dep) # [N, L, L]

# use gold or predicted arc to predict label
if gold_heads is None or not self.training:
# use greedy decoding in training
if self.training or self.use_greedy_infer:
heads = self.greedy_decoder(arc_pred, mask)
else:
heads = self.mst_decoder(arc_pred, mask)
head_pred = heads
else:
assert self.training # must be training mode
if gold_heads is None:
heads = self.greedy_decoder(arc_pred, mask)
head_pred = heads
else:
head_pred = None
heads = gold_heads
# heads: batch_size x max_len

batch_range = torch.arange(start=0, end=batch_size, dtype=torch.long, device=chars.device).unsqueeze(1)
label_head = label_head[batch_range, heads].contiguous()
label_pred = self.label_predictor(label_head, label_dep) # [N, max_len, num_label]
# restriction: the 'app' label may only be predicted when the head is the next character
arange_index = torch.arange(1, seq_len+1, dtype=torch.long, device=chars.device).unsqueeze(0)\
.repeat(batch_size, 1) # batch_size x max_len
app_masks = heads.ne(arange_index) # batch_size x max_len; positions set to 1 must not predict 'app'
app_masks = app_masks.unsqueeze(2).repeat(1, 1, self.num_label)
app_masks[:, :, 1:] = 0
label_pred = label_pred.masked_fill(app_masks, -np.inf)

res_dict = {'arc_pred': arc_pred, 'label_pred': label_pred, 'mask': mask}
if head_pred is not None:
res_dict['head_pred'] = head_pred
return res_dict

@staticmethod
def loss(arc_pred, label_pred, arc_true, label_true, mask):
"""
Compute loss.

:param arc_pred: [batch_size, seq_len, seq_len]
:param label_pred: [batch_size, seq_len, n_tags]
:param arc_true: [batch_size, seq_len]
:param label_true: [batch_size, seq_len]
:param mask: [batch_size, seq_len]
:return: loss value
"""

batch_size, seq_len, _ = arc_pred.shape
flip_mask = (mask == 0)
_arc_pred = arc_pred.clone()
_arc_pred.masked_fill_(flip_mask.unsqueeze(1), -float('inf'))

arc_true[:, 0].fill_(-1)
label_true[:, 0].fill_(-1)

arc_nll = F.cross_entropy(_arc_pred.view(-1, seq_len), arc_true.view(-1), ignore_index=-1)
label_nll = F.cross_entropy(label_pred.view(-1, label_pred.size(-1)), label_true.view(-1), ignore_index=-1)

return arc_nll + label_nll

def predict(self, chars, bigrams, trigrams, seq_lens, pre_chars, pre_bigrams, pre_trigrams):
"""

max_len includes the root token

:param chars: batch_size x max_len
:param ngrams: batch_size x max_len*ngram_per_char
:param seq_lens: batch_size
:param pre_chars: batch_size x max_len
:param pre_ngrams: batch_size x max_len*ngram_per_cha
:return:
"""
res = self(chars, bigrams, trigrams, seq_lens, pre_chars=pre_chars, pre_bigrams=pre_bigrams,
pre_trigrams=pre_trigrams, gold_heads=None)
output = {}
output['arc_pred'] = res.pop('head_pred')
_, label_pred = res.pop('label_pred').max(2)
output['label_pred'] = label_pred
return output

class CharParser(nn.Module):
def __init__(self, char_vocab_size,
emb_dim,
bigram_vocab_size,
trigram_vocab_size,
num_label,
rnn_layers=3,
rnn_hidden_size=400, # hidden size per direction
arc_mlp_size=500,
label_mlp_size=100,
dropout=0.3,
encoder='var-lstm',
use_greedy_infer=False,
app_index = 0,
pre_chars_embed=None,
pre_bigrams_embed=None,
pre_trigrams_embed=None):
super().__init__()

self.parser = CharBiaffineParser(char_vocab_size,
emb_dim,
bigram_vocab_size,
trigram_vocab_size,
num_label,
rnn_layers,
rnn_hidden_size, # hidden size per direction
arc_mlp_size,
label_mlp_size,
dropout,
encoder,
use_greedy_infer,
app_index,
pre_chars_embed=pre_chars_embed,
pre_bigrams_embed=pre_bigrams_embed,
pre_trigrams_embed=pre_trigrams_embed)

def forward(self, chars, bigrams, trigrams, seq_lens, char_heads, char_labels, pre_chars=None, pre_bigrams=None,
pre_trigrams=None):
res_dict = self.parser(chars, bigrams, trigrams, seq_lens, gold_heads=char_heads, pre_chars=pre_chars,
pre_bigrams=pre_bigrams, pre_trigrams=pre_trigrams)
arc_pred = res_dict['arc_pred']
label_pred = res_dict['label_pred']
masks = res_dict['mask']
loss = self.parser.loss(arc_pred, label_pred, char_heads, char_labels, masks)
return {'loss': loss}

def predict(self, chars, bigrams, trigrams, seq_lens, pre_chars=None, pre_bigrams=None, pre_trigrams=None):
res = self.parser(chars, bigrams, trigrams, seq_lens, gold_heads=None, pre_chars=pre_chars,
pre_bigrams=pre_bigrams, pre_trigrams=pre_trigrams)
output = {}
output['head_preds'] = res.pop('head_pred')
_, label_pred = res.pop('label_pred').max(2)
output['label_preds'] = label_pred
return output

+ 0
- 0
reproduction/joint_cws_parse/models/__init__.py View File


+ 65
- 0
reproduction/joint_cws_parse/models/callbacks.py View File

@@ -0,0 +1,65 @@

from fastNLP.core.callback import Callback
import torch
from torch import nn

class OptimizerCallback(Callback):
def __init__(self, optimizer, scheduler, update_every=4):
super().__init__()

self._optimizer = optimizer
self.scheduler = scheduler
self._update_every = update_every

def on_backward_end(self):
if self.step % self._update_every==0:
# nn.utils.clip_grad.clip_grad_norm_(self.model.parameters(), 5)
# self._optimizer.step()
self.scheduler.step()
# self.model.zero_grad()


class DevCallback(Callback):
def __init__(self, tester, metric_key='u_f1'):
super().__init__()
self.tester = tester
setattr(tester, 'verbose', 0)

self.metric_key = metric_key

self.record_best = False
self.best_eval_value = 0
self.best_eval_res = None

self.best_dev_res = None # store the best dev performance

def on_valid_begin(self):
eval_res = self.tester.test()
metric_name = self.tester.metrics[0].__class__.__name__
metric_value = eval_res[metric_name][self.metric_key]
if metric_value>self.best_eval_value:
self.best_eval_value = metric_value
self.best_epoch = self.trainer.epoch
self.record_best = True
self.best_eval_res = eval_res
self.test_eval_res = eval_res
eval_str = "Epoch {}/{}. \n".format(self.trainer.epoch, self.n_epochs) + \
self.tester._format_eval_results(eval_res)
self.pbar.write(eval_str)

def on_valid_end(self, eval_result, metric_key, optimizer, is_better_eval):
if self.record_best:
self.best_dev_res = eval_result
self.record_best = False
if is_better_eval:
self.best_dev_res_on_dev = eval_result
self.best_test_res_on_dev = self.test_eval_res
self.dev_epoch = self.epoch

def on_train_end(self):
print("Got best test performance in epoch:{}\n Test: {}\n Dev:{}\n".format(self.best_epoch,
self.tester._format_eval_results(self.best_eval_res),
self.tester._format_eval_results(self.best_dev_res)))
print("Got best dev performance in epoch:{}\n Test: {}\n Dev:{}\n".format(self.dev_epoch,
self.tester._format_eval_results(self.best_test_res_on_dev),
self.tester._format_eval_results(self.best_dev_res_on_dev)))

+ 184
- 0
reproduction/joint_cws_parse/models/metrics.py View File

@@ -0,0 +1,184 @@
from fastNLP.core.metrics import MetricBase
from fastNLP.core.utils import seq_len_to_mask
import torch


class SegAppCharParseF1Metric(MetricBase):
#
def __init__(self, app_index):
super().__init__()
self.app_index = app_index

self.parse_head_tp = 0
self.parse_label_tp = 0
self.rec_tol = 0
self.pre_tol = 0

def evaluate(self, gold_word_pairs, gold_label_word_pairs, head_preds, label_preds, seq_lens,
pun_masks):
"""

max_len is the number of characters excluding the root
:param gold_word_pairs: List[List[((head_start, head_end), (dep_start, dep_end)), ...]], batch_size
:param gold_label_word_pairs: List[List[((head_start, head_end), label, (dep_start, dep_end)), ...]], batch_size
:param head_preds: batch_size x max_len
:param label_preds: batch_size x max_len
:param seq_lens:
:param pun_masks: batch_size x
:return:
"""
# drop the root position
head_preds = head_preds[:, 1:].tolist()
label_preds = label_preds[:, 1:].tolist()
seq_lens = (seq_lens - 1).tolist()

# first decode words, heads, labels and the corresponding character spans
for b in range(len(head_preds)):
seq_len = seq_lens[b]
head_pred = head_preds[b][:seq_len]
label_pred = label_preds[b][:seq_len]

words = [] # [word_start, word_end) spans, positions relative to the sentence start, root excluded
heads = []
labels = []
ranges = [] # word index of each character
word_idx = 0
word_start_idx = 0
for idx, (label, head) in enumerate(zip(label_pred, head_pred)):
ranges.append(word_idx)
if label == self.app_index:
pass
else:
labels.append(label)
heads.append(head)
words.append((word_start_idx, idx+1))
word_start_idx = idx+1
word_idx += 1

head_dep_tuple = [] # head comes first in each tuple
head_label_dep_tuple = []
for idx, head in enumerate(heads):
span = words[idx]
if span[0]==span[1]-1 and pun_masks[b, span[0]]:
continue # exclude punctuations
if head == 0:
head_dep_tuple.append((('root', words[idx])))
head_label_dep_tuple.append(('root', labels[idx], words[idx]))
else:
head_word_idx = ranges[head-1]
head_word_span = words[head_word_idx]
head_dep_tuple.append(((head_word_span, words[idx])))
head_label_dep_tuple.append((head_word_span, labels[idx], words[idx]))

gold_head_dep_tuple = set(gold_word_pairs[b])
gold_head_label_dep_tuple = set(gold_label_word_pairs[b])

for head_dep, head_label_dep in zip(head_dep_tuple, head_label_dep_tuple):
if head_dep in gold_head_dep_tuple:
self.parse_head_tp += 1
if head_label_dep in gold_head_label_dep_tuple:
self.parse_label_tp += 1
self.pre_tol += len(head_dep_tuple)
self.rec_tol += len(gold_head_dep_tuple)

def get_metric(self, reset=True):
u_p = self.parse_head_tp / self.pre_tol
u_r = self.parse_head_tp / self.rec_tol
u_f = 2*u_p*u_r/(1e-6 + u_p + u_r)
l_p = self.parse_label_tp / self.pre_tol
l_r = self.parse_label_tp / self.rec_tol
l_f = 2*l_p*l_r/(1e-6 + l_p + l_r)

if reset:
self.parse_head_tp = 0
self.parse_label_tp = 0
self.rec_tol = 0
self.pre_tol = 0

return {'u_f1': round(u_f, 4), 'u_p': round(u_p, 4), 'u_r/uas':round(u_r, 4),
'l_f1': round(l_f, 4), 'l_p': round(l_p, 4), 'l_r/las': round(l_r, 4)}


class CWSMetric(MetricBase):
def __init__(self, app_index):
super().__init__()
self.app_index = app_index
self.pre = 0
self.rec = 0
self.tp = 0

def evaluate(self, seg_targets, seg_masks, label_preds, seq_lens):
"""

:param seg_targets: batch_size x max_len, at each word-end position the value is that word's length minus 1.
:param seg_masks: batch_size x max_len, 1 only at word-end positions
:param label_preds: batch_size x max_len
:param seq_lens: batch_size
:return:
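Example for illustration: for a sentence segmented into words of lengths 2 / 1 / 3, seg_targets holds 1, 0 and 2 at the three word-end positions, and seg_masks is 1 only at those positions.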
"""

pred_masks = torch.zeros_like(seg_masks)
pred_segs = torch.zeros_like(seg_targets)

seq_lens = (seq_lens - 1).tolist()
for idx, label_pred in enumerate(label_preds[:, 1:].tolist()):
seq_len = seq_lens[idx]
label_pred = label_pred[:seq_len]
word_len = 0
for l_i, label in enumerate(label_pred):
if label==self.app_index and l_i!=len(label_pred)-1:
word_len += 1
else:
pred_segs[idx, l_i] = word_len # the length of this word is word_len
pred_masks[idx, l_i] = 1
word_len = 0

right_mask = seg_targets.eq(pred_segs) # the length prediction matches the target
self.rec += seg_masks.sum().item()
self.pre += pred_masks.sum().item()
# and both pred and target mark a word end at the same position
self.tp += (right_mask.__and__(pred_masks.byte().__and__(seg_masks.byte()))).sum().item()

def get_metric(self, reset=True):
res = {}
res['rec'] = round(self.tp/(self.rec+1e-6), 4)
res['pre'] = round(self.tp/(self.pre+1e-6), 4)
res['f1'] = round(2*res['rec']*res['pre']/(res['pre'] + res['rec'] + 1e-6), 4)

if reset:
self.pre = 0
self.rec = 0
self.tp = 0

return res


class ParserMetric(MetricBase):
def __init__(self, ):
super().__init__()
self.num_arc = 0
self.num_label = 0
self.num_sample = 0

def get_metric(self, reset=True):
res = {'UAS': round(self.num_arc*1.0 / self.num_sample, 4),
'LAS': round(self.num_label*1.0 / self.num_sample, 4)}
if reset:
self.num_sample = self.num_label = self.num_arc = 0
return res

def evaluate(self, head_preds, label_preds, heads, labels, seq_lens=None):
"""Evaluate the performance of prediction.
"""
if seq_lens is None:
seq_mask = head_preds.new_ones(head_preds.size(), dtype=torch.byte)
else:
seq_mask = seq_len_to_mask(seq_lens.long(), float=False)
# mask out <root> tag
seq_mask[:, 0] = 0
head_pred_correct = (head_preds == heads).__and__(seq_mask)
label_pred_correct = (label_preds == labels).__and__(head_pred_correct)
self.num_arc += head_pred_correct.float().sum().item()
self.num_label += label_pred_correct.float().sum().item()
self.num_sample += seq_mask.sum().item()


+ 16
- 0
reproduction/joint_cws_parse/readme.md View File

@@ -0,0 +1,16 @@
Code for paper [A Unified Model for Chinese Word Segmentation and Dependency Parsing](https://arxiv.org/abs/1904.04697)

### Data preparation
1. The data should be in CoNLL format, with columns 1, 3, 6 and 7 corresponding to 'words', 'pos_tags', 'heads' and 'labels'.
2. Put train, dev and test in the same folder and fill that folder's path into the data_folder variable in train.py.
3. Download the pretrained vectors from [Baidu Cloud](https://pan.baidu.com/s/1uXnAZpYecYJITCiqgAjjjA) (extraction code: ua53), put them in one folder and set the vector_folder variable in train.py accordingly (see the sketch below).
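For a quick sanity check of that column mapping, here is a minimal sketch using fastNLP's `ConllLoader`; the column indexes and file path below are assumptions for illustration only, and the actual reading is done by `CTBxJointLoader` in train.py:

```
from fastNLP.io.dataset_loader import ConllLoader

# assumed column mapping: columns 1, 3, 6, 7 -> words, pos_tags, heads, labels
loader = ConllLoader(headers=['words', 'pos_tags', 'heads', 'labels'], indexes=[1, 3, 6, 7])
dataset = loader.load('path/to/train.conll')  # hypothetical path
print(dataset[0])  # inspect the first parsed instance
```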


### 运行代码
```
python train.py
```

### Others
With the default hyper-parameters above, the results reported in the paper should be reproducible on ctb5 (usually slightly higher); on ctb7 the defaults come out about 0.1% lower, and the
learning rate scheduler needs to be tuned.

+ 124
- 0
reproduction/joint_cws_parse/train.py View File

@@ -0,0 +1,124 @@
import sys
sys.path.append('../..')

from reproduction.joint_cws_parse.data.data_loader import CTBxJointLoader
from fastNLP.modules.encoder.embedding import StaticEmbedding
from torch import nn
from functools import partial
from reproduction.joint_cws_parse.models.CharParser import CharParser
from reproduction.joint_cws_parse.models.metrics import SegAppCharParseF1Metric, CWSMetric
from fastNLP import cache_results, BucketSampler, Trainer
from torch import optim
from reproduction.joint_cws_parse.models.callbacks import DevCallback, OptimizerCallback
from torch.optim.lr_scheduler import LambdaLR, StepLR
from fastNLP import Tester
from fastNLP import GradientClipCallback, LRScheduler
import os

def set_random_seed(random_seed=666):
import random, numpy, torch
random.seed(random_seed)
numpy.random.seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.random.manual_seed(random_seed)

uniform_init = partial(nn.init.normal_, std=0.02)

###################################################
# hyper-parameters that may need tuning go here
lr = 0.002 # 0.01~0.001
dropout = 0.33 # 0.3~0.6
weight_decay = 0 # 1e-5, 1e-6, 0
arc_mlp_size = 500 # 200, 300
rnn_hidden_size = 400 # 200, 300, 400
rnn_layers = 3 # 2, 3
encoder = 'var-lstm' # var-lstm, lstm
emb_size = 100 # 64 , 100
label_mlp_size = 100

batch_size = 32
update_every = 4
n_epochs = 100
data_folder = '' # folder containing the data; it should hold the train, dev and test files
vector_folder = '' # pretrained vectors; the folder should contain three files: 1grams_t3_m50_corpus.txt, 2grams_t3_m50_corpus.txt, 3grams_t3_m50_corpus.txt
####################################################

set_random_seed(1234)
device = 0

# @cache_results('caches/{}.pkl'.format(data_name))
# def get_data():
data = CTBxJointLoader().process(data_folder)

char_labels_vocab = data.vocabs['char_labels']

pre_chars_vocab = data.vocabs['pre_chars']
pre_bigrams_vocab = data.vocabs['pre_bigrams']
pre_trigrams_vocab = data.vocabs['pre_trigrams']

chars_vocab = data.vocabs['chars']
bigrams_vocab = data.vocabs['bigrams']
trigrams_vocab = data.vocabs['trigrams']

pre_chars_embed = StaticEmbedding(pre_chars_vocab,
model_dir_or_name=os.path.join(vector_folder, '1grams_t3_m50_corpus.txt'),
init_method=uniform_init, normalize=False)
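# rescale the pretrained embedding weights to unit standard deviation (same for the bigram/trigram tables below)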
pre_chars_embed.embedding.weight.data = pre_chars_embed.embedding.weight.data/pre_chars_embed.embedding.weight.data.std()
pre_bigrams_embed = StaticEmbedding(pre_bigrams_vocab,
model_dir_or_name=os.path.join(vector_folder, '2grams_t3_m50_corpus.txt'),
init_method=uniform_init, normalize=False)
pre_bigrams_embed.embedding.weight.data = pre_bigrams_embed.embedding.weight.data/pre_bigrams_embed.embedding.weight.data.std()
pre_trigrams_embed = StaticEmbedding(pre_trigrams_vocab,
model_dir_or_name=os.path.join(vector_folder, '3grams_t3_m50_corpus.txt'),
init_method=uniform_init, normalize=False)
pre_trigrams_embed.embedding.weight.data = pre_trigrams_embed.embedding.weight.data/pre_trigrams_embed.embedding.weight.data.std()

# return chars_vocab, bigrams_vocab, trigrams_vocab, char_labels_vocab, pre_chars_embed, pre_bigrams_embed, pre_trigrams_embed, data

# chars_vocab, bigrams_vocab, trigrams_vocab, char_labels_vocab, pre_chars_embed, pre_bigrams_embed, pre_trigrams_embed, data = get_data()

print(data)
model = CharParser(char_vocab_size=len(chars_vocab),
emb_dim=emb_size,
bigram_vocab_size=len(bigrams_vocab),
trigram_vocab_size=len(trigrams_vocab),
num_label=len(char_labels_vocab),
rnn_layers=rnn_layers,
rnn_hidden_size=rnn_hidden_size,
arc_mlp_size=arc_mlp_size,
label_mlp_size=label_mlp_size,
dropout=dropout,
encoder=encoder,
use_greedy_infer=False,
app_index=char_labels_vocab['APP'],
pre_chars_embed=pre_chars_embed,
pre_bigrams_embed=pre_bigrams_embed,
pre_trigrams_embed=pre_trigrams_embed)

metric1 = SegAppCharParseF1Metric(char_labels_vocab['APP'])
metric2 = CWSMetric(char_labels_vocab['APP'])
metrics = [metric1, metric2]

optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad], lr=lr,
weight_decay=weight_decay, betas=[0.9, 0.9])

sampler = BucketSampler(seq_len_field_name='seq_lens')
callbacks = []
# scheduler = LambdaLR(optimizer, lr_lambda=lambda step:(0.75)**(step//5000))
scheduler = StepLR(optimizer, step_size=18, gamma=0.75)
# optim_callback = OptimizerCallback(optimizer, scheduler, update_every)
# callbacks.append(optim_callback)
scheduler_callback = LRScheduler(scheduler)
callbacks.append(scheduler_callback)
callbacks.append(GradientClipCallback(clip_type='value', clip_value=5))

tester = Tester(data=data.datasets['test'], model=model, metrics=metrics,
batch_size=64, device=device, verbose=0)
dev_callback = DevCallback(tester)
callbacks.append(dev_callback)

trainer = Trainer(data.datasets['train'], model, loss=None, metrics=metrics, n_epochs=n_epochs, batch_size=batch_size, print_every=3,
validate_every=-1, dev_data=data.datasets['dev'], save_path=None, optimizer=optimizer,
check_code_level=0, metric_key='u_f1', sampler=sampler, prefetch=True, use_tqdm=True,
device=device, callbacks=callbacks, update_every=update_every)
trainer.train()

+ 100
- 0
reproduction/matching/README.md View File

@@ -0,0 +1,100 @@
# Matching task model reproduction
Several well-known models for Matching tasks are reproduced here with fastNLP, aiming to match the performance reported in the papers. The evaluation metric for all of these tasks is accuracy (%).

The reproduced models are (ordered by publication date):
- CNTN: model code (still in progress)[](); training code (still in progress)[]().
Paper: [Convolutional Neural Tensor Network Architecture for Community-based Question Answering](https://www.aaai.org/ocs/index.php/IJCAI/IJCAI15/paper/view/11401/10844).
- ESIM: [model code](model/esim.py); [training code](matching_esim.py).
Paper: [Enhanced LSTM for Natural Language Inference](https://arxiv.org/pdf/1609.06038.pdf).
- DIIN: model code (still in progress)[](); training code (still in progress)[]().
Paper: [Natural Language Inference over Interaction Space](https://arxiv.org/pdf/1709.04348.pdf).
- MwAN: model code (still in progress)[](); training code (still in progress)[]().
Paper: [Multiway Attention Networks for Modeling Sentence Pairs](https://www.ijcai.org/proceedings/2018/0613.pdf).
- BERT: [model code](model/bert.py); [training code](matching_bert.py).
Paper: [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/pdf/1810.04805.pdf).

# Datasets and summary of reproduced results

Results reproduced with fastNLP vs results reported in the papers; the number in front is the fastNLP reproduction.

'\-' means we have not reproduced it yet or the original paper did not report it.

model name | SNLI | MNLI | RTE | QNLI | Quora
:---: | :---: | :---: | :---: | :---: | :---:
CNTN [](); [paper](https://www.aaai.org/ocs/index.php/IJCAI/IJCAI15/paper/view/11401/10844) | 74.53 vs - | 60.84/-(dev) vs - | 57.4(dev) vs - | 62.53(dev) vs - | - |
ESIM [code](model/esim.py); [paper](https://arxiv.org/pdf/1609.06038.pdf) | 88.13(glove) vs 88.0(glove)/88.7(elmo) | 77.78/76.49 vs 72.4/72.1* | 59.21(dev) vs - | 76.97(dev) vs - | - |
DIIN [](); [paper](https://arxiv.org/pdf/1709.04348.pdf) | - vs 88.0 | - vs 78.8/77.8 | - | - | - vs 89.06 |
MwAN [](); [paper](https://www.ijcai.org/proceedings/2018/0613.pdf) | 87.9 vs 88.3 | 77.3/76.7(dev) vs 78.5/77.7 | - | 74.6(dev) vs - | 85.6 vs 89.12 |
BERT (BASE version) [code](model/bert.py); [paper](https://arxiv.org/pdf/1810.04805.pdf) | 90.6 vs - | - vs 84.6/83.4 | 67.87(dev) vs 66.4 | 90.97(dev) vs 90.5 | - |

*The 72.4/72.1 result for ESIM on MNLI is the official MNLI reproduction; the original ESIM paper does not report results on MNLI.

# Per-dataset results and comparison with other major models
## SNLI
[Link to SNLI leaderboard](https://nlp.stanford.edu/projects/snli/)

Performance on Test set:

model name | ESIM | DIIN | MwAN | [GPT1.0](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf) | [BERT-Large+SRL](https://arxiv.org/pdf/1809.02794.pdf) | [MT-DNN](https://arxiv.org/pdf/1901.11504.pdf)
:---: | :---: | :---: | :---: | :---: | :---: | :---:
__performance__ | 88.0 | 88.0 | 88.3 | 89.9 | 91.3 | 91.6 |

### Results reproduced with fastNLP
Performance on Test set:

model name | CNTN | ESIM | DIIN | MwAN | BERT-Base | BERT-Large
:---: | :---: | :---: | :---: | :---: | :---: | :---:
__performance__ | - | 88.13 | - | 87.9 | 90.6 | 91.16

## MNLI
[Link to MNLI main page](https://www.nyu.edu/projects/bowman/multinli/)

Performance on Test set (matched/mismatched):

model name | ESIM | DIIN | MwAN | GPT1.0 | BERT-Base | MT-DNN
:---: | :---: | :---: | :---: | :---: | :---: | :---:
__performance__ | 72.4/72.1 | 78.8/77.8 | 78.5/77.7 | 82.1/81.4 | 84.6/83.4 | 87.9/87.4 |

### Results reproduced with fastNLP
Performance on Test set (matched/mismatched):

model name | CNTN | ESIM | DIIN | MwAN | BERT-Base
:---: | :---: | :---: | :---: | :---: | :---: |
__performance__ | - | 77.78/76.49 | - | 77.3/76.7(dev) | - |


## RTE

Still in progress.

## QNLI

### From GLUE baselines
[Link to GLUE leaderboard](https://gluebenchmark.com/leaderboard)

Performance on Test set:
#### LSTM-based
model name | BiLSTM | BiLSTM + Attn | BiLSTM + ELMo | BiLSTM + Attn + ELMo
:---: | :---: | :---: | :---: | :---: |
__performance__ | 74.6 | 74.3 | 75.5 | 79.8 |

*These LSTM-based baselines were implemented and tested officially by QNLI.

#### Transformer-based
model name | GPT1.0 | BERT-Base | BERT-Large | MT-DNN
:---: | :---: | :---: | :---: | :---: |
__performance__ | 87.4 | 90.5 | 92.7 | 96.0 |



### Results reproduced with fastNLP
Performance on __Dev__ set:

model name | CNTN | ESIM | DIIN | MwAN | BERT
:---: | :---: | :---: | :---: | :---: | :---:
__performance__ | - | 76.97 | - | 74.6 | -

## Quora

Still in progress.


+ 52
- 12
reproduction/matching/data/MatchingDataLoader.py View File

@@ -5,8 +5,8 @@ from typing import Union, Dict

from fastNLP.core.const import Const
from fastNLP.core.vocabulary import Vocabulary
from fastNLP.io.base_loader import DataInfo
from fastNLP.io.dataset_loader import JsonLoader, DataSetLoader, CSVLoader
from fastNLP.io.base_loader import DataInfo, DataSetLoader
from fastNLP.io.dataset_loader import JsonLoader, CSVLoader
from fastNLP.io.file_utils import _get_base_url, cached_path, PRETRAINED_BERT_MODEL_DIR
from fastNLP.modules.encoder._bert import BertTokenizer

@@ -16,12 +16,11 @@ class MatchingLoader(DataSetLoader):
Alias: :class:`fastNLP.io.MatchingLoader` :class:`fastNLP.io.dataset_loader.MatchingLoader`

Reads data sets for Matching tasks

:param dict paths: keys are data set names (e.g. train, dev, test), values are the corresponding file names
"""

def __init__(self, paths: dict=None):
"""
:param dict paths: keys are data set names (e.g. train, dev, test), values are the corresponding file names
"""
self.paths = paths

def _load(self, path):
@@ -34,7 +33,8 @@ class MatchingLoader(DataSetLoader):

def process(self, paths: Union[str, Dict[str, str]], dataset_name: str=None,
to_lower=False, seq_len_type: str=None, bert_tokenizer: str=None,
cut_text: int = None, get_index=True, set_input: Union[list, str, bool]=True,
cut_text: int = None, get_index=True, auto_pad_length: int=None,
auto_pad_token: str='<pad>', set_input: Union[list, str, bool]=True,
set_target: Union[list, str, bool] = True, concat: Union[str, list, bool]=None, ) -> DataInfo:
"""
:param paths: str or Dict[str, str]. If str, it is either the folder containing the data set or a full file path; if it is a folder,
@@ -49,6 +49,8 @@ class MatchingLoader(DataSetLoader):
:param str bert_tokenizer: path to the folder holding the vocabulary used by the bert tokenizer
:param int cut_text: truncate content longer than cut_text. Defaults to None, i.e. no truncation.
:param bool get_index: whether to convert the text to indices using the vocabulary
:param int auto_pad_length: length to automatically pad the text to (longer text will be truncated); by default no automatic padding is done
:param str auto_pad_token: the token used for automatic padding
:param set_input: if True, fields whose names contain Const.INPUT are automatically set as input; if False,
no field is set as input. If a str or List[str] is passed, the corresponding fields are set as input
while all other fields are not. Defaults to True.
@@ -169,6 +171,9 @@ class MatchingLoader(DataSetLoader):
data_set.apply(lambda x: [1] * len(x[Const.INPUT_LENS(0)]),
new_field_name=Const.INPUT_LENS(1), is_input=auto_set_input)

if auto_pad_length is not None:
cut_text = min(auto_pad_length, cut_text if cut_text is not None else auto_pad_length)

if cut_text is not None:
for data_name, data_set in data_info.datasets.items():
for fields in data_set.get_field_names():
@@ -180,7 +185,7 @@ class MatchingLoader(DataSetLoader):
assert len(data_set_list) > 0, f'There are NO data sets in data info!'

if bert_tokenizer is None:
words_vocab = Vocabulary()
words_vocab = Vocabulary(padding=auto_pad_token)
words_vocab = words_vocab.from_dataset(*[d for n, d in data_info.datasets.items() if 'train' in n],
field_name=[n for n in data_set_list[0].get_field_names()
if (Const.INPUT in n)],
@@ -202,6 +207,20 @@ class MatchingLoader(DataSetLoader):
data_set.apply(lambda x: target_vocab.to_index(x[Const.TARGET]), new_field_name=Const.TARGET,
is_input=auto_set_input, is_target=auto_set_target)

if auto_pad_length is not None:
if seq_len_type == 'seq_len':
raise RuntimeError(f'the sequence will be padded with the length {auto_pad_length}, '
f'so the seq_len_type cannot be `{seq_len_type}`!')
for data_name, data_set in data_info.datasets.items():
for fields in data_set.get_field_names():
if Const.INPUT in fields:
data_set.apply(lambda x: x[fields] + [words_vocab.to_index(words_vocab.padding)] *
(auto_pad_length - len(x[fields])), new_field_name=fields,
is_input=auto_set_input)
elif (Const.INPUT_LEN in fields) and (seq_len_type != 'seq_len'):
data_set.apply(lambda x: x[fields] + [0] * (auto_pad_length - len(x[fields])),
new_field_name=fields, is_input=auto_set_input)

for data_name, data_set in data_info.datasets.items():
if isinstance(set_input, list):
data_set.set_input(*[inputs for inputs in set_input if inputs in data_set.get_field_names()])
@@ -267,7 +286,7 @@ class RTELoader(MatchingLoader, CSVLoader):
paths = paths if paths is not None else {
'train': 'train.tsv',
'dev': 'dev.tsv',
# 'test': 'test.tsv' # the test set has no labels
'test': 'test.tsv' # the test set has no labels
}
MatchingLoader.__init__(self, paths=paths)
self.fields = {
@@ -281,7 +300,8 @@ class RTELoader(MatchingLoader, CSVLoader):
ds = CSVLoader._load(self, path)

for k, v in self.fields.items():
ds.rename_field(k, v)
if v in ds.get_field_names():
ds.rename_field(k, v)
for fields in ds.get_all_fields():
if Const.INPUT in fields:
ds.apply(lambda x: x[fields].strip().split(), new_field_name=fields)
@@ -306,7 +326,7 @@ class QNLILoader(MatchingLoader, CSVLoader):
paths = paths if paths is not None else {
'train': 'train.tsv',
'dev': 'dev.tsv',
# 'test': 'test.tsv' # the test set has no labels
'test': 'test.tsv' # the test set has no labels
}
MatchingLoader.__init__(self, paths=paths)
self.fields = {
@@ -320,7 +340,8 @@ class QNLILoader(MatchingLoader, CSVLoader):
ds = CSVLoader._load(self, path)

for k, v in self.fields.items():
ds.rename_field(k, v)
if v in ds.get_field_names():
ds.rename_field(k, v)
for fields in ds.get_all_fields():
if Const.INPUT in fields:
ds.apply(lambda x: x[fields].strip().split(), new_field_name=fields)
@@ -332,7 +353,7 @@ class MNLILoader(MatchingLoader, CSVLoader):
"""
别名::class:`fastNLP.io.MNLILoader` :class:`fastNLP.io.dataset_loader.MNLILoader`

Reads the SNLI data set; the loaded DataSet contains the fields::
Reads the MNLI data set; the loaded DataSet contains the fields::

words1: list(str), the first sentence, premise
words2: list(str), the second sentence, hypothesis
@@ -348,6 +369,10 @@ class MNLILoader(MatchingLoader, CSVLoader):
'dev_mismatched': 'dev_mismatched.tsv',
'test_matched': 'test_matched.tsv',
'test_mismatched': 'test_mismatched.tsv',
# 'test_0.9_matched': 'multinli_0.9_test_matched_unlabeled.txt',
# 'test_0.9_mismatched': 'multinli_0.9_test_mismatched_unlabeled.txt',

# test_0.9_matched and test_0.9_mismatched belong to MNLI version 0.9 (data source: kaggle)
}
MatchingLoader.__init__(self, paths=paths)
CSVLoader.__init__(self, sep='\t')
@@ -364,6 +389,10 @@ class MNLILoader(MatchingLoader, CSVLoader):
if k in ds.get_field_names():
ds.rename_field(k, v)

if Const.TARGET in ds.get_field_names():
if ds[0][Const.TARGET] == 'hidden':
ds.delete_field(Const.TARGET)

parentheses_table = str.maketrans({'(': None, ')': None})

ds.apply(lambda ins: ins[Const.INPUTS(0)].translate(parentheses_table).strip().split(),
@@ -376,6 +405,17 @@ class MNLILoader(MatchingLoader, CSVLoader):


class QuoraLoader(MatchingLoader, CSVLoader):
"""
Alias: :class:`fastNLP.io.QuoraLoader` :class:`fastNLP.io.dataset_loader.QuoraLoader`

Reads the Quora data set; the loaded DataSet contains the fields::

words1: list(str), the first sentence, premise
words2: list(str), the second sentence, hypothesis
target: str, the gold label

Data source:
"""

def __init__(self, paths: dict=None):
paths = paths if paths is not None else {


+ 102
- 0
reproduction/matching/matching_bert.py View File

@@ -0,0 +1,102 @@
import random
import numpy as np
import torch

from fastNLP.core import Trainer, Tester, AccuracyMetric, Const, Adam

from reproduction.matching.data.MatchingDataLoader import SNLILoader, RTELoader, \
MNLILoader, QNLILoader, QuoraLoader
from reproduction.matching.model.bert import BertForNLI


# define hyper-parameters
class BERTConfig:

task = 'snli'
batch_size_per_gpu = 6
n_epochs = 6
lr = 2e-5
seq_len_type = 'bert'
seed = 42
train_dataset_name = 'train'
dev_dataset_name = 'dev'
test_dataset_name = 'test'
save_path = None  # where to save the model; None means the model is not saved.
bert_dir = 'path/to/bert/dir'  # folder containing the pretrained BERT parameter files


arg = BERTConfig()

# set random seed
random.seed(arg.seed)
np.random.seed(arg.seed)
torch.manual_seed(arg.seed)

n_gpu = torch.cuda.device_count()
if n_gpu > 0:
torch.cuda.manual_seed_all(arg.seed)

# load data set
if arg.task == 'snli':
data_info = SNLILoader().process(
paths='path/to/snli/data', to_lower=True, seq_len_type=arg.seq_len_type,
bert_tokenizer=arg.bert_dir, cut_text=512,
get_index=True, concat='bert',
)
elif arg.task == 'rte':
data_info = RTELoader().process(
paths='path/to/rte/data', to_lower=True, seq_len_type=arg.seq_len_type,
bert_tokenizer=arg.bert_dir, cut_text=512,
get_index=True, concat='bert',
)
elif arg.task == 'qnli':
data_info = QNLILoader().process(
paths='path/to/qnli/data', to_lower=True, seq_len_type=arg.seq_len_type,
bert_tokenizer=arg.bert_dir, cut_text=512,
get_index=True, concat='bert',
)
elif arg.task == 'mnli':
data_info = MNLILoader().process(
paths='path/to/mnli/data', to_lower=True, seq_len_type=arg.seq_len_type,
bert_tokenizer=arg.bert_dir, cut_text=512,
get_index=True, concat='bert',
)
elif arg.task == 'quora':
data_info = QuoraLoader().process(
paths='path/to/quora/data', to_lower=True, seq_len_type=arg.seq_len_type,
bert_tokenizer=arg.bert_dir, cut_text=512,
get_index=True, concat='bert',
)
else:
raise RuntimeError(f'NOT support {arg.task} task yet!')

# define model
model = BertForNLI(class_num=len(data_info.vocabs[Const.TARGET]), bert_dir=arg.bert_dir)

# define trainer
trainer = Trainer(train_data=data_info.datasets[arg.train_dataset_name], model=model,
optimizer=Adam(lr=arg.lr, model_params=model.parameters()),
batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,
n_epochs=arg.n_epochs, print_every=-1,
dev_data=data_info.datasets[arg.dev_dataset_name],
metrics=AccuracyMetric(), metric_key='acc',
device=[i for i in range(torch.cuda.device_count())],
check_code_level=-1,
save_path=arg.save_path)

# train model
trainer.train(load_best_model=True)

# define tester
tester = Tester(
data=data_info.datasets[arg.test_dataset_name],
model=model,
metrics=AccuracyMetric(),
batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,
device=[i for i in range(torch.cuda.device_count())],
)

# test model
tester.test()



+ 105
- 0
reproduction/matching/matching_cntn.py View File

@@ -0,0 +1,105 @@
import argparse
import torch
import os

from fastNLP.core import Trainer, Tester, Adam, AccuracyMetric, Const
from fastNLP.modules.encoder.embedding import StaticEmbedding

from reproduction.matching.data.MatchingDataLoader import QNLILoader, RTELoader, SNLILoader, MNLILoader
from reproduction.matching.model.cntn import CNTNModel

# define hyper-parameters
argument = argparse.ArgumentParser()
argument.add_argument('--embedding', choices=['glove', 'word2vec'], default='glove')
argument.add_argument('--batch-size-per-gpu', type=int, default=256)
argument.add_argument('--n-epochs', type=int, default=200)
argument.add_argument('--lr', type=float, default=1e-5)
argument.add_argument('--seq-len-type', choices=['mask', 'seq_len'], default='mask')
argument.add_argument('--save-dir', type=str, default=None)
argument.add_argument('--cntn-depth', type=int, default=1)
argument.add_argument('--cntn-ns', type=int, default=200)
argument.add_argument('--cntn-k-top', type=int, default=10)
argument.add_argument('--cntn-r', type=int, default=5)
argument.add_argument('--dataset', choices=['qnli', 'rte', 'snli', 'mnli'], default='qnli')
argument.add_argument('--max-len', type=int, default=50)
arg = argument.parse_args()

# dataset dict
dev_dict = {
'qnli': 'dev',
'rte': 'dev',
'snli': 'dev',
'mnli': 'dev_matched',
}

test_dict = {
'qnli': 'dev',
'rte': 'dev',
'snli': 'test',
'mnli': 'dev_matched',
}

# set num_labels
if arg.dataset == 'qnli' or arg.dataset == 'rte':
num_labels = 2
else:
num_labels = 3

# load data set
if arg.dataset == 'qnli':
data_info = QNLILoader().process(
paths='path/to/qnli/data', to_lower=True, seq_len_type=arg.seq_len_type, bert_tokenizer=None,
get_index=True, concat=False, auto_pad_length=arg.max_len)
elif arg.dataset == 'rte':
data_info = RTELoader().process(
paths='path/to/rte/data', to_lower=True, seq_len_type=arg.seq_len_type, bert_tokenizer=None,
get_index=True, concat=False, auto_pad_length=arg.max_len)
elif arg.dataset == 'snli':
data_info = SNLILoader().process(
paths='path/to/snli/data', to_lower=True, seq_len_type=arg.seq_len_type, bert_tokenizer=None,
get_index=True, concat=False, auto_pad_length=arg.max_len)
elif arg.dataset == 'mnli':
data_info = MNLILoader().process(
paths='path/to/mnli/data', to_lower=True, seq_len_type=arg.seq_len_type, bert_tokenizer=None,
get_index=True, concat=False, auto_pad_length=arg.max_len)
else:
raise ValueError(f'now we only support [qnli,rte,snli,mnli] dataset for cntn model!')

# load embedding
if arg.embedding == 'word2vec':
embedding = StaticEmbedding(data_info.vocabs[Const.INPUT], model_dir_or_name='en-word2vec-300', requires_grad=True)
elif arg.embedding == 'glove':
embedding = StaticEmbedding(data_info.vocabs[Const.INPUT], model_dir_or_name='en-glove-840b-300',
requires_grad=True)
else:
raise ValueError(f'now we only support word2vec or glove embedding for cntn model!')

# define model
model = CNTNModel(embedding, ns=arg.cntn_ns, k_top=arg.cntn_k_top, num_labels=num_labels, depth=arg.cntn_depth,
r=arg.cntn_r)
print(model)

# define trainer
trainer = Trainer(train_data=data_info.datasets['train'], model=model,
optimizer=Adam(lr=arg.lr, model_params=model.parameters()),
batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,
n_epochs=arg.n_epochs, print_every=-1,
dev_data=data_info.datasets[dev_dict[arg.dataset]],
metrics=AccuracyMetric(), metric_key='acc',
device=[i for i in range(torch.cuda.device_count())],
check_code_level=-1)

# train model
trainer.train(load_best_model=True)

# define tester
tester = Tester(
data=data_info.datasets[test_dict[arg.dataset]],
model=model,
metrics=AccuracyMetric(),
batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,
device=[i for i in range(torch.cuda.device_count())]
)

# test model
tester.test()

+ 79
- 23
reproduction/matching/matching_esim.py View File

@@ -1,47 +1,103 @@

import argparse
import random
import numpy as np
import torch
from torch.optim import Adamax
from torch.optim.lr_scheduler import StepLR

from fastNLP.core import Trainer, Tester, Adam, AccuracyMetric, Const
from fastNLP.core import Trainer, Tester, AccuracyMetric, Const
from fastNLP.core.callback import GradientClipCallback, LRScheduler
from fastNLP.modules.encoder.embedding import ElmoEmbedding, StaticEmbedding

from reproduction.matching.data.MatchingDataLoader import SNLILoader
from reproduction.matching.data.MatchingDataLoader import SNLILoader, RTELoader, \
MNLILoader, QNLILoader, QuoraLoader
from reproduction.matching.model.esim import ESIMModel

argument = argparse.ArgumentParser()
argument.add_argument('--embedding', choices=['glove', 'elmo'], default='glove')
argument.add_argument('--batch-size-per-gpu', type=int, default=128)
argument.add_argument('--n-epochs', type=int, default=100)
argument.add_argument('--lr', type=float, default=1e-4)
argument.add_argument('--seq-len-type', choices=['mask', 'seq_len'], default='seq_len')
argument.add_argument('--save-dir', type=str, default=None)
arg = argument.parse_args()

bert_dirs = 'path/to/bert/dir'
# define hyper-parameters
class ESIMConfig:

task = 'snli'
embedding = 'glove'
batch_size_per_gpu = 196
n_epochs = 30
lr = 2e-3
seq_len_type = 'seq_len'
# seq_len means length information is represented by len(words) during process;
# mask means length information is represented by a 0/1 mask matrix;
seed = 42
train_dataset_name = 'train'
dev_dataset_name = 'dev'
test_dataset_name = 'test'
save_path = None  # where to save the model; None means the model is not saved.


arg = ESIMConfig()

# set random seed
random.seed(arg.seed)
np.random.seed(arg.seed)
torch.manual_seed(arg.seed)

n_gpu = torch.cuda.device_count()
if n_gpu > 0:
torch.cuda.manual_seed_all(arg.seed)

# load data set
data_info = SNLILoader().process(
paths='path/to/snli/data/dir', to_lower=True, seq_len_type=arg.seq_len_type, bert_tokenizer=None,
get_index=True, concat=False,
)
if arg.task == 'snli':
data_info = SNLILoader().process(
paths='path/to/snli/data', to_lower=False, seq_len_type=arg.seq_len_type,
get_index=True, concat=False,
)
elif arg.task == 'rte':
data_info = RTELoader().process(
paths='path/to/rte/data', to_lower=False, seq_len_type=arg.seq_len_type,
get_index=True, concat=False,
)
elif arg.task == 'qnli':
data_info = QNLILoader().process(
paths='path/to/qnli/data', to_lower=False, seq_len_type=arg.seq_len_type,
get_index=True, concat=False,
)
elif arg.task == 'mnli':
data_info = MNLILoader().process(
paths='path/to/mnli/data', to_lower=False, seq_len_type=arg.seq_len_type,
get_index=True, concat=False,
)
elif arg.task == 'quora':
data_info = QuoraLoader().process(
paths='path/to/quora/data', to_lower=False, seq_len_type=arg.seq_len_type,
get_index=True, concat=False,
)
else:
raise RuntimeError(f'NOT support {arg.task} task yet!')

# load embedding
if arg.embedding == 'elmo':
embedding = ElmoEmbedding(data_info.vocabs[Const.INPUT], requires_grad=True)
elif arg.embedding == 'glove':
embedding = StaticEmbedding(data_info.vocabs[Const.INPUT], requires_grad=True)
embedding = StaticEmbedding(data_info.vocabs[Const.INPUT], requires_grad=True, normalize=False)
else:
raise ValueError(f'now we only support elmo or glove embedding for esim model!')
raise RuntimeError(f'NOT support {arg.embedding} embedding yet!')

# define model
model = ESIMModel(embedding)
model = ESIMModel(embedding, num_labels=len(data_info.vocabs[Const.TARGET]))

# define optimizer and callback
optimizer = Adamax(lr=arg.lr, params=model.parameters())
scheduler = StepLR(optimizer, step_size=10, gamma=0.5)  # every 10 epochs the learning rate is multiplied by 0.5

callbacks = [
GradientClipCallback(clip_value=10),  # equivalent to torch.nn.utils.clip_grad_norm_(10)
LRScheduler(scheduler),
]

# define trainer
trainer = Trainer(train_data=data_info.datasets['train'], model=model,
optimizer=Adam(lr=arg.lr, model_params=model.parameters()),
trainer = Trainer(train_data=data_info.datasets[arg.train_dataset_name], model=model,
optimizer=optimizer,
batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,
n_epochs=arg.n_epochs, print_every=-1,
dev_data=data_info.datasets['dev'],
dev_data=data_info.datasets[arg.dev_dataset_name],
metrics=AccuracyMetric(), metric_key='acc',
device=[i for i in range(torch.cuda.device_count())],
check_code_level=-1,
@@ -52,7 +108,7 @@ trainer.train(load_best_model=True)

# define tester
tester = Tester(
data=data_info.datasets['test'],
data=data_info.datasets[arg.test_dataset_name],
model=model,
metrics=AccuracyMetric(),
batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,


+ 120
- 0
reproduction/matching/model/cntn.py View File

@@ -0,0 +1,120 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from torch.nn import CrossEntropyLoss

from fastNLP.models import BaseModel
from fastNLP.modules.encoder.embedding import TokenEmbedding
from fastNLP.core.const import Const


class DynamicKMaxPooling(nn.Module):
"""
:param k_top: Fixed number of pooling output features for the topmost convolutional layer.
:param l: Number of convolutional layers.
"""

def __init__(self, k_top, l):
super(DynamicKMaxPooling, self).__init__()
self.k_top = k_top
self.L = l

def forward(self, x, l):
"""
:param x: Input sequence.
:param l: Index of the current convolutional layer.
"""
s = x.size()[3]
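# dynamic k: shrink linearly from the sequence length s towards k_top as the layer index l approaches L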
k_ll = ((self.L - l) / self.L) * s
k_l = int(round(max(self.k_top, np.ceil(k_ll))))
out = F.adaptive_max_pool2d(x, (x.size()[2], k_l))
return out


class CNTNModel(BaseModel):
"""
A CNN-based model for question-answer matching
'Qiu, Xipeng, and Xuanjing Huang.
Convolutional neural tensor network architecture for community-based question answering.
Twenty-Fourth International Joint Conference on Artificial Intelligence. 2015.'

:param init_embedding: Embedding.
:param ns: Sentence embedding size.
:param k_top: Fixed number of pooling output features for the topmost convolutional layer.
:param num_labels: Number of labels.
:param depth: Number of convolutional layers.
:param r: Number of weight tensor slices.
:param drop_rate: Dropout rate.
"""

def __init__(self, init_embedding: TokenEmbedding, ns=200, k_top=10, num_labels=2, depth=2, r=5,
dropout_rate=0.3):
super(CNTNModel, self).__init__()
self.embedding = init_embedding
self.depth = depth
self.kmaxpooling = DynamicKMaxPooling(k_top, depth)
self.conv_q = nn.ModuleList()
self.conv_a = nn.ModuleList()
width = self.embedding.embed_size
for i in range(depth):
self.conv_q.append(nn.Sequential(
nn.Dropout(p=dropout_rate),
nn.Conv2d(
in_channels=1,
out_channels=width // 2,
kernel_size=(width, 3),
padding=(0, 2))
))
self.conv_a.append(nn.Sequential(
nn.Dropout(p=dropout_rate),
nn.Conv2d(
in_channels=1,
out_channels=width // 2,
kernel_size=(width, 3),
padding=(0, 2))
))
width = width // 2

self.fc_q = nn.Sequential(nn.Dropout(p=dropout_rate), nn.Linear(width * k_top, ns))
self.fc_a = nn.Sequential(nn.Dropout(p=dropout_rate), nn.Linear(width * k_top, ns))
self.weight_M = nn.Bilinear(ns, ns, r)
self.weight_V = nn.Linear(2 * ns, r)
self.weight_u = nn.Sequential(nn.Dropout(p=dropout_rate), nn.Linear(r, num_labels))

def forward(self, words1, words2, seq_len1, seq_len2, target=None):
"""
:param words1: [batch, seq_len, emb_size] Question.
:param words2: [batch, seq_len, emb_size] Answer.
:param seq_len1: [batch]
:param seq_len2: [batch]
:param target: [batch] Gold labels.
:return:
"""
in_q = self.embedding(words1)
in_a = self.embedding(words2)
in_q = in_q.permute(0, 2, 1).unsqueeze(1)
in_a = in_a.permute(0, 2, 1).unsqueeze(1)

for i in range(self.depth):
in_q = F.relu(self.conv_q[i](in_q))
in_q = in_q.squeeze().unsqueeze(1)
in_q = self.kmaxpooling(in_q, i + 1)
in_a = F.relu(self.conv_a[i](in_a))
in_a = in_a.squeeze().unsqueeze(1)
in_a = self.kmaxpooling(in_a, i + 1)

in_q = self.fc_q(in_q.view(in_q.size(0), -1))
in_a = self.fc_a(in_a.view(in_a.size(0), -1))
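# neural tensor scoring: bilinear interaction (weight_M) plus a linear term over the concatenated vectors (weight_V), projected to label scores by weight_u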
score = torch.tanh(self.weight_u(self.weight_M(in_q, in_a) + self.weight_V(torch.cat((in_q, in_a), -1))))

if target is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(score, target)
return {Const.LOSS: loss, Const.OUTPUT: score}
else:
return {Const.OUTPUT: score}

def predict(self, **kwargs):
return self.forward(**kwargs)

+ 4
- 2
reproduction/matching/model/esim.py View File

@@ -81,6 +81,7 @@ class ESIMModel(BaseModel):

out = torch.cat((a_avg, a_max, b_avg, b_max), dim=1) # v: [B, 8 * H]
logits = torch.tanh(self.classifier(out))
# logits = self.classifier(out)

if target is not None:
loss_fct = CrossEntropyLoss()
@@ -91,7 +92,8 @@ class ESIMModel(BaseModel):
return {Const.OUTPUT: logits}

def predict(self, **kwargs):
return self.forward(**kwargs)
pred = self.forward(**kwargs)[Const.OUTPUT].argmax(-1)
return {Const.OUTPUT: pred}

# input [batch_size, len , hidden]
# mask [batch_size, len] (111...00)
@@ -127,7 +129,7 @@ class BiRNN(nn.Module):

def forward(self, x, x_mask):
# Sort x
lengths = x_mask.data.eq(1).long().sum(1).squeeze()
lengths = x_mask.data.eq(1).long().sum(1)
_, idx_sort = torch.sort(lengths, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
lengths = list(lengths[idx_sort])


+ 13
- 0
reproduction/seqence_labelling/ner/README.md View File

@@ -0,0 +1,13 @@
# NER task model reproduction
The classic BiLSTM-CNN model for NER is reproduced here with fastNLP, aiming to match the performance reported in the paper.

Paper: [Named Entity Recognition with Bidirectional LSTM-CNNs](https://arxiv.org/pdf/1511.08308.pdf)

# Datasets and summary of reproduced results

Results reproduced with fastNLP vs results reported in the paper (the number before '/' is the fastNLP implementation, the one after is reported by the paper)

model name | Conll2003 | Ontonotes
:---: | :---: | :---:
BiLSTM-CNN | 91.17/90.91 | 86.47/86.35 |


+ 0
- 93
reproduction/seqence_labelling/ner/data/Conll2003Loader.py View File

@@ -1,93 +0,0 @@

from fastNLP.core.vocabulary import VocabularyOption
from fastNLP.io.base_loader import DataSetLoader, DataInfo
from typing import Union, Dict
from fastNLP import Vocabulary
from fastNLP import Const
from reproduction.utils import check_dataloader_paths

from fastNLP.io.dataset_loader import ConllLoader
from reproduction.seqence_labelling.ner.data.utils import iob2bioes, iob2


class Conll2003DataLoader(DataSetLoader):
def __init__(self, task:str='ner', encoding_type:str='bioes'):
"""
Loads English corpora in the Conll2003 format; information about the data set can be found at https://www.clips.uantwerpen.be/conll2003/ner/. When task is pos,
the target in the returned DataSet comes from column 2; when task is chunk, the target comes from column 3; when task is ner, the target
comes from column 4. All "-DOCSTART- -X- O O" lines are ignored, which makes the number of examples smaller than reported in many papers; since
"-DOCSTART- -X- O O" is only a document-separator symbol and should not be a prediction target, lines starting with -DOCSTART- are skipped.
For the ner and chunk tasks the loaded target uses encoding_type; for the pos task it is simply the pos column.

:param task: the annotation task to use. One of ner, pos, chunk
"""
assert task in ('ner', 'pos', 'chunk')
index = {'ner':3, 'pos':1, 'chunk':2}[task]
self._loader = ConllLoader(headers=['raw_words', 'target'], indexes=[0, index])
self._tag_converters = None
if task in ('ner', 'chunk'):
self._tag_converters = [iob2]
if encoding_type == 'bioes':
self._tag_converters.append(iob2bioes)

def load(self, path: str):
dataset = self._loader.load(path)
def convert_tag_schema(tags):
for converter in self._tag_converters:
tags = converter(tags)
return tags
if self._tag_converters:
dataset.apply_field(convert_tag_schema, field_name=Const.TARGET, new_field_name=Const.TARGET)
return dataset

def process(self, paths: Union[str, Dict[str, str]], word_vocab_opt:VocabularyOption=None, lower:bool=True):
"""
Reads and processes the data. Lines starting with '-DOCSTART-' are ignored.

:param paths:
:param word_vocab_opt: initialization options for the vocabulary
:param lower: whether to lowercase all letters
:return:
"""
# read the data
paths = check_dataloader_paths(paths)
data = DataInfo()
input_fields = [Const.TARGET, Const.INPUT, Const.INPUT_LEN]
target_fields = [Const.TARGET, Const.INPUT_LEN]
for name, path in paths.items():
dataset = self.load(path)
dataset.apply_field(lambda words: words, field_name='raw_words', new_field_name=Const.INPUT)
if lower:
dataset.words.lower()
data.datasets[name] = dataset

# construct the word vocabulary
word_vocab = Vocabulary(min_freq=2) if word_vocab_opt is None else Vocabulary(**word_vocab_opt)
word_vocab.from_dataset(data.datasets['train'], field_name=Const.INPUT,
no_create_entry_dataset=[dataset for name, dataset in data.datasets.items() if name!='train'])
word_vocab.index_dataset(*data.datasets.values(), field_name=Const.INPUT, new_field_name=Const.INPUT)
data.vocabs[Const.INPUT] = word_vocab

# cap words
cap_word_vocab = Vocabulary()
cap_word_vocab.from_dataset(data.datasets['train'], field_name='raw_words',
no_create_entry_dataset=[dataset for name, dataset in data.datasets.items() if name!='train'])
cap_word_vocab.index_dataset(*data.datasets.values(), field_name='raw_words', new_field_name='cap_words')
input_fields.append('cap_words')
data.vocabs['cap_words'] = cap_word_vocab

# build the target vocabulary
target_vocab = Vocabulary(unknown=None, padding=None)
target_vocab.from_dataset(*data.datasets.values(), field_name=Const.TARGET)
target_vocab.index_dataset(*data.datasets.values(), field_name=Const.TARGET)
data.vocabs[Const.TARGET] = target_vocab

for name, dataset in data.datasets.items():
dataset.add_seq_len(Const.INPUT, new_field_name=Const.INPUT_LEN)
dataset.set_input(*input_fields)
dataset.set_target(*target_fields)

return data

if __name__ == '__main__':
pass

+ 0
- 152
reproduction/seqence_labelling/ner/data/OntoNoteLoader.py View File

@@ -1,152 +0,0 @@
from fastNLP.core.vocabulary import VocabularyOption
from fastNLP.io.base_loader import DataSetLoader, DataInfo
from typing import Union, Dict
from fastNLP import DataSet
from fastNLP import Vocabulary
from fastNLP import Const
from reproduction.utils import check_dataloader_paths

from fastNLP.io.dataset_loader import ConllLoader
from reproduction.seqence_labelling.ner.data.utils import iob2bioes, iob2

class OntoNoteNERDataLoader(DataSetLoader):
"""
Reads OntoNote data that has been converted to Conll format. See https://github.com/yhcc/OntoNotes-5.0-NER for how to convert OntoNote data to conll format.

"""
def __init__(self, encoding_type:str='bioes'):
assert encoding_type in ('bioes', 'bio')
self.encoding_type = encoding_type
if encoding_type=='bioes':
self.encoding_method = iob2bioes
else:
self.encoding_method = iob2

def load(self, path:str)->DataSet:
"""
Given a file path, reads the data. The returned DataSet contains the following fields:
raw_words: List[str]
target: List[str]

:param path:
:return:
"""
dataset = ConllLoader(headers=['raw_words', 'target'], indexes=[3, 10]).load(path)
def convert_to_bio(tags):
bio_tags = []
flag = None
for tag in tags:
label = tag.strip("()*")
if '(' in tag:
bio_label = 'B-' + label
flag = label
elif flag:
bio_label = 'I-' + flag
else:
bio_label = 'O'
if ')' in tag:
flag = None
bio_tags.append(bio_label)
return self.encoding_method(bio_tags)

def convert_word(words):
converted_words = []
for word in words:
word = word.replace('/.', '.') # some words end with /. instead of .
if not word.startswith('-'):
converted_words.append(word)
continue
# the following symbols were escaped; convert them back
tfrs = {'-LRB-':'(',
'-RRB-': ')',
'-LSB-': '[',
'-RSB-': ']',
'-LCB-': '{',
'-RCB-': '}'
}
if word in tfrs:
converted_words.append(tfrs[word])
else:
converted_words.append(word)
return converted_words

dataset.apply_field(convert_word, field_name='raw_words', new_field_name='raw_words')
dataset.apply_field(convert_to_bio, field_name='target', new_field_name='target')

return dataset

def process(self, paths: Union[str, Dict[str, str]], word_vocab_opt:VocabularyOption=None,
lower:bool=True)->DataInfo:
"""
Reads and processes the data. The returned DataInfo contains the following:
vocabs:
word: Vocabulary
target: Vocabulary
datasets:
train: DataSet
words: List[int], set as input
target: int. the label, set as both input and target
seq_len: int. sentence length, set as both input and target
raw_words: List[str]
xxx (may vary depending on the given paths)

:param paths:
:param word_vocab_opt: initialization options for the vocabulary
:param lower: whether to lowercase
:return:
"""
paths = check_dataloader_paths(paths)
data = DataInfo()
input_fields = [Const.TARGET, Const.INPUT, Const.INPUT_LEN]
target_fields = [Const.TARGET, Const.INPUT_LEN]
for name, path in paths.items():
dataset = self.load(path)
dataset.apply_field(lambda words: words, field_name='raw_words', new_field_name=Const.INPUT)
if lower:
dataset.words.lower()
data.datasets[name] = dataset

# construct the word vocabulary
word_vocab = Vocabulary(min_freq=2) if word_vocab_opt is None else Vocabulary(**word_vocab_opt)
word_vocab.from_dataset(data.datasets['train'], field_name=Const.INPUT,
no_create_entry_dataset=[dataset for name, dataset in data.datasets.items() if name!='train'])
word_vocab.index_dataset(*data.datasets.values(), field_name=Const.INPUT, new_field_name=Const.INPUT)
data.vocabs[Const.INPUT] = word_vocab

# cap words
cap_word_vocab = Vocabulary()
cap_word_vocab.from_dataset(*data.datasets.values(), field_name='raw_words')
cap_word_vocab.index_dataset(*data.datasets.values(), field_name='raw_words', new_field_name='cap_words')
input_fields.append('cap_words')
data.vocabs['cap_words'] = cap_word_vocab

# build the target vocabulary
target_vocab = Vocabulary(unknown=None, padding=None)
target_vocab.from_dataset(*data.datasets.values(), field_name=Const.TARGET)
target_vocab.index_dataset(*data.datasets.values(), field_name=Const.TARGET)
data.vocabs[Const.TARGET] = target_vocab

for name, dataset in data.datasets.items():
dataset.add_seq_len(Const.INPUT, new_field_name=Const.INPUT_LEN)
dataset.set_input(*input_fields)
dataset.set_target(*target_fields)

return data


if __name__ == '__main__':
loader = OntoNoteNERDataLoader()
dataset = loader.load('/hdd/fudanNLP/fastNLP/others/data/v4/english/test.txt')
print(dataset.target.value_count())
print(dataset[:4])


"""
train 115812 2200752
development 15680 304684
test 12217 230111

train 92403 1901772
valid 13606 279180
test 10258 204135
"""

+ 0
- 49
reproduction/seqence_labelling/ner/data/utils.py View File

@@ -1,49 +0,0 @@
from typing import List

def iob2(tags:List[str])->List[str]:
"""
Checks whether the tags are valid IOB data; IOB1 is automatically converted to IOB2.

:param tags: the tags to convert
"""
for i, tag in enumerate(tags):
if tag == "O":
continue
split = tag.split("-")
if len(split) != 2 or split[0] not in ["I", "B"]:
raise TypeError("The encoding schema is not a valid IOB type.")
if split[0] == "B":
continue
elif i == 0 or tags[i - 1] == "O": # conversion IOB1 to IOB2
tags[i] = "B" + tag[1:]
elif tags[i - 1][1:] == tag[1:]:
continue
else: # conversion IOB1 to IOB2
tags[i] = "B" + tag[1:]
return tags

def iob2bioes(tags:List[str])->List[str]:
"""
Converts IOB tags to the bmeso (BIOES) encoding
:param tags:
:return:
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
else:
split = tag.split('-')[0]
if split == 'B':
if i+1!=len(tags) and tags[i+1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('B-', 'S-'))
elif split == 'I':
if i + 1<len(tags) and tags[i+1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('I-', 'E-'))
else:
raise TypeError("Invalid IOB format.")
return new_tags

+ 142
- 0
reproduction/seqence_labelling/ner/model/dilated_cnn.py View File

@@ -0,0 +1,142 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastNLP.modules.decoder import ConditionalRandomField
from fastNLP.modules.encoder import Embedding
from fastNLP.core.utils import seq_len_to_mask
from fastNLP.core.const import Const as C


class IDCNN(nn.Module):
def __init__(self,
init_embed,
char_embed,
num_cls,
repeats, num_layers, num_filters, kernel_size,
use_crf=False, use_projection=False, block_loss=False,
input_dropout=0.3, hidden_dropout=0.2, inner_dropout=0.0):
super(IDCNN, self).__init__()
self.word_embeddings = Embedding(init_embed)

if char_embed is None:
self.char_embeddings = None
embedding_size = self.word_embeddings.embedding_dim
else:
self.char_embeddings = Embedding(char_embed)
embedding_size = self.word_embeddings.embedding_dim + \
self.char_embeddings.embedding_dim

self.conv0 = nn.Sequential(
nn.Conv1d(in_channels=embedding_size,
out_channels=num_filters,
kernel_size=kernel_size,
stride=1, dilation=1,
padding=kernel_size//2,
bias=True),
nn.ReLU(),
)

block = []
for layer_i in range(num_layers):
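# dilation doubles with depth (1, 2, 4, ...); the last layer of the block falls back to dilation 1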
dilated = 2 ** layer_i if layer_i+1 < num_layers else 1
block.append(nn.Conv1d(
in_channels=num_filters,
out_channels=num_filters,
kernel_size=kernel_size,
stride=1, dilation=dilated,
padding=(kernel_size//2) * dilated,
bias=True))
block.append(nn.ReLU())
self.block = nn.Sequential(*block)

if use_projection:
self.projection = nn.Sequential(
nn.Conv1d(
in_channels=num_filters,
out_channels=num_filters//2,
kernel_size=1,
bias=True),
nn.ReLU(),)
encode_dim = num_filters // 2
else:
self.projection = None
encode_dim = num_filters

self.input_drop = nn.Dropout(input_dropout)
self.hidden_drop = nn.Dropout(hidden_dropout)
self.inner_drop = nn.Dropout(inner_dropout)
self.repeats = repeats
self.out_fc = nn.Conv1d(
in_channels=encode_dim,
out_channels=num_cls,
kernel_size=1,
bias=True)
self.crf = ConditionalRandomField(
num_tags=num_cls) if use_crf else None
self.block_loss = block_loss
self.reset_parameters()

def reset_parameters(self):
for m in self.modules():
if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Linear)):
nn.init.xavier_normal_(m.weight, gain=1)
if m.bias is not None:
nn.init.normal_(m.bias, mean=0, std=0.01)

def forward(self, words, seq_len, target=None, chars=None):
if self.char_embeddings is None:
x = self.word_embeddings(words)
else:
if chars is None:
raise ValueError('must provide chars for model with char embedding')
e1 = self.word_embeddings(words)
e2 = self.char_embeddings(chars)
x = torch.cat((e1, e2), dim=-1) # b,l,h
mask = seq_len_to_mask(seq_len)

x = x.transpose(1, 2) # b,h,l
last_output = self.conv0(x)
output = []
for repeat in range(self.repeats):
last_output = self.block(last_output)
hidden = self.projection(last_output) if self.projection is not None else last_output
output.append(self.out_fc(hidden))

def compute_loss(y, t, mask):
if self.crf is not None and target is not None:
loss = self.crf(y.transpose(1, 2), t, mask)
else:
t.masked_fill_(mask == 0, -100)
loss = F.cross_entropy(y, t, ignore_index=-100)
return loss

if target is not None:
if self.block_loss:
losses = [compute_loss(o, target, mask) for o in output]
loss = sum(losses)
else:
loss = compute_loss(output[-1], target, mask)
else:
loss = None

scores = output[-1]
if self.crf is not None:
pred, _ = self.crf.viterbi_decode(scores.transpose(1, 2), mask)
else:
pred = scores.max(1)[1] * mask.long()

return {
C.LOSS: loss,
C.OUTPUT: pred,
}

def predict(self, words, seq_len, chars=None):
res = self.forward(
words=words,
seq_len=seq_len,
chars=chars,
target=None
)[C.OUTPUT]
return {
C.OUTPUT: res
}

+ 99
- 0
reproduction/seqence_labelling/ner/train_idcnn.py View File

@@ -0,0 +1,99 @@
from reproduction.seqence_labelling.ner.data.OntoNoteLoader import OntoNoteNERDataLoader
from fastNLP.core.callback import FitlogCallback, LRScheduler
from fastNLP import GradientClipCallback
from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR
from torch.optim import SGD, Adam
from fastNLP import Const
from fastNLP import RandomSampler, BucketSampler
from fastNLP import SpanFPreRecMetric
from fastNLP import Trainer
from reproduction.seqence_labelling.ner.model.dilated_cnn import IDCNN
from fastNLP.core.utils import Option
from fastNLP.modules.encoder.embedding import CNNCharEmbedding, StaticEmbedding
from fastNLP.core.utils import cache_results
import sys
import torch.cuda
import os
os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/'
os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"

encoding_type = 'bioes'


def get_path(path):
return os.path.join(os.environ['HOME'], path)

data_path = get_path('workdir/datasets/ontonotes-v4')

ops = Option(
batch_size=128,
num_epochs=100,
lr=3e-4,
repeats=3,
num_layers=3,
num_filters=400,
use_crf=True,
gradient_clip=5,
)

@cache_results('ontonotes-cache')
def load_data():

data = OntoNoteNERDataLoader(encoding_type=encoding_type).process(data_path,
lower=True)

# char_embed = CNNCharEmbedding(vocab=data.vocabs['cap_words'], embed_size=30, char_emb_size=30, filter_nums=[30],
# kernel_sizes=[3])

word_embed = StaticEmbedding(vocab=data.vocabs[Const.INPUT],
model_dir_or_name='en-glove-840b-300',
requires_grad=True)
return data, [word_embed]

data, embeds = load_data()
print(data.datasets['train'][0])
print(list(data.vocabs.keys()))

for ds in data.datasets.values():
ds.rename_field('cap_words', 'chars')
ds.set_input('chars')

word_embed = embeds[0]
char_embed = CNNCharEmbedding(data.vocabs['cap_words'])
# for ds in data.datasets:
# ds.rename_field('')

print(data.vocabs[Const.TARGET].word2idx)

model = IDCNN(init_embed=word_embed,
char_embed=char_embed,
num_cls=len(data.vocabs[Const.TARGET]),
repeats=ops.repeats,
num_layers=ops.num_layers,
num_filters=ops.num_filters,
kernel_size=3,
use_crf=ops.use_crf, use_projection=True,
block_loss=True,
input_dropout=0.33, hidden_dropout=0.2, inner_dropout=0.2)

print(model)

callbacks = [GradientClipCallback(clip_value=ops.gradient_clip, clip_type='norm'),]

optimizer = Adam(model.parameters(), lr=ops.lr, weight_decay=0)
# scheduler = LRScheduler(LambdaLR(optimizer, lr_lambda=lambda epoch: 1 / (1 + 0.05 * epoch)))
# callbacks.append(LRScheduler(CosineAnnealingLR(optimizer, 15)))
# optimizer = SWATS(model.parameters(), verbose=True)
# optimizer = Adam(model.parameters(), lr=0.005)

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

trainer = Trainer(train_data=data.datasets['train'], model=model, optimizer=optimizer,
sampler=BucketSampler(num_buckets=50, batch_size=ops.batch_size),
device=device, dev_data=data.datasets['dev'], batch_size=ops.batch_size,
metrics=SpanFPreRecMetric(
tag_vocab=data.vocabs[Const.TARGET], encoding_type=encoding_type),
check_code_level=-1,
callbacks=callbacks, num_workers=2, n_epochs=ops.num_epochs)
trainer.train()

+ 26
- 0
reproduction/text_classification/README.md View File

@@ -0,0 +1,26 @@
# text_classification task model reproduction
The following models are reproduced here with fastNLP:

char_cnn: paper [Character-level Convolutional Networks for Text Classification](https://arxiv.org/pdf/1509.01626v3.pdf)

dpcnn: paper [Deep Pyramid Convolutional Neural Networks for Text Categorization](https://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf)

HAN: paper [Hierarchical Attention Networks for Document Classification](https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf)

LSTM+self_attention: paper [A Structured Self-attentive Sentence Embedding](<https://arxiv.org/pdf/1703.03130.pdf>)

AWD-LSTM: paper [Regularizing and Optimizing LSTM Language Models](<https://arxiv.org/pdf/1708.02182.pdf>)

# Datasets and summary of reproduced results

Results reproduced with fastNLP vs results reported in the papers (the number before '/' is the fastNLP implementation, the one after is reported by the paper; '-' means the paper did not report a result on that data set)

model name | yelp_p | yelp_f | sst-2 | IMDB
:---: | :---: | :---: | :---: | :---:
char_cnn | 93.80/95.12 | - | - |-
dpcnn | 95.50/97.36 | - | - |-
HAN |- | - | - |-
LSTM| 95.74/- |64.16/- |- |88.52/-
AWD-LSTM| 95.96/- |64.74/- |- |88.91/-
LSTM+self_attention| 96.34/- | 65.78/- | - |89.53/-


+ 114
- 0
reproduction/text_classification/data/IMDBLoader.py View File

@@ -0,0 +1,114 @@
from fastNLP.io.embed_loader import EmbeddingOption, EmbedLoader
from fastNLP.core.vocabulary import VocabularyOption
from fastNLP.io.base_loader import DataSetLoader, DataInfo
from typing import Union, Dict, List, Iterator
from fastNLP import DataSet
from fastNLP import Instance
from fastNLP import Vocabulary
from fastNLP import Const
# from reproduction.utils import check_dataloader_paths
from functools import partial
from reproduction.utils import check_dataloader_paths, get_tokenizer


class IMDBLoader(DataSetLoader):
"""
Reads the IMDB data set; the DataSet contains the following fields:

words: list(str), the text to classify
target: str, the label of the text


"""

def __init__(self):
super(IMDBLoader, self).__init__()
self.tokenizer = get_tokenizer()

def _load(self, path):
dataset = DataSet()
with open(path, 'r', encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line:
continue
parts = line.split('\t')
target = parts[0]
words = self.tokenizer(parts[1].lower())
dataset.append(Instance(words=words, target=target))

if len(dataset)==0:
raise RuntimeError(f"{path} has no valid data.")

return dataset
def process(self,
paths: Union[str, Dict[str, str]],
src_vocab_opt: VocabularyOption = None,
tgt_vocab_opt: VocabularyOption = None,
src_embed_opt: EmbeddingOption = None,
char_level_op=False):
datasets = {}
info = DataInfo()
for name, path in paths.items():
dataset = self.load(path)
datasets[name] = dataset

def wordtochar(words):
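# split each word into lowercase characters, inserting an empty string as a separator between words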
chars = []
for word in words:
word = word.lower()
for char in word:
chars.append(char)
chars.append('')
chars.pop()
return chars

if char_level_op:
for dataset in datasets.values():
dataset.apply_field(wordtochar, field_name="words", new_field_name='chars')

datasets["train"], datasets["dev"] = datasets["train"].split(0.1, shuffle=False)

src_vocab = Vocabulary() if src_vocab_opt is None else Vocabulary(**src_vocab_opt)
src_vocab.from_dataset(datasets['train'], field_name='words')

src_vocab.index_dataset(*datasets.values(), field_name='words')

tgt_vocab = Vocabulary(unknown=None, padding=None) \
if tgt_vocab_opt is None else Vocabulary(**tgt_vocab_opt)
tgt_vocab.from_dataset(datasets['train'], field_name='target')
tgt_vocab.index_dataset(*datasets.values(), field_name='target')

info.vocabs = {
"words": src_vocab,
"target": tgt_vocab
}

info.datasets = datasets

if src_embed_opt is not None:
embed = EmbedLoader.load_with_vocab(**src_embed_opt, vocab=src_vocab)
info.embeddings['words'] = embed

for name, dataset in info.datasets.items():
dataset.set_input("words")
dataset.set_target("target")

return info



if __name__=="__main__":
datapath = {"train": "/remote-home/ygwang/IMDB_data/train.csv",
"test": "/remote-home/ygwang/IMDB_data/test.csv"}
datainfo=IMDBLoader().process(datapath,char_level_op=True)
#print(datainfo.datasets["train"])
len_count = 0
for instance in datainfo.datasets["train"]:
len_count += len(instance["chars"])

ave_len = len_count / len(datainfo.datasets["train"])
print(ave_len)


+ 5
- 1
reproduction/text_classification/data/MTL16Loader.py View File

@@ -32,7 +32,7 @@ class MTL16Loader(DataSetLoader):
continue
parts = line.split('\t')
target = parts[0]
words = parts[1].split()
words = parts[1].lower().split()
dataset.append(Instance(words=words, target=target))
if len(dataset)==0:
raise RuntimeError(f"{path} has no valid data.")
@@ -72,4 +72,8 @@ class MTL16Loader(DataSetLoader):
embed = EmbedLoader.load_with_vocab(**src_embed_opt, vocab=src_vocab)
info.embeddings['words'] = embed

for name, dataset in info.datasets.items():
dataset.set_input("words")
dataset.set_target("target")

return info

+ 198
- 0
reproduction/text_classification/data/sstloader.py View File

@@ -0,0 +1,198 @@
from typing import Iterable
from nltk import Tree
from fastNLP.io.base_loader import DataInfo, DataSetLoader
from fastNLP.core.vocabulary import VocabularyOption, Vocabulary
from fastNLP import DataSet
from fastNLP import Instance
from fastNLP.io.embed_loader import EmbeddingOption, EmbedLoader
import csv
from typing import Union, Dict
from reproduction.utils import check_dataloader_paths, get_tokenizer


class SSTLoader(DataSetLoader):
URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'
DATA_DIR = 'sst/'

"""
Alias: :class:`fastNLP.io.SSTLoader` :class:`fastNLP.io.dataset_loader.SSTLoader`
Reads the SST data set; the DataSet contains fields::
words: list(str) the text to classify
target: str the label of the text
Data source: https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip
:param subtree: whether to expand the data into subtrees to enlarge the data set. Default: ``False``
:param fine_grained: whether to use the SST-5 standard; if ``False``, SST-2 is used. Default: ``False``
"""
def __init__(self, subtree=False, fine_grained=False):
self.subtree = subtree
tag_v = {'0': 'very negative', '1': 'negative', '2': 'neutral',
'3': 'positive', '4': 'very positive'}
if not fine_grained:
tag_v['0'] = tag_v['1']
tag_v['4'] = tag_v['3']
self.tag_v = tag_v

def _load(self, path):
"""
:param str path: path to the data file
:return: an object of type :class:`~fastNLP.DataSet`
"""
with open(path, 'r', encoding='utf-8') as f:
datas = []
for l in f:
datas.extend([(s, self.tag_v[t])
for s, t in self._get_one(l, self.subtree)])
ds = DataSet()
for words, tag in datas:
ds.append(Instance(words=words, target=tag))
return ds


@staticmethod
def _get_one(data, subtree):
tree = Tree.fromstring(data)
if subtree:
return [(t.leaves(), t.label()) for t in tree.subtrees()]
return [(tree.leaves(), tree.label())]
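# For illustration only (a made-up SST-style line):
# _get_one("(3 (2 It) (4 (2 's) (4 good)))", subtree=False) -> [(['It', "'s", 'good'], '3')]
# with subtree=True every subtree contributes its own (leaves, label) pair, which is how
# the data is enlarged; _load then maps the raw label ('3') through tag_v to 'positive'.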


def process(self,
paths,
train_ds: Iterable[str] = None,
src_vocab_op: VocabularyOption = None,
tgt_vocab_op: VocabularyOption = None,
src_embed_op: EmbeddingOption = None):
input_name, target_name = 'words', 'target'
src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op)
tgt_vocab = Vocabulary(unknown=None, padding=None) \
if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op)

info = DataInfo(datasets=self.load(paths))
_train_ds = [info.datasets[name]
for name in train_ds] if train_ds else info.datasets.values()
src_vocab.from_dataset(*_train_ds, field_name=input_name)
tgt_vocab.from_dataset(*_train_ds, field_name=target_name)
src_vocab.index_dataset(
*info.datasets.values(),
field_name=input_name, new_field_name=input_name)
tgt_vocab.index_dataset(
*info.datasets.values(),
field_name=target_name, new_field_name=target_name)
info.vocabs = {
input_name: src_vocab,
target_name: tgt_vocab
}


if src_embed_op is not None:
src_embed_op.vocab = src_vocab
init_emb = EmbedLoader.load_with_vocab(**src_embed_op)
info.embeddings[input_name] = init_emb


for name, dataset in info.datasets.items():
dataset.set_input(input_name)
dataset.set_target(target_name)
return info
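# A minimal usage sketch; the file paths below are hypothetical:
# loader = SSTLoader(subtree=True, fine_grained=True)
# info = loader.process({'train': 'sst/train.txt', 'dev': 'sst/dev.txt', 'test': 'sst/test.txt'})
# print(len(info.vocabs['words']), len(info.vocabs['target']))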



class sst2Loader(DataSetLoader):
'''
Data source "SST-2": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8'
'''

def __init__(self):
super(sst2Loader, self).__init__()
self.tokenizer = get_tokenizer()


def _load(self, path: str) -> DataSet:
ds = DataSet()
all_count = 0
csv_reader = csv.reader(open(path, encoding='utf-8'), delimiter='\t')
skip_row = 0
for idx, row in enumerate(csv_reader):
# skip the header row of the TSV file
if idx <= skip_row:
continue
target = row[1]
words = self.tokenizer(row[0])
ds.append(Instance(words=words, target=target))
all_count += 1
print("all count:", all_count)
return ds
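# Judging from the indexing above, the expected TSV layout is the GLUE SST-2 one,
# i.e. "sentence\tlabel" per line after the header:
# "a charming movie\t1" -> words=['a', 'charming', 'movie'], target='1'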



def process(self,
paths: Union[str, Dict[str, str]],
src_vocab_opt: VocabularyOption = None,
tgt_vocab_opt: VocabularyOption = None,
src_embed_opt: EmbeddingOption = None,
char_level_op=False):

paths = check_dataloader_paths(paths)
datasets = {}
info = DataInfo()
for name, path in paths.items():
dataset = self.load(path)
datasets[name] = dataset

def wordtochar(words):
chars = []
for word in words:
word = word.lower()
for char in word:
chars.append(char)
chars.append('')
chars.pop()
return chars

input_name, target_name = 'words', 'target'
info.vocabs = {}

# optionally split each word into characters and store them in a new "chars" field
if char_level_op:
for dataset in datasets.values():
dataset.apply_field(wordtochar, field_name="words", new_field_name='chars')
src_vocab = Vocabulary() if src_vocab_opt is None else Vocabulary(**src_vocab_opt)
src_vocab.from_dataset(datasets['train'], field_name='words')
src_vocab.index_dataset(*datasets.values(), field_name='words')

tgt_vocab = Vocabulary(unknown=None, padding=None) \
if tgt_vocab_opt is None else Vocabulary(**tgt_vocab_opt)
tgt_vocab.from_dataset(datasets['train'], field_name='target')
tgt_vocab.index_dataset(*datasets.values(), field_name='target')


info.vocabs = {
"words": src_vocab,
"target": tgt_vocab
}

info.datasets = datasets

if src_embed_opt is not None:
embed = EmbedLoader.load_with_vocab(**src_embed_opt, vocab=src_vocab)
info.embeddings['words'] = embed

for name, dataset in info.datasets.items():
dataset.set_input("words")
dataset.set_target("target")

return info



if __name__ == "__main__":
datapath = {"train": "/remote-home/ygwang/workspace/GLUE/SST-2/train.tsv",
"dev": "/remote-home/ygwang/workspace/GLUE/SST-2/dev.tsv"}
datainfo = sst2Loader().process(datapath, char_level_op=True)
#print(datainfo.datasets["train"])

len_count = 0
for instance in datainfo.datasets["train"]:
len_count += len(instance["chars"])
ave_len = len_count / len(datainfo.datasets["train"])
print(ave_len)

+ 160
- 31
reproduction/text_classification/data/yelpLoader.py View File

@@ -1,18 +1,64 @@
import ast
import csv
from typing import Iterable
from fastNLP import DataSet, Instance, Vocabulary
from fastNLP.core.vocabulary import VocabularyOption
from fastNLP.io import JsonLoader
from fastNLP.io.base_loader import DataInfo
from fastNLP.io.base_loader import DataInfo,DataSetLoader
from fastNLP.io.embed_loader import EmbeddingOption
from fastNLP.io.file_reader import _read_json
from typing import Union, Dict
from reproduction.Star_transformer.datasets import EmbedLoader
from reproduction.utils import check_dataloader_paths
from reproduction.utils import check_dataloader_paths, get_tokenizer

def clean_str(sentence, tokenizer, char_lower=False):
"""
heavily borrowed from github
https://github.com/LukeZhuang/Hierarchical-Attention-Network/blob/master/yelp-preprocess.ipynb
:param sentence: a raw sentence string
:return: list(str), the cleaned tokens
"""
if char_lower:
sentence = sentence.lower()
import re
nonalpnum = re.compile('[^0-9a-zA-Z?!\']+')
words = tokenizer(sentence)
words_collection = []
for word in words:
if word in ['-lrb-', '-rrb-', '<sssss>', '-r', '-l', 'b-']:
continue
tt = nonalpnum.split(word)
t = ''.join(tt)
if t != '':
words_collection.append(t)

return words_collection
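# Sketch of the cleaning behaviour; str.split stands in for the real get_tokenizer() here:
# clean_str("well, -lrb- great -rrb- movie!", str.split) -> ['well', 'great', 'movie!']
# bracket tokens are dropped and characters outside [0-9a-zA-Z?!'] are stripped.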


class yelpLoader(JsonLoader):
class yelpLoader(DataSetLoader):
"""
Reads the Yelp_full / Yelp_polarity dataset. The resulting DataSet contains the fields:
words: list(str), the text to classify
target: str, the label of the text
chars: list(str), the un-indexed character list

Dataset: yelp_full / yelp_polarity
:param fine_grained: whether to use the five-class (SST-5 style) labels; if ``False``, the two-class (SST-2 style) scheme is used. Default: ``False``
:param lower: whether to lower-case the text before tokenization. Default: ``False``
"""
def __init__(self, fine_grained=False, lower=False):
super(yelpLoader, self).__init__()
tag_v = {'1.0': 'very negative', '2.0': 'negative', '3.0': 'neutral',
'4.0': 'positive', '5.0': 'very positive'}
if not fine_grained:
tag_v['1.0'] = tag_v['2.0']
tag_v['5.0'] = tag_v['4.0']
self.fine_grained = fine_grained
self.tag_v = tag_v
self.lower = lower
self.tokenizer = get_tokenizer()

'''
Reads the Yelp dataset. The DataSet contains the fields:
review_id: str, 22 character unique review id
@@ -27,20 +73,8 @@ class yelpLoader(JsonLoader):
Data source: https://www.yelp.com/dataset/download
:param fine_grained: whether to use the SST-5 label scheme; if ``False``, SST-2 is used. Default: ``False``
"""
def __init__(self, fine_grained=False):
super(yelpLoader, self).__init__()
tag_v = {'1.0': 'very negative', '2.0': 'negative', '3.0': 'neutral',
'4.0': 'positive', '5.0': 'very positive'}
if not fine_grained:
tag_v['1.0'] = tag_v['2.0']
tag_v['5.0'] = tag_v['4.0']
self.fine_grained = fine_grained
self.tag_v = tag_v
def _load(self, path):

def _load_json(self, path):
ds = DataSet()
for idx, d in _read_json(path, fields=self.fields_list, dropna=self.dropna):
d = ast.literal_eval(d)
@@ -48,21 +82,116 @@ class yelpLoader(JsonLoader):
d["target"] = self.tag_v[str(d.pop("stars"))]
ds.append(Instance(**d))
return ds
def _load_yelp2015_broken(self,path):
ds = DataSet()
with open (path,encoding='ISO 8859-1') as f:
row=f.readline()
all_count=0
exp_count=0
while row:
row=row.split("\t\t")
all_count+=1
if len(row)>=3:
words=row[-1].split()
try:
target=self.tag_v[str(row[-2])+".0"]
ds.append(Instance(words=words, target=target))
except KeyError:
exp_count+=1
else:
exp_count+=1
row = f.readline()
print("error sample count:",exp_count)
print("all count:",all_count)
return ds
'''

def _load(self, path):
ds = DataSet()
csv_reader = csv.reader(open(path, encoding='utf-8'))
all_count = 0
real_count = 0
for row in csv_reader:
all_count += 1
if len(row) == 2:
target = self.tag_v[row[0] + ".0"]
words = clean_str(row[1], self.tokenizer, self.lower)
if len(words) != 0:
ds.append(Instance(words=words, target=target))
real_count += 1
print("all count:", all_count)
print("real count:", real_count)
return ds
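# Judging from the mapping above, Yelp CSV rows are expected to look like
# ["4", "Great food and friendly staff ..."]: column 0 is the star rating
# (mapped through tag_v, e.g. '4' -> 'positive'), column 1 is the raw review
# text passed through clean_str.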



def process(self, paths: Union[str, Dict[str, str]], vocab_opt: VocabularyOption = None,
embed_opt: EmbeddingOption = None):
def process(self, paths: Union[str, Dict[str, str]],
train_ds: Iterable[str] = None,
src_vocab_op: VocabularyOption = None,
tgt_vocab_op: VocabularyOption = None,
embed_opt: EmbeddingOption = None,
char_level_op=False):
paths = check_dataloader_paths(paths)
datasets = {}
info = DataInfo()
vocab = Vocabulary(min_freq=2) if vocab_opt is None else Vocabulary(**vocab_opt)
for name, path in paths.items():
dataset = self.load(path)
datasets[name] = dataset
vocab.from_dataset(dataset, field_name="words")
info.vocabs = vocab
info.datasets = datasets
if embed_opt is not None:
embed = EmbedLoader.load_with_vocab(**embed_opt, vocab=vocab)
info.embeddings['words'] = embed
info = DataInfo(datasets=self.load(paths))
src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op)
tgt_vocab = Vocabulary(unknown=None, padding=None) \
if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op)
_train_ds = [info.datasets[name]
for name in train_ds] if train_ds else info.datasets.values()

def wordtochar(words):
chars = []
for word in words:
word = word.lower()
for char in word:
chars.append(char)
chars.append('')
chars.pop()
return chars

input_name, target_name = 'words', 'target'
info.vocabs = {}
# optionally split each word into characters and store them in a new "chars" field;
# note that in this branch no word-level vocabulary is built
if char_level_op:
for dataset in info.datasets.values():
dataset.apply_field(wordtochar, field_name="words", new_field_name='chars')
# if embed_opt is not None:
# embed = EmbedLoader.load_with_vocab(**embed_opt, vocab=vocab)
# info.embeddings['words'] = embed
else:
src_vocab.from_dataset(*_train_ds, field_name=input_name)
src_vocab.index_dataset(*info.datasets.values(), field_name=input_name, new_field_name=input_name)
info.vocabs[input_name] = src_vocab

tgt_vocab.from_dataset(*_train_ds, field_name=target_name)
tgt_vocab.index_dataset(
*info.datasets.values(),
field_name=target_name, new_field_name=target_name)

info.vocabs[target_name] = tgt_vocab

info.datasets['train'], info.datasets['dev'] = info.datasets['train'].split(0.1, shuffle=False)

for name, dataset in info.datasets.items():
dataset.set_input("words")
dataset.set_target("target")

return info

if __name__ == "__main__":
testloader = yelpLoader()
# datapath = {"train": "/remote-home/ygwang/yelp_full/train.csv",
# "test": "/remote-home/ygwang/yelp_full/test.csv"}
#datapath={"train": "/remote-home/ygwang/yelp_full/test.csv"}
datapath = {"train": "/remote-home/ygwang/yelp_polarity/train.csv",
"test": "/remote-home/ygwang/yelp_polarity/test.csv"}
datainfo = testloader.process(datapath, char_level_op=True)

len_count = 0
for instance in datainfo.datasets["train"]:
len_count += len(instance["chars"])

ave_len = len_count / len(datainfo.datasets["train"])
print(ave_len)

reproduction/HAN-document_classification/model.py → reproduction/text_classification/model/HAN.py View File

@@ -1,6 +1,8 @@
import torch
import torch.nn as nn
from torch.autograd import Variable
from fastNLP.modules.utils import get_embeddings
from fastNLP.core import Const as C


def pack_sequence(tensor_seq, padding_value=0.0):
@@ -19,6 +21,33 @@ def pack_sequence(tensor_seq, padding_value=0.0):
return ans


class HANCLS(nn.Module):
def __init__(self, init_embed, num_cls):
super(HANCLS, self).__init__()

self.embed = get_embeddings(init_embed)
# NOTE: input_size is hard-coded to 300 and must match the embedding dimension
self.han = HAN(input_size=300,
output_size=num_cls,
word_hidden_size=50, word_num_layers=1, word_context_size=100,
sent_hidden_size=50, sent_num_layers=1, sent_context_size=100
)

def forward(self, input_sents):
# input_sents: [B, num_sents, seq_len], dtype long
B, num_sents, seq_len = input_sents.size()
input_sents = input_sents.view(-1, seq_len) # flat
words_embed = self.embed(input_sents) # should be [B*num-sent, seqlen , word-dim]
words_embed = words_embed.view(B, num_sents, seq_len, -1) # recover # [B, num-sent, seqlen , word-dim]
out = self.han(words_embed)

return {C.OUTPUT: out}

def predict(self, input_sents):
x = self.forward(input_sents)[C.OUTPUT]
return {C.OUTPUT: torch.argmax(x, 1)}
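# A rough smoke test for HANCLS (shapes and numbers are made up; this assumes
# get_embeddings accepts a (vocab_size, dim) tuple, as elsewhere in fastNLP):
# net = HANCLS(init_embed=(5000, 300), num_cls=5)
# doc = torch.randint(0, 5000, (2, 4, 16))      # [batch, num_sents, seq_len]
# out = net(doc)[C.OUTPUT]                      # expected to be roughly [2, 5], one score per class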


class HAN(nn.Module):
def __init__(self, input_size, output_size,
word_hidden_size, word_num_layers, word_context_size,
@@ -78,36 +107,3 @@ class AttentionNet(nn.Module):
alpha = self.softmax(torch.matmul(u, self.context_vec)) # u's dim (batch_size, seq_len, context_vec_size)
output = torch.bmm(torch.transpose(h_t, 1, 2), alpha) # alpha's dim (batch_size, seq_len, 1)
return torch.squeeze(output, dim=2) # output's dim (batch_size, 2*hidden_size) after the squeeze


if __name__ == '__main__':
'''
Test the model's correctness
'''
import numpy as np

use_cuda = True
net = HAN(input_size=200, output_size=5,
word_hidden_size=50, word_num_layers=1, word_context_size=100,
sent_hidden_size=50, sent_num_layers=1, sent_context_size=100)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
criterion = nn.NLLLoss()
test_time = 10
batch_size = 64
if use_cuda:
net.cuda()
print('test training')
for step in range(test_time):
x_data = [torch.randn(np.random.randint(1, 10), 200, 200) for i in range(batch_size)]
y_data = torch.LongTensor([np.random.randint(0, 5) for i in range(batch_size)])
if use_cuda:
x_data = [x_i.cuda() for x_i in x_data]
y_data = y_data.cuda()
x = [Variable(x_i) for x_i in x_data]
y = Variable(y_data)
predict = net(x)
loss = criterion(predict, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(loss.data[0])

Some files were not shown because too many files changed in this diff
