* add DataSet.get_field() to fetch a FieldArray based on its name
* remove old tutorials & add new tutorials
| @@ -268,7 +268,7 @@ class SetTensorProcessor(Processor): | |||
| self.default = default | |||
| def process(self, dataset): | |||
| - set_dict = {name: self.default for name in dataset.get_fields().keys()} | |||
| + set_dict = {name: self.default for name in dataset.get_all_fields().keys()} | |||
| set_dict.update(self.field_dict) | |||
| dataset._set_need_tensor(**set_dict) | |||
| return dataset | |||
| @@ -282,7 +282,7 @@ class SetIsTargetProcessor(Processor): | |||
| self.default = default | |||
| def process(self, dataset): | |||
| - set_dict = {name: self.default for name in dataset.get_fields().keys()} | |||
| + set_dict = {name: self.default for name in dataset.get_all_fields().keys()} | |||
| set_dict.update(self.field_dict) | |||
| dataset.set_target(**set_dict) | |||
| return dataset | |||
| @@ -43,7 +43,7 @@ class Batch(object): | |||
| indices = self.idx_list[self.curidx:endidx] | |||
| - for field_name, field in self.dataset.get_fields().items(): | |||
| + for field_name, field in self.dataset.get_all_fields().items(): | |||
| if field.is_target or field.is_input: | |||
| batch = field.get(indices) | |||
| if not self.as_numpy: | |||
| @@ -157,7 +157,12 @@ class DataSet(object): | |||
| """ | |||
| self.field_arrays.pop(name) | |||
| - def get_fields(self): | |||
| + def get_field(self, field_name): | |||
| + if field_name not in self.field_arrays: | |||
| + raise KeyError("Field name {} not found in DataSet".format(field_name)) | |||
| + return self.field_arrays[field_name] | |||
| + def get_all_fields(self): | |||
| """Return all the fields with their names. | |||
| :return dict field_arrays: the internal data structure of DataSet. | |||
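A short usage sketch of the two accessors introduced above (the DataSet constructor form is taken from the new tests further down; nothing else is assumed):

    from fastNLP.core.dataset import DataSet

    ds = DataSet({"x": [[1, 2, 3, 4]] * 10, "y": [[5, 6]] * 10})
    fa = ds.get_field("x")        # FieldArray for one field; raises KeyError if the name is unknown
    fields = ds.get_all_fields()  # dict mapping field name -> FieldArray
    assert fa.content == fields["x"].content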
| @@ -55,7 +55,7 @@ class BucketSampler(BaseSampler): | |||
| def __call__(self, data_set): | |||
| - seq_lens = data_set.get_fields()[self.seq_lens_field_name].content | |||
| + seq_lens = data_set.get_all_fields()[self.seq_lens_field_name].content | |||
| total_sample_num = len(seq_lens) | |||
| bucket_indexes = [] | |||
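A hedged sketch of how the renamed accessor is exercised end to end: the sampler reads the length field through DataSet.get_all_fields() and can be handed to Batch. The Batch call mirrors the tutorial below; the BucketSampler constructor arguments are an assumption (only seq_lens_field_name is confirmed by the code above):

    from fastNLP.core.batch import Batch
    from fastNLP.core.sampler import BucketSampler

    # assumed constructor; 'seq_len' must be a field holding sequence lengths
    sampler = BucketSampler(num_buckets=5, batch_size=32, seq_lens_field_name="seq_len")
    for batch_x, batch_y in Batch(dataset=train_data, batch_size=32, sampler=sampler):
        pass  # fields flagged is_input land in batch_x, is_target in batch_y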
| @@ -44,7 +44,7 @@ class CNNText(torch.nn.Module): | |||
| x = self.conv_pool(x) # [N,L,C] -> [N,C] | |||
| x = self.dropout(x) | |||
| x = self.fc(x) # [N,C] -> [N, N_class] | |||
| - return {'output': x} | |||
| + return {'pred': x} | |||
| def predict(self, word_seq): | |||
| """ | |||
| @@ -53,5 +53,5 @@ class CNNText(torch.nn.Module): | |||
| :return predict: dict of torch.LongTensor, [batch_size, seq_len] | |||
| """ | |||
| output = self(word_seq) | |||
| - _, predict = output['output'].max(dim=1) | |||
| - return {'predict': predict} | |||
| + _, predict = output['pred'].max(dim=1) | |||
| + return {'pred': predict} | |||
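Because forward and predict now both return their tensor under the key 'pred', loss and metric mappings written against the old keys ('output' / 'predict') must follow suit. A minimal sketch under that assumption, using the mapping-style constructors shown in the tutorials below:

    from fastNLP.core.losses import CrossEntropyLoss
    from fastNLP.core.metrics import AccuracyMetric

    # map the model's output-dict key and the DataSet's target field by name;
    # 'label_seq' is the target field name used in the tutorials
    loss = CrossEntropyLoss(pred="pred", target="label_seq")
    metric = AccuracyMetric(pred="pred", target="label_seq")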
| @@ -2,6 +2,7 @@ import os | |||
| import unittest | |||
| from fastNLP.core.dataset import DataSet | |||
| +from fastNLP.core.fieldarray import FieldArray | |||
| from fastNLP.core.instance import Instance | |||
| @@ -162,6 +163,21 @@ class TestDataSet(unittest.TestCase): | |||
| ds_1 = DataSet.load("./my_ds.pkl") | |||
| os.remove("my_ds.pkl") | |||
| + def test_get_all_fields(self): | |||
| + ds = DataSet({"x": [[1, 2, 3, 4]] * 10, "y": [[5, 6]] * 10}) | |||
| + ans = ds.get_all_fields() | |||
| + self.assertEqual(ans["x"].content, [[1, 2, 3, 4]] * 10) | |||
| + self.assertEqual(ans["y"].content, [[5, 6]] * 10) | |||
| + def test_get_field(self): | |||
| + ds = DataSet({"x": [[1, 2, 3, 4]] * 10, "y": [[5, 6]] * 10}) | |||
| + ans = ds.get_field("x") | |||
| + self.assertTrue(isinstance(ans, FieldArray)) | |||
| + self.assertEqual(ans.content, [[1, 2, 3, 4]] * 10) | |||
| + ans = ds.get_field("y") | |||
| + self.assertTrue(isinstance(ans, FieldArray)) | |||
| + self.assertEqual(ans.content, [[5, 6]] * 10) | |||
| class TestDataSetIter(unittest.TestCase): | |||
| def test__repr__(self): | |||
| @@ -35,4 +35,43 @@ There 's very little sense to what 's going on here , but the makers serve up th | |||
| Cattaneo should have followed the runaway success of his first film , The Full Monty , with something different . 2 | |||
| They 're the unnamed , easily substitutable forces that serve as whatever terror the heroes of horror movies try to avoid . 1 | |||
| It almost feels as if the movie is more interested in entertaining itself than in amusing us . 1 | |||
| -The movie 's progression into rambling incoherence gives new meaning to the phrase ` fatal script error . ' 0 | |||
| +The movie 's progression into rambling incoherence gives new meaning to the phrase ` fatal script error . ' 0 | |||
| +I still like Moonlight Mile , better judgment be damned . 3 | |||
| +A welcome relief from baseball movies that try too hard to be mythic , this one is a sweet and modest and ultimately winning story . 3 | |||
| +a bilingual charmer , just like the woman who inspired it 3 | |||
| +Like a less dizzily gorgeous companion to Mr. Wong 's In the Mood for Love -- very much a Hong Kong movie despite its mainland setting . 2 | |||
| +As inept as big-screen remakes of The Avengers and The Wild Wild West . 1 | |||
| +It 's everything you 'd expect -- but nothing more . 2 | |||
| +Best indie of the year , so far . 4 | |||
| +Hatfield and Hicks make the oddest of couples , and in this sense the movie becomes a study of the gambles of the publishing world , offering a case study that exists apart from all the movie 's political ramifications . 3 | |||
| +It 's like going to a house party and watching the host defend himself against a frothing ex-girlfriend . 1 | |||
| +That the Chuck Norris `` grenade gag '' occurs about 7 times during Windtalkers is a good indication of how serious-minded the film is . 2 | |||
| +The plot is romantic comedy boilerplate from start to finish . 2 | |||
| +It arrives with an impeccable pedigree , mongrel pep , and almost indecipherable plot complications . 2 | |||
| +A film that clearly means to preach exclusively to the converted . 2 | |||
| +I still like Moonlight Mile , better judgment be damned . 3 | |||
| +A welcome relief from baseball movies that try too hard to be mythic , this one is a sweet and modest and ultimately winning story . 3 | |||
| +a bilingual charmer , just like the woman who inspired it 3 | |||
| +Like a less dizzily gorgeous companion to Mr. Wong 's In the Mood for Love -- very much a Hong Kong movie despite its mainland setting . 2 | |||
| +As inept as big-screen remakes of The Avengers and The Wild Wild West . 1 | |||
| +It 's everything you 'd expect -- but nothing more . 2 | |||
| +Best indie of the year , so far . 4 | |||
| +Hatfield and Hicks make the oddest of couples , and in this sense the movie becomes a study of the gambles of the publishing world , offering a case study that exists apart from all the movie 's political ramifications . 3 | |||
| +It 's like going to a house party and watching the host defend himself against a frothing ex-girlfriend . 1 | |||
| +That the Chuck Norris `` grenade gag '' occurs about 7 times during Windtalkers is a good indication of how serious-minded the film is . 2 | |||
| +The plot is romantic comedy boilerplate from start to finish . 2 | |||
| +It arrives with an impeccable pedigree , mongrel pep , and almost indecipherable plot complications . 2 | |||
| +A film that clearly means to preach exclusively to the converted . 2 | |||
| +I still like Moonlight Mile , better judgment be damned . 3 | |||
| +A welcome relief from baseball movies that try too hard to be mythic , this one is a sweet and modest and ultimately winning story . 3 | |||
| +a bilingual charmer , just like the woman who inspired it 3 | |||
| +Like a less dizzily gorgeous companion to Mr. Wong 's In the Mood for Love -- very much a Hong Kong movie despite its mainland setting . 2 | |||
| +As inept as big-screen remakes of The Avengers and The Wild Wild West . 1 | |||
| +It 's everything you 'd expect -- but nothing more . 2 | |||
| +Best indie of the year , so far . 4 | |||
| +Hatfield and Hicks make the oddest of couples , and in this sense the movie becomes a study of the gambles of the publishing world , offering a case study that exists apart from all the movie 's political ramifications . 3 | |||
| +It 's like going to a house party and watching the host defend himself against a frothing ex-girlfriend . 1 | |||
| +That the Chuck Norris `` grenade gag '' occurs about 7 times during Windtalkers is a good indication of how serious-minded the film is . 2 | |||
| +The plot is romantic comedy boilerplate from start to finish . 2 | |||
| +It arrives with an impeccable pedigree , mongrel pep , and almost indecipherable plot complications . 2 | |||
| +A film that clearly means to preach exclusively to the converted . 2 | |||
| @@ -0,0 +1,911 @@ | |||
| { | |||
| "cells": [ | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "fastNLP上手教程\n", | |||
| "-------\n", | |||
| "\n", | |||
| "fastNLP提供方便的数据预处理,训练和测试模型的功能" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "DataSet & Instance\n", | |||
| "------\n", | |||
| "\n", | |||
| "fastNLP用DataSet和Instance保存和处理数据。每个DataSet表示一个数据集,每个Instance表示一个数据样本。一个DataSet存有多个Instance,每个Instance可以自定义存哪些内容。\n", | |||
| "\n", | |||
| "有一些read_*方法,可以轻松从文件读取数据,存成DataSet。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 9, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "8529" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP import DataSet\n", | |||
| "from fastNLP import Instance\n", | |||
| "\n", | |||
| "# 从csv读取数据到DataSet\n", | |||
| "dataset = DataSet.read_csv('../sentence.csv', headers=('raw_sentence', 'label'), sep='\\t')\n", | |||
| "print(len(dataset))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 10, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': A series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n'label': 1}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 使用数字索引[k],获取第k个样本\n", | |||
| "print(dataset[0])\n", | |||
| "\n", | |||
| "# 索引也可以是负数\n", | |||
| "print(dataset[-3])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## Instance\n", | |||
| "Instance表示一个样本,由一个或多个field(域,属性,特征)组成,每个field有名字和值。\n", | |||
| "\n", | |||
| "在初始化Instance时即可定义它包含的域,使用 \"field_name=field_value\"的写法。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 11, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "{'raw_sentence': fake data,\n'label': 0}" | |||
| ] | |||
| }, | |||
| "execution_count": 11, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# DataSet.append(Instance)加入新数据\n", | |||
| "dataset.append(Instance(raw_sentence='fake data', label='0'))\n", | |||
| "dataset[-1]" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## DataSet.apply方法\n", | |||
| "数据预处理利器" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 12, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': a series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n'label': 1}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 将所有数字转为小写\n", | |||
| "dataset.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 13, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': a series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n'label': 1}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# label转int\n", | |||
| "dataset.apply(lambda x: int(x['label']), new_field_name='label')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 14, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': a series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n'label': 1,\n'words': ['a', 'series', 'of', 'escapades', 'demonstrating', 'the', 'adage', 'that', 'what', 'is', 'good', 'for', 'the', 'goose', 'is', 'also', 'good', 'for', 'the', 'gander', ',', 'some', 'of', 'which', 'occasionally', 'amuses', 'but', 'none', 'of', 'which', 'amounts', 'to', 'much', 'of', 'a', 'story', '.']}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 使用空格分割句子\n", | |||
| "def split_sent(ins):\n", | |||
| " return ins['raw_sentence'].split()\n", | |||
| "dataset.apply(split_sent, new_field_name='words')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 15, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': a series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n'label': 1,\n'words': ['a', 'series', 'of', 'escapades', 'demonstrating', 'the', 'adage', 'that', 'what', 'is', 'good', 'for', 'the', 'goose', 'is', 'also', 'good', 'for', 'the', 'gander', ',', 'some', 'of', 'which', 'occasionally', 'amuses', 'but', 'none', 'of', 'which', 'amounts', 'to', 'much', 'of', 'a', 'story', '.'],\n'seq_len': 37}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 增加长度信息\n", | |||
| "dataset.apply(lambda x: len(x['words']), new_field_name='seq_len')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## DataSet.drop\n", | |||
| "筛选数据" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 16, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "8358" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "dataset.drop(lambda x: x['seq_len'] <= 3)\n", | |||
| "print(len(dataset))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## 配置DataSet\n", | |||
| "1. 哪些域是特征,哪些域是标签\n", | |||
| "2. 切分训练集/验证集" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 17, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 设置DataSet中,哪些field要转为tensor\n", | |||
| "\n", | |||
| "# set target,loss或evaluate中的golden,计算loss,模型评估时使用\n", | |||
| "dataset.set_target(\"label\")\n", | |||
| "# set input,模型forward时使用\n", | |||
| "dataset.set_input(\"words\")" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 18, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "5851" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "2507" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 分出测试集、训练集\n", | |||
| "\n", | |||
| "test_data, train_data = dataset.split(0.3)\n", | |||
| "print(len(test_data))\n", | |||
| "print(len(train_data))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "Vocabulary\n", | |||
| "------\n", | |||
| "\n", | |||
| "fastNLP中的Vocabulary轻松构建词表,将词转成数字" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 19, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': the project 's filmmakers forgot to include anything even halfway scary as they poorly rejigger fatal attraction into a high school setting .,\n'label': 0,\n'words': [4, 423, 9, 316, 1, 8, 1, 312, 72, 1478, 885, 14, 86, 725, 1, 1913, 1431, 53, 5, 455, 736, 1, 2],\n'seq_len': 23}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP import Vocabulary\n", | |||
| "\n", | |||
| "# 构建词表, Vocabulary.add(word)\n", | |||
| "vocab = Vocabulary(min_freq=2)\n", | |||
| "train_data.apply(lambda x: [vocab.add(word) for word in x['words']])\n", | |||
| "vocab.build_vocab()\n", | |||
| "\n", | |||
| "# index句子, Vocabulary.to_index(word)\n", | |||
| "train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='words')\n", | |||
| "test_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='words')\n", | |||
| "\n", | |||
| "\n", | |||
| "print(test_data[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "# Model\n", | |||
| "定义一个PyTorch模型" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 20, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "CNNText(\n (embed): Embedding(\n (embed): Embedding(3459, 50, padding_idx=0)\n (dropout): Dropout(p=0.0)\n )\n (conv_pool): ConvMaxpool(\n (convs): ModuleList(\n (0): Conv1d(50, 3, kernel_size=(3,), stride=(1,), padding=(2,))\n (1): Conv1d(50, 4, kernel_size=(4,), stride=(1,), padding=(2,))\n (2): Conv1d(50, 5, kernel_size=(5,), stride=(1,), padding=(2,))\n )\n )\n (dropout): Dropout(p=0.1)\n (fc): Linear(\n (linear): Linear(in_features=12, out_features=5, bias=True)\n )\n)" | |||
| ] | |||
| }, | |||
| "execution_count": 20, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP.models import CNNText\n", | |||
| "model = CNNText(embed_num=len(vocab), embed_dim=50, num_classes=5, padding=2, dropout=0.1)\n", | |||
| "model" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "这是上述模型的forward方法。如果你不知道什么是forward方法,请参考我们的PyTorch教程。\n", | |||
| "\n", | |||
| "注意两点:\n", | |||
| "1. forward参数名字叫**word_seq**,请记住。\n", | |||
| "2. forward的返回值是一个**dict**,其中有个key的名字叫**output**。\n", | |||
| "\n", | |||
| "```Python\n", | |||
| " def forward(self, word_seq):\n", | |||
| " \"\"\"\n", | |||
| "\n", | |||
| " :param word_seq: torch.LongTensor, [batch_size, seq_len]\n", | |||
| " :return output: dict of torch.LongTensor, [batch_size, num_classes]\n", | |||
| " \"\"\"\n", | |||
| " x = self.embed(word_seq) # [N,L] -> [N,L,C]\n", | |||
| " x = self.conv_pool(x) # [N,L,C] -> [N,C]\n", | |||
| " x = self.dropout(x)\n", | |||
| " x = self.fc(x) # [N,C] -> [N, N_class]\n", | |||
| " return {'output': x}\n", | |||
| "```" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "这是上述模型的predict方法,是用来直接输出该任务的预测结果,与forward目的不同。\n", | |||
| "\n", | |||
| "注意两点:\n", | |||
| "1. predict参数名也叫**word_seq**。\n", | |||
| "2. predict的返回值是也一个**dict**,其中有个key的名字叫**predict**。\n", | |||
| "\n", | |||
| "```\n", | |||
| " def predict(self, word_seq):\n", | |||
| " \"\"\"\n", | |||
| "\n", | |||
| " :param word_seq: torch.LongTensor, [batch_size, seq_len]\n", | |||
| " :return predict: dict of torch.LongTensor, [batch_size, seq_len]\n", | |||
| " \"\"\"\n", | |||
| " output = self(word_seq)\n", | |||
| " _, predict = output['output'].max(dim=1)\n", | |||
| " return {'predict': predict}\n", | |||
| "```" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "Trainer & Tester\n", | |||
| "------\n", | |||
| "\n", | |||
| "使用fastNLP的Trainer训练模型" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 21, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import Trainer\n", | |||
| "from copy import deepcopy\n", | |||
| "from fastNLP.core.losses import CrossEntropyLoss\n", | |||
| "from fastNLP.core.metrics import AccuracyMetric\n", | |||
| "\n", | |||
| "\n", | |||
| "# 更改DataSet中对应field的名称,与模型的forward的参数名一致\n", | |||
| "# 因为forward的参数叫word_seq, 所以要把原本叫words的field改名为word_seq\n", | |||
| "# 这里的演示是让你了解这种**命名规则**\n", | |||
| "train_data.rename_field('words', 'word_seq')\n", | |||
| "test_data.rename_field('words', 'word_seq')\n", | |||
| "\n", | |||
| "# 顺便把label换名为label_seq\n", | |||
| "train_data.rename_field('label', 'label_seq')\n", | |||
| "test_data.rename_field('label', 'label_seq')" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "### loss\n", | |||
| "训练模型需要提供一个损失函数\n", | |||
| "\n", | |||
| "下面提供了一个在分类问题中常用的交叉熵损失。注意它的**初始化参数**。\n", | |||
| "\n", | |||
| "pred参数对应的是模型的forward返回的dict的一个key的名字,这里是\"output\"。\n", | |||
| "\n", | |||
| "target参数对应的是dataset作为标签的field的名字,这里是\"label_seq\"。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 22, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "loss = CrossEntropyLoss(pred=\"output\", target=\"label_seq\")" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "### Metric\n", | |||
| "定义评价指标\n", | |||
| "\n", | |||
| "这里使用准确率。参数的“命名规则”跟上面类似。\n", | |||
| "\n", | |||
| "pred参数对应的是模型的predict方法返回的dict的一个key的名字,这里是\"predict\"。\n", | |||
| "\n", | |||
| "target参数对应的是dataset作为标签的field的名字,这里是\"label_seq\"。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 23, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "metric = AccuracyMetric(pred=\"predict\", target=\"label_seq\")" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 24, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "training epochs started 2018-12-07 14:11:31" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=915), HTML(value='')), layout=Layout(display=…" | |||
| ] | |||
| }, | |||
| "execution_count": 0, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 1/5. Step:183/915. AccuracyMetric: acc=0.350367" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 2/5. Step:366/915. AccuracyMetric: acc=0.409332" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 3/5. Step:549/915. AccuracyMetric: acc=0.572552" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 4/5. Step:732/915. AccuracyMetric: acc=0.711331" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 5/5. Step:915/915. AccuracyMetric: acc=0.801572" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 实例化Trainer,传入模型和数据,进行训练\n", | |||
| "# 先在test_data拟合\n", | |||
| "copy_model = deepcopy(model)\n", | |||
| "overfit_trainer = Trainer(model=copy_model, train_data=test_data, dev_data=test_data,\n", | |||
| " loss=loss,\n", | |||
| " metrics=metric,\n", | |||
| " save_path=None,\n", | |||
| " batch_size=32,\n", | |||
| " n_epochs=5)\n", | |||
| "overfit_trainer.train()" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 25, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "training epochs started 2018-12-07 14:12:21" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=395), HTML(value='')), layout=Layout(display=…" | |||
| ] | |||
| }, | |||
| "execution_count": 0, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 1/5. Step:79/395. AccuracyMetric: acc=0.250043" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 2/5. Step:158/395. AccuracyMetric: acc=0.280807" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 3/5. Step:237/395. AccuracyMetric: acc=0.280978" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 4/5. Step:316/395. AccuracyMetric: acc=0.285592" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 5/5. Step:395/395. AccuracyMetric: acc=0.278927" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 用train_data训练,在test_data验证\n", | |||
| "trainer = Trainer(model=model, train_data=train_data, dev_data=test_data,\n", | |||
| " loss=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n", | |||
| " metrics=AccuracyMetric(pred=\"predict\", target=\"label_seq\"),\n", | |||
| " save_path=None,\n", | |||
| " batch_size=32,\n", | |||
| " n_epochs=5)\n", | |||
| "trainer.train()\n", | |||
| "print('Train finished!')" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 26, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "[tester] \nAccuracyMetric: acc=0.280636" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'AccuracyMetric': {'acc': 0.280636}}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 调用Tester在test_data上评价效果\n", | |||
| "from fastNLP import Tester\n", | |||
| "\n", | |||
| "tester = Tester(data=test_data, model=model, metrics=AccuracyMetric(pred=\"predict\", target=\"label_seq\"),\n", | |||
| " batch_size=4)\n", | |||
| "acc = tester.test()\n", | |||
| "print(acc)" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [] | |||
| } | |||
| ], | |||
| "metadata": { | |||
| "kernelspec": { | |||
| "display_name": "Python 3", | |||
| "language": "python", | |||
| "name": "python3" | |||
| }, | |||
| "language_info": { | |||
| "codemirror_mode": { | |||
| "name": "ipython", | |||
| "version": 3 | |||
| }, | |||
| "file_extension": ".py", | |||
| "mimetype": "text/x-python", | |||
| "name": "python", | |||
| "nbconvert_exporter": "python", | |||
| "pygments_lexer": "ipython3", | |||
| "version": "3.6.7" | |||
| } | |||
| }, | |||
| "nbformat": 4, | |||
| "nbformat_minor": 2 | |||
| } | |||
| @@ -0,0 +1,860 @@ | |||
| { | |||
| "cells": [ | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "fastNLP上手教程\n", | |||
| "-------\n", | |||
| "\n", | |||
| "fastNLP提供方便的数据预处理,训练和测试模型的功能" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "DataSet & Instance\n", | |||
| "------\n", | |||
| "\n", | |||
| "fastNLP用DataSet和Instance保存和处理数据。每个DataSet表示一个数据集,每个Instance表示一个数据样本。一个DataSet存有多个Instance,每个Instance可以自定义存哪些内容。\n", | |||
| "\n", | |||
| "有一些read_*方法,可以轻松从文件读取数据,存成DataSet。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': A series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n'label': 1}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP import DataSet\n", | |||
| "from fastNLP import Instance\n", | |||
| "\n", | |||
| "# 从csv读取数据到DataSet\n", | |||
| "win_path = \"C:\\\\Users\\zyfeng\\Desktop\\FudanNLP\\\\fastNLP\\\\test\\\\data_for_tests\\\\tutorial_sample_dataset.csv\"\n", | |||
| "dataset = DataSet.read_csv(win_path, headers=('raw_sentence', 'label'), sep='\\t')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 2, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "{'raw_sentence': fake data,\n'label': 0}" | |||
| ] | |||
| }, | |||
| "execution_count": 2, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# DataSet.append(Instance)加入新数据\n", | |||
| "\n", | |||
| "dataset.append(Instance(raw_sentence='fake data', label='0'))\n", | |||
| "dataset[-1]" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 3, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# DataSet.apply(func, new_field_name)对数据预处理\n", | |||
| "\n", | |||
| "# 将所有数字转为小写\n", | |||
| "dataset.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')\n", | |||
| "# label转int\n", | |||
| "dataset.apply(lambda x: int(x['label']), new_field_name='label_seq', is_target=True)\n", | |||
| "# 使用空格分割句子\n", | |||
| "dataset.drop(lambda x: len(x['raw_sentence'].split()) == 0)\n", | |||
| "def split_sent(ins):\n", | |||
| " return ins['raw_sentence'].split()\n", | |||
| "dataset.apply(split_sent, new_field_name='words', is_input=True)" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 4, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# DataSet.drop(func)筛除数据\n", | |||
| "# 删除低于某个长度的词语\n", | |||
| "dataset.drop(lambda x: len(x['words']) <= 3)" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 7, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Train size: " | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| " " | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "54" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Test size: " | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 分出测试集、训练集\n", | |||
| "\n", | |||
| "test_data, train_data = dataset.split(0.3)\n", | |||
| "print(\"Train size: \", len(test_data))\n", | |||
| "print(\"Test size: \", len(train_data))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "Vocabulary\n", | |||
| "------\n", | |||
| "\n", | |||
| "fastNLP中的Vocabulary轻松构建词表,将词转成数字" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 8, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': the plot is romantic comedy boilerplate from start to finish .,\n'label': 2,\n'label_seq': 2,\n'words': ['the', 'plot', 'is', 'romantic', 'comedy', 'boilerplate', 'from', 'start', 'to', 'finish', '.'],\n'word_seq': [2, 13, 9, 24, 25, 26, 15, 27, 11, 28, 3]}" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP import Vocabulary\n", | |||
| "\n", | |||
| "# 构建词表, Vocabulary.add(word)\n", | |||
| "vocab = Vocabulary(min_freq=2)\n", | |||
| "train_data.apply(lambda x: [vocab.add(word) for word in x['words']])\n", | |||
| "vocab.build_vocab()\n", | |||
| "\n", | |||
| "# index句子, Vocabulary.to_index(word)\n", | |||
| "train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n", | |||
| "test_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n", | |||
| "\n", | |||
| "\n", | |||
| "print(test_data[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 8, | |||
| "metadata": { | |||
| "scrolled": true | |||
| }, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "batch_x has: {'words': array([list(['this', 'kind', 'of', 'hands-on', 'storytelling', 'is', 'ultimately', 'what', 'makes', 'shanghai', 'ghetto', 'move', 'beyond', 'a', 'good', ',', 'dry', ',', 'reliable', 'textbook', 'and', 'what', 'allows', 'it', 'to', 'rank', 'with', 'its', 'worthy', 'predecessors', '.']),\n", | |||
| " list(['the', 'entire', 'movie', 'is', 'filled', 'with', 'deja', 'vu', 'moments', '.'])],\n", | |||
| " dtype=object), 'word_seq': tensor([[ 19, 184, 6, 1, 481, 9, 206, 50, 91, 1210, 1609, 1330,\n", | |||
| " 495, 5, 63, 4, 1269, 4, 1, 1184, 7, 50, 1050, 10,\n", | |||
| " 8, 1611, 16, 21, 1039, 1, 2],\n", | |||
| " [ 3, 711, 22, 9, 1282, 16, 2482, 2483, 200, 2, 0, 0,\n", | |||
| " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", | |||
| " 0, 0, 0, 0, 0, 0, 0]])}\n", | |||
| "batch_y has: {'label_seq': tensor([3, 2])}\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 假设你们需要做强化学习或者gan之类的项目,也许你们可以使用这里的dataset\n", | |||
| "from fastNLP.core.batch import Batch\n", | |||
| "from fastNLP.core.sampler import RandomSampler\n", | |||
| "\n", | |||
| "batch_iterator = Batch(dataset=train_data, batch_size=2, sampler=RandomSampler())\n", | |||
| "for batch_x, batch_y in batch_iterator:\n", | |||
| " print(\"batch_x has: \", batch_x)\n", | |||
| " print(\"batch_y has: \", batch_y)\n", | |||
| " break" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "# Model\n" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 9, | |||
| "metadata": { | |||
| "collapsed": false | |||
| }, | |||
| "outputs": [ | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "CNNText(\n (embed): Embedding(\n (embed): Embedding(77, 50, padding_idx=0)\n (dropout): Dropout(p=0.0)\n )\n (conv_pool): ConvMaxpool(\n (convs): ModuleList(\n (0): Conv1d(50, 3, kernel_size=(3,), stride=(1,), padding=(2,))\n (1): Conv1d(50, 4, kernel_size=(4,), stride=(1,), padding=(2,))\n (2): Conv1d(50, 5, kernel_size=(5,), stride=(1,), padding=(2,))\n )\n )\n (dropout): Dropout(p=0.1)\n (fc): Linear(\n (linear): Linear(in_features=12, out_features=5, bias=True)\n )\n)" | |||
| ] | |||
| }, | |||
| "execution_count": 9, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 定义一个简单的Pytorch模型\n", | |||
| "\n", | |||
| "from fastNLP.models import CNNText\n", | |||
| "model = CNNText(embed_num=len(vocab), embed_dim=50, num_classes=5, padding=2, dropout=0.1)\n", | |||
| "model" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "Trainer & Tester\n", | |||
| "------\n", | |||
| "\n", | |||
| "使用fastNLP的Trainer训练模型" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 11, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import Trainer\n", | |||
| "from copy import deepcopy\n", | |||
| "from fastNLP import CrossEntropyLoss\n", | |||
| "from fastNLP import AccuracyMetric" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 12, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "training epochs started 2018-12-07 14:07:20" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=20), HTML(value='')), layout=Layout(display='…" | |||
| ] | |||
| }, | |||
| "execution_count": 0, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 1/10. Step:2/20. AccuracyMetric: acc=0.037037" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 2/10. Step:4/20. AccuracyMetric: acc=0.296296" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 3/10. Step:6/20. AccuracyMetric: acc=0.333333" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 4/10. Step:8/20. AccuracyMetric: acc=0.555556" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 5/10. Step:10/20. AccuracyMetric: acc=0.611111" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 6/10. Step:12/20. AccuracyMetric: acc=0.481481" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 7/10. Step:14/20. AccuracyMetric: acc=0.62963" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 8/10. Step:16/20. AccuracyMetric: acc=0.685185" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 9/10. Step:18/20. AccuracyMetric: acc=0.722222" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 10/10. Step:20/20. AccuracyMetric: acc=0.777778" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 进行overfitting测试\n", | |||
| "copy_model = deepcopy(model)\n", | |||
| "overfit_trainer = Trainer(model=copy_model, \n", | |||
| " train_data=test_data, \n", | |||
| " dev_data=test_data,\n", | |||
| " loss=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n", | |||
| " metrics=AccuracyMetric(),\n", | |||
| " n_epochs=10,\n", | |||
| " save_path=None)\n", | |||
| "overfit_trainer.train()" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 14, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "training epochs started 2018-12-07 14:08:10" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=5), HTML(value='')), layout=Layout(display='i…" | |||
| ] | |||
| }, | |||
| "execution_count": 0, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 1/5. Step:1/5. AccuracyMetric: acc=0.037037" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 2/5. Step:2/5. AccuracyMetric: acc=0.037037" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 3/5. Step:3/5. AccuracyMetric: acc=0.037037" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 4/5. Step:4/5. AccuracyMetric: acc=0.185185" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 5/5. Step:5/5. AccuracyMetric: acc=0.240741" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Train finished!" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 实例化Trainer,传入模型和数据,进行训练\n", | |||
| "trainer = Trainer(model=model, \n", | |||
| " train_data=train_data, \n", | |||
| " dev_data=test_data,\n", | |||
| " loss=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n", | |||
| " metrics=AccuracyMetric(),\n", | |||
| " n_epochs=5)\n", | |||
| "trainer.train()\n", | |||
| "print('Train finished!')" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 15, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "[tester] \nAccuracyMetric: acc=0.240741" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP import Tester\n", | |||
| "\n", | |||
| "tester = Tester(data=test_data, model=model, metrics=AccuracyMetric())\n", | |||
| "acc = tester.test()" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "# In summary\n", | |||
| "\n", | |||
| "## fastNLP Trainer的伪代码逻辑\n", | |||
| "### 1. 准备DataSet,假设DataSet中共有如下的fields\n", | |||
| " ['raw_sentence', 'word_seq1', 'word_seq2', 'raw_label','label']\n", | |||
| " 通过\n", | |||
| " DataSet.set_input('word_seq1', word_seq2', flag=True)将'word_seq1', 'word_seq2'设置为input\n", | |||
| " 通过\n", | |||
| " DataSet.set_target('label', flag=True)将'label'设置为target\n", | |||
| "### 2. 初始化模型\n", | |||
| " class Model(nn.Module):\n", | |||
| " def __init__(self):\n", | |||
| " xxx\n", | |||
| " def forward(self, word_seq1, word_seq2):\n", | |||
| " # (1) 这里使用的形参名必须和DataSet中的input field的名称对应。因为我们是通过形参名, 进行赋值的\n", | |||
| " # (2) input field的数量可以多于这里的形参数量。但是不能少于。\n", | |||
| " xxxx\n", | |||
| " # 输出必须是一个dict\n", | |||
| "### 3. Trainer的训练过程\n", | |||
| " (1) 从DataSet中按照batch_size取出一个batch,调用Model.forward\n", | |||
| " (2) 将 Model.forward的结果 与 标记为target的field 传入Losser当中。\n", | |||
| " 由于每个人写的Model.forward的output的dict可能key并不一样,比如有人是{'pred':xxx}, {'output': xxx}; \n", | |||
| " 另外每个人将target可能也会设置为不同的名称, 比如有人是label, 有人设置为target;\n", | |||
| " 为了解决以上的问题,我们的loss提供映射机制\n", | |||
| " 比如CrossEntropyLosser的需要的输入是(prediction, target)。但是forward的output是{'output': xxx}; 'label'是target\n", | |||
| " 那么初始化losser的时候写为CrossEntropyLosser(prediction='output', target='label')即可\n", | |||
| " (3) 对于Metric是同理的\n", | |||
| " Metric计算也是从 forward的结果中取值 与 设置target的field中取值。 也是可以通过映射找到对应的值 \n", | |||
| " \n", | |||
| " \n", | |||
| "\n", | |||
| "## 一些问题.\n", | |||
| "### 1. DataSet中为什么需要设置input和target\n", | |||
| " 只有被设置为input或者target的数据才会在train的过程中被取出来\n", | |||
| " (1.1) 我们只会在设置为input的field中寻找传递给Model.forward的参数。\n", | |||
| " (1.2) 我们在传递值给losser或者metric的时候会使用来自: \n", | |||
| " (a)Model.forward的output\n", | |||
| " (b)被设置为target的field\n", | |||
| " \n", | |||
| "\n", | |||
| "### 2. 我们是通过forwad中的形参名将DataSet中的field赋值给对应的参数\n", | |||
| " (1.1) 构建模型过程中,\n", | |||
| " 例如:\n", | |||
| " DataSet中x,seq_lens是input,那么forward就应该是\n", | |||
| " def forward(self, x, seq_lens):\n", | |||
| " pass\n", | |||
| " 我们是通过形参名称进行匹配的field的\n", | |||
| " \n", | |||
| "\n", | |||
| "\n", | |||
| "### 1. 加载数据到DataSet\n", | |||
| "### 2. 使用apply操作对DataSet进行预处理\n", | |||
| " (2.1) 处理过程中将某些field设置为input,某些field设置为target\n", | |||
| "### 3. 构建模型\n", | |||
| " (3.1) 构建模型过程中,需要注意forward函数的形参名需要和DataSet中设置为input的field名称是一致的。\n", | |||
| " 例如:\n", | |||
| " DataSet中x,seq_lens是input,那么forward就应该是\n", | |||
| " def forward(self, x, seq_lens):\n", | |||
| " pass\n", | |||
| " 我们是通过形参名称进行匹配的field的\n", | |||
| " (3.2) 模型的forward的output需要是dict类型的。\n", | |||
| " 建议将输出设置为{\"pred\": xx}.\n", | |||
| " \n" | |||
| ] | |||
| }, | |||
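| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# A minimal, non-executed sketch of the naming convention summarized above.\n", | |||
| "# It mirrors the summary: input field names must match forward's parameter names,\n", | |||
| "# forward must return a dict, and the loss/metric mappings bridge that dict and\n", | |||
| "# the target field. ToyModel itself is hypothetical.\n", | |||
| "import torch.nn as nn\n", | |||
| "\n", | |||
| "class ToyModel(nn.Module):\n", | |||
| "    def __init__(self, vocab_size, num_classes):\n", | |||
| "        super(ToyModel, self).__init__()\n", | |||
| "        self.embed = nn.Embedding(vocab_size, 50)\n", | |||
| "        self.fc = nn.Linear(50, num_classes)\n", | |||
| "\n", | |||
| "    def forward(self, word_seq):  # 'word_seq' must be set as input in the DataSet\n", | |||
| "        x = self.embed(word_seq).mean(dim=1)  # [N, L] -> [N, 50]\n", | |||
| "        return {'pred': self.fc(x)}  # must be a dict; 'pred' is the suggested key\n", | |||
| "\n", | |||
| "# loss = CrossEntropyLoss(pred='pred', target='label_seq')   # dict key -> target field\n", | |||
| "# metric = AccuracyMetric(pred='pred', target='label_seq')\n" | |||
| ] | |||
| }, | |||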
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [] | |||
| } | |||
| ], | |||
| "metadata": { | |||
| "kernelspec": { | |||
| "display_name": "Python 3", | |||
| "language": "python", | |||
| "name": "python3" | |||
| }, | |||
| "language_info": { | |||
| "codemirror_mode": { | |||
| "name": "ipython", | |||
| "version": 3 | |||
| }, | |||
| "file_extension": ".py", | |||
| "mimetype": "text/x-python", | |||
| "name": "python", | |||
| "nbconvert_exporter": "python", | |||
| "pygments_lexer": "ipython3", | |||
| "version": "3.6.7" | |||
| } | |||
| }, | |||
| "nbformat": 4, | |||
| "nbformat_minor": 2 | |||
| } | |||
| @@ -0,0 +1,333 @@ | |||
| { | |||
| "cells": [ | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": { | |||
| "collapsed": true | |||
| }, | |||
| "source": [ | |||
| "# FastNLP 1分钟上手教程" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## step 1\n", | |||
| "读取数据集" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 50, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import DataSet\n", | |||
| "# linux_path = \"../test/data_for_tests/tutorial_sample_dataset.csv\"\n", | |||
| "win_path = \"C:\\\\Users\\zyfeng\\Desktop\\FudanNLP\\\\fastNLP\\\\test\\\\data_for_tests\\\\tutorial_sample_dataset.csv\"\n", | |||
| "ds = DataSet.read_csv(win_path, headers=('raw_sentence', 'label'), sep='\\t')" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## step 2\n", | |||
| "数据预处理\n", | |||
| "1. 类型转换\n", | |||
| "2. 切分验证集\n", | |||
| "3. 构建词典" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 52, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 将所有数字转为小写\n", | |||
| "ds.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')\n", | |||
| "# label转int\n", | |||
| "ds.apply(lambda x: int(x['label']), new_field_name='label_seq', is_target=True)\n", | |||
| "\n", | |||
| "def split_sent(ins):\n", | |||
| " return ins['raw_sentence'].split()\n", | |||
| "ds.apply(split_sent, new_field_name='words', is_input=True)\n" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 60, | |||
| "metadata": { | |||
| "collapsed": false | |||
| }, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Train size: " | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| " " | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "54" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Test size: " | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| " " | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "23" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 分割训练集/验证集\n", | |||
| "train_data, dev_data = ds.split(0.3)\n", | |||
| "print(\"Train size: \", len(train_data))\n", | |||
| "print(\"Test size: \", len(dev_data))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 61, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import Vocabulary\n", | |||
| "vocab = Vocabulary(min_freq=2)\n", | |||
| "train_data.apply(lambda x: [vocab.add(word) for word in x['words']])\n", | |||
| "\n", | |||
| "# index句子, Vocabulary.to_index(word)\n", | |||
| "train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n", | |||
| "dev_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## step 3\n", | |||
| " 定义模型" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 62, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP.models import CNNText\n", | |||
| "model = CNNText(embed_num=len(vocab), embed_dim=50, num_classes=5, padding=2, dropout=0.1)\n" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## step 4\n", | |||
| "开始训练" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 63, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "training epochs started 2018-12-07 14:03:41" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=6), HTML(value='')), layout=Layout(display='i…" | |||
| ] | |||
| }, | |||
| "execution_count": 0, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 1/3. Step:2/6. AccuracyMetric: acc=0.26087" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 2/3. Step:4/6. AccuracyMetric: acc=0.347826" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 3/3. Step:6/6. AccuracyMetric: acc=0.608696" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Train finished!" | |||
| ] | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP import Trainer, CrossEntropyLoss, AccuracyMetric\n", | |||
| "trainer = Trainer(model=model, \n", | |||
| " train_data=train_data, \n", | |||
| " dev_data=dev_data,\n", | |||
| " loss=CrossEntropyLoss(),\n", | |||
| " metrics=AccuracyMetric()\n", | |||
| " )\n", | |||
| "trainer.train()\n", | |||
| "print('Train finished!')\n" | |||
| ] | |||
| }, | |||
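| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "As a quick follow-up sketch, the trained model can be evaluated on dev_data with Tester. This assumes the default AccuracyMetric field mapping, the same one used during training above." | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import Tester\n", | |||
| "\n", | |||
| "# evaluate the trained model on the dev set with the same metric\n", | |||
| "tester = Tester(data=dev_data, model=model, metrics=AccuracyMetric())\n", | |||
| "tester.test()" | |||
| ] | |||
| }, | |||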
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "### 本教程结束。更多操作请参考进阶教程。" | |||
| ] | |||
| } | |||
| ], | |||
| "metadata": { | |||
| "kernelspec": { | |||
| "display_name": "Python 2", | |||
| "language": "python", | |||
| "name": "python2" | |||
| }, | |||
| "language_info": { | |||
| "codemirror_mode": { | |||
| "name": "ipython", | |||
| "version": 2 | |||
| }, | |||
| "file_extension": ".py", | |||
| "mimetype": "text/x-python", | |||
| "name": "python", | |||
| "nbconvert_exporter": "python", | |||
| "pygments_lexer": "ipython2", | |||
| "version": "2.7.6" | |||
| } | |||
| }, | |||
| "nbformat": 4, | |||
| "nbformat_minor": 0 | |||
| } | |||
| @@ -6,48 +6,68 @@ | |||
| "collapsed": true | |||
| }, | |||
| "source": [ | |||
| "# 六行代码搞定FastNLP" | |||
| "## FastNLP 进阶教程\n", | |||
| "本教程阅读时间平均30分钟" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP.core.dataset import DataSet\n", | |||
| "import fastNLP.io.dataset_loader" | |||
| "## 数据部分\n", | |||
| "### DataSet\n" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "ds = DataSet.read_naive(\"../test/data_for_tests/tutorial_sample_dataset.csv\")" | |||
| "### Instance" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [] | |||
| "source": [ | |||
| "### Vocabulary" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [] | |||
| "source": [ | |||
| "## 模型部分\n", | |||
| "### model" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [] | |||
| "source": [ | |||
| "## 训练测试部分\n", | |||
| "### Loss" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "### Metric" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "### Trainer" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "### Tester" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| @@ -1,526 +0,0 @@ | |||
| { | |||
| "cells": [ | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "fastNLP上手教程\n", | |||
| "-------\n", | |||
| "\n", | |||
| "fastNLP提供方便的数据预处理,训练和测试模型的功能" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 1, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stderr", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "/Users/yh/miniconda2/envs/python3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n", | |||
| " \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "import sys\n", | |||
| "sys.path.append('/Users/yh/Desktop/fastNLP/fastNLP/')\n", | |||
| "\n", | |||
| "import fastNLP as fnlp" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "DataSet & Instance\n", | |||
| "------\n", | |||
| "\n", | |||
| "fastNLP用DataSet和Instance保存和处理数据。每个DataSet表示一个数据集,每个Instance表示一个数据样本。一个DataSet存有多个Instance,每个Instance可以自定义存哪些内容。\n", | |||
| "\n", | |||
| "有一些read_*方法,可以轻松从文件读取数据,存成DataSet。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 2, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': A series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n", | |||
| "'label': 1}\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP import DataSet\n", | |||
| "from fastNLP import Instance\n", | |||
| "\n", | |||
| "# 从csv读取数据到DataSet\n", | |||
| "dataset = DataSet.read_csv('sentence.csv', headers=('raw_sentence', 'label'), sep='\\t')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 3, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "{'raw_sentence': fake data,\n", | |||
| "'label': 0}" | |||
| ] | |||
| }, | |||
| "execution_count": 3, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# DataSet.append(Instance)加入新数据\n", | |||
| "\n", | |||
| "dataset.append(Instance(raw_sentence='fake data', label='0'))\n", | |||
| "dataset[-1]" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 4, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# DataSet.apply(func, new_field_name)对数据预处理\n", | |||
| "\n", | |||
| "# 将所有数字转为小写\n", | |||
| "dataset.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')\n", | |||
| "# label转int\n", | |||
| "dataset.apply(lambda x: int(x['label']), new_field_name='label_seq', is_target=True)\n", | |||
| "# 使用空格分割句子\n", | |||
| "dataset.drop(lambda x:len(x['raw_sentence'].split())==0)\n", | |||
| "def split_sent(ins):\n", | |||
| " return ins['raw_sentence'].split()\n", | |||
| "dataset.apply(split_sent, new_field_name='words', is_input=True)" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 5, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# DataSet.drop(func)筛除数据\n", | |||
| "# 删除低于某个长度的词语\n", | |||
| "# dataset.drop(lambda x: len(x['words']) <= 3)" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 6, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Train size: 5971\n", | |||
| "Test size: 2558\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 分出测试集、训练集\n", | |||
| "\n", | |||
| "test_data, train_data = dataset.split(0.3)\n", | |||
| "print(\"Train size: \", len(test_data))\n", | |||
| "print(\"Test size: \", len(train_data))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "Vocabulary\n", | |||
| "------\n", | |||
| "\n", | |||
| "fastNLP中的Vocabulary轻松构建词表,将词转成数字" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 7, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "{'raw_sentence': gussied up with so many distracting special effects and visual party tricks that it 's not clear whether we 're supposed to shriek or laugh .,\n", | |||
| "'label': 1,\n", | |||
| "'label_seq': 1,\n", | |||
| "'words': ['gussied', 'up', 'with', 'so', 'many', 'distracting', 'special', 'effects', 'and', 'visual', 'party', 'tricks', 'that', 'it', \"'s\", 'not', 'clear', 'whether', 'we', \"'re\", 'supposed', 'to', 'shriek', 'or', 'laugh', '.'],\n", | |||
| "'word_seq': [1, 65, 16, 43, 108, 1, 329, 433, 7, 319, 1313, 1, 12, 10, 11, 27, 1428, 567, 86, 134, 1949, 8, 1, 49, 506, 2]}\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "from fastNLP import Vocabulary\n", | |||
| "\n", | |||
| "# 构建词表, Vocabulary.add(word)\n", | |||
| "vocab = Vocabulary(min_freq=2)\n", | |||
| "train_data.apply(lambda x: [vocab.add(word) for word in x['words']])\n", | |||
| "vocab.build_vocab()\n", | |||
| "\n", | |||
| "# index句子, Vocabulary.to_index(word)\n", | |||
| "train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n", | |||
| "test_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n", | |||
| "\n", | |||
| "\n", | |||
| "print(test_data[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 8, | |||
| "metadata": { | |||
| "scrolled": true | |||
| }, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "batch_x has: {'words': array([list(['this', 'kind', 'of', 'hands-on', 'storytelling', 'is', 'ultimately', 'what', 'makes', 'shanghai', 'ghetto', 'move', 'beyond', 'a', 'good', ',', 'dry', ',', 'reliable', 'textbook', 'and', 'what', 'allows', 'it', 'to', 'rank', 'with', 'its', 'worthy', 'predecessors', '.']),\n", | |||
| " list(['the', 'entire', 'movie', 'is', 'filled', 'with', 'deja', 'vu', 'moments', '.'])],\n", | |||
| " dtype=object), 'word_seq': tensor([[ 19, 184, 6, 1, 481, 9, 206, 50, 91, 1210, 1609, 1330,\n", | |||
| " 495, 5, 63, 4, 1269, 4, 1, 1184, 7, 50, 1050, 10,\n", | |||
| " 8, 1611, 16, 21, 1039, 1, 2],\n", | |||
| " [ 3, 711, 22, 9, 1282, 16, 2482, 2483, 200, 2, 0, 0,\n", | |||
| " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", | |||
| " 0, 0, 0, 0, 0, 0, 0]])}\n", | |||
| "batch_y has: {'label_seq': tensor([3, 2])}\n" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 假设你们需要做强化学习或者gan之类的项目,也许你们可以使用这里的dataset\n", | |||
| "from fastNLP.core.batch import Batch\n", | |||
| "from fastNLP.core.sampler import RandomSampler\n", | |||
| "\n", | |||
| "batch_iterator = Batch(dataset=train_data, batch_size=2, sampler=RandomSampler())\n", | |||
| "for batch_x, batch_y in batch_iterator:\n", | |||
| " print(\"batch_x has: \", batch_x)\n", | |||
| " print(\"batch_y has: \", batch_y)\n", | |||
| " break" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "# Model\n" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 9, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "CNNText(\n", | |||
| " (embed): Embedding(\n", | |||
| " (embed): Embedding(3470, 50, padding_idx=0)\n", | |||
| " (dropout): Dropout(p=0.0)\n", | |||
| " )\n", | |||
| " (conv_pool): ConvMaxpool(\n", | |||
| " (convs): ModuleList(\n", | |||
| " (0): Conv1d(50, 3, kernel_size=(3,), stride=(1,), padding=(2,))\n", | |||
| " (1): Conv1d(50, 4, kernel_size=(4,), stride=(1,), padding=(2,))\n", | |||
| " (2): Conv1d(50, 5, kernel_size=(5,), stride=(1,), padding=(2,))\n", | |||
| " )\n", | |||
| " )\n", | |||
| " (dropout): Dropout(p=0.1)\n", | |||
| " (fc): Linear(\n", | |||
| " (linear): Linear(in_features=12, out_features=5, bias=True)\n", | |||
| " )\n", | |||
| ")" | |||
| ] | |||
| }, | |||
| "execution_count": 9, | |||
| "metadata": {}, | |||
| "output_type": "execute_result" | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 定义一个简单的Pytorch模型\n", | |||
| "\n", | |||
| "from fastNLP.models import CNNText\n", | |||
| "model = CNNText(embed_num=len(vocab), embed_dim=50, num_classes=5, padding=2, dropout=0.1)\n", | |||
| "model" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "Trainer & Tester\n", | |||
| "------\n", | |||
| "\n", | |||
| "使用fastNLP的Trainer训练模型" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 10, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import Trainer\n", | |||
| "from copy import deepcopy\n", | |||
| "from fastNLP.core.losses import CrossEntropyLoss\n", | |||
| "from fastNLP.core.metrics import AccuracyMetric" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 11, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "training epochs started 2018-12-05 15:37:15\n" | |||
| ] | |||
| }, | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=1870), HTML(value='')), layout=Layout(display…" | |||
| ] | |||
| }, | |||
| "metadata": {}, | |||
| "output_type": "display_data" | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "Epoch 1/10. Step:187/1870. AccuracyMetric: acc=0.351365\n", | |||
| "Epoch 2/10. Step:374/1870. AccuracyMetric: acc=0.470943\n", | |||
| "Epoch 3/10. Step:561/1870. AccuracyMetric: acc=0.600402\n", | |||
| "Epoch 4/10. Step:748/1870. AccuracyMetric: acc=0.702227\n", | |||
| "Epoch 5/10. Step:935/1870. AccuracyMetric: acc=0.79099\n", | |||
| "Epoch 6/10. Step:1122/1870. AccuracyMetric: acc=0.846424\n", | |||
| "Epoch 7/10. Step:1309/1870. AccuracyMetric: acc=0.874058\n", | |||
| "Epoch 8/10. Step:1496/1870. AccuracyMetric: acc=0.898844\n", | |||
| "Epoch 9/10. Step:1683/1870. AccuracyMetric: acc=0.910568\n", | |||
| "Epoch 10/10. Step:1870/1870. AccuracyMetric: acc=0.921286\n", | |||
| "\r" | |||
| ] | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 进行overfitting测试\n", | |||
| "copy_model = deepcopy(model)\n", | |||
| "overfit_trainer = Trainer(model=copy_model, \n", | |||
| " train_data=test_data, \n", | |||
| " dev_data=test_data,\n", | |||
| " losser=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n", | |||
| " metrics=AccuracyMetric(),\n", | |||
| " n_epochs=10,\n", | |||
| " save_path=None)\n", | |||
| "overfit_trainer.train()" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": 12, | |||
| "metadata": {}, | |||
| "outputs": [ | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "training epochs started 2018-12-05 15:37:41\n" | |||
| ] | |||
| }, | |||
| { | |||
| "data": { | |||
| "text/plain": [ | |||
| "HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=400), HTML(value='')), layout=Layout(display=…" | |||
| ] | |||
| }, | |||
| "metadata": {}, | |||
| "output_type": "display_data" | |||
| }, | |||
| { | |||
| "name": "stdout", | |||
| "output_type": "stream", | |||
| "text": [ | |||
| "\r" | |||
| ] | |||
| }, | |||
| { | |||
| "ename": "AttributeError", | |||
| "evalue": "'NoneType' object has no attribute 'squeeze'", | |||
| "traceback": [ | |||
| "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", | |||
| "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", | |||
| "\u001b[0;32m<ipython-input-12-5603b8b11a82>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0mn_epochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m save_path='save/')\n\u001b[0;32m----> 9\u001b[0;31m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 10\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Train finished!'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |||
| "\u001b[0;32m~/Desktop/fastNLP/fastNLP/fastNLP/core/trainer.py\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_summary_writer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSummaryWriter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muse_tqdm\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 165\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tqdm_train\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 166\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_print_train\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |||
| "\u001b[0;32m~/Desktop/fastNLP/fastNLP/fastNLP/core/trainer.py\u001b[0m in \u001b[0;36m_tqdm_train\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 206\u001b[0m \u001b[0mpbar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0meval_str\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 207\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalidate_every\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdev_data\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 208\u001b[0;31m \u001b[0meval_res\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_validation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 209\u001b[0m \u001b[0meval_str\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"Epoch {}/{}. Step:{}/{}. \"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mn_epochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtotal_steps\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 210\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtester\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_format_eval_results\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0meval_res\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |||
| "\u001b[0;32m~/Desktop/fastNLP/fastNLP/fastNLP/core/trainer.py\u001b[0m in \u001b[0;36m_do_validation\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 265\u001b[0m \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtester\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mres\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 267\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_summary_writer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_scalar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"valid_{}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mglobal_step\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 268\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_path\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_better_eval_result\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mres\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 269\u001b[0m \u001b[0mmetric_key\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmetric_key\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmetric_key\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m\"None\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |||
| "\u001b[0;32m~/miniconda2/envs/python3/lib/python3.6/site-packages/tensorboardX/writer.py\u001b[0m in \u001b[0;36madd_scalar\u001b[0;34m(self, tag, scalar_value, global_step, walltime)\u001b[0m\n\u001b[1;32m 332\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_check_caffe2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 333\u001b[0m \u001b[0mscalar_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mworkspace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mFetchBlob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 334\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfile_writer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_summary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mscalar_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mglobal_step\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwalltime\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 335\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 336\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0madd_scalars\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmain_tag\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtag_scalar_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mglobal_step\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwalltime\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |||
| "\u001b[0;32m~/miniconda2/envs/python3/lib/python3.6/site-packages/tensorboardX/summary.py\u001b[0m in \u001b[0;36mscalar\u001b[0;34m(name, scalar, collections)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mname\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_clean_tag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0mscalar\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmake_np\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32massert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqueeze\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'scalar should be 0D'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0mscalar\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfloat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mSummary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mSummary\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mValue\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msimple_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |||
| "\u001b[0;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'squeeze'" | |||
| ], | |||
| "output_type": "error" | |||
| } | |||
| ], | |||
| "source": [ | |||
| "# 实例化Trainer,传入模型和数据,进行训练\n", | |||
| "trainer = Trainer(model=model, \n", | |||
| " train_data=train_data, \n", | |||
| " dev_data=test_data,\n", | |||
| " losser=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n", | |||
| " metrics=AccuracyMetric(),\n", | |||
| " n_epochs=5,\n", | |||
| " save_path='save/')\n", | |||
| "trainer.train()\n", | |||
| "print('Train finished!')" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import Tester\n", | |||
| "\n", | |||
| "tester = Tester(data=test_data, model=model, metrics=AccuracyMetric())\n", | |||
| "acc = tester.test()" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "# In summary\n", | |||
| "\n", | |||
| "## fastNLP Trainer的伪代码逻辑\n", | |||
| "### 1. 准备DataSet,假设DataSet中共有如下的fields\n", | |||
| " ['raw_sentence', 'word_seq1', 'word_seq2', 'raw_label','label']\n", | |||
| " 通过\n", | |||
| " DataSet.set_input('word_seq1', word_seq2', flag=True)将'word_seq1', 'word_seq2'设置为input\n", | |||
| " 通过\n", | |||
| " DataSet.set_target('label', flag=True)将'label'设置为target\n", | |||
| "### 2. 初始化模型\n", | |||
| " class Model(nn.Module):\n", | |||
| " def __init__(self):\n", | |||
| " xxx\n", | |||
| " def forward(self, word_seq1, word_seq2):\n", | |||
| " # (1) 这里使用的形参名必须和DataSet中的input field的名称对应。因为我们是通过形参名, 进行赋值的\n", | |||
| " # (2) input field的数量可以多于这里的形参数量。但是不能少于。\n", | |||
| " xxxx\n", | |||
| " # 输出必须是一个dict\n", | |||
| "### 3. Trainer的训练过程\n", | |||
| " (1) 从DataSet中按照batch_size取出一个batch,调用Model.forward\n", | |||
| " (2) 将 Model.forward的结果 与 标记为target的field 传入Losser当中。\n", | |||
| " 由于每个人写的Model.forward的output的dict可能key并不一样,比如有人是{'pred':xxx}, {'output': xxx}; \n", | |||
| " 另外每个人将target可能也会设置为不同的名称, 比如有人是label, 有人设置为target;\n", | |||
| " 为了解决以上的问题,我们的loss提供映射机制\n", | |||
| " 比如CrossEntropyLosser的需要的输入是(prediction, target)。但是forward的output是{'output': xxx}; 'label'是target\n", | |||
| " 那么初始化losser的时候写为CrossEntropyLosser(prediction='output', target='label')即可\n", | |||
| " (3) 对于Metric是同理的\n", | |||
| " Metric计算也是从 forward的结果中取值 与 设置target的field中取值。 也是可以通过映射找到对应的值 \n", | |||
| " \n", | |||
| " \n", | |||
| "\n", | |||
| "## 一些问题.\n", | |||
| "### 1. DataSet中为什么需要设置input和target\n", | |||
| " 只有被设置为input或者target的数据才会在train的过程中被取出来\n", | |||
| " (1.1) 我们只会在设置为input的field中寻找传递给Model.forward的参数。\n", | |||
| " (1.2) 我们在传递值给losser或者metric的时候会使用来自: \n", | |||
| " (a)Model.forward的output\n", | |||
| " (b)被设置为target的field\n", | |||
| " \n", | |||
| "\n", | |||
| "### 2. 我们是通过forwad中的形参名将DataSet中的field赋值给对应的参数\n", | |||
| " (1.1) 构建模型过程中,\n", | |||
| " 例如:\n", | |||
| " DataSet中x,seq_lens是input,那么forward就应该是\n", | |||
| " def forward(self, x, seq_lens):\n", | |||
| " pass\n", | |||
| " 我们是通过形参名称进行匹配的field的\n", | |||
| " \n", | |||
| "\n", | |||
| "\n", | |||
| "### 1. 加载数据到DataSet\n", | |||
| "### 2. 使用apply操作对DataSet进行预处理\n", | |||
| " (2.1) 处理过程中将某些field设置为input,某些field设置为target\n", | |||
| "### 3. 构建模型\n", | |||
| " (3.1) 构建模型过程中,需要注意forward函数的形参名需要和DataSet中设置为input的field名称是一致的。\n", | |||
| " 例如:\n", | |||
| " DataSet中x,seq_lens是input,那么forward就应该是\n", | |||
| " def forward(self, x, seq_lens):\n", | |||
| " pass\n", | |||
| " 我们是通过形参名称进行匹配的field的\n", | |||
| " (3.2) 模型的forward的output需要是dict类型的。\n", | |||
| " 建议将输出设置为{\"pred\": xx}.\n", | |||
| " \n" | |||
| ] | |||
| } | |||
| ], | |||
| "metadata": { | |||
| "kernelspec": { | |||
| "display_name": "Python 3", | |||
| "language": "python", | |||
| "name": "python3" | |||
| }, | |||
| "language_info": { | |||
| "codemirror_mode": { | |||
| "name": "ipython", | |||
| "version": 3 | |||
| }, | |||
| "file_extension": ".py", | |||
| "mimetype": "text/x-python", | |||
| "name": "python", | |||
| "nbconvert_exporter": "python", | |||
| "pygments_lexer": "ipython3", | |||
| "version": "3.6.7" | |||
| } | |||
| }, | |||
| "nbformat": 4, | |||
| "nbformat_minor": 2 | |||
| } | |||
| @@ -1,447 +0,0 @@ | |||
| { | |||
| "cells": [ | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "fastNLP上手教程\n", | |||
| "-------\n", | |||
| "\n", | |||
| "fastNLP提供方便的数据预处理,训练和测试模型的功能" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "import sys\n", | |||
| "sys.path.append('/Users/yh/Desktop/fastNLP/fastNLP/')" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "DataSet & Instance\n", | |||
| "------\n", | |||
| "\n", | |||
| "fastNLP用DataSet和Instance保存和处理数据。每个DataSet表示一个数据集,每个Instance表示一个数据样本。一个DataSet存有多个Instance,每个Instance可以自定义存哪些内容。\n", | |||
| "\n", | |||
| "有一些read_*方法,可以轻松从文件读取数据,存成DataSet。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import DataSet\n", | |||
| "from fastNLP import Instance\n", | |||
| "\n", | |||
| "# 从csv读取数据到DataSet\n", | |||
| "dataset = DataSet.read_csv('../sentence.csv', headers=('raw_sentence', 'label'), sep='\\t')\n", | |||
| "print(len(dataset))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 使用数字索引[k],获取第k个样本\n", | |||
| "print(dataset[0])\n", | |||
| "\n", | |||
| "# 索引也可以是负数\n", | |||
| "print(dataset[-3])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## Instance\n", | |||
| "Instance表示一个样本,由一个或多个field(域,属性,特征)组成,每个field有名字和值。\n", | |||
| "\n", | |||
| "在初始化Instance时即可定义它包含的域,使用 \"field_name=field_value\"的写法。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# DataSet.append(Instance)加入新数据\n", | |||
| "dataset.append(Instance(raw_sentence='fake data', label='0'))\n", | |||
| "dataset[-1]" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## DataSet.apply方法\n", | |||
| "数据预处理利器" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 将所有数字转为小写\n", | |||
| "dataset.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# label转int\n", | |||
| "dataset.apply(lambda x: int(x['label']), new_field_name='label')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 使用空格分割句子\n", | |||
| "def split_sent(ins):\n", | |||
| " return ins['raw_sentence'].split()\n", | |||
| "dataset.apply(split_sent, new_field_name='words')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 增加长度信息\n", | |||
| "dataset.apply(lambda x: len(x['words']), new_field_name='seq_len')\n", | |||
| "print(dataset[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## DataSet.drop\n", | |||
| "筛选数据" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "dataset.drop(lambda x: x['seq_len'] <= 3)\n", | |||
| "print(len(dataset))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "## 配置DataSet\n", | |||
| "1. 哪些域是特征,哪些域是标签\n", | |||
| "2. 切分训练集/验证集" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 设置DataSet中,哪些field要转为tensor\n", | |||
| "\n", | |||
| "# set target,loss或evaluate中的golden,计算loss,模型评估时使用\n", | |||
| "dataset.set_target(\"label\")\n", | |||
| "# set input,模型forward时使用\n", | |||
| "dataset.set_input(\"words\")" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 分出测试集、训练集\n", | |||
| "\n", | |||
| "test_data, train_data = dataset.split(0.3)\n", | |||
| "print(len(test_data))\n", | |||
| "print(len(train_data))" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "Vocabulary\n", | |||
| "------\n", | |||
| "\n", | |||
| "fastNLP中的Vocabulary轻松构建词表,将词转成数字" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import Vocabulary\n", | |||
| "\n", | |||
| "# 构建词表, Vocabulary.add(word)\n", | |||
| "vocab = Vocabulary(min_freq=2)\n", | |||
| "train_data.apply(lambda x: [vocab.add(word) for word in x['words']])\n", | |||
| "vocab.build_vocab()\n", | |||
| "\n", | |||
| "# index句子, Vocabulary.to_index(word)\n", | |||
| "train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='words')\n", | |||
| "test_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='words')\n", | |||
| "\n", | |||
| "\n", | |||
| "print(test_data[0])" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "# Model\n", | |||
| "定义一个PyTorch模型" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP.models import CNNText\n", | |||
| "model = CNNText(embed_num=len(vocab), embed_dim=50, num_classes=5, padding=2, dropout=0.1)\n", | |||
| "model" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "这是上述模型的forward方法。如果你不知道什么是forward方法,请参考我们的PyTorch教程。\n", | |||
| "\n", | |||
| "注意两点:\n", | |||
| "1. forward参数名字叫**word_seq**,请记住。\n", | |||
| "2. forward的返回值是一个**dict**,其中有个key的名字叫**output**。\n", | |||
| "\n", | |||
| "```Python\n", | |||
| " def forward(self, word_seq):\n", | |||
| " \"\"\"\n", | |||
| "\n", | |||
| " :param word_seq: torch.LongTensor, [batch_size, seq_len]\n", | |||
| " :return output: dict of torch.LongTensor, [batch_size, num_classes]\n", | |||
| " \"\"\"\n", | |||
| " x = self.embed(word_seq) # [N,L] -> [N,L,C]\n", | |||
| " x = self.conv_pool(x) # [N,L,C] -> [N,C]\n", | |||
| " x = self.dropout(x)\n", | |||
| " x = self.fc(x) # [N,C] -> [N, N_class]\n", | |||
| " return {'output': x}\n", | |||
| "```" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "这是上述模型的predict方法,是用来直接输出该任务的预测结果,与forward目的不同。\n", | |||
| "\n", | |||
| "注意两点:\n", | |||
| "1. predict参数名也叫**word_seq**。\n", | |||
| "2. predict的返回值是也一个**dict**,其中有个key的名字叫**predict**。\n", | |||
| "\n", | |||
| "```\n", | |||
| " def predict(self, word_seq):\n", | |||
| " \"\"\"\n", | |||
| "\n", | |||
| " :param word_seq: torch.LongTensor, [batch_size, seq_len]\n", | |||
| " :return predict: dict of torch.LongTensor, [batch_size, seq_len]\n", | |||
| " \"\"\"\n", | |||
| " output = self(word_seq)\n", | |||
| " _, predict = output['output'].max(dim=1)\n", | |||
| " return {'predict': predict}\n", | |||
| "```" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "Trainer & Tester\n", | |||
| "------\n", | |||
| "\n", | |||
| "使用fastNLP的Trainer训练模型" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "from fastNLP import Trainer\n", | |||
| "from copy import deepcopy\n", | |||
| "from fastNLP.core.losses import CrossEntropyLoss\n", | |||
| "from fastNLP.core.metrics import AccuracyMetric\n", | |||
| "\n", | |||
| "\n", | |||
| "# 更改DataSet中对应field的名称,与模型的forward的参数名一致\n", | |||
| "# 因为forward的参数叫word_seq, 所以要把原本叫words的field改名为word_seq\n", | |||
| "# 这里的演示是让你了解这种**命名规则**\n", | |||
| "train_data.rename_field('words', 'word_seq')\n", | |||
| "test_data.rename_field('words', 'word_seq')\n", | |||
| "\n", | |||
| "# 顺便把label换名为label_seq\n", | |||
| "train_data.rename_field('label', 'label_seq')\n", | |||
| "test_data.rename_field('label', 'label_seq')" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "### loss\n", | |||
| "训练模型需要提供一个损失函数\n", | |||
| "\n", | |||
| "下面提供了一个在分类问题中常用的交叉熵损失。注意它的**初始化参数**。\n", | |||
| "\n", | |||
| "pred参数对应的是模型的forward返回的dict的一个key的名字,这里是\"output\"。\n", | |||
| "\n", | |||
| "target参数对应的是dataset作为标签的field的名字,这里是\"label_seq\"。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "loss = CrossEntropyLoss(pred=\"output\", target=\"label_seq\")" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "markdown", | |||
| "metadata": {}, | |||
| "source": [ | |||
| "### Metric\n", | |||
| "定义评价指标\n", | |||
| "\n", | |||
| "这里使用准确率。参数的“命名规则”跟上面类似。\n", | |||
| "\n", | |||
| "pred参数对应的是模型的predict方法返回的dict的一个key的名字,这里是\"predict\"。\n", | |||
| "\n", | |||
| "target参数对应的是dataset作为标签的field的名字,这里是\"label_seq\"。" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "metric = AccuracyMetric(pred=\"predict\", target=\"label_seq\")" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 实例化Trainer,传入模型和数据,进行训练\n", | |||
| "# 先在test_data拟合\n", | |||
| "copy_model = deepcopy(model)\n", | |||
| "overfit_trainer = Trainer(model=copy_model, train_data=test_data, dev_data=test_data,\n", | |||
| " losser=loss,\n", | |||
| " metrics=metric,\n", | |||
| " save_path=None,\n", | |||
| " batch_size=32,\n", | |||
| " n_epochs=5)\n", | |||
| "overfit_trainer.train()" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 用train_data训练,在test_data验证\n", | |||
| "trainer = Trainer(model=model, train_data=train_data, dev_data=test_data,\n", | |||
| " losser=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n", | |||
| " metrics=AccuracyMetric(pred=\"predict\", target=\"label_seq\"),\n", | |||
| " save_path=None,\n", | |||
| " batch_size=32,\n", | |||
| " n_epochs=5)\n", | |||
| "trainer.train()\n", | |||
| "print('Train finished!')" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [ | |||
| "# 调用Tester在test_data上评价效果\n", | |||
| "from fastNLP import Tester\n", | |||
| "\n", | |||
| "tester = Tester(data=test_data, model=model, metrics=AccuracyMetric(pred=\"predict\", target=\"label_seq\"),\n", | |||
| " batch_size=4)\n", | |||
| "acc = tester.test()\n", | |||
| "print(acc)" | |||
| ] | |||
| }, | |||
| { | |||
| "cell_type": "code", | |||
| "execution_count": null, | |||
| "metadata": {}, | |||
| "outputs": [], | |||
| "source": [] | |||
| } | |||
| ], | |||
| "metadata": { | |||
| "kernelspec": { | |||
| "display_name": "Python 3", | |||
| "language": "python", | |||
| "name": "python3" | |||
| }, | |||
| "language_info": { | |||
| "codemirror_mode": { | |||
| "name": "ipython", | |||
| "version": 3 | |||
| }, | |||
| "file_extension": ".py", | |||
| "mimetype": "text/x-python", | |||
| "name": "python", | |||
| "nbconvert_exporter": "python", | |||
| "pygments_lexer": "ipython3", | |||
| "version": "3.6.7" | |||
| } | |||
| }, | |||
| "nbformat": 4, | |||
| "nbformat_minor": 2 | |||
| } | |||