You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

tutorial_5_loss_optimizer.ipynb 16 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603
  1. {
  2. "cells": [
  3. {
  4. "cell_type": "markdown",
  5. "metadata": {},
  6. "source": [
  7. "# 使用Trainer和Tester快速训练和测试"
  8. ]
  9. },
  10. {
  11. "cell_type": "markdown",
  12. "metadata": {},
  13. "source": [
  14. "## 数据读入和处理"
  15. ]
  16. },
  17. {
  18. "cell_type": "code",
  19. "execution_count": 1,
  20. "metadata": {},
  21. "outputs": [
  22. {
  23. "name": "stderr",
  24. "output_type": "stream",
  25. "text": [
  26. "/remote-home/ynzheng/anaconda3/envs/now/lib/python3.8/site-packages/FastNLP-0.5.0-py3.8.egg/fastNLP/io/loader/classification.py:340: UserWarning: SST2's test file has no target.\n"
  27. ]
  28. },
  29. {
  30. "name": "stdout",
  31. "output_type": "stream",
  32. "text": [
  33. "In total 3 datasets:\n",
  34. "\ttest has 1821 instances.\n",
  35. "\ttrain has 67349 instances.\n",
  36. "\tdev has 872 instances.\n",
  37. "In total 2 vocabs:\n",
  38. "\twords has 16292 entries.\n",
  39. "\ttarget has 2 entries.\n",
  40. "\n",
  41. "+-----------------------------------+--------+-----------------------------------+---------+\n",
  42. "| raw_words | target | words | seq_len |\n",
  43. "+-----------------------------------+--------+-----------------------------------+---------+\n",
  44. "| hide new secretions from the p... | 1 | [4110, 97, 12009, 39, 2, 6843,... | 7 |\n",
  45. "+-----------------------------------+--------+-----------------------------------+---------+\n",
  46. "Vocabulary(['hide', 'new', 'secretions', 'from', 'the']...)\n"
  47. ]
  48. }
  49. ],
  50. "source": [
  51. "from fastNLP.io import SST2Pipe\n",
  52. "\n",
  53. "pipe = SST2Pipe()\n",
  54. "databundle = pipe.process_from_file()\n",
  55. "vocab = databundle.get_vocab('words')\n",
  56. "print(databundle)\n",
  57. "print(databundle.get_dataset('train')[0])\n",
  58. "print(databundle.get_vocab('words'))"
  59. ]
  60. },
  61. {
  62. "cell_type": "code",
  63. "execution_count": 2,
  64. "metadata": {},
  65. "outputs": [
  66. {
  67. "name": "stdout",
  68. "output_type": "stream",
  69. "text": [
  70. "4925 872 75\n"
  71. ]
  72. }
  73. ],
  74. "source": [
  75. "train_data = databundle.get_dataset('train')[:5000]\n",
  76. "train_data, test_data = train_data.split(0.015)\n",
  77. "dev_data = databundle.get_dataset('dev')\n",
  78. "print(len(train_data),len(dev_data),len(test_data))"
  79. ]
  80. },
  81. {
  82. "cell_type": "code",
  83. "execution_count": 3,
  84. "metadata": {
  85. "scrolled": false
  86. },
  87. "outputs": [
  88. {
  89. "name": "stdout",
  90. "output_type": "stream",
  91. "text": [
  92. "+-------------+-----------+--------+-------+---------+\n",
  93. "| field_names | raw_words | target | words | seq_len |\n",
  94. "+-------------+-----------+--------+-------+---------+\n",
  95. "| is_input | False | False | True | True |\n",
  96. "| is_target | False | True | False | False |\n",
  97. "| ignore_type | | False | False | False |\n",
  98. "| pad_value | | 0 | 0 | 0 |\n",
  99. "+-------------+-----------+--------+-------+---------+\n"
  100. ]
  101. },
  102. {
  103. "data": {
  104. "text/plain": [
  105. "<prettytable.PrettyTable at 0x7f49ec540160>"
  106. ]
  107. },
  108. "execution_count": 3,
  109. "metadata": {},
  110. "output_type": "execute_result"
  111. }
  112. ],
  113. "source": [
  114. "train_data.print_field_meta()"
  115. ]
  116. },
  117. {
  118. "cell_type": "markdown",
  119. "metadata": {},
  120. "source": [
  121. "## 使用内置模型训练"
  122. ]
  123. },
  124. {
  125. "cell_type": "code",
  126. "execution_count": 4,
  127. "metadata": {},
  128. "outputs": [],
  129. "source": [
  130. "from fastNLP.models import CNNText\n",
  131. "\n",
  132. "#词嵌入的维度\n",
  133. "EMBED_DIM = 100\n",
  134. "\n",
  135. "#使用CNNText的时候第一个参数输入一个tuple,作为模型定义embedding的参数\n",
  136. "#还可以传入 kernel_nums, kernel_sizes, padding, dropout的自定义值\n",
  137. "model_cnn = CNNText((len(vocab),EMBED_DIM), num_classes=2, dropout=0.1)"
  138. ]
  139. },
  140. {
  141. "cell_type": "code",
  142. "execution_count": 5,
  143. "metadata": {},
  144. "outputs": [],
  145. "source": [
  146. "from fastNLP import AccuracyMetric\n",
  147. "from fastNLP import Const\n",
  148. "\n",
  149. "# metrics=AccuracyMetric() 在本例中与下面这行代码等价\n",
  150. "metrics=AccuracyMetric(pred=Const.OUTPUT, target=Const.TARGET)"
  151. ]
  152. },
  153. {
  154. "cell_type": "code",
  155. "execution_count": 6,
  156. "metadata": {},
  157. "outputs": [],
  158. "source": [
  159. "from fastNLP import CrossEntropyLoss\n",
  160. "\n",
  161. "# loss = CrossEntropyLoss() 在本例中与下面这行代码等价\n",
  162. "loss = CrossEntropyLoss(pred=Const.OUTPUT, target=Const.TARGET)"
  163. ]
  164. },
  165. {
  166. "cell_type": "code",
  167. "execution_count": 7,
  168. "metadata": {},
  169. "outputs": [],
  170. "source": [
  171. "# 这表示构建了一个损失函数类,由func计算损失函数,其中将从模型返回值或者DataSet的target=True的field\n",
  172. "# 当中找到一个参数名为`pred`的参数传入func一个参数名为`input`的参数;找到一个参数名为`label`的参数\n",
  173. "# 传入func作为一个名为`target`的参数\n",
  174. "#下面自己构建了一个交叉熵函数,和之后直接使用fastNLP中的交叉熵函数是一个效果\n",
  175. "import torch\n",
  176. "from fastNLP import LossFunc\n",
  177. "func = torch.nn.functional.cross_entropy\n",
  178. "loss_func = LossFunc(func, input=Const.OUTPUT, target=Const.TARGET)"
  179. ]
  180. },
  181. {
  182. "cell_type": "code",
  183. "execution_count": 8,
  184. "metadata": {},
  185. "outputs": [],
  186. "source": [
  187. "import torch.optim as optim\n",
  188. "\n",
  189. "#使用 torch.optim 定义优化器\n",
  190. "optimizer=optim.RMSprop(model_cnn.parameters(), lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)"
  191. ]
  192. },
  193. {
  194. "cell_type": "code",
  195. "execution_count": 9,
  196. "metadata": {},
  197. "outputs": [
  198. {
  199. "name": "stdout",
  200. "output_type": "stream",
  201. "text": [
  202. "input fields after batch(if batch size is 2):\n",
  203. "\twords: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 4]) \n",
  204. "\tseq_len: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2]) \n",
  205. "target fields after batch(if batch size is 2):\n",
  206. "\ttarget: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2]) \n",
  207. "\n",
  208. "training epochs started 2020-02-27-11-31-25\n"
  209. ]
  210. },
  211. {
  212. "data": {
  213. "application/vnd.jupyter.widget-view+json": {
  214. "model_id": "",
  215. "version_major": 2,
  216. "version_minor": 0
  217. },
  218. "text/plain": [
  219. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=3080.0), HTML(value='')), layout=Layout(d…"
  220. ]
  221. },
  222. "metadata": {},
  223. "output_type": "display_data"
  224. },
  225. {
  226. "data": {
  227. "application/vnd.jupyter.widget-view+json": {
  228. "model_id": "",
  229. "version_major": 2,
  230. "version_minor": 0
  231. },
  232. "text/plain": [
  233. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  234. ]
  235. },
  236. "metadata": {},
  237. "output_type": "display_data"
  238. },
  239. {
  240. "name": "stdout",
  241. "output_type": "stream",
  242. "text": [
  243. "\r",
  244. "Evaluate data in 0.75 seconds!\n",
  245. "\r",
  246. "Evaluation on dev at Epoch 1/10. Step:308/3080: \n",
  247. "\r",
  248. "AccuracyMetric: acc=0.751147\n",
  249. "\n"
  250. ]
  251. },
  252. {
  253. "data": {
  254. "application/vnd.jupyter.widget-view+json": {
  255. "model_id": "",
  256. "version_major": 2,
  257. "version_minor": 0
  258. },
  259. "text/plain": [
  260. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  261. ]
  262. },
  263. "metadata": {},
  264. "output_type": "display_data"
  265. },
  266. {
  267. "name": "stdout",
  268. "output_type": "stream",
  269. "text": [
  270. "\r",
  271. "Evaluate data in 0.83 seconds!\n",
  272. "\r",
  273. "Evaluation on dev at Epoch 2/10. Step:616/3080: \n",
  274. "\r",
  275. "AccuracyMetric: acc=0.755734\n",
  276. "\n"
  277. ]
  278. },
  279. {
  280. "data": {
  281. "application/vnd.jupyter.widget-view+json": {
  282. "model_id": "",
  283. "version_major": 2,
  284. "version_minor": 0
  285. },
  286. "text/plain": [
  287. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  288. ]
  289. },
  290. "metadata": {},
  291. "output_type": "display_data"
  292. },
  293. {
  294. "name": "stdout",
  295. "output_type": "stream",
  296. "text": [
  297. "\r",
  298. "Evaluate data in 1.32 seconds!\n",
  299. "\r",
  300. "Evaluation on dev at Epoch 3/10. Step:924/3080: \n",
  301. "\r",
  302. "AccuracyMetric: acc=0.758028\n",
  303. "\n"
  304. ]
  305. },
  306. {
  307. "data": {
  308. "application/vnd.jupyter.widget-view+json": {
  309. "model_id": "",
  310. "version_major": 2,
  311. "version_minor": 0
  312. },
  313. "text/plain": [
  314. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  315. ]
  316. },
  317. "metadata": {},
  318. "output_type": "display_data"
  319. },
  320. {
  321. "name": "stdout",
  322. "output_type": "stream",
  323. "text": [
  324. "\r",
  325. "Evaluate data in 0.88 seconds!\n",
  326. "\r",
  327. "Evaluation on dev at Epoch 4/10. Step:1232/3080: \n",
  328. "\r",
  329. "AccuracyMetric: acc=0.741972\n",
  330. "\n"
  331. ]
  332. },
  333. {
  334. "data": {
  335. "application/vnd.jupyter.widget-view+json": {
  336. "model_id": "",
  337. "version_major": 2,
  338. "version_minor": 0
  339. },
  340. "text/plain": [
  341. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  342. ]
  343. },
  344. "metadata": {},
  345. "output_type": "display_data"
  346. },
  347. {
  348. "name": "stdout",
  349. "output_type": "stream",
  350. "text": [
  351. "\r",
  352. "Evaluate data in 0.96 seconds!\n",
  353. "\r",
  354. "Evaluation on dev at Epoch 5/10. Step:1540/3080: \n",
  355. "\r",
  356. "AccuracyMetric: acc=0.728211\n",
  357. "\n"
  358. ]
  359. },
  360. {
  361. "data": {
  362. "application/vnd.jupyter.widget-view+json": {
  363. "model_id": "",
  364. "version_major": 2,
  365. "version_minor": 0
  366. },
  367. "text/plain": [
  368. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  369. ]
  370. },
  371. "metadata": {},
  372. "output_type": "display_data"
  373. },
  374. {
  375. "name": "stdout",
  376. "output_type": "stream",
  377. "text": [
  378. "\r",
  379. "Evaluate data in 0.87 seconds!\n",
  380. "\r",
  381. "Evaluation on dev at Epoch 6/10. Step:1848/3080: \n",
  382. "\r",
  383. "AccuracyMetric: acc=0.755734\n",
  384. "\n"
  385. ]
  386. },
  387. {
  388. "data": {
  389. "application/vnd.jupyter.widget-view+json": {
  390. "model_id": "",
  391. "version_major": 2,
  392. "version_minor": 0
  393. },
  394. "text/plain": [
  395. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  396. ]
  397. },
  398. "metadata": {},
  399. "output_type": "display_data"
  400. },
  401. {
  402. "name": "stdout",
  403. "output_type": "stream",
  404. "text": [
  405. "\r",
  406. "Evaluate data in 1.04 seconds!\n",
  407. "\r",
  408. "Evaluation on dev at Epoch 7/10. Step:2156/3080: \n",
  409. "\r",
  410. "AccuracyMetric: acc=0.732798\n",
  411. "\n"
  412. ]
  413. },
  414. {
  415. "data": {
  416. "application/vnd.jupyter.widget-view+json": {
  417. "model_id": "",
  418. "version_major": 2,
  419. "version_minor": 0
  420. },
  421. "text/plain": [
  422. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  423. ]
  424. },
  425. "metadata": {},
  426. "output_type": "display_data"
  427. },
  428. {
  429. "name": "stdout",
  430. "output_type": "stream",
  431. "text": [
  432. "\r",
  433. "Evaluate data in 0.57 seconds!\n",
  434. "\r",
  435. "Evaluation on dev at Epoch 8/10. Step:2464/3080: \n",
  436. "\r",
  437. "AccuracyMetric: acc=0.747706\n",
  438. "\n"
  439. ]
  440. },
  441. {
  442. "data": {
  443. "application/vnd.jupyter.widget-view+json": {
  444. "model_id": "",
  445. "version_major": 2,
  446. "version_minor": 0
  447. },
  448. "text/plain": [
  449. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  450. ]
  451. },
  452. "metadata": {},
  453. "output_type": "display_data"
  454. },
  455. {
  456. "name": "stdout",
  457. "output_type": "stream",
  458. "text": [
  459. "\r",
  460. "Evaluate data in 0.48 seconds!\n",
  461. "\r",
  462. "Evaluation on dev at Epoch 9/10. Step:2772/3080: \n",
  463. "\r",
  464. "AccuracyMetric: acc=0.732798\n",
  465. "\n"
  466. ]
  467. },
  468. {
  469. "data": {
  470. "application/vnd.jupyter.widget-view+json": {
  471. "model_id": "",
  472. "version_major": 2,
  473. "version_minor": 0
  474. },
  475. "text/plain": [
  476. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
  477. ]
  478. },
  479. "metadata": {},
  480. "output_type": "display_data"
  481. },
  482. {
  483. "name": "stdout",
  484. "output_type": "stream",
  485. "text": [
  486. "\r",
  487. "Evaluate data in 0.48 seconds!\n",
  488. "\r",
  489. "Evaluation on dev at Epoch 10/10. Step:3080/3080: \n",
  490. "\r",
  491. "AccuracyMetric: acc=0.740826\n",
  492. "\n",
  493. "\r\n",
  494. "In Epoch:3/Step:924, got best dev performance:\n",
  495. "AccuracyMetric: acc=0.758028\n",
  496. "Reloaded the best model.\n"
  497. ]
  498. },
  499. {
  500. "data": {
  501. "text/plain": [
  502. "{'best_eval': {'AccuracyMetric': {'acc': 0.758028}},\n",
  503. " 'best_epoch': 3,\n",
  504. " 'best_step': 924,\n",
  505. " 'seconds': 160.58}"
  506. ]
  507. },
  508. "execution_count": 9,
  509. "metadata": {},
  510. "output_type": "execute_result"
  511. }
  512. ],
  513. "source": [
  514. "from fastNLP import Trainer\n",
  515. "\n",
  516. "#训练的轮数和batch size\n",
  517. "N_EPOCHS = 10\n",
  518. "BATCH_SIZE = 16\n",
  519. "\n",
  520. "#如果在定义trainer的时候没有传入optimizer参数,模型默认的优化器为torch.optim.Adam且learning rate为lr=4e-3\n",
  521. "#这里只使用了loss作为损失函数输入,感兴趣可以尝试其他损失函数(如之前自定义的loss_func)作为输入\n",
  522. "trainer = Trainer(model=model_cnn, train_data=train_data, dev_data=dev_data, loss=loss, metrics=metrics,\n",
  523. "optimizer=optimizer,n_epochs=N_EPOCHS, batch_size=BATCH_SIZE)\n",
  524. "trainer.train()"
  525. ]
  526. },
  527. {
  528. "cell_type": "code",
  529. "execution_count": 10,
  530. "metadata": {},
  531. "outputs": [
  532. {
  533. "data": {
  534. "application/vnd.jupyter.widget-view+json": {
  535. "model_id": "",
  536. "version_major": 2,
  537. "version_minor": 0
  538. },
  539. "text/plain": [
  540. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=5.0), HTML(value='')), layout=Layout(disp…"
  541. ]
  542. },
  543. "metadata": {},
  544. "output_type": "display_data"
  545. },
  546. {
  547. "name": "stdout",
  548. "output_type": "stream",
  549. "text": [
  550. "\r",
  551. "Evaluate data in 0.43 seconds!\n",
  552. "[tester] \n",
  553. "AccuracyMetric: acc=0.773333\n"
  554. ]
  555. },
  556. {
  557. "data": {
  558. "text/plain": [
  559. "{'AccuracyMetric': {'acc': 0.773333}}"
  560. ]
  561. },
  562. "execution_count": 10,
  563. "metadata": {},
  564. "output_type": "execute_result"
  565. }
  566. ],
  567. "source": [
  568. "from fastNLP import Tester\n",
  569. "\n",
  570. "tester = Tester(test_data, model_cnn, metrics=AccuracyMetric())\n",
  571. "tester.test()"
  572. ]
  573. },
  574. {
  575. "cell_type": "code",
  576. "execution_count": null,
  577. "metadata": {},
  578. "outputs": [],
  579. "source": []
  580. }
  581. ],
  582. "metadata": {
  583. "kernelspec": {
  584. "display_name": "Python Now",
  585. "language": "python",
  586. "name": "now"
  587. },
  588. "language_info": {
  589. "codemirror_mode": {
  590. "name": "ipython",
  591. "version": 3
  592. },
  593. "file_extension": ".py",
  594. "mimetype": "text/x-python",
  595. "name": "python",
  596. "nbconvert_exporter": "python",
  597. "pygments_lexer": "ipython3",
  598. "version": "3.8.0"
  599. }
  600. },
  601. "nbformat": 4,
  602. "nbformat_minor": 2
  603. }