You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

tf01_basic_api.ipynb 17 kB

3 years ago
  1. {
  2. "cells": [
  3. {
  4. "cell_type": "code",
  5. "execution_count": 4,
  6. "metadata": {},
  7. "outputs": [
  8. {
  9. "name": "stdout",
  10. "output_type": "stream",
  11. "text": [
  12. "2.2.0\n",
  13. "sys.version_info(major=3, minor=6, micro=9, releaselevel='final', serial=0)\n",
  14. "matplotlib 3.3.4\n",
  15. "numpy 1.19.5\n",
  16. "pandas 1.1.5\n",
  17. "sklearn 0.24.2\n",
  18. "tensorflow 2.2.0\n",
  19. "tensorflow.keras 2.3.0-tf\n"
  20. ]
  21. }
  22. ],
  23. "source": [
  24. "import matplotlib as mpl\n",
  25. "import matplotlib.pyplot as plt\n",
  26. "%matplotlib inline\n",
  27. "import numpy as np\n",
  28. "import sklearn\n",
  29. "import pandas as pd\n",
  30. "import os\n",
  31. "import sys\n",
  32. "import time\n",
  33. "import tensorflow as tf\n",
  34. "\n",
  35. "from tensorflow import keras\n",
  36. "\n",
  37. "print(tf.__version__)\n",
  38. "print(sys.version_info)\n",
  39. "for module in mpl, np, pd, sklearn, tf, keras:\n",
  40. " print(module.__name__, module.__version__)"
  41. ]
  42. },
  43. {
  44. "cell_type": "code",
  45. "execution_count": 6,
  46. "metadata": {},
  47. "outputs": [
  48. {
  49. "name": "stdout",
  50. "output_type": "stream",
  51. "text": [
  52. "tf.Tensor(\n",
  53. "[[1. 2. 3.]\n",
  54. " [4. 5. 6.]], shape=(2, 3), dtype=float32)\n",
  55. "--------------------------------------------------\n",
  56. "tf.Tensor(\n",
  57. "[[2. 3.]\n",
  58. " [5. 6.]], shape=(2, 2), dtype=float32)\n",
  59. "--------------------------------------------------\n",
  60. "tf.Tensor([2. 5.], shape=(2,), dtype=float32)\n",
  61. "--------------------------------------------------\n"
  62. ]
  63. }
  64. ],
  65. "source": [
  66. "# constant是常量张量\n",
  67. "t = tf.constant([[1., 2., 3.], [4., 5.,6.]])\n",
  68. "\n",
  69. "# index\n",
  70. "#2.0能够直接获取值是因为eager execution默认是打开的\n",
  71. "print(t)\n",
  72. "print('-'*50)\n",
  73. "print(t[:, 1:])\n",
  74. "print('-'*50)\n",
  75. "print(t[..., 1])\n",
  76. "print('-'*50)\n",
  77. "# t.assign(1)对常量不能进行再次assign设置\n",
  78. "type(t.numpy()) #转为ndarray\n",
  79. "q=t.numpy()"
  80. ]
  81. },
  82. {
  83. "cell_type": "code",
  84. "execution_count": 7,
  85. "metadata": {},
  86. "outputs": [
  87. {
  88. "data": {
  89. "text/plain": [
  90. "<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
  91. "array([[1., 2., 3.],\n",
  92. " [4., 5., 6.]], dtype=float32)>"
  93. ]
  94. },
  95. "execution_count": 7,
  96. "metadata": {},
  97. "output_type": "execute_result"
  98. }
  99. ],
  100. "source": [
  101. "t1= tf.constant(q) #把ndarray变为张量\n",
  102. "t1"
  103. ]
  104. },
  105. {
  106. "cell_type": "code",
  107. "execution_count": 8,
  108. "metadata": {},
  109. "outputs": [
  110. {
  111. "name": "stdout",
  112. "output_type": "stream",
  113. "text": [
  114. "tf.Tensor(\n",
  115. "[[11. 12. 13.]\n",
  116. " [14. 15. 16.]], shape=(2, 3), dtype=float32)\n",
  117. "tf.Tensor(\n",
  118. "[[1. 2. 3.]\n",
  119. " [4. 5. 6.]], shape=(2, 3), dtype=float32)\n",
  120. "tf.Tensor(\n",
  121. "[[ 1. 4. 9.]\n",
  122. " [16. 25. 36.]], shape=(2, 3), dtype=float32)\n",
  123. "tf.Tensor(\n",
  124. "[[1. 2. 3.]\n",
  125. " [4. 5. 6.]], shape=(2, 3), dtype=float32)\n",
  126. "tf.Tensor(\n",
  127. "[[1. 4.]\n",
  128. " [2. 5.]\n",
  129. " [3. 6.]], shape=(3, 2), dtype=float32)\n",
  130. "tf.Tensor(\n",
  131. "[[14. 32.]\n",
  132. " [32. 77.]], shape=(2, 2), dtype=float32)\n"
  133. ]
  134. }
  135. ],
  136. "source": [
  137. "# ops 使用tf本身的math接口对Tensor进行计算\n",
  138. "print(t+10)\n",
  139. "print(t)\n",
  140. "print(tf.square(t))\n",
  141. "print(t)\n",
  142. "#矩阵乘以自己的转置\n",
  143. "print(tf.transpose(t))\n",
  144. "print(t @ tf.transpose(t)) #@是矩阵乘法,和*不一致"
  145. ]
  146. },
  147. {
  148. "cell_type": "code",
  149. "execution_count": 9,
  150. "metadata": {},
  151. "outputs": [
  152. {
  153. "name": "stdout",
  154. "output_type": "stream",
  155. "text": [
  156. "tf.Tensor(\n",
  157. "[[1. 1.4142135 1.7320508]\n",
  158. " [2. 2.236068 2.4494898]], shape=(2, 3), dtype=float32)\n",
  159. "--------------------------------------------------\n"
  160. ]
  161. },
  162. {
  163. "data": {
  164. "text/plain": [
  165. "<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
  166. "array([[0. , 0.6931472, 1.0986123],\n",
  167. " [1.3862944, 1.609438 , 1.7917595]], dtype=float32)>"
  168. ]
  169. },
  170. "execution_count": 9,
  171. "metadata": {},
  172. "output_type": "execute_result"
  173. }
  174. ],
  175. "source": [
  176. "print(tf.sqrt(t))\n",
  177. "print('-'*50)\n",
  178. "# tf.math.sqrt(t)\n",
  179. "tf.math.log(t) #必须加math"
  180. ]
  181. },
  182. {
  183. "cell_type": "code",
  184. "execution_count": 10,
  185. "metadata": {},
  186. "outputs": [
  187. {
  188. "name": "stdout",
  189. "output_type": "stream",
  190. "text": [
  191. "[[1. 2. 3.]\n",
  192. " [4. 5. 6.]]\n",
  193. "[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]\n",
  194. "<class 'numpy.ndarray'>\n",
  195. "[[ 1. 4. 9.]\n",
  196. " [16. 25. 36.]]\n",
  197. "tf.Tensor(\n",
  198. "[[1. 2. 3.]\n",
  199. " [4. 5. 6.]], shape=(2, 3), dtype=float64)\n"
  200. ]
  201. }
  202. ],
  203. "source": [
  204. "# numpy conversion\n",
  205. "print(t.numpy()) #可以直接通过numpy取出来\n",
  206. "print(t.numpy().tolist())\n",
  207. "print(type(t.numpy()))\n",
  208. "print(np.square(t)) #直接求平方\n",
  209. "np_t = np.array([[1., 2., 3.], [4., 5., 6.]])\n",
  210. "print(tf.constant(np_t)) #转换为tensor"
  211. ]
  212. },
  213. {
  214. "cell_type": "code",
  215. "execution_count": 11,
  216. "metadata": {},
  217. "outputs": [
  218. {
  219. "name": "stdout",
  220. "output_type": "stream",
  221. "text": [
  222. "tf.Tensor(2.718, shape=(), dtype=float32)\n",
  223. "2.718\n",
  224. "()\n"
  225. ]
  226. }
  227. ],
  228. "source": [
  229. "# Scalars 就是标量,只有一个数值的张量,称为标量\n",
  230. "t = tf.constant(2.718)\n",
  231. "print(t)\n",
  232. "print(t.numpy())\n",
  233. "print(t.shape) #维数"
  234. ]
  235. },
  236. {
  237. "cell_type": "code",
  238. "execution_count": 12,
  239. "metadata": {},
  240. "outputs": [
  241. {
  242. "name": "stdout",
  243. "output_type": "stream",
  244. "text": [
  245. "tf.Tensor(b'cafe', shape=(), dtype=string)\n",
  246. "tf.Tensor(4, shape=(), dtype=int32)\n",
  247. "tf.Tensor(4, shape=(), dtype=int32)\n",
  248. "tf.Tensor([ 99 97 102 101], shape=(4,), dtype=int32)\n"
  249. ]
  250. }
  251. ],
  252. "source": [
  253. "# strings\n",
  254. "t = tf.constant(\"cafe\")\n",
  255. "print(t)\n",
  256. "print(tf.strings.length(t))\n",
  257. "print(tf.strings.length(t, unit=\"UTF8_CHAR\"))\n",
  258. "print(tf.strings.unicode_decode(t, \"UTF8\"))"
  259. ]
  260. },
  261. {
  262. "cell_type": "code",
  263. "execution_count": 13,
  264. "metadata": {},
  265. "outputs": [
  266. {
  267. "name": "stdout",
  268. "output_type": "stream",
  269. "text": [
  270. "tf.Tensor([4 6 2], shape=(3,), dtype=int32)\n",
  271. "tf.Tensor([4 6 6], shape=(3,), dtype=int32)\n",
  272. "<tf.RaggedTensor [[99, 97, 102, 101], [99, 111, 102, 102, 101, 101], [21654, 21857]]>\n"
  273. ]
  274. }
  275. ],
  276. "source": [
  277. "# string array\n",
  278. "t = tf.constant([\"cafe\", \"coffee\", \"咖啡\"])\n",
  279. "#自动求出数组中每一个字符的长度,如果不加unit=\"UTF8_CHAR\",得到的是实际字节存储的长度\n",
  280. "print(tf.strings.length(t, unit=\"UTF8_CHAR\")) \n",
  281. "print(tf.strings.length(t, unit=\"BYTE\")) \n",
  282. "r = tf.strings.unicode_decode(t, \"UTF8\")\n",
  283. "# https://tool.chinaz.com/tools/unicode.aspx 汉字转的是unicode编码\n",
  284. "print(r)\n",
  285. "# RaggedTensor 是指形状分布不固定的(行元素个数不相等)\n",
  286. "# Tensor,2.0新增"
  287. ]
  288. },
  289. {
  290. "cell_type": "code",
  291. "execution_count": 14,
  292. "metadata": {},
  293. "outputs": [
  294. {
  295. "name": "stdout",
  296. "output_type": "stream",
  297. "text": [
  298. "<tf.RaggedTensor [[11, 12], [21, 22, 23], [], [41]]>\n",
  299. "(4, None)\n",
  300. "tf.Tensor([21 22 23], shape=(3,), dtype=int32)\n",
  301. "<tf.RaggedTensor [[21, 22, 23], []]>\n"
  302. ]
  303. }
  304. ],
  305. "source": [
  306. "# ragged tensor\n",
  307. "r = tf.ragged.constant([[11, 12], [21, 22, 23], [], [41]])\n",
  308. "\n",
  309. "# index op\n",
  310. "print(r)\n",
  311. "print(r.shape)\n",
  312. "print(r[1])\n",
  313. "#取一行也是ragged tensor\n",
  314. "print(r[1:3])\n",
  315. "# print(r[:,1])#不能取列索引"
  316. ]
  317. },
  318. {
  319. "cell_type": "code",
  320. "execution_count": 15,
  321. "metadata": {},
  322. "outputs": [
  323. {
  324. "name": "stdout",
  325. "output_type": "stream",
  326. "text": [
  327. "<tf.RaggedTensor [[11, 12], [21, 22, 23], [], [41], [51, 52], [], [], [71]]>\n",
  328. "<tf.RaggedTensor [[11, 12, 51, 52], [21, 22, 23], [], [41, 71]]>\n"
  329. ]
  330. }
  331. ],
  332. "source": [
  333. "# ops on ragged tensor\n",
  334. "r2 = tf.ragged.constant([[51, 52],[], [], [71]])\n",
  335. "print(tf.concat([r, r2], axis = 0))\n",
  336. "print(tf.concat([r, r2], axis = 1)) #r和r2行数相等(都是4行),按axis=1可以拼接\n",
  337. "#是否可以把上面的axis改为1"
  338. ]
  339. },
  340. {
  341. "cell_type": "code",
  342. "execution_count": 16,
  343. "metadata": {},
  344. "outputs": [
  345. {
  346. "name": "stdout",
  347. "output_type": "stream",
  348. "text": [
  349. "<tf.RaggedTensor [[11, 12, 13, 14], [21, 22, 23, 15], [41], [41, 42, 43]]>\n"
  350. ]
  351. }
  352. ],
  353. "source": [
  354. "#按轴1进行合并时,行数要一致,行数相等,可以拼\n",
  355. "r3 = tf.ragged.constant([[13, 14], [15], [41], [42, 43]])\n",
  356. "print(tf.concat([r, r3], axis = 1))"
  357. ]
  358. },
  359. {
  360. "cell_type": "code",
  361. "execution_count": 17,
  362. "metadata": {},
  363. "outputs": [
  364. {
  365. "name": "stdout",
  366. "output_type": "stream",
  367. "text": [
  368. "tf.Tensor(\n",
  369. "[[11 12 0]\n",
  370. " [21 22 23]\n",
  371. " [ 0 0 0]\n",
  372. " [41 0 0]], shape=(4, 3), dtype=int32)\n"
  373. ]
  374. }
  375. ],
  376. "source": [
  377. "print(r.to_tensor()) #各种深度学习模型必须输入一个tensor\n",
  378. "#空闲的补0,只能往后面补"
  379. ]
  380. },
  381. {
  382. "cell_type": "code",
  383. "execution_count": 18,
  384. "metadata": {},
  385. "outputs": [
  386. {
  387. "name": "stdout",
  388. "output_type": "stream",
  389. "text": [
  390. "SparseTensor(indices=tf.Tensor(\n",
  391. "[[0 1]\n",
  392. " [1 0]\n",
  393. " [2 3]\n",
  394. " [3 2]], shape=(4, 2), dtype=int64), values=tf.Tensor([1. 2. 3. 5.], shape=(4,), dtype=float32), dense_shape=tf.Tensor([4 4], shape=(2,), dtype=int64))\n"
  395. ]
  396. },
  397. {
  398. "data": {
  399. "text/plain": [
  400. "<tf.Tensor: shape=(4, 4), dtype=float32, numpy=\n",
  401. "array([[0., 1., 0., 0.],\n",
  402. " [2., 0., 0., 0.],\n",
  403. " [0., 0., 0., 3.],\n",
  404. " [0., 0., 5., 0.]], dtype=float32)>"
  405. ]
  406. },
  407. "execution_count": 18,
  408. "metadata": {},
  409. "output_type": "execute_result"
  410. }
  411. ],
  412. "source": [
  413. "# sparse tensor 可以往前面补零,sparse tensor从第一行依次往下填位置\n",
  414. "#sparese tensor存储节省内存空间,磁盘空间\n",
  415. "s = tf.SparseTensor(indices = [[0, 1], [1, 0], [2, 3],[3,2]], #位置\n",
  416. " values = [1., 2., 3.,5], #值\n",
  417. " dense_shape = [4, 4]) #维数\n",
  418. "print(s)\n",
  419. "tt=tf.sparse.to_dense(s)\n",
  420. "tt"
  421. ]
  422. },
  423. {
  424. "cell_type": "code",
  425. "execution_count": 19,
  426. "metadata": {
  427. "scrolled": true
  428. },
  429. "outputs": [
  430. {
  431. "name": "stdout",
  432. "output_type": "stream",
  433. "text": [
  434. "SparseTensor(indices=tf.Tensor(\n",
  435. "[[0 1]\n",
  436. " [1 0]\n",
  437. " [2 3]\n",
  438. " [3 2]], shape=(4, 2), dtype=int64), values=tf.Tensor([ 2. 4. 6. 10.], shape=(4,), dtype=float32), dense_shape=tf.Tensor([4 4], shape=(2,), dtype=int64))\n",
  439. "unsupported operand type(s) for +: 'SparseTensor' and 'int'\n",
  440. "tf.Tensor(\n",
  441. "[[ 30. 40.]\n",
  442. " [ 20. 40.]\n",
  443. " [210. 240.]\n",
  444. " [250. 300.]], shape=(4, 2), dtype=float32)\n"
  445. ]
  446. }
  447. ],
  448. "source": [
  449. "# ops on sparse tensors\n",
  450. "\n",
  451. "s2 = s * 2.0\n",
  452. "print(s2)\n",
  453. "\n",
  454. "#不支持加法\n",
  455. "try:\n",
  456. " s3 = s + 1\n",
  457. "except TypeError as ex:\n",
  458. " print(ex)\n",
  459. "\n",
  460. "s4 = tf.constant([[10., 20.],\n",
  461. " [30., 40.],\n",
  462. " [50., 60.],\n",
  463. " [70., 80.]])\n",
  464. "# tf.sparse.to_dense(s)@s4\n",
  465. "print(tf.sparse.sparse_dense_matmul(s, s4)) #稀疏Tensor和普通Tensor相乘"
  466. ]
  467. },
  468. {
  469. "cell_type": "code",
  470. "execution_count": 20,
  471. "metadata": {
  472. "scrolled": true
  473. },
  474. "outputs": [
  475. {
  476. "name": "stdout",
  477. "output_type": "stream",
  478. "text": [
  479. "SparseTensor(indices=tf.Tensor(\n",
  480. "[[0 2]\n",
  481. " [2 3]\n",
  482. " [0 1]], shape=(3, 2), dtype=int64), values=tf.Tensor([1. 2. 3.], shape=(3,), dtype=float32), dense_shape=tf.Tensor([3 4], shape=(2,), dtype=int64))\n",
  483. "SparseTensor(indices=tf.Tensor(\n",
  484. "[[0 1]\n",
  485. " [0 2]\n",
  486. " [2 3]], shape=(3, 2), dtype=int64), values=tf.Tensor([3. 1. 2.], shape=(3,), dtype=float32), dense_shape=tf.Tensor([3 4], shape=(2,), dtype=int64))\n",
  487. "tf.Tensor(\n",
  488. "[[0. 3. 1. 0.]\n",
  489. " [0. 0. 0. 0.]\n",
  490. " [0. 0. 0. 2.]], shape=(3, 4), dtype=float32)\n"
  491. ]
  492. }
  493. ],
  494. "source": [
  495. "# sparse tensor\n",
  496. "s5 = tf.SparseTensor(indices = [[0, 2], [2, 3], [0, 1]],\n",
  497. " values = [1., 2., 3.],\n",
  498. " dense_shape = [3, 4])\n",
  499. "# print(tf.sparse.to_dense(s5)) #sparse无顺序时,不能转为tensor,会报错\n",
  500. "print(s5)\n",
  501. "s6 = tf.sparse.reorder(s5)\n",
  502. "print(s6)\n",
  503. "print(tf.sparse.to_dense(s6))"
  504. ]
  505. },
  506. {
  507. "cell_type": "code",
  508. "execution_count": 21,
  509. "metadata": {},
  510. "outputs": [
  511. {
  512. "name": "stdout",
  513. "output_type": "stream",
  514. "text": [
  515. "<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=\n",
  516. "array([[1., 2., 3.],\n",
  517. " [4., 5., 6.]], dtype=float32)>\n",
  518. "tf.Tensor(\n",
  519. "[[1. 2. 3.]\n",
  520. " [4. 5. 6.]], shape=(2, 3), dtype=float32)\n",
  521. "--------------------------------------------------\n",
  522. "[[1. 2. 3.]\n",
  523. " [4. 5. 6.]]\n"
  524. ]
  525. }
  526. ],
  527. "source": [
  528. "# Variables\n",
  529. "v = tf.Variable([[1., 2., 3.], [4., 5.,6.]])\n",
  530. "print(v)\n",
  531. "print(v.value())\n",
  532. "print('-'*50)\n",
  533. "print(v.numpy())"
  534. ]
  535. },
  536. {
  537. "cell_type": "code",
  538. "execution_count": 22,
  539. "metadata": {},
  540. "outputs": [
  541. {
  542. "name": "stdout",
  543. "output_type": "stream",
  544. "text": [
  545. "140353908649208\n",
  546. "140353908649208\n",
  547. "[[ 2. 4. 6.]\n",
  548. " [ 8. 10. 12.]]\n",
  549. "--------------------------------------------------\n",
  550. "[[ 2. 42. 6.]\n",
  551. " [ 8. 10. 12.]]\n",
  552. "--------------------------------------------------\n",
  553. "[[ 2. 42. 6.]\n",
  554. " [ 7. 8. 9.]]\n",
  555. "140353908649208\n"
  556. ]
  557. }
  558. ],
  559. "source": [
  560. "# 修改变量时要用assign,改变tensor内某个值,空间没有发生变化,效率高\n",
  561. "# assign value\n",
  562. "print(id(v))\n",
  563. "v.assign(2*v)\n",
  564. "print(id(v))\n",
  565. "print(v.numpy())\n",
  566. "print('-'*50)\n",
  567. "v[0, 1].assign(42) #取某个元素修改\n",
  568. "print(v.numpy())\n",
  569. "print('-'*50)\n",
  570. "v[1].assign([7., 8., 9.]) #取某一行修改\n",
  571. "print(v.numpy())\n",
  572. "print(id(v))"
  573. ]
  574. },
  575. {
  576. "cell_type": "code",
  577. "execution_count": 23,
  578. "metadata": {},
  579. "outputs": [
  580. {
  581. "name": "stdout",
  582. "output_type": "stream",
  583. "text": [
  584. "'ResourceVariable' object does not support item assignment\n"
  585. ]
  586. }
  587. ],
  588. "source": [
  589. "try:\n",
  590. " v[1] = [7., 8., 9.]\n",
  591. "except TypeError as ex:\n",
  592. " print(ex)"
  593. ]
  594. },
  595. {
  596. "cell_type": "code",
  597. "execution_count": 24,
  598. "metadata": {},
  599. "outputs": [
  600. {
  601. "name": "stdout",
  602. "output_type": "stream",
  603. "text": [
  604. "tf.Tensor(\n",
  605. "[[ 4. 84. 12.]\n",
  606. " [14. 16. 18.]], shape=(2, 3), dtype=float32)\n",
  607. "140353905381784\n",
  608. "<class 'tensorflow.python.framework.ops.EagerTensor'>\n"
  609. ]
  610. }
  611. ],
  612. "source": [
  613. "v=2*v\n",
  614. "print(v)\n",
  615. "print(id(v))\n",
  616. "print(type(v))"
  617. ]
  618. },
  619. {
  620. "cell_type": "code",
  621. "execution_count": 25,
  622. "metadata": {},
  623. "outputs": [
  624. {
  625. "data": {
  626. "text/plain": [
  627. "<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>"
  628. ]
  629. },
  630. "execution_count": 25,
  631. "metadata": {},
  632. "output_type": "execute_result"
  633. }
  634. ],
  635. "source": [
  636. "x = tf.constant([[1., 1.], [2., 2.]])\n",
  637. "tf.reduce_mean(x,axis=1)"
  638. ]
  639. }
  640. ],
  641. "metadata": {
  642. "kernelspec": {
  643. "display_name": "Python 3",
  644. "language": "python",
  645. "name": "python3"
  646. },
  647. "language_info": {
  648. "codemirror_mode": {
  649. "name": "ipython",
  650. "version": 3
  651. },
  652. "file_extension": ".py",
  653. "mimetype": "text/x-python",
  654. "name": "python",
  655. "nbconvert_exporter": "python",
  656. "pygments_lexer": "ipython3",
  657. "version": "3.6.9"
  658. }
  659. },
  660. "nbformat": 4,
  661. "nbformat_minor": 2
  662. }

随着人工智能和大数据的发展,各行各业对自动化工具都有一定的需求。在当下疫情防控期间,可以使用MindSpore实现YOLO模型来进行目标检测及语义分割,对视频或图片进行口罩佩戴检测和行人社交距离检测,从而对公共场所的疫情防控实行自动化管理。