- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "2.2.0\n",
- "sys.version_info(major=3, minor=6, micro=9, releaselevel='final', serial=0)\n",
- "matplotlib 3.3.4\n",
- "numpy 1.19.5\n",
- "pandas 1.1.5\n",
- "sklearn 0.24.2\n",
- "tensorflow 2.2.0\n",
- "tensorflow.keras 2.3.0-tf\n"
- ]
- }
- ],
- "source": [
- "import matplotlib as mpl\n",
- "import matplotlib.pyplot as plt\n",
- "%matplotlib inline\n",
- "import numpy as np\n",
- "import sklearn\n",
- "import pandas as pd\n",
- "import os\n",
- "import sys\n",
- "import time\n",
- "import tensorflow as tf\n",
- "\n",
- "from tensorflow import keras\n",
- "\n",
- "print(tf.__version__)\n",
- "print(sys.version_info)\n",
- "for module in mpl, np, pd, sklearn, tf, keras:\n",
- " print(module.__name__, module.__version__)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(\n",
- "[[1. 2. 3.]\n",
- " [4. 5. 6.]], shape=(2, 3), dtype=float32)\n",
- "--------------------------------------------------\n",
- "tf.Tensor(\n",
- "[[2. 3.]\n",
- " [5. 6.]], shape=(2, 2), dtype=float32)\n",
- "--------------------------------------------------\n",
- "tf.Tensor([2. 5.], shape=(2,), dtype=float32)\n",
- "--------------------------------------------------\n"
- ]
- }
- ],
- "source": [
- "# constant是常量张量\n",
- "t = tf.constant([[1., 2., 3.], [4., 5.,6.]])\n",
- "\n",
- "# index\n",
- "#2.0能够直接获取值时因为execution默认打开的\n",
- "print(t)\n",
- "print('-'*50)\n",
- "print(t[:, 1:])\n",
- "print('-'*50)\n",
- "print(t[..., 1])\n",
- "print('-'*50)\n",
- "# t.assign(1)对常量不能进行再次assign设置\n",
- "type(t.numpy()) #转为ndarray\n",
- "q=t.numpy()"
- ]
- },
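- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A minimal sketch of the point made in the comment above: a constant is an `EagerTensor` with no `assign` method, so calling it raises `AttributeError`. This assumes `t` is still the 2x3 constant defined above."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sketch: constants are immutable; an EagerTensor has no assign method,\n",
- "# so calling it raises AttributeError\n",
- "try:\n",
- "    t.assign(tf.zeros((2, 3)))\n",
- "except AttributeError as ex:\n",
- "    print(ex)"
- ]
- },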
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
- "array([[1., 2., 3.],\n",
- " [4., 5., 6.]], dtype=float32)>"
- ]
- },
- "execution_count": 7,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "t1= tf.constant(q) #把ndarray变为张量\n",
- "t1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(\n",
- "[[11. 12. 13.]\n",
- " [14. 15. 16.]], shape=(2, 3), dtype=float32)\n",
- "tf.Tensor(\n",
- "[[1. 2. 3.]\n",
- " [4. 5. 6.]], shape=(2, 3), dtype=float32)\n",
- "tf.Tensor(\n",
- "[[ 1. 4. 9.]\n",
- " [16. 25. 36.]], shape=(2, 3), dtype=float32)\n",
- "tf.Tensor(\n",
- "[[1. 2. 3.]\n",
- " [4. 5. 6.]], shape=(2, 3), dtype=float32)\n",
- "tf.Tensor(\n",
- "[[1. 4.]\n",
- " [2. 5.]\n",
- " [3. 6.]], shape=(3, 2), dtype=float32)\n",
- "tf.Tensor(\n",
- "[[14. 32.]\n",
- " [32. 77.]], shape=(2, 2), dtype=float32)\n"
- ]
- }
- ],
- "source": [
- "# ops 使用tf本身的math接口对Tensor进行计算\n",
- "print(t+10)\n",
- "print(t)\n",
- "print(tf.square(t))\n",
- "print(t)\n",
- "#矩阵乘以自己的转置\n",
- "print(tf.transpose(t))\n",
- "print(t @ tf.transpose(t)) #@是矩阵乘法,和*不一致"
- ]
- },
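- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A short comparison of `*` and `@`, assuming `t` is the 2x3 constant from earlier: `*` multiplies element-wise, while `@` (equivalently `tf.matmul`) performs matrix multiplication."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sketch: element-wise * vs. matrix multiplication @\n",
- "print(t * t)                               # element-wise, same values as tf.square(t)\n",
- "print(t @ tf.transpose(t))                 # matrix product, shape (2, 2)\n",
- "print(tf.matmul(t, t, transpose_b=True))   # equivalent to the @ expression above"
- ]
- },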
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(\n",
- "[[1. 1.4142135 1.7320508]\n",
- " [2. 2.236068 2.4494898]], shape=(2, 3), dtype=float32)\n",
- "--------------------------------------------------\n"
- ]
- },
- {
- "data": {
- "text/plain": [
- "<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
- "array([[0. , 0.6931472, 1.0986123],\n",
- " [1.3862944, 1.609438 , 1.7917595]], dtype=float32)>"
- ]
- },
- "execution_count": 9,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "print(tf.sqrt(t))\n",
- "print('-'*50)\n",
- "# tf.math.sqrt(t)\n",
- "tf.math.log(t) #必须加math"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[[1. 2. 3.]\n",
- " [4. 5. 6.]]\n",
- "[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]\n",
- "<class 'numpy.ndarray'>\n",
- "[[ 1. 4. 9.]\n",
- " [16. 25. 36.]]\n",
- "tf.Tensor(\n",
- "[[1. 2. 3.]\n",
- " [4. 5. 6.]], shape=(2, 3), dtype=float64)\n"
- ]
- }
- ],
- "source": [
- "# numpy conversion\n",
- "print(t.numpy()) #可以直接通过numpy取出来\n",
- "print(t.numpy().tolist())\n",
- "print(type(t.numpy()))\n",
- "print(np.square(t)) #直接求平方\n",
- "np_t = np.array([[1., 2., 3.], [4., 5., 6.]])\n",
- "print(tf.constant(np_t)) #转换为tensor"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(2.718, shape=(), dtype=float32)\n",
- "2.718\n",
- "()\n"
- ]
- }
- ],
- "source": [
- "# Scalars 就是标量,只有一个数值的张量,称为标量\n",
- "t = tf.constant(2.718)\n",
- "print(t)\n",
- "print(t.numpy())\n",
- "print(t.shape) #维数"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(b'cafe', shape=(), dtype=string)\n",
- "tf.Tensor(4, shape=(), dtype=int32)\n",
- "tf.Tensor(4, shape=(), dtype=int32)\n",
- "tf.Tensor([ 99 97 102 101], shape=(4,), dtype=int32)\n"
- ]
- }
- ],
- "source": [
- "# strings\n",
- "t = tf.constant(\"cafe\")\n",
- "print(t)\n",
- "print(tf.strings.length(t))\n",
- "print(tf.strings.length(t, unit=\"UTF8_CHAR\"))\n",
- "print(tf.strings.unicode_decode(t, \"UTF8\"))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor([4 6 2], shape=(3,), dtype=int32)\n",
- "tf.Tensor([4 6 6], shape=(3,), dtype=int32)\n",
- "<tf.RaggedTensor [[99, 97, 102, 101], [99, 111, 102, 102, 101, 101], [21654, 21857]]>\n"
- ]
- }
- ],
- "source": [
- "# string array\n",
- "t = tf.constant([\"cafe\", \"coffee\", \"咖啡\"])\n",
- "#自动求出数组中每一个字符的长度,如果不加unit=\"UTF8_CHAR\",得到的是实际字节存储的长度\n",
- "print(tf.strings.length(t, unit=\"UTF8_CHAR\")) \n",
- "print(tf.strings.length(t, unit=\"BYTE\")) \n",
- "r = tf.strings.unicode_decode(t, \"UTF8\")\n",
- "# https://tool.chinaz.com/tools/unicode.aspx 汉字转的是unicode编码\n",
- "print(r)\n",
- "# RaggedTensor 是指形状分布不固定的(行元素个数不相等)\n",
- "# Tensor,2.0新增"
- ]
- },
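- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A small round-trip sketch: assuming `r` holds the decoded code points from the cell above, `tf.strings.unicode_encode` turns them back into UTF-8 strings."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sketch: encode the decoded code points back into UTF-8 strings\n",
- "encoded = tf.strings.unicode_encode(r, \"UTF-8\")\n",
- "print(encoded)"
- ]
- },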
- {
- "cell_type": "code",
- "execution_count": 14,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "<tf.RaggedTensor [[11, 12], [21, 22, 23], [], [41]]>\n",
- "(4, None)\n",
- "tf.Tensor([21 22 23], shape=(3,), dtype=int32)\n",
- "<tf.RaggedTensor [[21, 22, 23], []]>\n"
- ]
- }
- ],
- "source": [
- "# ragged tensor\n",
- "r = tf.ragged.constant([[11, 12], [21, 22, 23], [], [41]])\n",
- "\n",
- "# index op\n",
- "print(r)\n",
- "print(r.shape)\n",
- "print(r[1])\n",
- "#取一行也是ragged tensor\n",
- "print(r[1:3])\n",
- "# print(r[:,1])#不能取列索引"
- ]
- },
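- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A sketch of an alternative constructor: the same ragged tensor can be built from a flat value list plus per-row lengths with `tf.RaggedTensor.from_row_lengths`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sketch: build the same ragged tensor from flat values and per-row lengths\n",
- "r_alt = tf.RaggedTensor.from_row_lengths(\n",
- "    values=[11, 12, 21, 22, 23, 41],\n",
- "    row_lengths=[2, 3, 0, 1])\n",
- "print(r_alt)"
- ]
- },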
- {
- "cell_type": "code",
- "execution_count": 15,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "<tf.RaggedTensor [[11, 12], [21, 22, 23], [], [41], [51, 52], [], [], [71]]>\n",
- "<tf.RaggedTensor [[11, 12, 51, 52], [21, 22, 23], [], [41, 71]]>\n"
- ]
- }
- ],
- "source": [
- "# ops on ragged tensor\n",
- "r2 = tf.ragged.constant([[51, 52],[], [], [71]])\n",
- "print(tf.concat([r, r2], axis = 0))\n",
- "print(tf.concat([r, r2], axis = 1)) #行数不相等,不可以拼\n",
- "#是否可以把上面的axis改为1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "<tf.RaggedTensor [[11, 12, 13, 14], [21, 22, 23, 15], [41], [41, 42, 43]]>\n"
- ]
- }
- ],
- "source": [
- "#按轴1进行合并时,行数要一致,行数相等,可以拼\n",
- "r3 = tf.ragged.constant([[13, 14], [15], [41], [42, 43]])\n",
- "print(tf.concat([r, r3], axis = 1))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 17,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(\n",
- "[[11 12 0]\n",
- " [21 22 23]\n",
- " [ 0 0 0]\n",
- " [41 0 0]], shape=(4, 3), dtype=int32)\n"
- ]
- }
- ],
- "source": [
- "print(r.to_tensor()) #各种深度学习模型必须输入一个tensor\n",
- "#空闲的补0,只能往后面补"
- ]
- },
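- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A sketch showing that the padding position is fixed (end of each row) but the pad value is not: `to_tensor` accepts a `default_value` argument."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sketch: pad with -1 instead of 0 when densifying the ragged tensor\n",
- "print(r.to_tensor(default_value=-1))"
- ]
- },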
- {
- "cell_type": "code",
- "execution_count": 18,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "SparseTensor(indices=tf.Tensor(\n",
- "[[0 1]\n",
- " [1 0]\n",
- " [2 3]\n",
- " [3 2]], shape=(4, 2), dtype=int64), values=tf.Tensor([1. 2. 3. 5.], shape=(4,), dtype=float32), dense_shape=tf.Tensor([4 4], shape=(2,), dtype=int64))\n"
- ]
- },
- {
- "data": {
- "text/plain": [
- "<tf.Tensor: shape=(4, 4), dtype=float32, numpy=\n",
- "array([[0., 1., 0., 0.],\n",
- " [2., 0., 0., 0.],\n",
- " [0., 0., 0., 3.],\n",
- " [0., 0., 5., 0.]], dtype=float32)>"
- ]
- },
- "execution_count": 18,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "# sparse tensor 可以往前面补零,sparse tensor从第一行依次往下填位置\n",
- "#sparese tensor存储节省内存空间,磁盘空间\n",
- "s = tf.SparseTensor(indices = [[0, 1], [1, 0], [2, 3],[3,2]], #位置\n",
- " values = [1., 2., 3.,5], #值\n",
- " dense_shape = [4, 4]) #维数\n",
- "print(s)\n",
- "tt=tf.sparse.to_dense(s)\n",
- "tt"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 19,
- "metadata": {
- "scrolled": true
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "SparseTensor(indices=tf.Tensor(\n",
- "[[0 1]\n",
- " [1 0]\n",
- " [2 3]\n",
- " [3 2]], shape=(4, 2), dtype=int64), values=tf.Tensor([ 2. 4. 6. 10.], shape=(4,), dtype=float32), dense_shape=tf.Tensor([4 4], shape=(2,), dtype=int64))\n",
- "unsupported operand type(s) for +: 'SparseTensor' and 'int'\n",
- "tf.Tensor(\n",
- "[[ 30. 40.]\n",
- " [ 20. 40.]\n",
- " [210. 240.]\n",
- " [250. 300.]], shape=(4, 2), dtype=float32)\n"
- ]
- }
- ],
- "source": [
- "# ops on sparse tensors\n",
- "\n",
- "s2 = s * 2.0\n",
- "print(s2)\n",
- "\n",
- "#不支持加法\n",
- "try:\n",
- " s3 = s + 1\n",
- "except TypeError as ex:\n",
- " print(ex)\n",
- "\n",
- "s4 = tf.constant([[10., 20.],\n",
- " [30., 40.],\n",
- " [50., 60.],\n",
- " [70., 80.]])\n",
- "# tf.sparse.to_dense(s)@s4\n",
- "print(tf.sparse.sparse_dense_matmul(s, s4)) #稀疏Tensor和Tensor想乘"
- ]
- },
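- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A sketch of sparse addition: plain `+` with a scalar fails above, but `tf.sparse.add` can add two sparse tensors; adding `s` to itself should double every stored value."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sketch: sparse + sparse addition via tf.sparse.add\n",
- "s_sum = tf.sparse.add(s, s)\n",
- "print(s_sum)\n",
- "print(tf.sparse.to_dense(s_sum))"
- ]
- },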
- {
- "cell_type": "code",
- "execution_count": 20,
- "metadata": {
- "scrolled": true
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "SparseTensor(indices=tf.Tensor(\n",
- "[[0 2]\n",
- " [2 3]\n",
- " [0 1]], shape=(3, 2), dtype=int64), values=tf.Tensor([1. 2. 3.], shape=(3,), dtype=float32), dense_shape=tf.Tensor([3 4], shape=(2,), dtype=int64))\n",
- "SparseTensor(indices=tf.Tensor(\n",
- "[[0 1]\n",
- " [0 2]\n",
- " [2 3]], shape=(3, 2), dtype=int64), values=tf.Tensor([3. 1. 2.], shape=(3,), dtype=float32), dense_shape=tf.Tensor([3 4], shape=(2,), dtype=int64))\n",
- "tf.Tensor(\n",
- "[[0. 3. 1. 0.]\n",
- " [0. 0. 0. 0.]\n",
- " [0. 0. 0. 2.]], shape=(3, 4), dtype=float32)\n"
- ]
- }
- ],
- "source": [
- "# sparse tensor\n",
- "s5 = tf.SparseTensor(indices = [[0, 2], [2, 3], [0, 1]],\n",
- " values = [1., 2., 3.],\n",
- " dense_shape = [3, 4])\n",
- "# print(tf.sparse.to_dense(s5)) #sparse无顺序时,不能转为tensor,会报错\n",
- "print(s5)\n",
- "s6 = tf.sparse.reorder(s5)\n",
- "print(s6)\n",
- "print(tf.sparse.to_dense(s6))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 21,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=\n",
- "array([[1., 2., 3.],\n",
- " [4., 5., 6.]], dtype=float32)>\n",
- "tf.Tensor(\n",
- "[[1. 2. 3.]\n",
- " [4. 5. 6.]], shape=(2, 3), dtype=float32)\n",
- "--------------------------------------------------\n",
- "[[1. 2. 3.]\n",
- " [4. 5. 6.]]\n"
- ]
- }
- ],
- "source": [
- "# Variables\n",
- "v = tf.Variable([[1., 2., 3.], [4., 5.,6.]])\n",
- "print(v)\n",
- "print(v.value())\n",
- "print('-'*50)\n",
- "print(v.numpy())"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 22,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "140353908649208\n",
- "140353908649208\n",
- "[[ 2. 4. 6.]\n",
- " [ 8. 10. 12.]]\n",
- "--------------------------------------------------\n",
- "[[ 2. 42. 6.]\n",
- " [ 8. 10. 12.]]\n",
- "--------------------------------------------------\n",
- "[[ 2. 42. 6.]\n",
- " [ 7. 8. 9.]]\n",
- "140353908649208\n"
- ]
- }
- ],
- "source": [
- "# 修改变量时要用assign,改变tensor内某个值,空间没有发生变化,效率高\n",
- "# assign value\n",
- "print(id(v))\n",
- "v.assign(2*v)\n",
- "print(id(v))\n",
- "print(v.numpy())\n",
- "print('-'*50)\n",
- "v[0, 1].assign(42) #取某个元素修改\n",
- "print(v.numpy())\n",
- "print('-'*50)\n",
- "v[1].assign([7., 8., 9.]) #取某一行修改\n",
- "print(v.numpy())\n",
- "print(id(v))"
- ]
- },
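- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A sketch of in-place arithmetic updates: besides `assign`, a Variable supports `assign_add` and `assign_sub`, and the object id stays the same, just as with `assign` above."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sketch: in-place arithmetic updates keep the same Variable buffer\n",
- "print(id(v))\n",
- "v.assign_add(tf.ones((2, 3)))\n",
- "print(v.numpy())\n",
- "v.assign_sub(tf.ones((2, 3)))\n",
- "print(v.numpy())\n",
- "print(id(v))"
- ]
- },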
- {
- "cell_type": "code",
- "execution_count": 23,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "'ResourceVariable' object does not support item assignment\n"
- ]
- }
- ],
- "source": [
- "try:\n",
- " v[1] = [7., 8., 9.]\n",
- "except TypeError as ex:\n",
- " print(ex)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 24,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(\n",
- "[[ 4. 84. 12.]\n",
- " [14. 16. 18.]], shape=(2, 3), dtype=float32)\n",
- "140353905381784\n",
- "<class 'tensorflow.python.framework.ops.EagerTensor'>\n"
- ]
- }
- ],
- "source": [
- "v=2*v\n",
- "print(v)\n",
- "print(id(v))\n",
- "print(type(v))"
- ]
- },
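- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A sketch of the in-place alternative: the cell above rebinds `v` to a plain `EagerTensor`, so `assign` is no longer available on it; calling `assign` on a Variable doubles the values without rebinding the name."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# sketch: update a Variable in place instead of rebinding the Python name\n",
- "w = tf.Variable([[1., 2., 3.], [4., 5., 6.]])\n",
- "w.assign(2 * w)\n",
- "print(type(w))\n",
- "print(w.numpy())"
- ]
- },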
- {
- "cell_type": "code",
- "execution_count": 25,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>"
- ]
- },
- "execution_count": 25,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "x = tf.constant([[1., 1.], [2., 2.]])\n",
- "tf.reduce_mean(x,axis=1)"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.9"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
- }