@@ -0,0 +1,9 @@
# https://support.codacy.com/hc/en-us/articles/115002130625-Codacy-Configuration-File
---
engines:
  bandit:
    enabled: false  # FIXME: make it work
exclude_paths:
  - scripts/*
  - setup.py
  - docker/**/*
@@ -0,0 +1 @@
.git
@@ -1,87 +1,131 @@
# ---> Android
# Built application files
*.apk
*.aar
*.ap_
*.aab
# Files for the ART/Dalvik VM
*.dex
# Java class files
*.class
# Generated files
bin/
gen/
out/
# Uncomment the following line if you want release build files ignored in your app
# release/
# Gradle files
.gradle/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
*~
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Local configuration file (sdk path, etc)
local.properties
# Flask stuff:
instance/
.webassets-cache
# Proguard folder generated by Eclipse
proguard/
# Scrapy stuff:
.scrapy
# Log Files
*.log
# Sphinx documentation
docs/_build/
docs/test_build/
docs/build_test/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
venv_/
venv2/
venv3/
venv_doc/
venv_py2/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
# IDE Specific directories
.DS_Store
.idea
.vscode/
# TensorLayer Directories
checkpoints
data/
lib_win/
# Android Studio Navigation editor temp files
.navigation/
# Android Studio captures folder
captures/
# IntelliJ
*.iml
.idea/workspace.xml
.idea/tasks.xml
.idea/gradle.xml
.idea/assetWizardSettings.xml
.idea/dictionaries
.idea/libraries
# Android Studio 3 cache files
.idea/caches
.idea/modules.xml
# Comment out the next line if keeping the position of elements in the Navigation Editor is relevant for you
.idea/navEditor.xml
# Keystore files
# Uncomment the following lines if you do not want to check your keystore files in.
#*.jks
#*.keystore
# External native build folder generated in Android Studio 2.2 and later
.externalNativeBuild
.cxx/
# Google Services (e.g. APIs or Firebase)
# google-services.json
# Freeline
freeline.py
freeline/
freeline_project_description.json
# fastlane
fastlane/report.xml
fastlane/Preview.html
fastlane/screenshots
fastlane/test_output
fastlane/readme.md
# Version control
vcs.xml
# lint
lint/intermediates/
lint/generated/
lint/outputs/
lint/tmp/
# lint/reports/
# Custom Scripts
update_tl.bat
update_tl.py
# Data Files and ByteCode files
*.gz
*.npz
@@ -0,0 +1,75 @@
############################################################################
# see https://pyup.io/docs/configuration/ for all available options        #
############################################################################
# configure updates globally
# default: all
# allowed: all, insecure, False
update: all
# configure dependency pinning globally
# default: True
# allowed: True, False
pin: False
# set the default branch
# default: empty, the default branch on GitHub
branch: master
# update schedule
# default: empty
# allowed: "every day", "every week", ..
schedule: "every day"
# search for requirement files
# default: True
# allowed: True, False
search: False
# Specify requirement files by hand, default is empty
# default: empty
# allowed: list
requirements:
  # Requirements for the library
  - requirements/requirements.txt
  # Requirements for TensorFlow (CPU)
  - requirements/requirements_tf_cpu.txt
  # Requirements for TensorFlow (GPU)
  - requirements/requirements_tf_gpu.txt
  # Not necessary, but recommended libraries
  - requirements/requirements_extra.txt
  # Requirements for contrib loggers
  - requirements_contrib_loggers.txt
  # Requirements for the db
  - requirements/requirements_db.txt
  # Requirements for the development
  - requirements/requirements_dev.txt
  # Requirements for building docs
  - requirements/requirements_doc.txt
  # Requirements for running unittests
  - requirements/requirements_test.txt
# add a label to pull requests, default is not set
# requires private repo permissions, even on public repos
# default: empty
#label_prs: update
# configure the branch prefix the bot is using
# default: pyup-
branch_prefix: pyup-
# set a global prefix for PRs
# default: empty
pr_prefix: "PyUP - Dependency Update"
# allow the bot to close stale PRs
# default: True
close_prs: True
@@ -0,0 +1,14 @@
# https://docs.readthedocs.io/en/latest/yaml-config.html
build:
  image: latest  # For python 3.6
formats:
  - epub
python:
  version: 3.6
requirements_file:
  requirements/requirements_doc.txt
@@ -0,0 +1,119 @@ | |||
# https://docs.travis-ci.com/user/languages/python/ | |||
language: python | |||
# https://docs.travis-ci.com/user/caching/#pip-cache | |||
cache: | |||
directories: | |||
- $HOME/.cache/pip/wheels | |||
addons: | |||
apt: | |||
update: false | |||
branches: | |||
only: | |||
- master | |||
- TensorLayer-2.x | |||
- /^\d+\.\d+(\.\d+)?(\S*)?$/ | |||
python: | |||
- "3.6" | |||
- "3.5" | |||
# - "2.7" # TensorLayer 2.0 does not support python2 now | |||
env:
  # Backward compatibility is ensured for releases less than 1 year old.
  # https://pypi.org/project/tensorflow/#history
  matrix:
    - _TF_VERSION=2.0.0-rc1
    # - _TF_VERSION=1.12.0 # Remove on Oct 22, 2019
    # - _TF_VERSION=1.11.0 # Remove on Sep 28, 2019
    # - _TF_VERSION=1.10.1 # Remove on Aug 24, 2019
    # - _TF_VERSION=1.9.0  # Remove on Jul 10, 2019
    # - _TF_VERSION=1.8.0  # Remove on Apr 28, 2019
    # - _TF_VERSION=1.7.1  # Remove on May 08, 2019
    # - _TF_VERSION=1.7.0  # Remove on Mar 29, 2019
    # - _TF_VERSION=1.6.0  # Remove on Mar 01, 2019
  global:
    - PYPI_USER='jonathandekhtiar'
    # See https://docs.travis-ci.com/user/encryption-keys/ for more details about secure keys.
    ### === PYPI_PASSWORD === ###
    ## To update: travis encrypt PYPI_PASSWORD=################################
    - secure: "fGIRDjfzzP9DhdDshgh/+bWTZ5Y0jTD4aR+gsT1TyAyc6N4f3RRlx70xZZwYMdQ+XC3no/q4na8UzhhuSM0hCCM1EaQ78WF1c6+FBScf4vYGoYgyJ1am+4gu54JXt+4f0bd+s6jyYBafJALUJp5fqHoxCUXqzjrOqGBBU2+JbL71Aaj8yhQuK0VPPABexsQPQM312Gvzg7hy9dh63J0Q02PqINn+CTcwq3gLH9Oua58zWQ7TaT0cdy/hzAc6Yxy3ajo2W5NU+nKROaaG9W57sa7K/v1dshDFFFST2DdGxm9i7vvfPsq0OWM6qWLsec/4mXJWsmai2ygZEv+IhaABb10c7spd2nl7oHFj2UGmldtO5W0zLb1KkCPWDPilFt3lvHM+OS/YaibquL5/5+yGj0LsRJrVyWoMBA8idcQeH4dvTAfySeFpO42VNwW5ez9JaEOh7bBp7naAA8c/fbNJJ5YEW4MEmOZ9dwFTohNNDiN+oITSrcXBS+jukbfTOmtCeYNUker+4G2YwII9cxHXbZeIMrTq9AqTfOVTAYCFaFHKbpSc1+HCyF7n5ZfNC00kBaw93XUnLRzSNKe5Ir791momYL8HecMN3OAI77bz26/pHSfzJnLntq9qx2nLBTnqDuSq5/pHvdZ8hyt+hTDxvF7HJIVMhbnkjoLPxmn4k/I="
    ### === GITHUB_PERSONAL_TOKEN === ###
    ## To update: travis encrypt GITHUB_PERSONAL_TOKEN=################################
    - secure: "kMxg4WfTwhnYMD7WjYk71vgw7XlShPpANauKzfTL6oawDrpQRkBUai4uQwiL3kXVBuVv9rKKKZxxnyAm05iB5wGasPDhlFA1DPF0IbyU3pwQKlw2Xo5qtHdgxBnbms6JJ9z5b+hHCVg+LXYYeUw5qG01Osg5Ue6j1g2juQQHCod00FNuo3fe8ah/Y10Rem6CigH8ofosCrTvN2w1GaetJwVehRYf8JkPC6vQ+Yk8IIjHn2CaVJALbhuchVblxwH0NXXRen915BVBwWRQtsrvEVMXKu7A8sMHmvPz1u3rhXQfjpF2KeVOfy1ZnyiHcLE2HgAPuAGh4kxZAAA8ovmcaCbv8m64bm72BrQApSbt6OEtR9L1UeUwdEgi54FH1XFOHQ9dA6CpiGCyk3wAJZqO0/PkNYVLfb4gPLvnvBRwYBaPgUPvVNhidFu/oODENZmcf8g9ChtuC1GT70EYlVwhgDGqUY7/USZCEvIPe81UToqtIKgcgA+Is51XindumJVMiqTsjgdqeC/wZcw+y37TAjIvvXbtYxeqIKv9zh1JuZppqUhnf+OhI+HHFaY4iu7lQTs3C0WmoLskZAp9srwRtifnVFFkdYzngmPaSjWyko2qiS0cTdFJQB/ljqmnJdksacbv5OOa0Q4qZef/hW774nVx105FlkAIk70D2b5l2pA="
    ### === HYPERDASH_APIKEY === ###
    ## To update: travis encrypt HYPERDASH_APIKEY=################################
    - secure: "ez9Ck1VpqWOd2PPu+CMWzd8R4aAIXbjKCk96PCKwWu8VXoHjaPkiy8Nn0LUzSlUg3nKdZmu2JSndwDMy3+lMLG7zE2WlGNY7MF5KM3GrvFpP3cxJQ6OuPcZcEH4j5KtBtNTrNqa8SWglqhc9mr66a92SD8Ydc4aMj6L9nbQvrsvVzIMmMy6weVlpBF35nweYCM8LxlsnqyPLleHPZo3o/k+hsTqQQbiMGjC78tqrGr56u7AjL2+D/m33+dfCGzFvMJFcpLQ5ldbcVU54i5e6V3xJ48P30QOGZaqG3fcpfZsyJEIWjykt6XFA8GfJjaVVbxdlr7zP7Vd9iWBuemnMEX3F9Cy/4x7LmX9PJfsVPC6FQnanDvsZSNO5hpmKe8BTpttJJvxgscOczV4jnI69OzqhSQeyChwtkqhIg1E/53XIO+uLJAAZsCkAco7tjGGXTKyv8ZlpSJwSqsLcmgpmQbfodCoMLcYenTxqKZv78e2B4tOPGQyS2bkSxAqhvAIam7RCq/yEvz5n2/mBFEGwP6OQFIdC7ypO2LyrOlLT7HJjCeYMeKSm+GOD3LW9oIy9QJZpG6N/zAAjnk9C2mYtWRQIBo4qdHjRvyDReevDexI8j0AXySblxREmQ7ZaT6KEDXXZSu5goTlaGm0g2HwAkKu9xYFV/bRtp6+i1mP7CQg="
matrix:
  include:
    - python: '3.6'
      env:
        - _DOC_AND_YAPF_TEST=True
install:
  - |
    if [[ -v _DOC_AND_YAPF_TEST ]]; then
      pip install tensorflow==2.0.0-rc1
      pip install yapf
      pip install -e .[doc]
    else
      pip install tensorflow==$_TF_VERSION
      pip install -e .[all_cpu_dev]
    fi
script:
  # unit tests
  # https://docs.pytest.org/en/latest/
  - rm setup.cfg
  - |
    if [[ -v _DOC_AND_YAPF_TEST ]]; then
      mv setup.travis_doc.cfg setup.cfg
    else
      mv setup.travis.cfg setup.cfg
    fi
  - pytest
before_deploy:
  - python setup.py sdist
  - python setup.py bdist_wheel
  - python setup.py bdist_wheel --universal
  - python setup.py egg_info
deploy:
  # Documentation: https://docs.travis-ci.com/user/deployment/pypi/
  - provider: pypi
    user: '$PYPI_USER'
    password: '$PYPI_PASSWORD'
    skip_cleanup: true
    on:
      tags: true
      python: '3.6'
      condition: '$_TF_VERSION = 2.0.0-rc1'
      # condition: '$_TF_VERSION = 1.11.0'
  # Documentation: https://docs.travis-ci.com/user/deployment/releases/
  - provider: releases
    file:
      - dist/*
      - tensorlayer.egg-info/PKG-INFO
    file_glob: true
    skip_cleanup: true
    api_key: '$GITHUB_PERSONAL_TOKEN'
    on:
      tags: true
      python: '3.6'
      condition: '$_TF_VERSION = 2.0.0-rc1'
      # condition: '$_TF_VERSION = 1.11.0'
@@ -0,0 +1,585 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
<!--
============== Guiding Principles ==============
* Changelogs are for humans, not machines.
* There should be an entry for every single version.
* The same types of changes should be grouped.
* Versions and sections should be linkable.
* The latest version comes first.
* The release date of each version is displayed.
* Mention whether you follow Semantic Versioning.
============== Types of changes (keep the order) ==============
* `Added` for new features.
* `Changed` for changes in existing functionality.
* `Deprecated` for soon-to-be removed features.
* `Removed` for now removed features.
* `Fixed` for any bug fixes.
* `Security` in case of vulnerabilities.
* `Dependencies Update` for dependency upgrades.
* `Contributors` to thank the contributors that worked on this release.
============== How To Update The Changelog for a New Release ==============
** Always Keep The Unreleased Section On Top **
To release a new version, please update the changelog as follows:
1. Rename the `Unreleased` section to the version number
2. Recreate an `Unreleased` section on top
3. Update the links at the very bottom
======================= START: TEMPLATE TO KEEP IN CASE OF NEED ===================
** DO NOT MODIFY THIS SECTION ! **
## [Unreleased]
### Added
### Changed
### Dependencies Update
### Deprecated
### Fixed
### Removed
### Security
### Contributors
** DO NOT MODIFY THIS SECTION ! **
======================= END: TEMPLATE TO KEEP IN CASE OF NEED ===================
-->
<!-- YOU CAN EDIT FROM HERE -->
## [Unreleased]
### Added
### Changed
### Dependencies Update
### Deprecated
### Fixed
- Fix README. (PR #1044)
- Fix package info. (PR #1046)
### Removed
### Security
### Contributors
- @luomai (PR #1044, #1046)
## [2.2.0] - 2019-09-13
TensorLayer 2.2.0 is a maintenance release.
It contains numerous API improvements and bug fixes.
This release is compatible with TensorFlow 2 RC1.
### Added
- Support nested layer customization (PR #1015)
- Support string dtype in InputLayer (PR #1017)
- Support Dynamic RNN in RNN (PR #1023)
- Add ResNet50 static model (PR #1030)
- Add performance test code in static model (PR #1041)
### Changed
- `SpatialTransform2dAffine` auto `in_channels`
- support TensorFlow 2.0.0-rc1
- Update model weights property, now returns its copy (PR #1010)
### Dependencies Update
### Deprecated
### Fixed
- RNN updates: remove warnings, fix if seq_len=0, unittest (PR #1033)
- BN updates: fix BatchNorm1d for 2D data, refactored (PR #1040)
- Fix `tf.models.Model._construct_graph` for list of outputs, e.g. STN case (PR #1010)
- Enable better `in_channels` exception raise. (PR #1015)
- Set allow_pickle=True in np.load() (PR #1021)
- Remove `private_method` decorator (PR #1025)
- Copy original model's `trainable_weights` and `nontrainable_weights` when initializing `ModelLayer` (PR #1026)
- Copy original model's `trainable_weights` and `nontrainable_weights` when initializing `LayerList` (PR #1029)
- Remove redundant parts in `model.all_layers` (PR #1029)
- Replace `tf.image.resize_image_with_crop_or_pad` with `tf.image.resize_with_crop_or_pad` (PR #1032)
- Fix a bug in `ResNet50` static model (PR #1041)
### Removed
### Security
### Contributors
- @zsdonghao
- @luomai
- @ChrisWu1997: #1010 #1015 #1025 #1030 #1040
- @warshallrho: #1017 #1021 #1026 #1029 #1032 #1041
- @ArnoldLIULJ: #1023
- @JingqingZ: #1023
## [2.1.0]
### Added
- Add version_info in model.config. (PR #992)
- Add Reinforcement learning tutorials. (PR #995)
- Add RNN layers with simple rnn cell, GRU cell, LSTM cell. (PR #998)
- Add Seq2seqLuongAttention model (PR #998)
### Changed
- Replace tf.nn.func with tf.nn.func.\_\_name\_\_ in model config. (PR #994)
- Update Seq2seq (PR #998)
### Fixed
### Contributors
- @warshallrho: #992 #994
- @quantumiracle: #995
- @Tokarev-TT-33: #995
- @initial-h: #995
- @Officium: #995
- @ArnoldLIULJ: #998
- @JingqingZ: #998
## [2.0.2] - 2019-06-05
### Changed
- change the format of network config, change related code and files; change layer act (PR #980)
### Fixed
- Fix dynamic model cannot track PRelu weights gradients problem (PR #982)
- Raise .weights warning (commit)
### Contributors
- @warshallrho: #980
- @1FengL: #982
## [2.0.1] - 2019-05-17
A maintenance release.
### Added
- Layer
  - `InstanceNorm`, `InstanceNorm1d`, `InstanceNorm2d`, `InstanceNorm3d` (PR #963)
  - Reinforcement learning tutorials. (PR #995)
### Changed
- remove `tl.layers.initialize_global_variables(sess)` (PR #931)
- support `trainable_weights` (PR #966)
- update `tutorial_generate_text.py`, `tutorial_ptb_lstm.py`. remove `tutorial_ptb_lstm_state_is_tuple.py` (PR #958)
- change `tl.layers.core`, `tl.models.core` (PR #966)
- change `weights` into `all_weights`, `trainable_weights`, `nontrainable_weights`
### Dependencies Update
- nltk>=3.3,<3.4 => nltk>=3.3,<3.5 (PR #892)
- pytest>=3.6,<3.11 => pytest>=3.6,<4.1 (PR #889)
- yapf>=0.22,<0.25 => yapf==0.25.0 (PR #896)
- imageio==2.5.0 progressbar2==3.39.3 scikit-learn==0.21.0 scikit-image==0.15.0 scipy==1.2.1 wrapt==1.11.1 pymongo==3.8.0 sphinx==2.0.1 opencv-python==4.1.0.25 requests==2.21.0 tqdm==4.31.1 lxml==4.3.3 pycodestyle==2.5.0 yapf==0.27.0 (PR #967)
### Fixed
- fix docs of models @zsdonghao #957
- In `BatchNorm`, keep dimensions of mean and variance to suit `channels first` (PR #963)
### Contributors
- @warshallrho: #966
- @zsdonghao: #931
- @yd-yin: #963
- @Tokarev-TT-33: #995
- @initial-h: #995
- @quantumiracle: #995
- @Officium: #995
- @1FengL: #958
- @dvklopfenstein: #971
## [2.0.0] - 2019-05-04
Too many PRs for this update, please check [here](https://github.com/tensorlayer/tensorlayer/releases/tag/2.0.0) for more details.
### Changed
- update for TensorLayer 2.0.0 alpha version (PR #952)
- support TensorFlow 2.0.0-alpha
- support both static and dynamic model building
### Dependencies Update
- tensorflow>=1.6,<1.13 => tensorflow>=2.0.0-alpha (PR #952)
- h5py>=2.9 (PR #952)
- cloudpickle>=0.8.1 (PR #952)
- remove matplotlib
### Contributors
- @zsdonghao
- @JingqingZ
- @ChrisWu1997
- @warshallrho
## [1.11.1] - 2018-11-15
### Changed
- guide for pose estimation - flipping (PR #884)
- cv2 transform support 2 modes (PR #885)
### Dependencies Update
- pytest>=3.6,<3.9 => pytest>=3.6,<3.10 (PR #874)
- requests>=2.19,<2.20 => requests>=2.19,<2.21 (PR #874)
- tqdm>=4.23,<4.28 => tqdm>=4.23,<4.29 (PR #878)
- pytest>=3.6,<3.10 => pytest>=3.6,<3.11 (PR #886)
- pytest-xdist>=1.22,<1.24 => pytest-xdist>=1.22,<1.25 (PR #883)
- tensorflow>=1.6,<1.12 => tensorflow>=1.6,<1.13 (PR #886)
### Contributors
- @zsdonghao: #884 #885
## [1.11.0] - 2018-10-18
### Added
- Layer:
  - Release `GroupNormLayer` (PR #850)
- Image affine transformation APIs
  - `affine_rotation_matrix` (PR #857)
  - `affine_horizontal_flip_matrix` (PR #857)
  - `affine_vertical_flip_matrix` (PR #857)
  - `affine_shift_matrix` (PR #857)
  - `affine_shear_matrix` (PR #857)
  - `affine_zoom_matrix` (PR #857)
  - `affine_transform_cv2` (PR #857)
  - `affine_transform_keypoints` (PR #857)
- Affine transformation tutorial
  - `examples/data_process/tutorial_fast_affine_transform.py` (PR #857)
### Changed
- BatchNormLayer: support `data_format`
### Dependencies Update
- matplotlib>=2.2,<2.3 => matplotlib>=2.2,<3.1 (PR #845)
- pydocstyle>=2.1,<2.2 => pydocstyle>=2.1,<3.1 (PR #866)
- scikit-learn>=0.19,<0.20 => scikit-learn>=0.19,<0.21 (PR #851)
- sphinx>=1.7,<1.8 => sphinx>=1.7,<1.9 (PR #842)
- tensorflow>=1.6,<1.11 => tensorflow>=1.6,<1.12 (PR #853)
- tqdm>=4.23,<4.26 => tqdm>=4.23,<4.28 (PR #862 & #868)
- yapf>=0.22,<0.24 => yapf>=0.22,<0.25 (PR #829)
### Fixed
- Correct offset calculation in `tl.prepro.transform_matrix_offset_center` (PR #855)
### Contributors
- @2wins: #850 #855
- @DEKHTIARJonathan: #853
- @zsdonghao: #857
- @luomai: #857
## [1.10.1] - 2018-09-07
### Added
- unittest `tests/test_timeout.py` has been added to ensure the network creation process does not freeze.
### Changed
- removed 'tensorboard' param, replaced by 'tensorboard_dir' in `tensorlayer/utils.py` with customizable tensorboard directory (PR #819)
### Removed
- TL Graph API removed. The memory leak issues with this API will be fixed and the API re-integrated in TL 2.0 (PR #818)
### Fixed
- Issue #817 fixed: TL 1.10.0 - Memory Leaks and very slow network creation.
### Dependencies Update
- autopep8>=1.3,<1.4 => autopep8>=1.3,<1.5 (PR #815)
- imageio>=2.3,<2.4 => imageio>=2.3,<2.5 (PR #823)
- pytest>=3.6,<3.8 => pytest>=3.6,<3.9 (PR #823)
- pytest-cov>=2.5,<2.6 => pytest-cov>=2.5,<2.7 (PR #820)
### Contributors
- @DEKHTIARJonathan: #815 #818 #820 #823
- @ndiy: #819
- @zsdonghao: #818
## [1.10.0] - 2018-09-02
### Added
- API:
  - Add `tl.model.vgg19` (PR #698)
  - Add `tl.logging.contrib.hyperdash` (PR #739)
  - Add `tl.distributed.trainer` (PR #700)
  - Add `prefetch_buffer_size` to the `tl.distributed.trainer` (PR #766)
  - Add `tl.db.TensorHub` (PR #751)
  - Add `tl.files.save_graph` (PR #751)
  - Add `tl.files.load_graph_` (PR #751)
  - Add `tl.files.save_graph_and_params` (PR #751)
  - Add `tl.files.load_graph_and_params` (PR #751)
  - Add `tl.prepro.keypoint_random_xxx` (PR #787)
- Documentation:
  - Add binary, ternary and dorefa links (PR #711)
  - Update input scale of VGG16 and VGG19 to 0~1 (PR #736)
  - Update database (PR #751)
- Layer:
  - Release SwitchNormLayer (PR #737)
  - Release QuanConv2d, QuanConv2dWithBN, QuanDenseLayer, QuanDenseLayerWithBN (PR #735)
  - Update Core Layer to support graph (PR #751)
  - All Pooling layers support `data_format` (PR #809)
- Setup:
  - Creation of installation flags `all_dev`, `all_cpu_dev`, and `all_gpu_dev` (PR #739)
- Examples:
  - change folder structure (PR #802)
  - `tutorial_models_vgg19` has been introduced to show how to use `tl.model.vgg19` (PR #698).
  - fix bug of `tutorial_bipedalwalker_a3c_continuous_action.py` (PR #734, Issue #732)
  - `tutorial_models_vgg16` and `tutorial_models_vgg19` input scale changed from [0,255] to [0,1] (PR #710)
  - `tutorial_mnist_distributed_trainer.py` and `tutorial_cifar10_distributed_trainer.py` are added to explain the uses of Distributed Trainer (PR #700)
  - add `tutorial_quanconv_cifar10.py` and `tutorial_quanconv_mnist.py` (PR #735)
  - add `tutorial_work_with_onnx.py` (PR #775)
- Applications:
  - [Arbitrary Style Transfer in Real-time with Adaptive Instance Normalization](https://arxiv.org/abs/1703.06868) (PR #799)
### Changed
- function minibatches changed to avoid wasting samples. (PR #762)
- input scale in both vgg16 and vgg19 changed from [0,255] to [0,1] (PR #710)
- Dockerfiles merged and refactored into one file (PR #747)
- LazyImports moved to the most **top level** imports as possible (PR #739)
- some new test functions have been added in `test_layers_convolution.py`, `test_layers_normalization.py`, `test_layers_core.py` (PR #735)
- documentation now uses mock imports, reducing the number of dependencies needed to compile the documentation (PR #785)
- fixed and enforced pydocstyle D210, D200, D301, D207, D403, D204, D412, D402, D300, D208 (PR #784)
### Deprecated
- `tl.logging.warn` has been deprecated in favor of `tl.logging.warning` (PR #739)
### Removed
- `conv_layers()` has been removed in both vgg16 and vgg19 (PR #710)
- graph API (PR #818)
### Fixed
- import error caused by matplotlib on OSX (PR #705)
- missing import in tl.prepro (PR #712)
- Dockerfiles import error fixed - issue #733 (PR #747)
- Fix a typo in `absolute_difference_error` in file: `tensorlayer/cost.py` - Issue #753 (PR #759)
- Fix the bug of scaling the learning rate of trainer (PR #776)
- log error instead of info when npz file not found. (PR #812)
### Dependencies Update
- numpy>=1.14,<1.15 => numpy>=1.14,<1.16 (PR #754)
- pymongo>=3.6,<3.7 => pymongo>=3.6,<3.8 (PR #750)
- pytest>=3.6,<3.7 => pytest>=3.6,<3.8 (PR #798)
- pytest-xdist>=1.22,<1.23 => pytest-xdist>=1.22,<1.24 (PR #805 and #806)
- tensorflow>=1.8,<1.9 => tensorflow>=1.6,<1.11 (PR #739 and PR #798)
- tqdm>=4.23,<4.25 => tqdm>=4.23,<4.26 (PR #798)
- yapf>=0.21,<0.22 => yapf>=0.22,<0.24 (PR #798 #808)
### Contributors
- @DEKHTIARJonathan: #739 #747 #750 #754
- @lgarithm: #705 #700
- @OwenLiuzZ: #698 #710 #775 #776
- @zsdonghao: #711 #712 #734 #736 #737 #700 #751 #809 #818
- @luomai: #700 #751 #766 #802
- @XJTUWYD: #735
- @mutewall: #735
- @thangvubk: #759
- @JunbinWang: #796
- @boldjoel: #787
## [1.9.1] - 2018-07-30
### Fixed
- Issue with tensorflow 1.10.0 fixed
## [1.9.0] - 2018-06-16
### Added
- API:
  - `tl.alphas` and `tl.alphas_like` added following the tf.ones/zeros and tf.zeros_like/ones_like (PR #580)
  - `tl.lazy_imports.LazyImport` to import heavy libraries only when necessary (PR #667)
  - `tl.act.leaky_relu6` and `tl.layers.PRelu6Layer` have been added (PR #686)
  - `tl.act.leaky_twice_relu6` and `tl.layers.PTRelu6Layer` have been added (PR #686)
- CI Tool:
  - [Stale Probot](https://github.com/probot/stale) added to clean stale issues (PR #573)
  - [Changelog Probot](https://github.com/mikz/probot-changelog) Configuration added (PR #637)
  - Travis Builds now handling a matrix of TF Version from TF==1.6.0 to TF==1.8.0 (PR #644)
  - CircleCI added to build and upload Docker Containers for each PR merged and tag release (PR #648)
- Decorator:
  - `tl.decorators` API created including `deprecated_alias` and `private_method` (PR #660)
  - `tl.decorators` API enriched with `protected_method` (PR #675)
  - `tl.decorators` API enriched with `deprecated` directly raising warning and modifying documentation (PR #691)
- Docker:
  - Containers for each release and for each PR merged on master built (PR #648)
  - Containers built in the following configurations (PR #648):
    - py2 + cpu
    - py2 + gpu
    - py3 + cpu
    - py3 + gpu
- Documentation:
  - Clean README.md (PR #677)
  - Release semantic version added on index page (PR #633)
  - Optimizers page added (PR #636)
  - `AMSGrad` added on the Optimizers page (PR #636)
- Layer:
  - ElementwiseLambdaLayer added to use custom function to connect multiple layer inputs (PR #579)
  - AtrousDeConv2dLayer added (PR #662)
  - Fix bugs of using `tf.layers` in CNN (PR #686)
- Optimizer:
  - AMSGrad Optimizer added based on `On the Convergence of Adam and Beyond (ICLR 2018)` (PR #636)
- Setup:
  - Creation of installation flags `all`, `all_cpu`, and `all_gpu` (PR #660)
- Test:
  - `test_utils_predict.py` added to reproduce and fix issue #288 (PR #566)
  - `Layer_DeformableConvolution_Test` added to reproduce issue #572 with deformable convolution (PR #573)
  - `Array_Op_Alphas_Test` and `Array_Op_Alphas_Like_Test` added to test `tensorlayer/array_ops.py` file (PR #580)
  - `test_optimizer_amsgrad.py` added to test `AMSGrad` optimizer (PR #636)
  - `test_logging.py` added to ensure robustness of the logging API (PR #645)
  - `test_decorators.py` added (PR #660)
  - `test_activations.py` added (PR #686)
- Tutorials:
  - `tutorial_tfslim` has been introduced to show how to use `SlimNetsLayer` (PR #560).
  - add the following to all tutorials (PR #697):
    ```python
    tf.logging.set_verbosity(tf.logging.DEBUG)
    tl.logging.set_verbosity(tl.logging.DEBUG)
    ```
### Changed
- Tensorflow CPU & GPU dependencies moved to separated requirement files in order to allow PyUP.io to parse them (PR #573)
- The document of LambdaLayer for linking it with ElementwiseLambdaLayer (PR #587)
- RTD links point to stable documentation instead of latest used for development (PR #633)
- TF Versions older than 1.6.0 are officially unsupported and raise an exception (PR #644)
- README.md Badges Updated with Supported Python and Tensorflow Versions (PR #644)
- TL logging API has been made consistent with the TF logging API and thread-safe (PR #645)
- Relative Imports changed for absolute imports (PR #657)
- `tl.files` refactored into a directory with numerous files (PR #657)
- `tl.files.voc_dataset` fixed because the original Pascal VOC website was down (PR #657)
- extra requirements hidden inside the library added in the project requirements (PR #657)
- requirements files refactored in `requirements/` directory (PR #657)
- README.md and other markdown files have been refactored and cleaned. (PR #639)
- Ternary Convolution Layer added in unittest (PR #658)
- Convolution Layers unittests have been cleaned & refactored (PR #658)
- All the tests are now using a DEBUG level verbosity when run individually (PR #660)
- `tf.identity` as activation is **ignored**, thus reducing the size of the graph by removing useless operations (PR #667)
- argument dictionaries are now checked and saved within the `Layer` Base Class (PR #667)
- `Layer` Base Class now presenting methods to update faultlessly `all_layers`, `all_params`, and `all_drop` (PR #675)
- Input Layers have been removed from `tl.layers.core` and added to `tl.layers.inputs` (PR #675)
- Input Layers are now considered as true layers in the graph (they represent a placeholder), unittests have been updated (PR #675)
- Layer API is simplified, with automatic feeding `prev_layer` into `self.inputs` (PR #675)
- Complete Documentation Refactoring and Reorganization (namely Layer APIs) (PR #691)
### Deprecated
- `tl.layers.TimeDistributedLayer` argument `args` is deprecated in favor of `layer_args` (PR #667)
- `tl.act.leaky_relu` has been deprecated in favor of `tf.nn.leaky_relu` (PR #686)
### Removed
- `assert()` calls removed and replaced by `raise AssertionError()` (PR #667)
- `tl.identity` is removed, not used anymore and deprecated for a long time (PR #667)
- All code specific to `TF.__version__ < "1.6"` has been removed (PR #675)
### Fixed
- Issue #498 - Deprecation Warning Fix in `tl.layers.RNNLayer` with `inspect` (PR #574)
- Issue #498 - Deprecation Warning Fix in `tl.files` with truth value of an empty array is ambiguous (PR #575)
- Issue #565 related to `tl.utils.predict` fixed - `np.hstack` problem in which the results for multiple batches are stacked along `axis=1` (PR #566)
- Issue #572 with `tl.layers.DeformableConv2d` fixed (PR #573)
- Issue #664 with `tl.layers.ConvLSTMLayer` fixed (PR #676)
- Typo in the document of ElementwiseLambdaLayer (PR #588)
- Error in `tl.layers.TernaryConv2d` fixed - self.inputs not defined (PR #658)
- Deprecation warning fixed in `tl.layers.binary._compute_threshold()` (PR #658)
- All references to `tf.logging` replaced by `tl.logging` (PR #661)
- Duplicated code removed when bias was used (PR #667)
- `tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops` is now lazy loaded to prevent a systematic error being raised (PR #675)
- Documentation not built in RTD due to old version of theme in docs directory fixed (PR #703)
- Tutorial:
  - `tutorial_word2vec_basic.py` saving issue #476 fixed (PR #635)
  - All tutorials tested and errors have been fixed (PR #635)
### Dependencies Update
- Update pytest from 3.5.1 to 3.6.0 (PR #647)
- Update progressbar2 from 3.37.1 to 3.38.0 (PR #651)
- Update scikit-image from 0.13.1 to 0.14.0 (PR #656)
- Update keras from 2.1.6 to 2.2.0 (PR #684)
- Update requests from 2.18.4 to 2.19.0 (PR #695)
### Contributors
- @lgarithm: #563
- @DEKHTIARJonathan: #573 #574 #575 #580 #633 #635 #636 #639 #644 #645 #648 #657 #667 #658 #659 #660 #661 #666 #667 #672 #675 #683 #686 #687 #690 #691 #692 #703
- @2wins: #560 #566 #662
- @One-sixth: #579
- @zsdonghao: #587 #588 #639 #685 #697
- @luomai: #639 #677
- @dengyueyun666: #676
## [1.8.5] - 2018-05-09
### Added
- Github Templates added (by @DEKHTIARJonathan)
  - New issues Template
  - New PR Template
- Travis Deploy Automation on new Tag (by @DEKHTIARJonathan)
  - Deploy to PyPI and create a new version.
  - Deploy to Github Releases and upload the wheel files
- PyUP.io has been added to ensure we are compatible with the latest libraries (by @DEKHTIARJonathan)
- `deconv2d` now handling dilation_rate (by @zsdonghao)
- Documentation unittest added (by @DEKHTIARJonathan)
- `test_layers_core` has been added to ensure that `LayersConfig` is abstract.
### Changed
- All Tests Refactored - Now using unittests and run with PyTest (by @DEKHTIARJonathan)
- Documentation updated (by @zsdonghao)
- Package Setup Refactored (by @DEKHTIARJonathan)
- Dataset Download now using library progressbar2 (by @DEKHTIARJonathan)
- `deconv2d` function transformed into Class (by @zsdonghao)
- `conv1d` function transformed into Class (by @zsdonghao)
- super resolution functions transformed into Class (by @zsdonghao)
- YAPF coding style improved and enforced (by @DEKHTIARJonathan)
### Fixed
- Backward Compatibility Restored with deprecation warnings (by @DEKHTIARJonathan)
- Tensorflow Deprecation Fix (Issue #498):
  - AverageEmbeddingInputlayer (by @zsdonghao)
  - load_mpii_pose_dataset (by @zsdonghao)
- maxPool2D initializer issue #551 (by @zsdonghao)
- `LayersConfig` class has been enforced as abstract
- Pooling Layer Issue #557 fixed (by @zsdonghao)
### Dependencies Update
- scipy>=1.0,<1.1 => scipy>=1.1,<1.2
### Contributors
@zsdonghao @luomai @DEKHTIARJonathan
[Unreleased]: https://github.com/tensorlayer/tensorlayer/compare/2.2.0...master
[2.2.0]: https://github.com/tensorlayer/tensorlayer/compare/2.1.0...2.2.0
[2.1.0]: https://github.com/tensorlayer/tensorlayer/compare/2.0.2...2.1.0
[2.0.2]: https://github.com/tensorlayer/tensorlayer/compare/2.0.1...2.0.2
[2.0.1]: https://github.com/tensorlayer/tensorlayer/compare/2.0.0...2.0.1
[2.0.0]: https://github.com/tensorlayer/tensorlayer/compare/1.11.1...2.0.0
[1.11.1]: https://github.com/tensorlayer/tensorlayer/compare/1.11.0...1.11.1
[1.11.0]: https://github.com/tensorlayer/tensorlayer/compare/1.10.1...1.11.0
[1.10.1]: https://github.com/tensorlayer/tensorlayer/compare/1.10.0...1.10.1
[1.10.0]: https://github.com/tensorlayer/tensorlayer/compare/1.9.1...1.10.0
[1.9.1]: https://github.com/tensorlayer/tensorlayer/compare/1.9.0...1.9.1
[1.9.0]: https://github.com/tensorlayer/tensorlayer/compare/1.8.5...1.9.0
[1.8.5]: https://github.com/tensorlayer/tensorlayer/compare/1.8.4...1.8.5
@@ -0,0 +1,199 @@ | |||
# TensorLayer Contributor Guideline | |||
## Welcome to contribute! | |||
You are more than welcome to contribute to TensorLayer! If you have any improvement, please send us your [pull requests](https://help.github.com/en/articles/about-pull-requests). You may implement your improvement on your [fork](https://help.github.com/en/articles/working-with-forks). | |||
## Checklist | |||
* Continuous integration | |||
* Build from sources | |||
* Unittest | |||
* Documentation | |||
* General intro to TensorLayer2 | |||
* How to contribute a new `Layer` | |||
* How to contribute a new `Model` | |||
* How to contribute a new example/tutorial | |||
## Continuous integration | |||
We appreciate contributions | |||
either by adding / improving examples or extending / fixing the core library. | |||
To make your contributions, you would need to follow the [pep8](https://www.python.org/dev/peps/pep-0008/) coding style and [numpydoc](https://numpydoc.readthedocs.io/en/latest/) document style. | |||
We rely on Continuous Integration (CI) for checking push commits. | |||
The following tools are used to ensure that your commits can pass through the CI test: | |||
* [yapf](https://github.com/google/yapf) (format code), compulsory | |||
* [isort](https://github.com/timothycrosley/isort) (sort imports), optional | |||
* [autoflake](https://github.com/myint/autoflake) (remove unused imports), optional | |||
You can simply run | |||
```bash | |||
make format | |||
``` | |||
to apply those tools before submitting your PR. | |||
## Build from sources
```bash
# First clone the repository and change the current directory to the newly cloned repository
git clone https://github.com/zsdonghao/tensorlayer2.git
cd tensorlayer2

# Install virtualenv if necessary
pip install virtualenv
# Then create a virtualenv called `venv`
virtualenv venv

# Activate the virtualenv
## Linux:
source venv/bin/activate
## Windows:
venv\Scripts\activate.bat

# ============= IF TENSORFLOW IS NOT ALREADY INSTALLED ============= #
# basic installation
pip install .
# advanced: for a machine **without** an NVIDIA GPU
pip install -e ".[all_cpu_dev]"
# advanced: for a machine **with** an NVIDIA GPU
pip install -e ".[all_gpu_dev]"
```
## Unittest
Launching the unittests for the whole repo:
```bash
# install pytest
pip install pytest
# run pytest
pytest
```
Running the unittests for your implemented module only:
```bash
# install coverage
pip install coverage

cd /path/to/your/unittest/code
# For example: cd tests/layers/

# run unittest
coverage run --source myproject.module -m unittest discover
# For example: coverage run --source tensorlayer.layers -m unittest discover

# generate html report
coverage html
```
## Documentation
Even though you follow the [numpydoc](https://numpydoc.readthedocs.io/en/latest/) document style when writing your code,
this alone does not make those docstrings appear in the TensorLayer online documentation.
You also need to modify the corresponding RST files in `docs/modules`.
For example, to add your newly implemented pooling layer to the documentation, modify `docs/modules/layer.rst`. First, insert the layer name under the layer list:
```rst
Layer list
----------

.. autosummary::

   NewPoolingLayer
```
Second, find the pooling layer part and add:
```rst
.. -----------------------------------------------------------
..                       Pooling Layers
.. -----------------------------------------------------------

Pooling Layers
------------------------

New Pooling Layer
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: NewPoolingLayer
```
Finally, test with the local documentation:
```bash
cd ./docs
make clean
make html
# then view the generated local documentation at ./html/index.html
```
## General intro to TensorLayer2
* TensorLayer2 is built on [TensorFlow2](https://www.tensorflow.org/alpha), so TensorLayer2 is purely eager: no sessions, no globals.
* TensorLayer2 supports APIs to build static models and dynamic models. Therefore, all `Layers` should be compatible with the two modes.
```python
# An example of a static model
# A static model has inputs and outputs with fixed shape.
inputs = tl.layers.Input([32, 784])
dense1 = tl.layers.Dense(n_units=800, act=tf.nn.relu, in_channels=784, name='dense1')(inputs)
dense2 = tl.layers.Dense(n_units=10, act=tf.nn.relu, in_channels=800, name='dense2')(dense1)
model = tl.models.Model(inputs=inputs, outputs=dense2)

# An example of a dynamic model
# A dynamic model has more flexibility. The inputs and outputs may be different in different runs.
class CustomizeModel(tl.models.Model):
    def __init__(self):
        super(CustomizeModel, self).__init__()
        self.dense1 = tl.layers.Dense(n_units=800, act=tf.nn.relu, in_channels=784, name='dense1')
        self.dense2 = tl.layers.Dense(n_units=10, act=tf.nn.relu, in_channels=800, name='dense2')

    # a dynamic model allows more flexibility by customising forwarding.
    def forward(self, x, bar=None):
        d1 = self.dense1(x)
        if bar:
            return d1
        else:
            d2 = self.dense2(d1)
            return d1, d2

model = CustomizeModel()
```
* More examples can be found in [examples](examples/) and [tests/layers](tests/layers/). Note that not all of them are completed.
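To make the two modes concrete, here is a short usage sketch continuing from the code above; the random input batch `x` is invented for illustration, and the train/eval switching follows the behaviour of the existing tutorials:
```python
import numpy as np
import tensorflow as tf

# A hypothetical batch of 32 flattened 28x28 images.
x = tf.convert_to_tensor(np.random.random([32, 784]).astype(np.float32))

model = CustomizeModel()
model.train()                 # switch to training behaviour before calling
d1, d2 = model(x)             # forward(x, bar=None) returns both outputs
model.eval()                  # switch to inference behaviour
d1_only = model(x, bar=True)  # extra keyword args are passed to forward()
```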
## How to contribute a new `Layer`
* A `NewLayer` should be derived from the base class [`Layer`](tensorlayer/layers/core.py).
* Member methods to be overridden:
  - `__init__(self, args1, args2, inputs_shape=None, name=None)`: The constructor of the `NewLayer`, which should
    - Call `super(NewLayer, self).__init__(name)` to construct the base.
    - Define member variables based on `args1`, `args2` (or even more).
    - If `inputs_shape` is provided, call `self.build(inputs_shape)` and set `self._built=True`. Note that sometimes `in_channels` alone is enough to build the layer, as in [`Dense`](tensorlayer/layers/dense/base_dense.py).
    - Log construction info via `logging.info(...)`.
  - `__repr__(self)`: Return a printable representation of the `NewLayer`.
  - `build(self, inputs_shape)`: Build the `NewLayer` by defining its weights.
  - `forward(self, inputs, **kwargs)`: Forward feeding the `NewLayer`. Note that the forward feeding of some `Layers` may differ between training and testing, as in [`Dropout`](tensorlayer/layers/dropout.py).
* Unittest:
  - Unittests should be written before a pull request is opened. Unittest code can be placed in [tests/](tests/).
* Documents:
  - Please write a description for each class and method in RST format. The description may include the functionality, arguments, references, and examples of the `NewLayer`.
* Examples: [`Dense`](tensorlayer/layers/dense/base_dense.py), [`Dropout`](tensorlayer/layers/dropout.py), [`Conv`](tensorlayer/layers/convolution/simplified_conv.py). A minimal sketch of such a layer follows this list.
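As a rough illustration of the checklist above, below is a minimal sketch of a custom layer. The layer name `Scale`, its single weight, and the use of the `_get_weights` helper are assumptions made for this example rather than a definitive recipe; check [`Dense`](tensorlayer/layers/dense/base_dense.py) for the authoritative pattern.
```python
import tensorlayer as tl
from tensorlayer import logging
from tensorlayer.layers.core import Layer


class Scale(Layer):
    """Hypothetical layer that multiplies its input by a single trainable scalar."""

    def __init__(self, init_scale=1.0, name=None):
        super(Scale, self).__init__(name)  # construct the base class
        self.init_scale = init_scale       # member variable derived from the args
        # The weight shape does not depend on the input shape,
        # so the layer can be built immediately.
        self.build(None)
        self._built = True
        logging.info("Scale %s: init_scale: %f" % (self.name, init_scale))

    def __repr__(self):
        return "Scale(init_scale={}, name='{}')".format(self.init_scale, self.name)

    def build(self, inputs_shape):
        # Define the weights of the layer.
        self.scale = self._get_weights("scale", shape=[1], init=tl.initializers.constant(self.init_scale))

    def forward(self, inputs):
        # This layer behaves identically during training and testing.
        return inputs * self.scale
```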
## How to contribute a new `Model`
* A `NewModel` should be derived from the base class [`Model`](tensorlayer/models/core.py) (if dynamic) or be an instance of [`Model`](tensorlayer/models/core.py) (if static).
* A static `NewModel` should have fixed inputs and outputs. Please check the example [`VGG_Static`](tensorlayer/models/vgg.py).
* A dynamic `NewModel` has more flexibility. Please check the example [`VGG16`](tensorlayer/models/vgg16.py).
## How to contribute a new example/tutorial
* A new example/tutorial should implement a complete workflow of deep learning which includes (but is not limited to)
  - `Models` construction based on `Layers`.
  - Data processing and loading.
  - Training and testing.
  - Forward feeding by calling the models.
  - Loss function.
  - Back propagation by `tf.GradientTape()`.
  - Model saving and restoring.
* Examples: [MNIST](examples/basic_tutorials/tutorial_mnist_mlp_static.py), [CIFAR10](examples/basic_tutorials/tutorial_cifar10_cnn_static.py), [FastText](examples/text_classification/tutorial_imdb_fasttext.py). A condensed sketch of this workflow follows.
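For orientation, here is a condensed sketch of that workflow on MNIST, assuming the loaders, cost helpers, and model API behave as in the linked tutorials; hyperparameters such as the batch size and learning rate are arbitrary:
```python
import numpy as np
import tensorflow as tf
import tensorlayer as tl

# Data processing and loading.
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))

# Model construction based on Layers (a small static MLP).
ni = tl.layers.Input([None, 784])
nn = tl.layers.Dense(n_units=800, act=tf.nn.relu)(ni)
nn = tl.layers.Dense(n_units=10, act=None)(nn)
model = tl.models.Model(inputs=ni, outputs=nn)

optimizer = tf.optimizers.Adam(learning_rate=1e-3)

for epoch in range(5):
    model.train()  # enable training behaviour (e.g. Dropout, BatchNorm)
    for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size=128, shuffle=True):
        with tf.GradientTape() as tape:
            # Forward feeding by calling the model.
            logits = model(X_batch.astype(np.float32))
            # Loss function.
            loss = tl.cost.cross_entropy(logits, y_batch.astype(np.int64), name='train_loss')
        # Back propagation by tf.GradientTape().
        grads = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))

# Model saving and restoring.
model.save_weights('model.h5')
model.load_weights('model.h5')
```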
@@ -1,208 +0,0 @@ | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, | |||
AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, and distribution | |||
as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by the copyright | |||
owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all other entities | |||
that control, are controlled by, or are under common control with that entity. | |||
For the purposes of this definition, "control" means (i) the power, direct | |||
or indirect, to cause the direction or management of such entity, whether | |||
by contract or otherwise, or (ii) ownership of fifty percent (50%) or more | |||
of the outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions | |||
granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, including | |||
but not limited to software source code, documentation source, and configuration | |||
files. | |||
"Object" form shall mean any form resulting from mechanical transformation | |||
or translation of a Source form, including but not limited to compiled object | |||
code, generated documentation, and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or Object form, | |||
made available under the License, as indicated by a copyright notice that | |||
is included in or attached to the work (an example is provided in the Appendix | |||
below). | |||
"Derivative Works" shall mean any work, whether in Source or Object form, | |||
that is based on (or derived from) the Work and for which the editorial revisions, | |||
annotations, elaborations, or other modifications represent, as a whole, an | |||
original work of authorship. For the purposes of this License, Derivative | |||
Works shall not include works that remain separable from, or merely link (or | |||
bind by name) to the interfaces of, the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including the original version | |||
of the Work and any modifications or additions to that Work or Derivative | |||
Works thereof, that is intentionally submitted to Licensor for inclusion in | |||
the Work by the copyright owner or by an individual or Legal Entity authorized | |||
to submit on behalf of the copyright owner. For the purposes of this definition, | |||
"submitted" means any form of electronic, verbal, or written communication | |||
sent to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, and | |||
issue tracking systems that are managed by, or on behalf of, the Licensor | |||
for the purpose of discussing and improving the Work, but excluding communication | |||
that is conspicuously marked or otherwise designated in writing by the copyright | |||
owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf | |||
of whom a Contribution has been received by Licensor and subsequently incorporated | |||
within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of this | |||
License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, | |||
no-charge, royalty-free, irrevocable copyright license to reproduce, prepare | |||
Derivative Works of, publicly display, publicly perform, sublicense, and distribute | |||
the Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of this License, | |||
each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, | |||
no-charge, royalty-free, irrevocable (except as stated in this section) patent | |||
license to make, have made, use, offer to sell, sell, import, and otherwise | |||
transfer the Work, where such license applies only to those patent claims | |||
licensable by such Contributor that are necessarily infringed by their Contribution(s) | |||
alone or by combination of their Contribution(s) with the Work to which such | |||
Contribution(s) was submitted. If You institute patent litigation against | |||
any entity (including a cross-claim or counterclaim in a lawsuit) alleging | |||
that the Work or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses granted to You | |||
under this License for that Work shall terminate as of the date such litigation | |||
is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the Work or | |||
Derivative Works thereof in any medium, with or without modifications, and | |||
in Source or Object form, provided that You meet the following conditions: | |||
(a) You must give any other recipients of the Work or Derivative Works a copy | |||
of this License; and | |||
(b) You must cause any modified files to carry prominent notices stating that | |||
You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works that You distribute, | |||
all copyright, patent, trademark, and attribution notices from the Source | |||
form of the Work, excluding those notices that do not pertain to any part | |||
of the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its distribution, | |||
then any Derivative Works that You distribute must include a readable copy | |||
of the attribution notices contained within such NOTICE file, excluding those | |||
notices that do not pertain to any part of the Derivative Works, in at least | |||
one of the following places: within a NOTICE text file distributed as part | |||
of the Derivative Works; within the Source form or documentation, if provided | |||
along with the Derivative Works; or, within a display generated by the Derivative | |||
Works, if and wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and do not modify the | |||
License. You may add Your own attribution notices within Derivative Works | |||
that You distribute, alongside or as an addendum to the NOTICE text from the | |||
Work, provided that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and may provide | |||
additional or different license terms and conditions for use, reproduction, | |||
or distribution of Your modifications, or for any such Derivative Works as | |||
a whole, provided Your use, reproduction, and distribution of the Work otherwise | |||
complies with the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, any | |||
Contribution intentionally submitted for inclusion in the Work by You to the | |||
Licensor shall be under the terms and conditions of this License, without | |||
any additional terms or conditions. Notwithstanding the above, nothing herein | |||
shall supersede or modify the terms of any separate license agreement you | |||
may have executed with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade names, | |||
trademarks, service marks, or product names of the Licensor, except as required | |||
for reasonable and customary use in describing the origin of the Work and | |||
reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or agreed to | |||
in writing, Licensor provides the Work (and each Contributor provides its | |||
Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | |||
KIND, either express or implied, including, without limitation, any warranties | |||
or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR | |||
A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness | |||
of using or redistributing the Work and assume any risks associated with Your | |||
exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, whether | |||
in tort (including negligence), contract, or otherwise, unless required by | |||
applicable law (such as deliberate and grossly negligent acts) or agreed to | |||
in writing, shall any Contributor be liable to You for damages, including | |||
any direct, indirect, special, incidental, or consequential damages of any | |||
character arising as a result of this License or out of the use or inability | |||
to use the Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all other commercial | |||
damages or losses), even if such Contributor has been advised of the possibility | |||
of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing the Work | |||
or Derivative Works thereof, You may choose to offer, and charge a fee for, | |||
acceptance of support, warranty, indemnity, or other liability obligations | |||
and/or rights consistent with this License. However, in accepting such obligations, | |||
You may act only on Your own behalf and on Your sole responsibility, not on | |||
behalf of any other Contributor, and only if You agree to indemnify, defend, | |||
and hold each Contributor harmless for any liability incurred by, or claims | |||
asserted against, such Contributor by reason of your accepting any such warranty | |||
or additional liability. END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following boilerplate | |||
notice, with the fields enclosed by brackets "[]" replaced with your own identifying | |||
information. (Don't include the brackets!) The text should be enclosed in | |||
the appropriate comment syntax for the file format. We also recommend that | |||
a file or class name and description of purpose be included on the same "printed | |||
page" as the copyright notice for easier identification within third-party | |||
archives. | |||
Copyright [yyyy] [name of copyright owner] | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. |
@@ -0,0 +1,211 @@ | |||
License | |||
======= | |||
Copyright (c) 2016~2018 The TensorLayer contributors. All rights reserved. | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ | |||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, | |||
and distribution as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by | |||
the copyright owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all | |||
other entities that control, are controlled by, or are under common | |||
control with that entity. For the purposes of this definition, | |||
"control" means (i) the power, direct or indirect, to cause the | |||
direction or management of such entity, whether by contract or | |||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity | |||
exercising permissions granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, | |||
including but not limited to software source code, documentation | |||
source, and configuration files. | |||
"Object" form shall mean any form resulting from mechanical | |||
transformation or translation of a Source form, including but | |||
not limited to compiled object code, generated documentation, | |||
and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or | |||
Object form, made available under the License, as indicated by a | |||
copyright notice that is included in or attached to the work | |||
(an example is provided in the Appendix below). | |||
"Derivative Works" shall mean any work, whether in Source or Object | |||
form, that is based on (or derived from) the Work and for which the | |||
editorial revisions, annotations, elaborations, or other modifications | |||
represent, as a whole, an original work of authorship. For the purposes | |||
of this License, Derivative Works shall not include works that remain | |||
separable from, or merely link (or bind by name) to the interfaces of, | |||
the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including | |||
the original version of the Work and any modifications or additions | |||
to that Work or Derivative Works thereof, that is intentionally | |||
submitted to Licensor for inclusion in the Work by the copyright owner | |||
or by an individual or Legal Entity authorized to submit on behalf of | |||
the copyright owner. For the purposes of this definition, "submitted" | |||
means any form of electronic, verbal, or written communication sent | |||
to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, | |||
and issue tracking systems that are managed by, or on behalf of, the | |||
Licensor for the purpose of discussing and improving the Work, but | |||
excluding communication that is conspicuously marked or otherwise | |||
designated in writing by the copyright owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||
on behalf of whom a Contribution has been received by Licensor and | |||
subsequently incorporated within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
copyright license to reproduce, prepare Derivative Works of, | |||
publicly display, publicly perform, sublicense, and distribute the | |||
Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
(except as stated in this section) patent license to make, have made, | |||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||
where such license applies only to those patent claims licensable | |||
by such Contributor that are necessarily infringed by their | |||
Contribution(s) alone or by combination of their Contribution(s) | |||
with the Work to which such Contribution(s) was submitted. If You | |||
institute patent litigation against any entity (including a | |||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses | |||
granted to You under this License for that Work shall terminate | |||
as of the date such litigation is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the | |||
Work or Derivative Works thereof in any medium, with or without | |||
modifications, and in Source or Object form, provided that You | |||
meet the following conditions: | |||
(a) You must give any other recipients of the Work or | |||
Derivative Works a copy of this License; and | |||
(b) You must cause any modified files to carry prominent notices | |||
stating that You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works | |||
that You distribute, all copyright, patent, trademark, and | |||
attribution notices from the Source form of the Work, | |||
excluding those notices that do not pertain to any part of | |||
the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its | |||
distribution, then any Derivative Works that You distribute must | |||
include a readable copy of the attribution notices contained | |||
within such NOTICE file, excluding those notices that do not | |||
pertain to any part of the Derivative Works, in at least one | |||
of the following places: within a NOTICE text file distributed | |||
as part of the Derivative Works; within the Source form or | |||
documentation, if provided along with the Derivative Works; or, | |||
within a display generated by the Derivative Works, if and | |||
wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and | |||
do not modify the License. You may add Your own attribution | |||
notices within Derivative Works that You distribute, alongside | |||
or as an addendum to the NOTICE text from the Work, provided | |||
that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and | |||
may provide additional or different license terms and conditions | |||
for use, reproduction, or distribution of Your modifications, or | |||
for any such Derivative Works as a whole, provided Your use, | |||
reproduction, and distribution of the Work otherwise complies with | |||
the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||
any Contribution intentionally submitted for inclusion in the Work | |||
by You to the Licensor shall be under the terms and conditions of | |||
this License, without any additional terms or conditions. | |||
Notwithstanding the above, nothing herein shall supersede or modify | |||
the terms of any separate license agreement you may have executed | |||
with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade | |||
names, trademarks, service marks, or product names of the Licensor, | |||
except as required for reasonable and customary use in describing the | |||
origin of the Work and reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or | |||
agreed to in writing, Licensor provides the Work (and each | |||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
implied, including, without limitation, any warranties or conditions | |||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||
appropriateness of using or redistributing the Work and assume any | |||
risks associated with Your exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, | |||
whether in tort (including negligence), contract, or otherwise, | |||
unless required by applicable law (such as deliberate and grossly | |||
negligent acts) or agreed to in writing, shall any Contributor be | |||
liable to You for damages, including any direct, indirect, special, | |||
incidental, or consequential damages of any character arising as a | |||
result of this License or out of the use or inability to use the | |||
Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all | |||
other commercial damages or losses), even if such Contributor | |||
has been advised of the possibility of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing | |||
the Work or Derivative Works thereof, You may choose to offer, | |||
and charge a fee for, acceptance of support, warranty, indemnity, | |||
or other liability obligations and/or rights consistent with this | |||
License. However, in accepting such obligations, You may act only | |||
on Your own behalf and on Your sole responsibility, not on behalf | |||
of any other Contributor, and only if You agree to indemnify, | |||
defend, and hold each Contributor harmless for any liability | |||
incurred by, or claims asserted against, such Contributor by reason | |||
of your accepting any such warranty or additional liability. | |||
END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following | |||
boilerplate notice, with the fields enclosed by brackets "[]" | |||
replaced with your own identifying information. (Don't include | |||
the brackets!) The text should be enclosed in the appropriate | |||
comment syntax for the file format. We also recommend that a | |||
file or class name and description of purpose be included on the | |||
same "printed page" as the copyright notice for easier | |||
identification within third-party archives. | |||
Copyright 2016, The TensorLayer Authors. | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. | |||
Contact | |||
======= | |||
Questions? Please contact hao.dong11@imperial.ac.uk |
@@ -0,0 +1,35 @@ | |||
default: | |||
@echo "Usage:" | |||
@echo "\tmake lint # run pylint" | |||
@echo "\tmake format # run yapf, autoflake and isort" | |||
@echo "\tmake install3 # install tensorlayer in current workspace with pip3" | |||
lint: | |||
pylint examples/*.py | |||
pylint tensorlayer | |||
test: | |||
python3 tests/models/test_model_core.py | |||
python3 tests/layers/test_layernode.py | |||
python3 tests/files/test_utils_saveload.py | |||
format: | |||
autoflake -i examples/*.py | |||
autoflake -i tensorlayer/*.py | |||
autoflake -i tensorlayer/**/*.py | |||
isort -rc examples | |||
isort -rc tensorlayer | |||
yapf -i examples/*.py | |||
yapf -i tensorlayer/*.py | |||
yapf -i tensorlayer/**/*.py | |||
install3: | |||
pip3 install -U . --user | |||
TAG = tensorlayer-docs:snapshot
doc: | |||
docker build --rm -t $(TAG) -f docker/docs/Dockerfile . |
@@ -1,20 +1,181 @@ | |||
<a href="https://tensorlayer.readthedocs.io/"> | |||
<div align="center"> | |||
<img src="img/tl_transparent_logo.png" width="50%" height="30%"/> | |||
</div> | |||
</a> | |||
<!--- [](https://badge.fury.io/py/tensorlayer) ---> | |||
<!--- ) ---> | |||
 | |||
[](https://github.com/tensorflow/tensorflow/releases) | |||
[](https://tensorlayer.readthedocs.io/) | |||
[](https://travis-ci.org/tensorlayer/tensorlayer) | |||
[](http://pepy.tech/project/tensorlayer) | |||
[](https://hub.docker.com/r/tensorlayer/tensorlayer/) | |||
[](https://www.codacy.com/app/tensorlayer/tensorlayer) | |||
<!--- [](https://circleci.com/gh/tensorlayer/tensorlayer/tree/master) ---> | |||
<!--- [](https://tensorlayercn.readthedocs.io/) | |||
<!--- [](https://pyup.io/repos/github/tensorlayer/tensorlayer/) ---> | |||
TensorLayer is a novel TensorFlow-based deep learning and reinforcement learning library designed for researchers and engineers. It provides an extensive collection of customizable neural layers for building complex AI models. TensorLayer was awarded the 2017 Best Open Source Software award by the [ACM Multimedia Society](https://twitter.com/ImperialDSI/status/923928895325442049).
TensorLayer can also be found at [iHub](https://code.ihub.org.cn/projects/328) and [Gitee](https://gitee.com/organizations/TensorLayer). | |||
# News | |||
🔥 Reinforcement Learning Model Zoo: [Low-level APIs for Research](https://github.com/tensorlayer/tensorlayer/tree/master/examples/reinforcement_learning) and [High-level APIs for Production](https://github.com/tensorlayer/RLzoo) | |||
🔥 [Sipeed Maxi-EMC](https://github.com/sipeed/Maix-EMC): Run TensorLayer models on the **low-cost AI chip** (e.g., K210) (Alpha Version) | |||
<!-- 🔥 [NNoM](https://github.com/majianjia/nnom): Run TensorLayer quantized models on the **MCU** (e.g., STM32) (Coming Soon) --> | |||
🔥 [Free GPU and storage resources](https://github.com/fangde/FreeGPU): TensorLayer users can access free GPU and storage resources donated by SurgicalAI. Thank you, SurgicalAI!
# Design Features | |||
TensorLayer is a new deep learning library designed with simplicity, flexibility and high performance in mind.
- ***Simplicity*** : TensorLayer has a high-level layer/model abstraction which is effortless to learn. You can learn how deep learning can benefit your AI tasks in minutes through the extensive [examples](https://github.com/tensorlayer/awesome-tensorlayer).
- ***Flexibility*** : TensorLayer APIs are transparent and flexible, inspired by the emerging PyTorch library. Compared to the Keras abstraction, TensorLayer makes it much easier to build and train complex AI models.
- ***Zero-cost Abstraction*** : Though simple to use, TensorLayer does not require you to compromise on TensorFlow's performance (check the benchmark section below for details).
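As a quick taste of these APIs, the following minimal sketch builds and runs a small MLP with the TensorLayer 2.0 `Model` API (the layer sizes and names here are illustrative, not prescriptive):

```python
import tensorflow as tf
import tensorlayer as tl

# Define a two-layer MLP using the high-level layer/model abstraction.
ni = tl.layers.Input([None, 784])
nn = tl.layers.Dense(n_units=800, act=tf.nn.relu)(ni)
nn = tl.layers.Dense(n_units=10, act=None)(nn)
MLP = tl.models.Model(inputs=ni, outputs=nn)

MLP.eval()  # switch to evaluation mode (disables dropout etc.)
outputs = MLP(tf.ones([1, 784]))  # a single forward pass
```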
TensorLayer stands at a unique spot among the TensorFlow wrappers. Other wrappers like Keras and TFLearn
hide many powerful features of TensorFlow and provide little support for writing custom AI models. Inspired by PyTorch, TensorLayer APIs are simple, flexible and Pythonic,
making them easy to learn while remaining flexible enough to cope with complex AI tasks.
TensorLayer has a fast-growing community. It has been used by researchers and engineers all over the world, including those from Peking University, | |||
Imperial College London, UC Berkeley, Carnegie Mellon University, Stanford University, and companies like Google, Microsoft, Alibaba, Tencent, Xiaomi, and Bloomberg. | |||
# Multilingual Documents | |||
TensorLayer has extensive documentation for both beginners and professionals. The documentation is available in | |||
both English and Chinese. | |||
[](https://tensorlayer.readthedocs.io/) | |||
[](https://tensorlayercn.readthedocs.io/) | |||
[](http://www.broadview.com.cn/book/5059/) | |||
If you want to try the experimental features on the master branch, you can find the latest documentation
[here](https://tensorlayer.readthedocs.io/en/latest/). | |||
# Extensive Examples | |||
You can find a large collection of examples that use TensorLayer [here](examples/) and in the following space:
<a href="https://github.com/tensorlayer/awesome-tensorlayer/blob/master/readme.md" target="\_blank"> | |||
<div align="center"> | |||
<img src="img/awesome-mentioned.png" width="40%"/> | |||
</div> | |||
</a> | |||
# Getting Started
TensorLayer 2.0 relies on TensorFlow, numpy, and others. To use GPUs, CUDA and cuDNN are required. | |||
Install TensorFlow: | |||
```bash | |||
pip3 install tensorflow-gpu==2.0.0-rc1 # TensorFlow GPU (version 2.0 RC1) | |||
pip3 install tensorflow # CPU version | |||
``` | |||
Install the stable release of TensorLayer: | |||
```bash | |||
pip3 install tensorlayer | |||
``` | |||
Install the unstable development version of TensorLayer: | |||
```bash | |||
pip3 install git+https://github.com/tensorlayer/tensorlayer.git | |||
``` | |||
If you want to install the additional dependencies, you can also run | |||
```bash | |||
pip3 install --upgrade tensorlayer[all] # all additional dependencies | |||
pip3 install --upgrade tensorlayer[extra] # only the `extra` dependencies | |||
pip3 install --upgrade tensorlayer[contrib_loggers] # only the `contrib_loggers` dependencies | |||
``` | |||
If you are a TensorFlow 1.X user, you can use TensorLayer 1.11.0:
```bash | |||
# For last stable version of TensorLayer 1.X | |||
pip3 install --upgrade tensorlayer==1.11.0 | |||
``` | |||
<!--- | |||
## Using Docker | |||
The [TensorLayer containers](https://hub.docker.com/r/tensorlayer/tensorlayer/) are built on top of the official [TensorFlow containers](https://hub.docker.com/r/tensorflow/tensorflow/): | |||
### Containers with CPU support | |||
```bash | |||
# for CPU version and Python 2 | |||
docker pull tensorlayer/tensorlayer:latest | |||
docker run -it --rm -p 8888:8888 -p 6006:6006 -e PASSWORD=JUPYTER_NB_PASSWORD tensorlayer/tensorlayer:latest | |||
# for CPU version and Python 3 | |||
docker pull tensorlayer/tensorlayer:latest-py3 | |||
docker run -it --rm -p 8888:8888 -p 6006:6006 -e PASSWORD=JUPYTER_NB_PASSWORD tensorlayer/tensorlayer:latest-py3 | |||
``` | |||
### Containers with GPU support | |||
NVIDIA-Docker is required for these containers to work: [Project Link](https://github.com/NVIDIA/nvidia-docker) | |||
```bash | |||
# for GPU version and Python 2 | |||
docker pull tensorlayer/tensorlayer:latest-gpu | |||
nvidia-docker run -it --rm -p 8888:8888 -p 6006:6006 -e PASSWORD=JUPYTER_NB_PASSWORD tensorlayer/tensorlayer:latest-gpu | |||
# for GPU version and Python 3 | |||
docker pull tensorlayer/tensorlayer:latest-gpu-py3 | |||
nvidia-docker run -it --rm -p 8888:8888 -p 6006:6006 -e PASSWORD=JUPYTER_NB_PASSWORD tensorlayer/tensorlayer:latest-gpu-py3 | |||
``` | |||
---> | |||
# Performance Benchmark | |||
The following table shows the training speeds of [VGG16](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) using TensorLayer and native TensorFlow on a TITAN Xp. | |||
| Mode | Lib | Data Format | Max GPU Memory Usage (MB) | Max CPU Memory Usage (MB) | Avg CPU Memory Usage (MB) | Runtime (sec) |
| :-------: | :-------------: | :-----------: | :-----------------: | :-----------------: | :-----------------: | :-----------: | | |||
| AutoGraph | TensorFlow 2.0 | channel last | 11833 | 2161 | 2136 | 74 | | |||
| | TensorLayer 2.0 | channel last | 11833 | 2187 | 2169 | 76 |
| Graph | Keras | channel last | 8677 | 2580 | 2576 | 101 | | |||
| Eager | TensorFlow 2.0 | channel last | 8723 | 2052 | 2024 | 97 | | |||
| | TensorLayer 2.0 | channel last | 8723 | 2010 | 2007 | 95 | | |||
# Getting Involved | |||
Please read the [Contributor Guideline](CONTRIBUTING.md) before submitting your PRs. | |||
We suggest reporting bugs using GitHub issues. You can also discuss how to use TensorLayer in the following Slack channel.
<br/> | |||
<a href="https://join.slack.com/t/tensorlayer/shared_invite/enQtMjUyMjczMzU2Njg4LWI0MWU0MDFkOWY2YjQ4YjVhMzI5M2VlZmE4YTNhNGY1NjZhMzUwMmQ2MTc0YWRjMjQzMjdjMTg2MWQ2ZWJhYzc" target="\_blank"> | |||
<div align="center"> | |||
<img src="img/join_slack.png" width="40%"/> | |||
</div> | |||
</a> | |||
<br/> | |||
# Citing TensorLayer | |||
If you find TensorLayer useful for your project, please cite the following paper: | |||
``` | |||
@article{tensorlayer2017, | |||
author = {Dong, Hao and Supratak, Akara and Mai, Luo and Liu, Fangde and Oehmichen, Axel and Yu, Simiao and Guo, Yike}, | |||
journal = {ACM Multimedia}, | |||
title = {{TensorLayer: A Versatile Library for Efficient Deep Learning Development}}, | |||
url = {http://tensorlayer.org}, | |||
year = {2017} | |||
} | |||
``` |
@@ -0,0 +1,201 @@ | |||
|TENSORLAYER-LOGO| | |||
|Awesome| |Documentation-EN| |Documentation-CN| |Book-CN| |Downloads| | |||
|PyPI| |PyPI-Prerelease| |Commits-Since| |Python| |TensorFlow| | |||
|Travis| |Docker| |RTD-EN| |RTD-CN| |PyUP| |Docker-Pulls| |Code-Quality| | |||
|JOIN-SLACK-LOGO| | |||
TensorLayer is a novel TensorFlow-based deep learning and reinforcement | |||
learning library designed for researchers and engineers. It provides a | |||
large collection of customizable neural layers / functions that are key
to building real-world AI applications. TensorLayer was awarded the 2017
Best Open Source Software by the `ACM Multimedia | |||
Society <http://www.acmmm.org/2017/mm-2017-awardees/>`__. | |||
Why another deep learning library: TensorLayer | |||
============================================== | |||
As deep learning practitioners, we have been looking for a library that
can serve various development purposes. Such a library should be easy
to adopt, providing diverse examples, tutorials and pre-trained models;
it should allow users to easily fine-tune TensorFlow, while being
suitable for production deployment. TensorLayer aims to satisfy all
these purposes. It has three key features:
It has three key features: | |||
- **Simplicity** : TensorLayer lifts the low-level dataflow interface
  of TensorFlow to *high-level* layers / models. It is very easy to
  learn through the rich `example
  code <https://github.com/tensorlayer/awesome-tensorlayer>`__
  contributed by a wide community.
- **Flexibility** : TensorLayer APIs are transparent: they do not
  mask TensorFlow from users, but leave massive hooks that support
  *low-level tuning* and *deep customization*.
- **Zero-cost Abstraction** : TensorLayer can achieve the *full | |||
power* of TensorFlow. The following table shows the training speeds | |||
of classic models using TensorLayer and native TensorFlow on a Titan | |||
X Pascal GPU. | |||
+---------------+-----------------+-----------------+-----------------+ | |||
| | CIFAR-10 | PTB LSTM | Word2Vec | | |||
+===============+=================+=================+=================+ | |||
| TensorLayer | 2528 images/s | 18063 words/s | 58167 words/s | | |||
+---------------+-----------------+-----------------+-----------------+ | |||
| TensorFlow | 2530 images/s | 18075 words/s | 58181 words/s | | |||
+---------------+-----------------+-----------------+-----------------+ | |||
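As a small illustration of the transparency mentioned above, the sketch
below builds a layer stack whose output is a plain TensorFlow tensor
that can be used in any native TensorFlow code (a minimal sketch for the
TensorLayer 1.x API; layer sizes and names are illustrative):

.. code:: python

    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, shape=[None, 784])

    # Stack high-level layers on top of the low-level placeholder.
    net = tl.layers.InputLayer(x, name='input')
    net = tl.layers.DenseLayer(net, n_units=800, act=tf.nn.relu, name='relu1')
    net = tl.layers.DenseLayer(net, n_units=10, name='output')

    y = net.outputs  # a plain tf.Tensor, usable with native TF losses/optimizers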
TensorLayer stands at a unique spot in the library landscape. Other
wrapper libraries like Keras and TFLearn also provide high-level
abstractions. They, however, often hide the underlying engine from
users, which makes them hard to customize and fine-tune. By contrast,
TensorLayer APIs are generally flexible and transparent. Users often
find it easy to start with the examples and tutorials, and then dive
into TensorFlow seamlessly. In addition, TensorLayer avoids library
lock-in through native support for importing components from Keras,
TFSlim and TFLearn.
TensorLayer has a fast-growing user base among top researchers and
engineers, from universities like Imperial College London, UC Berkeley,
Carnegie Mellon University, Stanford University, and University of
Technology of Compiegne (UTC), and companies like Google, Microsoft,
Alibaba, Tencent, Xiaomi, and Bloomberg.
Install | |||
======= | |||
TensorLayer has prerequisites including TensorFlow, numpy, and others. For GPU support, CUDA and cuDNN are required.
The simplest way to install TensorLayer is to use the Python Package Index (PyPI): | |||
.. code:: bash | |||
# for last stable version | |||
pip install --upgrade tensorlayer | |||
# for latest release candidate | |||
pip install --upgrade --pre tensorlayer | |||
# if you want to install the additional dependencies, you can also run | |||
pip install --upgrade tensorlayer[all] # all additional dependencies | |||
pip install --upgrade tensorlayer[extra] # only the `extra` dependencies | |||
pip install --upgrade tensorlayer[contrib_loggers] # only the `contrib_loggers` dependencies | |||
Alternatively, you can install the latest or development version by directly pulling from GitHub:
.. code:: bash | |||
pip install https://github.com/tensorlayer/tensorlayer/archive/master.zip | |||
# or | |||
# pip install https://github.com/tensorlayer/tensorlayer/archive/<branch-name>.zip | |||
Using Docker - a ready-to-use environment | |||
----------------------------------------- | |||
The `TensorLayer | |||
containers <https://hub.docker.com/r/tensorlayer/tensorlayer/>`__ are | |||
built on top of the official `TensorFlow | |||
containers <https://hub.docker.com/r/tensorflow/tensorflow/>`__: | |||
Containers with CPU support | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
.. code:: bash | |||
# for CPU version and Python 2 | |||
docker pull tensorlayer/tensorlayer:latest | |||
docker run -it --rm -p 8888:8888 -p 6006:6006 -e PASSWORD=JUPYTER_NB_PASSWORD tensorlayer/tensorlayer:latest | |||
# for CPU version and Python 3 | |||
docker pull tensorlayer/tensorlayer:latest-py3 | |||
docker run -it --rm -p 8888:8888 -p 6006:6006 -e PASSWORD=JUPYTER_NB_PASSWORD tensorlayer/tensorlayer:latest-py3 | |||
Containers with GPU support | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
NVIDIA-Docker is required for these containers to work: `Project | |||
Link <https://github.com/NVIDIA/nvidia-docker>`__ | |||
.. code:: bash | |||
# for GPU version and Python 2 | |||
docker pull tensorlayer/tensorlayer:latest-gpu | |||
    nvidia-docker run -it --rm -p 8888:8888 -p 6006:6006 -e PASSWORD=JUPYTER_NB_PASSWORD tensorlayer/tensorlayer:latest-gpu
# for GPU version and Python 3 | |||
docker pull tensorlayer/tensorlayer:latest-gpu-py3 | |||
nvidia-docker run -it --rm -p 8888:8888 -p 6006:6006 -e PASSWORD=JUPYTER_NB_PASSWORD tensorlayer/tensorlayer:latest-gpu-py3 | |||
Contribute | |||
========== | |||
Please read the `Contributor | |||
Guideline <https://github.com/tensorlayer/tensorlayer/blob/master/CONTRIBUTING.md>`__ | |||
before submitting your PRs. | |||
Cite | |||
==== | |||
If you find this project useful, we would be grateful if you cite the | |||
TensorLayer paper: | |||
:: | |||
@article{tensorlayer2017, | |||
author = {Dong, Hao and Supratak, Akara and Mai, Luo and Liu, Fangde and Oehmichen, Axel and Yu, Simiao and Guo, Yike}, | |||
journal = {ACM Multimedia}, | |||
title = {{TensorLayer: A Versatile Library for Efficient Deep Learning Development}}, | |||
url = {http://tensorlayer.org}, | |||
year = {2017} | |||
} | |||
License | |||
======= | |||
TensorLayer is released under the Apache 2.0 license. | |||
.. |TENSORLAYER-LOGO| image:: https://raw.githubusercontent.com/tensorlayer/tensorlayer/master/img/tl_transparent_logo.png | |||
:target: https://tensorlayer.readthedocs.io/ | |||
.. |JOIN-SLACK-LOGO| image:: https://raw.githubusercontent.com/tensorlayer/tensorlayer/master/img/join_slack.png | |||
:target: https://join.slack.com/t/tensorlayer/shared_invite/enQtMjUyMjczMzU2Njg4LWI0MWU0MDFkOWY2YjQ4YjVhMzI5M2VlZmE4YTNhNGY1NjZhMzUwMmQ2MTc0YWRjMjQzMjdjMTg2MWQ2ZWJhYzc | |||
.. |Awesome| image:: https://awesome.re/mentioned-badge.svg | |||
:target: https://github.com/tensorlayer/awesome-tensorlayer | |||
.. |Documentation-EN| image:: https://img.shields.io/badge/documentation-english-blue.svg | |||
:target: https://tensorlayer.readthedocs.io/ | |||
.. |Documentation-CN| image:: https://img.shields.io/badge/documentation-%E4%B8%AD%E6%96%87-blue.svg | |||
:target: https://tensorlayercn.readthedocs.io/ | |||
.. |Book-CN| image:: https://img.shields.io/badge/book-%E4%B8%AD%E6%96%87-blue.svg | |||
:target: http://www.broadview.com.cn/book/5059/ | |||
.. |Downloads| image:: http://pepy.tech/badge/tensorlayer | |||
:target: http://pepy.tech/project/tensorlayer | |||
.. |PyPI| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer.svg?label=PyPI%20-%20Release | |||
:target: https://pypi.org/project/tensorlayer/ | |||
.. |PyPI-Prerelease| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer/all.svg?label=PyPI%20-%20Pre-Release | |||
:target: https://pypi.org/project/tensorlayer/ | |||
.. |Commits-Since| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/commits-since/tensorlayer/tensorlayer/latest.svg | |||
:target: https://github.com/tensorlayer/tensorlayer/compare/1.10.1...master | |||
.. |Python| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/pypi/pyversions/tensorlayer.svg | |||
:target: https://pypi.org/project/tensorlayer/ | |||
.. |TensorFlow| image:: https://img.shields.io/badge/tensorflow-1.6.0+-blue.svg | |||
:target: https://github.com/tensorflow/tensorflow/releases | |||
.. |Travis| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/travis/tensorlayer/tensorlayer/master.svg?label=Travis | |||
:target: https://travis-ci.org/tensorlayer/tensorlayer | |||
.. |Docker| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/circleci/project/github/tensorlayer/tensorlayer/master.svg?label=Docker%20Build | |||
:target: https://circleci.com/gh/tensorlayer/tensorlayer/tree/master | |||
.. |RTD-EN| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/readthedocs/tensorlayer/latest.svg?label=ReadTheDocs-EN | |||
:target: https://tensorlayer.readthedocs.io/ | |||
.. |RTD-CN| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/readthedocs/tensorlayercn/latest.svg?label=ReadTheDocs-CN | |||
:target: https://tensorlayercn.readthedocs.io/ | |||
.. |PyUP| image:: https://pyup.io/repos/github/tensorlayer/tensorlayer/shield.svg | |||
:target: https://pyup.io/repos/github/tensorlayer/tensorlayer/ | |||
.. |Docker-Pulls| image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/docker/pulls/tensorlayer/tensorlayer.svg | |||
:target: https://hub.docker.com/r/tensorlayer/tensorlayer/ | |||
.. |Code-Quality| image:: https://api.codacy.com/project/badge/Grade/d6b118784e25435498e7310745adb848 | |||
:target: https://www.codacy.com/app/tensorlayer/tensorlayer |
@@ -0,0 +1,4 @@ | |||
docker pull hadolint/hadolint:latest | |||
docker run --rm -i hadolint/hadolint hadolint --ignore DL3007 - < Dockerfile | |||
PAUSE; |
@@ -0,0 +1,35 @@ | |||
# Build args. | |||
# * Accepted Values: | |||
# - Python 2 + CPU: "latest" => --build-arg TF_CONTAINER_VERSION="latest" | |||
# - Python 2 + GPU: "latest-gpu" => --build-arg TF_CONTAINER_VERSION="latest-gpu" | |||
# - Python 3 + CPU: "latest-py3" => --build-arg TF_CONTAINER_VERSION="latest-py3" | |||
# - Python 3 + GPU: "latest-gpu-py3" => --build-arg TF_CONTAINER_VERSION="latest-gpu-py3" | |||
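#
# Example invocation (a sketch; the image tag is illustrative):
#   docker build --build-arg TF_CONTAINER_VERSION="latest-py3" \
#                --build-arg TL_VERSION="1.11.0" \
#                -t tensorlayer/tensorlayer:latest-py3 .
#   (omit TL_VERSION to build a nightly release from the master branch)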
ARG TF_CONTAINER_VERSION | |||
FROM tensorflow/tensorflow:${TF_CONTAINER_VERSION} | |||
LABEL version="1.0" maintainer="Jonathan DEKHTIAR <contact@jonathandekhtiar.eu>" | |||
ARG TL_VERSION | |||
ARG TF_CONTAINER_VERSION | |||
RUN echo "Container Tag: ${TF_CONTAINER_VERSION}" \ | |||
&& apt-get update \ | |||
&& case $TF_CONTAINER_VERSION in \ | |||
latest-py3 | latest-gpu-py3) apt-get install -y python3-tk ;; \ | |||
*) apt-get install -y python-tk ;; \ | |||
esac \ | |||
&& if [ -z "$TL_VERSION" ]; then \ | |||
echo "Building a Nightly Release" \ | |||
&& apt-get install -y git \ | |||
&& mkdir /dist/ && cd /dist/ \ | |||
&& git clone https://github.com/tensorlayer/tensorlayer.git \ | |||
&& cd tensorlayer \ | |||
&& pip install --disable-pip-version-check --no-cache-dir --upgrade -e .[all]; \ | |||
else \ | |||
echo "Building Tag Release: $TL_VERSION" \ | |||
&& pip install --disable-pip-version-check --no-cache-dir --upgrade tensorlayer[all]=="$TL_VERSION"; \ | |||
fi \ | |||
&& apt-get autoremove -y \ | |||
&& rm -rf /var/lib/apt/lists/* |
@@ -0,0 +1,14 @@ | |||
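# Builds the TensorLayer documentation image. Intended usage (a sketch,
# matching the `doc` target in the top-level Makefile):
#   docker build --rm -t tensorlayer-docs:snapshot -f docker/docs/Dockerfile .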
FROM ubuntu:bionic | |||
ADD docker/docs/sources.list.ustc /etc/apt/sources.list | |||
ENV DEBIAN_FRONTEND=noninteractive | |||
RUN apt update && \ | |||
apt install -y python3-pip python3-tk python-qt4 wget && \ | |||
pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple tensorflow | |||
ADD . /tensorlayer | |||
WORKDIR /tensorlayer | |||
RUN ln -s `which pip3` /usr/bin/pip && \ | |||
./scripts/install-horovod-for-doc-test.sh | |||
RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple . | |||
RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple -e .[all] | |||
RUN make -C docs html |
@@ -0,0 +1,15 @@ | |||
deb http://mirrors.ustc.edu.cn/ubuntu/ bionic main restricted | |||
deb http://mirrors.ustc.edu.cn/ubuntu/ bionic-updates main restricted | |||
deb http://mirrors.ustc.edu.cn/ubuntu/ bionic universe | |||
deb http://mirrors.ustc.edu.cn/ubuntu/ bionic-updates universe | |||
deb http://mirrors.ustc.edu.cn/ubuntu/ bionic multiverse | |||
deb http://mirrors.ustc.edu.cn/ubuntu/ bionic-updates multiverse | |||
deb http://mirrors.ustc.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse | |||
deb http://mirrors.ustc.edu.cn/ubuntu bionic-security main restricted | |||
deb http://mirrors.ustc.edu.cn/ubuntu bionic-security universe | |||
deb http://mirrors.ustc.edu.cn/ubuntu bionic-security multiverse |
@@ -0,0 +1,68 @@ | |||
import argparse | |||
import requests | |||
import logging | |||
# Note: this script relies on pip's internal API, which is not a stable
# public interface; the PackageFinder call below matches older pip
# releases (roughly pip 10-19).
import pip._internal.index
if __name__ == "__main__": | |||
parser = argparse.ArgumentParser(description='Get the nth version of a given package') | |||
    parser.add_argument('--package', type=str, required=True, help='The PyPI package you want to inspect')
parser.add_argument('--nth_last_version', type=int, default=1, help='The nth last package will be retrieved') | |||
parser.add_argument('--prerelease', help='Get PreRelease Package Version', action='store_true') | |||
parser.add_argument('--debug', help='Print debug information', action='store_true') | |||
args = parser.parse_args() | |||
# create logger | |||
logger = logging.getLogger("PyPI_CLI") | |||
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') | |||
ch = logging.StreamHandler() | |||
ch.setLevel(logging.DEBUG) | |||
ch.setFormatter(formatter) | |||
logger.addHandler(ch) | |||
if args.debug: | |||
logger.setLevel(logging.DEBUG) | |||
logger.debug("Package: %s" % args.package) | |||
logger.debug("nth_last_version: %s" % args.nth_last_version) | |||
logger.debug("prerelease: %s" % args.prerelease) | |||
logger.debug("debug: %s" % args.debug) | |||
finder = pip._internal.index.PackageFinder( | |||
[], | |||
['https://pypi.python.org/simple'], | |||
session=requests.Session() | |||
) | |||
results = finder.find_all_candidates(args.package) | |||
tmp_versions = [str(p.version) for p in results] | |||
logger.debug("%s" % tmp_versions) | |||
versions = list() | |||
for el in tmp_versions: | |||
if el not in versions: | |||
versions.append(el) | |||
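    # Walk the version list backwards from the most recent release.
    # Once the nth last version is reached, pre-releases (a/b/rc) are
    # skipped unless --prerelease is given.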
pos = -1 | |||
nth_version = 1 | |||
while True: | |||
fetched_version = versions[pos] | |||
logger.debug("Version: %s" % fetched_version) | |||
if nth_version == args.nth_last_version: | |||
if args.prerelease or not ("rc" in fetched_version or "a" in fetched_version or "b" in fetched_version): | |||
break | |||
else: | |||
pos -= 1 | |||
continue | |||
pos -= 1 | |||
nth_version += 1 | |||
print(fetched_version) |
@@ -0,0 +1,37 @@ | |||
import argparse | |||
import logging | |||
if __name__ == "__main__": | |||
parser = argparse.ArgumentParser(description='Determine the version prefix to apply depending on the version name') | |||
parser.add_argument( | |||
'--version', | |||
type=str, | |||
required=True, | |||
help='The Package Version to be installed in the container' | |||
) | |||
parser.add_argument('--debug', help='Print debug information', action='store_true') | |||
args = parser.parse_args() | |||
# create logger | |||
logger = logging.getLogger("VERSION_PREFIX_CLI") | |||
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') | |||
ch = logging.StreamHandler() | |||
ch.setLevel(logging.DEBUG) | |||
ch.setFormatter(formatter) | |||
logger.addHandler(ch) | |||
if args.debug: | |||
logger.setLevel(logging.DEBUG) | |||
logger.debug("Package Version: %s" % args.version) | |||
if "rc" in args.version or "a" in args.version or "b" in args.version: | |||
print("latest-dev") | |||
else: | |||
print("latest") |
@@ -0,0 +1,225 @@ | |||
# Makefile for Sphinx documentation | |||
# | |||
# You can set these variables from the command line. | |||
SPHINXOPTS = | |||
SPHINXBUILD = sphinx-build | |||
PAPER = | |||
BUILDDIR = _build | |||
# Internal variables. | |||
PAPEROPT_a4 = -D latex_paper_size=a4 | |||
PAPEROPT_letter = -D latex_paper_size=letter | |||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . | |||
# the i18n builder cannot share the environment and doctrees with the others | |||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . | |||
.PHONY: help | |||
help: | |||
@echo "Please use \`make <target>' where <target> is one of" | |||
@echo " html to make standalone HTML files" | |||
@echo " dirhtml to make HTML files named index.html in directories" | |||
@echo " singlehtml to make a single large HTML file" | |||
@echo " pickle to make pickle files" | |||
@echo " json to make JSON files" | |||
@echo " htmlhelp to make HTML files and a HTML help project" | |||
@echo " qthelp to make HTML files and a qthelp project" | |||
@echo " applehelp to make an Apple Help Book" | |||
@echo " devhelp to make HTML files and a Devhelp project" | |||
@echo " epub to make an epub" | |||
@echo " epub3 to make an epub3" | |||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" | |||
@echo " latexpdf to make LaTeX files and run them through pdflatex" | |||
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" | |||
@echo " text to make text files" | |||
@echo " man to make manual pages" | |||
@echo " texinfo to make Texinfo files" | |||
@echo " info to make Texinfo files and run them through makeinfo" | |||
@echo " gettext to make PO message catalogs" | |||
@echo " changes to make an overview of all changed/added/deprecated items" | |||
@echo " xml to make Docutils-native XML files" | |||
@echo " pseudoxml to make pseudoxml-XML files for display purposes" | |||
@echo " linkcheck to check all external links for integrity" | |||
@echo " doctest to run all doctests embedded in the documentation (if enabled)" | |||
@echo " coverage to run coverage check of the documentation (if enabled)" | |||
@echo " dummy to check syntax errors of document sources" | |||
.PHONY: clean | |||
clean: | |||
rm -rf $(BUILDDIR)/* | |||
.PHONY: html | |||
html: | |||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html | |||
@echo | |||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html." | |||
.PHONY: dirhtml | |||
dirhtml: | |||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml | |||
@echo | |||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." | |||
.PHONY: singlehtml | |||
singlehtml: | |||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml | |||
@echo | |||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." | |||
.PHONY: pickle | |||
pickle: | |||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle | |||
@echo | |||
@echo "Build finished; now you can process the pickle files." | |||
.PHONY: json | |||
json: | |||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json | |||
@echo | |||
@echo "Build finished; now you can process the JSON files." | |||
.PHONY: htmlhelp | |||
htmlhelp: | |||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp | |||
@echo | |||
@echo "Build finished; now you can run HTML Help Workshop with the" \ | |||
".hhp project file in $(BUILDDIR)/htmlhelp." | |||
.PHONY: qthelp | |||
qthelp: | |||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp | |||
@echo | |||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \ | |||
".qhcp project file in $(BUILDDIR)/qthelp, like this:" | |||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/TLayer.qhcp" | |||
@echo "To view the help file:" | |||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/TLayer.qhc" | |||
.PHONY: applehelp | |||
applehelp: | |||
$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp | |||
@echo | |||
@echo "Build finished. The help book is in $(BUILDDIR)/applehelp." | |||
@echo "N.B. You won't be able to view it unless you put it in" \ | |||
"~/Library/Documentation/Help or install it in your application" \ | |||
"bundle." | |||
.PHONY: devhelp | |||
devhelp: | |||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp | |||
@echo | |||
@echo "Build finished." | |||
@echo "To view the help file:" | |||
@echo "# mkdir -p $$HOME/.local/share/devhelp/TLayer" | |||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/TLayer" | |||
@echo "# devhelp" | |||
.PHONY: epub | |||
epub: | |||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub | |||
@echo | |||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub." | |||
.PHONY: epub3 | |||
epub3: | |||
$(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 | |||
@echo | |||
@echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." | |||
.PHONY: latex | |||
latex: | |||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex | |||
@echo | |||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." | |||
@echo "Run \`make' in that directory to run these through (pdf)latex" \ | |||
"(use \`make latexpdf' here to do that automatically)." | |||
.PHONY: latexpdf | |||
latexpdf: | |||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex | |||
@echo "Running LaTeX files through pdflatex..." | |||
$(MAKE) -C $(BUILDDIR)/latex all-pdf | |||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." | |||
.PHONY: latexpdfja | |||
latexpdfja: | |||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex | |||
@echo "Running LaTeX files through platex and dvipdfmx..." | |||
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja | |||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." | |||
.PHONY: text | |||
text: | |||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text | |||
@echo | |||
@echo "Build finished. The text files are in $(BUILDDIR)/text." | |||
.PHONY: man | |||
man: | |||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man | |||
@echo | |||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man." | |||
.PHONY: texinfo | |||
texinfo: | |||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo | |||
@echo | |||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." | |||
@echo "Run \`make' in that directory to run these through makeinfo" \ | |||
"(use \`make info' here to do that automatically)." | |||
.PHONY: info | |||
info: | |||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo | |||
@echo "Running Texinfo files through makeinfo..." | |||
make -C $(BUILDDIR)/texinfo info | |||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." | |||
.PHONY: gettext | |||
gettext: | |||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale | |||
@echo | |||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." | |||
.PHONY: changes | |||
changes: | |||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes | |||
@echo | |||
@echo "The overview file is in $(BUILDDIR)/changes." | |||
.PHONY: linkcheck | |||
linkcheck: | |||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck | |||
@echo | |||
@echo "Link check complete; look for any errors in the above output " \ | |||
"or in $(BUILDDIR)/linkcheck/output.txt." | |||
.PHONY: doctest | |||
doctest: | |||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest | |||
@echo "Testing of doctests in the sources finished, look at the " \ | |||
"results in $(BUILDDIR)/doctest/output.txt." | |||
.PHONY: coverage | |||
coverage: | |||
$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage | |||
@echo "Testing of coverage in the sources finished, look at the " \ | |||
"results in $(BUILDDIR)/coverage/python.txt." | |||
.PHONY: xml | |||
xml: | |||
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml | |||
@echo | |||
@echo "Build finished. The XML files are in $(BUILDDIR)/xml." | |||
.PHONY: pseudoxml | |||
pseudoxml: | |||
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml | |||
@echo | |||
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." | |||
.PHONY: dummy | |||
dummy: | |||
$(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy | |||
@echo | |||
@echo "Build finished. Dummy builder generates no files." |
@@ -0,0 +1,8 @@ | |||
/* work around https://github.com/snide/sphinx_rtd_theme/issues/149 */ | |||
.rst-content table.field-list .field-body { | |||
padding-top: 8px; | |||
} | |||
/*.section #basic-2-flip-flop-synchronizer{ | |||
text-align:justify; | |||
}*/ |
@@ -0,0 +1,469 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
# | |||
# TensorLayer documentation build configuration file, created by | |||
# sphinx-quickstart on Tue Aug 2 15:30:55 2016. | |||
# | |||
# This file is execfile()d with the current directory set to its | |||
# containing dir. | |||
# | |||
# Note that not all possible configuration values are present in this | |||
# autogenerated file. | |||
# | |||
# All configuration values have a default; values that are commented out | |||
# serve to show the default. | |||
# If extensions (or modules to document with autodoc) are in another directory, | |||
# add these directories to sys.path here. If the directory is relative to the | |||
# documentation root, use os.path.abspath to make it absolute, like shown here. | |||
# | |||
import os, sys, datetime | |||
sys.path.insert(0, os.path.abspath("../")) # Important | |||
sys.path.insert(0, os.path.abspath(os.path.join("..", "tensorlayer"))) # Important | |||
from package_info import __shortversion__ | |||
from package_info import __version__ | |||
# -- General configuration ------------------------------------------------ | |||
# If your documentation needs a minimal Sphinx version, state it here. | |||
# | |||
# needs_sphinx = '1.0' | |||
# Add any Sphinx extension module names here, as strings. They can be | |||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom | |||
# ones. | |||
# extensions = [ | |||
# 'sphinx.ext.coverage', | |||
# 'sphinx.ext.githubpages', | |||
# 'numpydoc', | |||
# ] | |||
extensions = [ | |||
'sphinx.ext.autodoc', | |||
'sphinx.ext.autosummary', | |||
'sphinx.ext.doctest', | |||
'sphinx.ext.ifconfig', | |||
'sphinx.ext.inheritance_diagram', | |||
'sphinx.ext.intersphinx', | |||
'sphinx.ext.mathjax', | |||
'sphinx.ext.napoleon', | |||
'sphinx.ext.todo', | |||
'sphinx.ext.viewcode', | |||
] | |||
autodoc_mock_imports = [ | |||
'cv2', | |||
'gridfs', | |||
'horovod', | |||
'hyperdash', | |||
'imageio', | |||
'lxml', | |||
'matplotlib', | |||
'nltk', | |||
'numpy', | |||
'PIL', | |||
'progressbar', | |||
'pymongo', | |||
'scipy', | |||
'skimage', | |||
'sklearn', | |||
'tensorflow', | |||
'tqdm', | |||
'h5py', | |||
# TL C++ Packages | |||
'tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops', | |||
] | |||
# Add any paths that contain templates here, relative to this directory. | |||
templates_path = ['_templates'] | |||
# The suffix(es) of source filenames. | |||
# You can specify multiple suffix as a list of string: | |||
# | |||
# source_suffix = ['.rst', '.md'] | |||
source_suffix = '.rst' | |||
# The encoding of source files. | |||
# | |||
# source_encoding = 'utf-8-sig' | |||
# The master toctree document. | |||
master_doc = 'index' | |||
# General information about the project. | |||
project = 'TensorLayer' | |||
copyright = '2016~%s, TensorLayer Contributors' % (str(datetime.datetime.now().year)) | |||
author = 'TensorLayer Contributors' | |||
# The version info for the project you're documenting, acts as replacement for | |||
# |version| and |release|, also used in various other places throughout the | |||
# built documents. | |||
# | |||
# The short X.Y version. | |||
version = __shortversion__ | |||
# The full version, including alpha/beta/rc tags. | |||
release = __version__ | |||
# The language for content autogenerated by Sphinx. Refer to documentation | |||
# for a list of supported languages. | |||
# | |||
# This is also used if you do content translation via gettext catalogs. | |||
# Usually you set "language" from the command line for these cases. | |||
language = None | |||
# There are two options for replacing |today|: either, you set today to some | |||
# non-false value, then it is used: | |||
# | |||
# today = '' | |||
# | |||
# Else, today_fmt is used as the format for a strftime call. | |||
# | |||
# today_fmt = '%B %d, %Y' | |||
# List of patterns, relative to source directory, that match files and | |||
# directories to ignore when looking for source files. | |||
# This patterns also effect to html_static_path and html_extra_path | |||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] | |||
# The reST default role (used for this markup: `text`) to use for all | |||
# documents. | |||
# | |||
# default_role = None | |||
# If true, '()' will be appended to :func: etc. cross-reference text. | |||
# | |||
# add_function_parentheses = True | |||
# If true, the current module name will be prepended to all description | |||
# unit titles (such as .. function::). | |||
# | |||
# add_module_names = True | |||
# If true, sectionauthor and moduleauthor directives will be shown in the | |||
# output. They are ignored by default. | |||
# | |||
# show_authors = False | |||
# The name of the Pygments (syntax highlighting) style to use. | |||
pygments_style = 'sphinx' | |||
# A list of ignored prefixes for module index sorting. | |||
# modindex_common_prefix = [] | |||
# If true, keep warnings as "system message" paragraphs in the built documents. | |||
# keep_warnings = False | |||
# If true, `todo` and `todoList` produce output, else they produce nothing. | |||
todo_include_todos = False | |||
# -- Options for HTML output ---------------------------------------------- | |||
# The theme to use for HTML and HTML Help pages. See the documentation for | |||
# a list of builtin themes. | |||
# | |||
# html_theme = 'alabaster' | |||
# Theme options are theme-specific and customize the look and feel of a theme | |||
# further. For a list of options available for each theme, see the | |||
# documentation. | |||
# | |||
# html_theme_options = {} | |||
# Add any paths that contain custom themes here, relative to this directory. | |||
# html_theme_path = [] | |||
# The name for this set of Sphinx documents. | |||
# "<project> v<release> documentation" by default. | |||
# | |||
# html_title = 'TensorLayer' | |||
# A shorter title for the navigation bar. Default is the same as html_title. | |||
# | |||
# html_short_title = None | |||
# The name of an image file (relative to this directory) to place at the top | |||
# of the sidebar. | |||
# | |||
# html_logo = None | |||
# The name of an image file (relative to this directory) to use as a favicon of | |||
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 | |||
# pixels large. | |||
# | |||
# html_favicon = None | |||
# Add any paths that contain custom static files (such as style sheets) here, | |||
# relative to this directory. They are copied after the builtin static files, | |||
# so a file named "default.css" will overwrite the builtin "default.css". | |||
html_static_path = [] | |||
# Add any extra paths that contain custom files (such as robots.txt or | |||
# .htaccess) here, relative to this directory. These files are copied | |||
# directly to the root of the documentation. | |||
# | |||
# html_extra_path = [] | |||
# If not None, a 'Last updated on:' timestamp is inserted at every page | |||
# bottom, using the given strftime format. | |||
# The empty string is equivalent to '%b %d, %Y'. | |||
# | |||
# html_last_updated_fmt = None | |||
# If true, SmartyPants will be used to convert quotes and dashes to | |||
# typographically correct entities. | |||
# | |||
# html_use_smartypants = True | |||
# Custom sidebar templates, maps document names to template names. | |||
# | |||
# html_sidebars = {} | |||
# Additional templates that should be rendered to pages, maps page names to | |||
# template names. | |||
# | |||
# html_additional_pages = {} | |||
# If false, no module index is generated. | |||
# | |||
# html_domain_indices = True | |||
# If false, no index is generated. | |||
# | |||
# html_use_index = True | |||
# If true, the index is split into individual pages for each letter. | |||
# | |||
# html_split_index = False | |||
# If true, links to the reST sources are added to the pages. | |||
# | |||
# html_show_sourcelink = True | |||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. | |||
# | |||
# html_show_sphinx = True | |||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. | |||
# | |||
# html_show_copyright = True | |||
# If true, an OpenSearch description file will be output, and all pages will | |||
# contain a <link> tag referring to it. The value of this option must be the | |||
# base URL from which the finished HTML is served. | |||
# | |||
# html_use_opensearch = '' | |||
# This is the file name suffix for HTML files (e.g. ".xhtml"). | |||
# html_file_suffix = None | |||
# Language to be used for generating the HTML full-text search index. | |||
# Sphinx supports the following languages: | |||
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# | |||
# html_search_language = 'en' | |||
# A dictionary with options for the search language support, empty by default. | |||
# 'ja' uses this config value. | |||
# 'zh' user can custom change `jieba` dictionary path. | |||
# | |||
# html_search_options = {'type': 'default'} | |||
# The name of a javascript file (relative to the configuration directory) that | |||
# implements a search results scorer. If empty, the default will be used. | |||
# | |||
# html_search_scorer = 'scorer.js' | |||
# Output file base name for HTML help builder. | |||
htmlhelp_basename = 'TensorLayerdoc' | |||
# -- Options for LaTeX output --------------------------------------------- | |||
latex_elements = { | |||
# The paper size ('letterpaper' or 'a4paper'). | |||
# | |||
# 'papersize': 'letterpaper', | |||
# The font size ('10pt', '11pt' or '12pt'). | |||
# | |||
# 'pointsize': '10pt', | |||
# Additional stuff for the LaTeX preamble. | |||
# | |||
# 'preamble': '', | |||
# Latex figure (float) alignment | |||
# | |||
# 'figure_align': 'htbp', | |||
} | |||
# Grouping the document tree into LaTeX files. List of tuples | |||
# (source start file, target name, title, | |||
# author, documentclass [howto, manual, or own class]). | |||
latex_documents = [ | |||
(master_doc, 'TensorLayer.tex', 'TensorLayer Documentation', | |||
'TensorLayer contributors', 'manual'), | |||
] | |||
# The name of an image file (relative to this directory) to place at the top of | |||
# the title page. | |||
# | |||
# latex_logo = None | |||
# For "manual" documents, if this is true, then toplevel headings are parts, | |||
# not chapters. | |||
# | |||
# latex_use_parts = False | |||
# If true, show page references after internal links. | |||
# | |||
# latex_show_pagerefs = False | |||
# If true, show URL addresses after external links. | |||
# | |||
# latex_show_urls = False | |||
# Documents to append as an appendix to all manuals. | |||
# | |||
# latex_appendices = [] | |||
# If false, no module index is generated. | |||
# | |||
# latex_domain_indices = True | |||
# -- Options for manual page output --------------------------------------- | |||
# One entry per manual page. List of tuples | |||
# (source start file, name, description, authors, manual section). | |||
man_pages = [ | |||
(master_doc, 'tensorlayer', 'TensorLayer Documentation', | |||
[author], 1) | |||
] | |||
# If true, show URL addresses after external links. | |||
# | |||
# man_show_urls = False | |||
# -- Options for Texinfo output ------------------------------------------- | |||
# Grouping the document tree into Texinfo files. List of tuples | |||
# (source start file, target name, title, author, | |||
# dir menu entry, description, category) | |||
texinfo_documents = [ | |||
(master_doc, 'TensorLayer', 'TensorLayer Documentation', | |||
author, 'TensorLayer', 'Deep learning and Reinforcement learning library for Researchers and Engineers.', | |||
'Miscellaneous'), | |||
] | |||
# Documents to append as an appendix to all manuals. | |||
# | |||
# texinfo_appendices = [] | |||
# If false, no module index is generated. | |||
# | |||
# texinfo_domain_indices = True | |||
# How to display URL addresses: 'footnote', 'no', or 'inline'. | |||
# | |||
# texinfo_show_urls = 'footnote' | |||
# If true, do not generate a @detailmenu in the "Top" node's menu. | |||
# | |||
# texinfo_no_detailmenu = False | |||
# -- Options for Epub output ---------------------------------------------- | |||
# Bibliographic Dublin Core info. | |||
epub_title = project | |||
epub_author = author | |||
epub_publisher = author | |||
epub_copyright = copyright | |||
# The basename for the epub file. It defaults to the project name. | |||
# epub_basename = project | |||
# The HTML theme for the epub output. Since the default themes are not | |||
# optimized for small screen space, using the same theme for HTML and epub | |||
# output is usually not wise. This defaults to 'epub', a theme designed to save | |||
# visual space. | |||
# | |||
# epub_theme = 'epub' | |||
# The language of the text. It defaults to the language option | |||
# or 'en' if the language is not set. | |||
# | |||
# epub_language = '' | |||
# The scheme of the identifier. Typical schemes are ISBN or URL. | |||
# epub_scheme = '' | |||
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# | |||
# epub_identifier = '' | |||
# A unique identification for the text. | |||
# | |||
# epub_uid = '' | |||
# A tuple containing the cover image and cover page html template filenames. | |||
# | |||
# epub_cover = () | |||
# A sequence of (type, uri, title) tuples for the guide element of content.opf. | |||
# | |||
# epub_guide = () | |||
# HTML files that should be inserted before the pages created by sphinx. | |||
# The format is a list of tuples containing the path and title. | |||
# | |||
# epub_pre_files = [] | |||
# HTML files that should be inserted after the pages created by sphinx. | |||
# The format is a list of tuples containing the path and title. | |||
# | |||
# epub_post_files = [] | |||
# A list of files that should not be packed into the epub file. | |||
epub_exclude_files = ['search.html'] | |||
# The depth of the table of contents in toc.ncx. | |||
# | |||
# epub_tocdepth = 3 | |||
# Allow duplicate toc entries. | |||
# | |||
# epub_tocdup = True | |||
# Choose between 'default' and 'includehidden'. | |||
# | |||
# epub_tocscope = 'default' | |||
# Fix unsupported image types using Pillow.
# | |||
# epub_fix_images = False | |||
# Scale large images. | |||
# | |||
# epub_max_image_width = 0 | |||
# How to display URL addresses: 'footnote', 'no', or 'inline'. | |||
# | |||
# epub_show_urls = 'inline' | |||
# If false, no index is generated. | |||
# | |||
# epub_use_index = True | |||
pygments_style = 'sphinx' | |||
html_theme = "sphinx_rtd_theme" | |||
html_theme_path = [] |
@@ -0,0 +1,100 @@ | |||
Welcome to TensorLayer | |||
======================================= | |||
.. image:: user/my_figs/tl_transparent_logo.png | |||
:width: 30 % | |||
:align: center | |||
:target: https://github.com/tensorlayer/tensorlayer | |||
**Documentation Version:** |release| | |||
**Jun 2019** `Deep Reinforcement Learning Model Zoo released! <https://github.com/tensorlayer/tensorlayer/tree/master/examples/reinforcement_learning>`__.
**Good News:** We won the **Best Open Source Software Award** `@ACM Multimedia (MM) 2017 <http://www.acmmm.org/2017/mm-2017-awardees/>`_. | |||
`TensorLayer`_ is a Deep Learning (DL) and Reinforcement Learning (RL) library extended from `Google TensorFlow <https://www.tensorflow.org>`_. It provides popular DL and RL modules that can be easily customized and assembled for tackling real-world machine learning problems. | |||
More details can be found `here <https://github.com/tensorlayer/tensorlayer>`_. | |||
.. note:: | |||
If you have trouble reading the docs online, you can download the repository
from `GitHub`_, then go to ``/docs/_build/html/index.html`` to read the docs
offline. The ``_build`` folder can be generated in ``docs`` using ``make html``.
User Guide | |||
------------ | |||
The TensorLayer user guide explains how to install TensorFlow, CUDA and cuDNN, | |||
how to build and train neural networks using TensorLayer, and how to contribute | |||
to the library as a developer. | |||
.. toctree:: | |||
:maxdepth: 2 | |||
user/installation | |||
user/examples | |||
user/contributing | |||
user/get_involved | |||
user/faq | |||
.. toctree:: | |||
:maxdepth: 2 | |||
:caption: Getting started | |||
user/get_start_model | |||
user/get_start_advance | |||
API Reference | |||
------------- | |||
If you are looking for information on a specific function, class or | |||
method, this part of the documentation is for you. | |||
.. toctree:: | |||
:maxdepth: 2 | |||
:caption: Stable Functionalities | |||
modules/activation | |||
modules/array_ops | |||
modules/cost | |||
modules/prepro | |||
modules/files | |||
modules/iterate | |||
modules/layers | |||
modules/models | |||
modules/nlp | |||
modules/initializers | |||
modules/rein | |||
modules/utils | |||
modules/visualize | |||
.. toctree:: | |||
:maxdepth: 2 | |||
:caption: Alpha Version Functionalities | |||
modules/db | |||
modules/optimizers | |||
modules/distributed | |||
Command-line Reference | |||
---------------------- | |||
TensorLayer provides a handy command-line tool ``tl`` to perform some common tasks.
.. toctree:: | |||
:maxdepth: 2 | |||
:caption: Command Line Interface | |||
modules/cli | |||
Indices and tables | |||
================== | |||
* :ref:`genindex` | |||
* :ref:`modindex` | |||
* :ref:`search` | |||
.. _GitHub: https://github.com/tensorlayer/tensorlayer | |||
.. _TensorLayer: https://github.com/tensorlayer/tensorlayer/ |
@@ -0,0 +1,281 @@ | |||
@ECHO OFF | |||
REM Command file for Sphinx documentation | |||
if "%SPHINXBUILD%" == "" ( | |||
set SPHINXBUILD=sphinx-build | |||
) | |||
set BUILDDIR=_build | |||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . | |||
set I18NSPHINXOPTS=%SPHINXOPTS% . | |||
if NOT "%PAPER%" == "" ( | |||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% | |||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% | |||
) | |||
if "%1" == "" goto help | |||
if "%1" == "help" ( | |||
:help | |||
echo.Please use `make ^<target^>` where ^<target^> is one of | |||
echo. html to make standalone HTML files | |||
echo. dirhtml to make HTML files named index.html in directories | |||
echo. singlehtml to make a single large HTML file | |||
echo. pickle to make pickle files | |||
echo. json to make JSON files | |||
echo. htmlhelp to make HTML files and a HTML help project | |||
echo. qthelp to make HTML files and a qthelp project | |||
echo. devhelp to make HTML files and a Devhelp project | |||
echo. epub to make an epub | |||
echo. epub3 to make an epub3 | |||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter | |||
echo. text to make text files | |||
echo. man to make manual pages | |||
echo. texinfo to make Texinfo files | |||
echo. gettext to make PO message catalogs | |||
echo. changes to make an overview over all changed/added/deprecated items | |||
echo. xml to make Docutils-native XML files | |||
echo. pseudoxml to make pseudoxml-XML files for display purposes | |||
echo. linkcheck to check all external links for integrity | |||
echo. doctest to run all doctests embedded in the documentation if enabled | |||
echo. coverage to run coverage check of the documentation if enabled | |||
echo. dummy to check syntax errors of document sources | |||
goto end | |||
) | |||
if "%1" == "clean" ( | |||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i | |||
del /q /s %BUILDDIR%\* | |||
goto end | |||
) | |||
REM Check if sphinx-build is available and fallback to Python version if any | |||
%SPHINXBUILD% 1>NUL 2>NUL | |||
if errorlevel 9009 goto sphinx_python | |||
goto sphinx_ok | |||
:sphinx_python | |||
set SPHINXBUILD=python -m sphinx.__init__ | |||
%SPHINXBUILD% 2> nul | |||
if errorlevel 9009 ( | |||
echo. | |||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx | |||
echo.installed, then set the SPHINXBUILD environment variable to point | |||
echo.to the full path of the 'sphinx-build' executable. Alternatively you | |||
echo.may add the Sphinx directory to PATH. | |||
echo. | |||
echo.If you don't have Sphinx installed, grab it from | |||
echo.http://sphinx-doc.org/ | |||
exit /b 1 | |||
) | |||
:sphinx_ok | |||
if "%1" == "html" ( | |||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The HTML pages are in %BUILDDIR%/html. | |||
goto end | |||
) | |||
if "%1" == "dirhtml" ( | |||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. | |||
goto end | |||
) | |||
if "%1" == "singlehtml" ( | |||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. | |||
goto end | |||
) | |||
if "%1" == "pickle" ( | |||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished; now you can process the pickle files. | |||
goto end | |||
) | |||
if "%1" == "json" ( | |||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished; now you can process the JSON files. | |||
goto end | |||
) | |||
if "%1" == "htmlhelp" ( | |||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished; now you can run HTML Help Workshop with the ^ | |||
.hhp project file in %BUILDDIR%/htmlhelp. | |||
goto end | |||
) | |||
if "%1" == "qthelp" ( | |||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished; now you can run "qcollectiongenerator" with the ^ | |||
.qhcp project file in %BUILDDIR%/qthelp, like this: | |||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\TLayer.qhcp | |||
echo.To view the help file: | |||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\TLayer.qhc
goto end | |||
) | |||
if "%1" == "devhelp" ( | |||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. | |||
goto end | |||
) | |||
if "%1" == "epub" ( | |||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The epub file is in %BUILDDIR%/epub. | |||
goto end | |||
) | |||
if "%1" == "epub3" ( | |||
%SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. | |||
goto end | |||
) | |||
if "%1" == "latex" ( | |||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. | |||
goto end | |||
) | |||
if "%1" == "latexpdf" ( | |||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex | |||
cd %BUILDDIR%/latex | |||
make all-pdf | |||
cd %~dp0 | |||
echo. | |||
echo.Build finished; the PDF files are in %BUILDDIR%/latex. | |||
goto end | |||
) | |||
if "%1" == "latexpdfja" ( | |||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex | |||
cd %BUILDDIR%/latex | |||
make all-pdf-ja | |||
cd %~dp0 | |||
echo. | |||
echo.Build finished; the PDF files are in %BUILDDIR%/latex. | |||
goto end | |||
) | |||
if "%1" == "text" ( | |||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The text files are in %BUILDDIR%/text. | |||
goto end | |||
) | |||
if "%1" == "man" ( | |||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The manual pages are in %BUILDDIR%/man. | |||
goto end | |||
) | |||
if "%1" == "texinfo" ( | |||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. | |||
goto end | |||
) | |||
if "%1" == "gettext" ( | |||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale. | |||
goto end | |||
) | |||
if "%1" == "changes" ( | |||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.The overview file is in %BUILDDIR%/changes. | |||
goto end | |||
) | |||
if "%1" == "linkcheck" ( | |||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Link check complete; look for any errors in the above output ^ | |||
or in %BUILDDIR%/linkcheck/output.txt. | |||
goto end | |||
) | |||
if "%1" == "doctest" ( | |||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Testing of doctests in the sources finished, look at the ^ | |||
results in %BUILDDIR%/doctest/output.txt. | |||
goto end | |||
) | |||
if "%1" == "coverage" ( | |||
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Testing of coverage in the sources finished, look at the ^ | |||
results in %BUILDDIR%/coverage/python.txt. | |||
goto end | |||
) | |||
if "%1" == "xml" ( | |||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The XML files are in %BUILDDIR%/xml. | |||
goto end | |||
) | |||
if "%1" == "pseudoxml" ( | |||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. | |||
goto end | |||
) | |||
if "%1" == "dummy" ( | |||
%SPHINXBUILD% -b dummy %ALLSPHINXOPTS% %BUILDDIR%/dummy | |||
if errorlevel 1 exit /b 1 | |||
echo. | |||
echo.Build finished. Dummy builder generates no files. | |||
goto end | |||
) | |||
:end |
@@ -0,0 +1,73 @@ | |||
API - Activations | |||
========================= | |||
To make TensorLayer simple, we minimize the number of activation functions as much as
we can, so we encourage you to use TensorFlow's own functions. TensorFlow provides
``tf.nn.relu``, ``tf.nn.relu6``, ``tf.nn.elu``, ``tf.nn.softplus``,
``tf.nn.softsign`` and so on.
For parametric activation, please read the layer APIs. | |||
The shortcut of ``tensorlayer.activation`` is ``tensorlayer.act``. | |||
Your activation | |||
------------------- | |||
Customizing an activation function in TensorLayer is very easy.
The following example implements an activation that multiplies its input by 2.
For more complex activations, the TensorFlow API will be required.
.. code-block:: python | |||
def double_activation(x): | |||
return x * 2 | |||
double_activation = lambda x: x * 2 | |||
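For instance, a clipped linear activation can be sketched directly with TensorFlow ops (an illustration only, not the library's own ``ramp`` implementation):

.. code-block:: python

    import tensorflow as tf

    def my_ramp(x, v_min=0.0, v_max=1.0):
        # clip the input into [v_min, v_max], similar in spirit to tl.act.ramp
        return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max)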
.. automodule:: tensorlayer.activation | |||
.. autosummary:: | |||
leaky_relu | |||
leaky_relu6 | |||
leaky_twice_relu6 | |||
ramp | |||
swish | |||
sign | |||
hard_tanh | |||
pixel_wise_softmax | |||
Ramp | |||
------ | |||
.. autofunction:: ramp | |||
Leaky ReLU | |||
------------ | |||
.. autofunction:: leaky_relu | |||
Leaky ReLU6 | |||
------------ | |||
.. autofunction:: leaky_relu6 | |||
Twice Leaky ReLU6 | |||
----------------- | |||
.. autofunction:: leaky_twice_relu6 | |||
Swish | |||
------------ | |||
.. autofunction:: swish | |||
Sign | |||
--------------------- | |||
.. autofunction:: sign | |||
Hard Tanh | |||
--------------------- | |||
.. autofunction:: hard_tanh | |||
Pixel-wise softmax | |||
-------------------- | |||
.. autofunction:: pixel_wise_softmax | |||
Parametric activation | |||
------------------------------ | |||
See ``tensorlayer.layers``. |
@@ -0,0 +1,20 @@ | |||
API - Array Operations | |||
====================== | |||
.. automodule:: tensorlayer.array_ops | |||
.. autosummary:: | |||
alphas | |||
alphas_like | |||
TensorFlow Tensor Operations
---------------------------- | |||
tl.alphas | |||
^^^^^^^^^ | |||
.. autofunction:: alphas | |||
tl.alphas_like | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: alphas_like |
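As a quick sketch of the two functions above (the shapes and fill values are illustrative assumptions):

.. code-block:: python

    import tensorlayer as tl

    # a [2, 3] tensor filled with the scalar 0.5
    x = tl.alphas([2, 3], alpha_value=0.5)

    # a tensor with the same shape as `x`, filled with 0.9
    y = tl.alphas_like(x, alpha_value=0.9)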
@@ -0,0 +1,6 @@ | |||
CLI - Command Line Interface | |||
============================== | |||
.. automodule:: tensorlayer.cli | |||
.. automodule:: tensorlayer.cli.train |
@@ -0,0 +1,100 @@ | |||
API - Cost | |||
================== | |||
To make TensorLayer simple, we minimize the number of cost functions as much as
we can, so we encourage you to use TensorFlow's own functions; see the `TensorFlow API <https://www.tensorflow.org/versions/r2.0/api_docs/python/tf>`_.
.. note::
    Please refer to `Getting Started <https://github.com/tensorlayer/tensorlayer/tree/master/docs/user>`_ for how to obtain specific weights for weight regularization.
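For example, the softmax cross-entropy below can be combined with a hand-picked weight-decay term (a minimal sketch; the logits, labels and the commented-out ``network.trainable_weights`` attribute are illustrative assumptions):

.. code-block:: python

    import tensorflow as tf
    import tensorlayer as tl

    # illustrative logits and integer labels for a 10-class problem
    logits = tf.random.normal([32, 10])
    labels = tf.random.uniform([32], maxval=10, dtype=tf.int64)

    # mean softmax cross-entropy over the batch
    loss = tl.cost.cross_entropy(logits, labels, name='train_loss')

    # weight decay on a specific weight tensor (see the note above);
    # `network.trainable_weights` is an assumed model attribute
    # loss = loss + 1e-4 * tf.nn.l2_loss(network.trainable_weights[0])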
.. automodule:: tensorlayer.cost | |||
.. autosummary:: | |||
cross_entropy | |||
sigmoid_cross_entropy | |||
binary_cross_entropy | |||
mean_squared_error | |||
normalized_mean_square_error | |||
absolute_difference_error | |||
dice_coe | |||
dice_hard_coe | |||
iou_coe | |||
cross_entropy_seq | |||
cross_entropy_seq_with_mask | |||
cosine_similarity | |||
li_regularizer | |||
lo_regularizer | |||
maxnorm_regularizer | |||
maxnorm_o_regularizer | |||
maxnorm_i_regularizer | |||
huber_loss | |||
Softmax cross entropy | |||
---------------------- | |||
.. autofunction:: cross_entropy | |||
Sigmoid cross entropy | |||
---------------------- | |||
.. autofunction:: sigmoid_cross_entropy | |||
Binary cross entropy | |||
------------------------- | |||
.. autofunction:: binary_cross_entropy | |||
Mean squared error (L2) | |||
------------------------- | |||
.. autofunction:: mean_squared_error | |||
Normalized mean square error | |||
-------------------------------- | |||
.. autofunction:: normalized_mean_square_error | |||
Absolute difference error (L1) | |||
-------------------------------- | |||
.. autofunction:: absolute_difference_error | |||
Dice coefficient | |||
------------------------- | |||
.. autofunction:: dice_coe | |||
Hard Dice coefficient | |||
------------------------- | |||
.. autofunction:: dice_hard_coe | |||
IOU coefficient | |||
------------------------- | |||
.. autofunction:: iou_coe | |||
Cross entropy for sequence | |||
----------------------------- | |||
.. autofunction:: cross_entropy_seq | |||
Cross entropy with mask for sequence | |||
---------------------------------------- | |||
.. autofunction:: cross_entropy_seq_with_mask | |||
Cosine similarity | |||
------------------- | |||
.. autofunction:: cosine_similarity | |||
Regularization functions | |||
-------------------------- | |||
For ``tf.nn.l2_loss``, ``tf.contrib.layers.l1_regularizer``, ``tf.contrib.layers.l2_regularizer`` and
``tf.contrib.layers.sum_regularizer``, see the TensorFlow API.
Maxnorm | |||
^^^^^^^^^^ | |||
.. autofunction:: maxnorm_regularizer | |||
Special | |||
^^^^^^^^^^ | |||
.. autofunction:: li_regularizer | |||
.. autofunction:: lo_regularizer | |||
.. autofunction:: maxnorm_o_regularizer | |||
.. autofunction:: maxnorm_i_regularizer | |||
Huber Loss | |||
^^^^^^^^^^ | |||
.. autofunction:: huber_loss |
@@ -0,0 +1,260 @@ | |||
API - Database | |||
========================= | |||
This is the alpha version of the database management system.
If you have any trouble, please ask for help at `tensorlayer@gmail.com <mailto:tensorlayer@gmail.com>`_.
Why Database | |||
---------------- | |||
TensorLayer is designed for real-world production and is capable of large-scale machine learning applications.
The TensorLayer database is introduced to address the many data management challenges in large-scale machine learning projects, such as:
1. Finding training data from an enterprise data warehouse. | |||
2. Loading large datasets that are beyond the storage limitation of one computer. | |||
3. Managing different models with version control, and comparing them (e.g. by accuracy).
4. Automating the process of training, evaluating and deploying machine learning models. | |||
With the TensorLayer system, we introduce this database technology to address the challenges above. | |||
The database management system is designed with the following three principles in mind. | |||
Everything is Data | |||
^^^^^^^^^^^^^^^^^^ | |||
Data warehouses can store and capture the entire machine learning development process. The data can be categorized as: | |||
1. Dataset: This includes all the data used for training, validation and prediction. The labels can be specified manually or generated by model prediction.
2. Model architecture: The database includes a table that stores different model architectures, enabling users to reuse previous model development work.
3. Model parameters: This database stores all the model parameters of each epoch during training.
4. Tasks: A project usually includes many small tasks. Each task contains the necessary information, such as the hyper-parameters for training or validation. For a training task, typical information includes the training data, the model parameters, the model architecture and the number of training epochs. Validation, testing and inference are also supported by the task system.
5. Logs: The logs store all the metrics of each machine learning model, such as the time stamp and the loss and accuracy of each batch or epoch.
The TensorLayer database is, in principle, a keyword-based search engine. Each model, parameter set, or piece of training data is assigned many tags.
The storage system organizes data into two layers: the index layer and the blob layer. The index layer stores all the tags and references to the blob storage. The index layer is implemented on a NoSQL document database such as MongoDB. The blob layer stores videos, medical images or label masks in large chunks, and is usually implemented on a file system. Our database is based on MongoDB: the blob system is based on GridFS, while the indexes are stored as documents.
Everything is identified by Query | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
Within the database framework, any entity in the data warehouse, such as data, a model or a task, is specified by the database query language.
A query is more space-efficient for storage, and it can specify multiple objects in a concise way.
Another advantage of such a design is that it enables a highly flexible software system.
Many systems can be implemented by simply rewriting different components, and many new applications can be implemented just by updating the query, without modifying any application code.
.. | |||
A pulling-based stream processing pipeline
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
For large training datasets, we provide a stream interface, which can in theory support an unlimitedly large dataset.
The stream interface, implemented as a Python generator, keeps generating new data during training.
When using the stream interface, the idea of an epoch no longer applies; instead, we specify the batch size and imagine an epoch as a fixed, large number of steps.
Many techniques are used behind the stream interface for performance optimization.
The stream interface is based on database cursor technology.
For every data query, only the cursors are returned immediately, not the actual query results.
The actual data are loaded later, when the generators are evaluated.
The data loading is further optimized in several ways:
1. Data are compressed and decompressed on the fly.
2. Data are loaded in bulk mode to further optimize the IO traffic.
3. Data augmentation and random sampling are computed on the fly, only after the data are loaded into local memory.
4. A simple cache system stores the most recently used blob data.
Based on the stream interface, a continuous machine learning system can be easily implemented.
On a distributed system, the model training, validation and deployment can be run by different computing nodes, all running continuously.
The trainer keeps optimizing the models with newly added data, the evaluation node keeps evaluating recently generated models, and the deployment system keeps pulling the best models from the database warehouse for applications.
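The idea can be illustrated with a plain Python generator over a database cursor (a conceptual sketch of the pulling principle, not the library's internal implementation; the ``'data'`` field name is hypothetical):

.. code-block:: python

    def stream_batches(cursor, batch_size=32):
        """Pull-based stream: creating the cursor is cheap; documents
        are fetched from the database only as the generator is consumed."""
        batch = []
        for doc in cursor:               # data is loaded lazily, per document
            batch.append(doc['data'])    # 'data' is a hypothetical field name
            if len(batch) == batch_size:
                yield batch
                batch = []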
Preparation | |||
-------------- | |||
In principle, the database can be implemented on any document-oriented NoSQL database system.
The existing implementation is based on MongoDB.
Further implementations for other databases will be released as the project progresses.
It will be straightforward to port our database system to Google Cloud, AWS and Azure. | |||
The following tutorials are based on the MongoDB implementation. | |||
Installing and running MongoDB | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
The installation instructions for MongoDB can be found at
`MongoDB Docs <https://docs.MongoDB.com/manual/installation/>`__.
There are also many managed MongoDB services, such as MongoDB Atlas on Amazon or GCP.
Users can also use Docker, which is a powerful tool for `deploying software <https://hub.docker.com/_/mongo/>`_.
After installing MongoDB, a MongoDB management tool with a graphical user interface is extremely useful.
Users can install `Studio 3T (MongoChef) <https://studio3t.com/>`_, a powerful user interface tool for MongoDB that is free for non-commercial use.
Tutorials | |||
---------- | |||
Connect to the database | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
As with MongoDB management tools, an IP address and a port number are required to connect to the database.
To distinguish different projects, the database instances take a ``project_name`` argument.
In the following example, we connect to MongoDB on a local machine with the IP ``localhost`` and port ``27017`` (the default MongoDB port).
.. code-block:: python | |||
db = tl.db.TensorHub(ip='localhost', port=27017, dbname='temp', | |||
username=None, password='password', project_name='tutorial') | |||
Dataset management | |||
^^^^^^^^^^^^^^^^^^^^ | |||
You can save a dataset into the database and allow all machines to access it.
Apart from the dataset key, you can also insert custom arguments such as a version and a description, for better management of the datasets.
Note that all saving functions automatically store a timestamp, allowing you to load items (data, models, tasks) by timestamp.
.. code-block:: python | |||
db.save_dataset(dataset=[X_train, y_train, X_test, y_test], dataset_name='mnist', description='this is a tutorial') | |||
After saving the dataset, others can access it as follows:
.. code-block:: python | |||
dataset = db.find_dataset('mnist') | |||
dataset = db.find_dataset('mnist', version='1.0') | |||
If you have multiple datasets that use the same dataset key, you can get all of them as follows:
.. code-block:: python | |||
datasets = db.find_all_datasets('mnist') | |||
Model management | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
Save the model architecture and parameters into the database.
The model architecture is represented by a TL graph, and the parameters are stored as a list of arrays.
.. code-block:: python | |||
db.save_model(net, accuracy=0.8, loss=2.3, name='second_model') | |||
After saving the model into the database, we can load it as follows:
.. code-block:: python | |||
net = db.find_model(sess=sess, accuracy=0.8, loss=2.3) | |||
If there are many models, you can use MongoDB's ``sort`` method to find the model you want.
To get the newest or oldest model, you can sort by time: | |||
.. code-block:: python | |||
## newest model | |||
net = db.find_model(sess=sess, sort=[("time", pymongo.DESCENDING)]) | |||
net = db.find_model(sess=sess, sort=[("time", -1)]) | |||
## oldest model | |||
net = db.find_model(sess=sess, sort=[("time", pymongo.ASCENDING)]) | |||
net = db.find_model(sess=sess, sort=[("time", 1)]) | |||
If you save the model along with its accuracy, you can get the model with the best accuracy as follows:
.. code-block:: python | |||
net = db.find_model(sess=sess, sort=[("test_accuracy", -1)]) | |||
To delete all models in a project: | |||
.. code-block:: python | |||
db.delete_model() | |||
If you want to delete a specific model, pass the matching arguments to the call.
Event / Logging management | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
Save training log: | |||
.. code-block:: python | |||
db.save_training_log(accuracy=0.33) | |||
db.save_training_log(accuracy=0.44) | |||
Delete logs that match the requirement: | |||
.. code-block:: python | |||
db.delete_training_log(accuracy=0.33) | |||
Delete all logging of this project: | |||
.. code-block:: python | |||
db.delete_training_log() | |||
db.delete_validation_log() | |||
db.delete_testing_log() | |||
Task distribution | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
A project usually consists of many tasks, such as hyper-parameter selection.
To make this easier, we can distribute these tasks to several GPU servers.
A task consists of a task script, hyper-parameters, a desired result and a status.
A task distributor can push both datasets and tasks into the database, allowing task runners on GPU servers to pull and run them.
The following is an example that pushes 3 tasks with different hyper-parameters.
.. code-block:: python | |||
## save dataset into database, then allow other servers to use it | |||
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) | |||
db.save_dataset((X_train, y_train, X_val, y_val, X_test, y_test), 'mnist', description='handwriting digit') | |||
## push tasks into the database, then allow other servers to pull and run them
db.create_task( | |||
task_name='mnist', script='task_script.py', hyper_parameters=dict(n_units1=800, n_units2=800), | |||
saved_result_keys=['test_accuracy'], description='800-800' | |||
) | |||
db.create_task( | |||
task_name='mnist', script='task_script.py', hyper_parameters=dict(n_units1=600, n_units2=600), | |||
saved_result_keys=['test_accuracy'], description='600-600' | |||
) | |||
db.create_task( | |||
task_name='mnist', script='task_script.py', hyper_parameters=dict(n_units1=400, n_units2=400), | |||
saved_result_keys=['test_accuracy'], description='400-400' | |||
) | |||
## wait for tasks to finish | |||
while db.check_unfinished_task(task_name='mnist'): | |||
print("waiting runners to finish the tasks") | |||
time.sleep(1) | |||
## you can get the model and results from the database and do some analysis at the end
The task runners on the GPU servers can monitor the database and run tasks immediately when they become available.
In the task script, we can save the final model and results to the database; this allows task distributors to retrieve the desired model and results.
.. code-block:: python | |||
## monitor the database and pull tasks to run
while True: | |||
print("waiting task from distributor") | |||
db.run_task(task_name='mnist', sort=[("time", -1)]) | |||
time.sleep(1) | |||
Example codes | |||
^^^^^^^^^^^^^^^^ | |||
See `here <https://github.com/tensorlayer/tensorlayer/tree/master/example/database>`__. | |||
TensorHub API | |||
--------------------- | |||
.. automodule:: tensorlayer.db | |||
.. autoclass:: TensorHub | |||
:members: |
@@ -0,0 +1,23 @@ | |||
API - Distributed Training | |||
============================= | |||
(Alpha release - usage might change later) | |||
Helper API for running distributed training.
Check these `examples <https://github.com/tensorlayer/tensorlayer/tree/master/examples/distributed_training>`_. | |||
.. automodule:: tensorlayer.distributed | |||
.. autosummary:: | |||
Trainer | |||
Distributed training | |||
-------------------- | |||
Trainer | |||
^^^^^^^^^^^ | |||
.. autofunction:: Trainer | |||
@@ -0,0 +1,295 @@ | |||
API - Files | |||
=================================== | |||
A collection of helper functions for working with datasets.
Load benchmark datasets, save and restore models, and save and load variables.
.. automodule:: tensorlayer.files | |||
.. autosummary:: | |||
load_mnist_dataset | |||
load_fashion_mnist_dataset | |||
load_cifar10_dataset | |||
load_cropped_svhn | |||
load_ptb_dataset | |||
load_matt_mahoney_text8_dataset | |||
load_imdb_dataset | |||
load_nietzsche_dataset | |||
load_wmt_en_fr_dataset | |||
load_flickr25k_dataset | |||
load_flickr1M_dataset | |||
load_cyclegan_dataset | |||
load_celebA_dataset | |||
load_voc_dataset | |||
load_mpii_pose_dataset | |||
download_file_from_google_drive | |||
save_npz | |||
load_npz | |||
assign_weights | |||
load_and_assign_npz | |||
save_npz_dict | |||
load_and_assign_npz_dict | |||
save_weights_to_hdf5 | |||
load_hdf5_to_weights_in_order | |||
load_hdf5_to_weights | |||
save_any_to_npy | |||
load_npy_to_any | |||
file_exists | |||
folder_exists | |||
del_file | |||
del_folder | |||
read_file | |||
load_file_list | |||
load_folder_list | |||
exists_or_mkdir | |||
maybe_download_and_extract | |||
natural_keys | |||
.. | |||
save_ckpt | |||
load_ckpt | |||
save_graph | |||
load_graph | |||
save_graph_and_params | |||
load_graph_and_params | |||
npz_to_W_pdf | |||
Load dataset functions | |||
------------------------ | |||
MNIST | |||
^^^^^^^ | |||
.. autofunction:: load_mnist_dataset | |||
Fashion-MNIST | |||
^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_fashion_mnist_dataset | |||
CIFAR-10 | |||
^^^^^^^^^^^^ | |||
.. autofunction:: load_cifar10_dataset | |||
SVHN | |||
^^^^^^^ | |||
.. autofunction:: load_cropped_svhn | |||
Penn TreeBank (PTB) | |||
^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_ptb_dataset | |||
Matt Mahoney's text8 | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_matt_mahoney_text8_dataset | |||
IMDB
^^^^^^^^^^^ | |||
.. autofunction:: load_imdb_dataset | |||
Nietzsche | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: load_nietzsche_dataset | |||
English-to-French translation data from the WMT'15 Website | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_wmt_en_fr_dataset | |||
Flickr25k | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_flickr25k_dataset | |||
Flickr1M | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_flickr1M_dataset | |||
CycleGAN | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_cyclegan_dataset | |||
CelebA | |||
^^^^^^^^^ | |||
.. autofunction:: load_celebA_dataset | |||
VOC 2007/2012 | |||
^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_voc_dataset | |||
MPII | |||
^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_mpii_pose_dataset | |||
Google Drive | |||
^^^^^^^^^^^^^^^^ | |||
.. autofunction:: download_file_from_google_drive | |||
Load and save network | |||
---------------------- | |||
TensorFlow provides the ``.ckpt`` file format to save and restore models, but
we suggest using the standard ``hdf5`` file format to save models for the
sake of cross-platform compatibility. Other file formats such as ``.npz`` are also available.
.. code-block:: python | |||
## save model as .h5 | |||
tl.files.save_weights_to_hdf5('model.h5', network.all_weights) | |||
# restore model from .h5 (in order) | |||
tl.files.load_hdf5_to_weights_in_order('model.h5', network.all_weights) | |||
# restore model from .h5 (by name) | |||
tl.files.load_hdf5_to_weights('model.h5', network.all_weights) | |||
## save model as .npz | |||
tl.files.save_npz(network.all_weights , name='model.npz') | |||
# restore model from .npz (method 1) | |||
load_params = tl.files.load_npz(name='model.npz') | |||
tl.files.assign_weights(sess, load_params, network) | |||
# restore model from .npz (method 2) | |||
tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network) | |||
## you can assign the pre-trained parameters as follows
# 1st parameter | |||
tl.files.assign_weights(sess, [load_params[0]], network) | |||
# the first three parameters | |||
tl.files.assign_weights(sess, load_params[:3], network) | |||
Save network into list (npz) | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_npz | |||
Load network from list (npz) | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_npz | |||
Assign a list of parameters to network | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: assign_weights | |||
Load and assign a list of parameters to network | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_and_assign_npz | |||
Save network into dict (npz) | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_npz_dict | |||
Load network from dict (npz) | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_and_assign_npz_dict | |||
Save network into OrderedDict (hdf5) | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_weights_to_hdf5 | |||
Load network from hdf5 in order | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_hdf5_to_weights_in_order | |||
Load network from hdf5 by name | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_hdf5_to_weights | |||
.. | |||
Save network architecture as a graph | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_graph | |||
Load network architecture from a graph | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_graph | |||
Save network architecture and parameters | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_graph_and_params | |||
Load network architecture and parameters | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_graph_and_params | |||
.. | |||
Save network into ckpt | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_ckpt | |||
Load network from ckpt | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_ckpt | |||
Load and save variables | |||
------------------------ | |||
Save variables as .npy | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_any_to_npy | |||
Load variables from .npy | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_npy_to_any | |||
Folder/File functions | |||
------------------------ | |||
Check file exists | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: file_exists | |||
Check folder exists | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: folder_exists | |||
Delete file | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: del_file | |||
Delete folder | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: del_folder | |||
Read file | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: read_file | |||
Load file list from folder | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_file_list | |||
Load folder list from folder | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: load_folder_list | |||
Check and Create folder | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: exists_or_mkdir | |||
Download or extract | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: maybe_download_and_extract | |||
Sort | |||
------- | |||
List of string with number in human order | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: natural_keys | |||
Visualizing npz file | |||
---------------------- | |||
.. autofunction:: npz_to_W_pdf |
@@ -0,0 +1,51 @@ | |||
API - Initializers | |||
========================= | |||
To make TensorLayer simple, TensorLayer only wraps some basic initializers. For more advanced initializers,
e.g. ``tf.initializers.he_normal``, please refer to the TensorFlow-provided initializers
`here <https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/initializers>`_.
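For example, an initializer instance is typically passed to a layer's ``W_init`` argument (a brief sketch; the layer shapes are illustrative):

.. code-block:: python

    import tensorlayer as tl

    # a truncated normal initializer for a Dense layer's weight matrix
    W_init = tl.initializers.TruncatedNormal(mean=0.0, stddev=0.05)
    dense = tl.layers.Dense(n_units=100, W_init=W_init, in_channels=50)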
.. automodule:: tensorlayer.initializers | |||
.. autosummary:: | |||
Initializer | |||
Zeros | |||
Ones | |||
Constant | |||
RandomUniform | |||
RandomNormal | |||
TruncatedNormal | |||
deconv2d_bilinear_upsampling_initializer | |||
Initializer | |||
------------ | |||
.. autoclass:: Initializer | |||
Zeros | |||
------------ | |||
.. autoclass:: Zeros | |||
Ones | |||
------------ | |||
.. autoclass:: Ones | |||
Constant | |||
----------------- | |||
.. autoclass:: Constant | |||
RandomUniform | |||
-------------- | |||
.. autoclass:: RandomUniform | |||
RandomNormal | |||
--------------------- | |||
.. autoclass:: RandomNormal | |||
TruncatedNormal | |||
--------------------- | |||
.. autoclass:: TruncatedNormal | |||
deconv2d_bilinear_upsampling_initializer | |||
------------------------------------------ | |||
.. autofunction:: deconv2d_bilinear_upsampling_initializer |
@@ -0,0 +1,36 @@ | |||
API - Iteration | |||
========================== | |||
Data iteration. | |||
.. automodule:: tensorlayer.iterate | |||
.. autosummary:: | |||
minibatches | |||
seq_minibatches | |||
seq_minibatches2 | |||
ptb_iterator | |||
Non-time series | |||
-------------------- | |||
.. autofunction:: minibatches | |||
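A minimal usage sketch (the toy arrays are illustrative):

.. code-block:: python

    import numpy as np
    import tensorlayer as tl

    X = np.asarray([['a', 'a'], ['b', 'b'], ['c', 'c'], ['d', 'd']])
    y = np.asarray([0, 1, 2, 3])
    # iterate over the data in batches of 2, without shuffling
    for X_batch, y_batch in tl.iterate.minibatches(inputs=X, targets=y, batch_size=2, shuffle=False):
        print(X_batch, y_batch)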
Time series | |||
---------------------- | |||
Sequence iteration 1 | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: seq_minibatches | |||
Sequence iteration 2 | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: seq_minibatches2 | |||
PTB dataset iteration | |||
^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: ptb_iterator |
@@ -0,0 +1,701 @@ | |||
API - Layers | |||
============ | |||
.. automodule:: tensorlayer.layers | |||
.. ----------------------------------------------------------- | |||
.. Layer List | |||
.. ----------------------------------------------------------- | |||
Layer list | |||
---------- | |||
.. autosummary:: | |||
Layer | |||
Input | |||
OneHot | |||
Word2vecEmbedding | |||
Embedding | |||
AverageEmbedding | |||
Dense | |||
Dropout | |||
GaussianNoise | |||
DropconnectDense | |||
UpSampling2d | |||
DownSampling2d | |||
Conv1d | |||
Conv2d | |||
Conv3d | |||
DeConv2d | |||
DeConv3d | |||
DepthwiseConv2d | |||
SeparableConv1d | |||
SeparableConv2d | |||
DeformableConv2d | |||
GroupConv2d | |||
PadLayer | |||
PoolLayer | |||
ZeroPad1d | |||
ZeroPad2d | |||
ZeroPad3d | |||
MaxPool1d | |||
MeanPool1d | |||
MaxPool2d | |||
MeanPool2d | |||
MaxPool3d | |||
MeanPool3d | |||
GlobalMaxPool1d | |||
GlobalMeanPool1d | |||
GlobalMaxPool2d | |||
GlobalMeanPool2d | |||
GlobalMaxPool3d | |||
GlobalMeanPool3d | |||
CornerPool2d | |||
SubpixelConv1d | |||
SubpixelConv2d | |||
SpatialTransformer2dAffine | |||
transformer | |||
batch_transformer | |||
BatchNorm | |||
BatchNorm1d | |||
BatchNorm2d | |||
BatchNorm3d | |||
LocalResponseNorm | |||
InstanceNorm | |||
InstanceNorm1d | |||
InstanceNorm2d | |||
InstanceNorm3d | |||
LayerNorm | |||
GroupNorm | |||
SwitchNorm | |||
RNN | |||
SimpleRNN | |||
GRURNN | |||
LSTMRNN | |||
BiRNN | |||
retrieve_seq_length_op | |||
retrieve_seq_length_op2 | |||
retrieve_seq_length_op3 | |||
target_mask_op | |||
Flatten | |||
Reshape | |||
Transpose | |||
Shuffle | |||
Lambda | |||
Concat | |||
Elementwise | |||
ElementwiseLambda | |||
ExpandDims | |||
Tile | |||
Stack | |||
UnStack | |||
Sign | |||
Scale | |||
BinaryDense | |||
BinaryConv2d | |||
TernaryDense | |||
TernaryConv2d | |||
DorefaDense | |||
DorefaConv2d | |||
PRelu | |||
PRelu6 | |||
PTRelu6 | |||
flatten_reshape | |||
initialize_rnn_state | |||
list_remove_repeat | |||
.. ----------------------------------------------------------- | |||
.. Basic Layers | |||
.. ----------------------------------------------------------- | |||
Base Layer | |||
----------- | |||
.. autoclass:: Layer | |||
.. ----------------------------------------------------------- | |||
.. Input Layer | |||
.. ----------------------------------------------------------- | |||
Input Layers | |||
--------------- | |||
Input Layer | |||
^^^^^^^^^^^^^^^^ | |||
.. autofunction:: Input | |||
.. ----------------------------------------------------------- | |||
.. Embedding Layers | |||
.. ----------------------------------------------------------- | |||
One-hot Layer | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: OneHot | |||
Word2Vec Embedding Layer | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Word2vecEmbedding | |||
Embedding Layer | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Embedding | |||
Average Embedding Layer | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: AverageEmbedding | |||
.. ----------------------------------------------------------- | |||
.. Activation Layers | |||
.. ----------------------------------------------------------- | |||
Activation Layers | |||
--------------------------- | |||
PReLU Layer | |||
^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: PRelu | |||
PReLU6 Layer | |||
^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: PRelu6 | |||
PTReLU6 Layer | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: PTRelu6 | |||
.. ----------------------------------------------------------- | |||
.. Convolutional Layers | |||
.. ----------------------------------------------------------- | |||
Convolutional Layers | |||
--------------------- | |||
Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
Conv1d | |||
""""""""""""""""""""" | |||
.. autoclass:: Conv1d | |||
Conv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: Conv2d | |||
Conv3d | |||
""""""""""""""""""""" | |||
.. autoclass:: Conv3d | |||
Deconvolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
DeConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: DeConv2d | |||
DeConv3d | |||
""""""""""""""""""""" | |||
.. autoclass:: DeConv3d | |||
Deformable Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
DeformableConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: DeformableConv2d | |||
Depthwise Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
DepthwiseConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: DepthwiseConv2d | |||
Group Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
GroupConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: GroupConv2d | |||
Separable Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
SeparableConv1d | |||
""""""""""""""""""""" | |||
.. autoclass:: SeparableConv1d | |||
SeparableConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: SeparableConv2d | |||
SubPixel Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
SubpixelConv1d | |||
""""""""""""""""""""" | |||
.. autoclass:: SubpixelConv1d | |||
SubpixelConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: SubpixelConv2d | |||
.. ----------------------------------------------------------- | |||
.. Dense Layers | |||
.. ----------------------------------------------------------- | |||
Dense Layers | |||
------------- | |||
Dense Layer | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Dense | |||
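A short usage sketch (shapes and names are illustrative):

.. code-block:: python

    import tensorflow as tf
    import tensorlayer as tl

    ni = tl.layers.Input([100, 50], name='input')
    # a fully-connected layer with 800 units and ReLU activation
    nn = tl.layers.Dense(n_units=800, act=tf.nn.relu, in_channels=50, name='dense')(ni)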
Drop Connect Dense Layer | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: DropconnectDense | |||
.. ----------------------------------------------------------- | |||
.. Dropout Layer | |||
.. ----------------------------------------------------------- | |||
Dropout Layers | |||
------------------- | |||
.. autoclass:: Dropout | |||
.. ----------------------------------------------------------- | |||
.. Extend Layers | |||
.. ----------------------------------------------------------- | |||
Extend Layers | |||
------------------- | |||
Expand Dims Layer | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: ExpandDims | |||
Tile layer | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Tile | |||
.. ----------------------------------------------------------- | |||
.. Image Resampling Layers | |||
.. ----------------------------------------------------------- | |||
Image Resampling Layers | |||
------------------------- | |||
2D UpSampling | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: UpSampling2d | |||
2D DownSampling | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: DownSampling2d | |||
.. ----------------------------------------------------------- | |||
.. Lambda Layer | |||
.. ----------------------------------------------------------- | |||
Lambda Layers | |||
--------------- | |||
Lambda Layer | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Lambda | |||
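For example, an arbitrary function can be wrapped into a layer (a brief sketch; shapes and names are illustrative):

.. code-block:: python

    import tensorlayer as tl

    ni = tl.layers.Input([8, 3], name='input')
    # wrap a parameter-free function as a layer
    nn = tl.layers.Lambda(lambda x: 2 * x, name='double')(ni)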
ElementWise Lambda Layer | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: ElementwiseLambda | |||
.. ----------------------------------------------------------- | |||
.. Merge Layer | |||
.. ----------------------------------------------------------- | |||
Merge Layers | |||
--------------- | |||
Concat Layer | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Concat | |||
ElementWise Layer | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Elementwise | |||
.. ----------------------------------------------------------- | |||
.. Noise Layers | |||
.. ----------------------------------------------------------- | |||
Noise Layer | |||
--------------- | |||
.. autoclass:: GaussianNoise | |||
.. ----------------------------------------------------------- | |||
.. Normalization Layers | |||
.. ----------------------------------------------------------- | |||
Normalization Layers | |||
-------------------- | |||
Batch Normalization | |||
^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: BatchNorm | |||
Batch Normalization 1D | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: BatchNorm1d | |||
Batch Normalization 2D | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: BatchNorm2d | |||
Batch Normalization 3D | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: BatchNorm3d | |||
Local Response Normalization | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: LocalResponseNorm | |||
Instance Normalization | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: InstanceNorm | |||
Instance Normalization 1D | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: InstanceNorm1d | |||
Instance Normalization 2D | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: InstanceNorm2d | |||
Instance Normalization 3D | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: InstanceNorm3d | |||
Layer Normalization | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: LayerNorm | |||
Group Normalization | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: GroupNorm | |||
Switch Normalization | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: SwitchNorm | |||
.. ----------------------------------------------------------- | |||
.. Padding Layers | |||
.. ----------------------------------------------------------- | |||
Padding Layers | |||
------------------------ | |||
Pad Layer (Expert API) | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
Padding layer for any mode.
.. autoclass:: PadLayer | |||
1D Zero padding | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: ZeroPad1d | |||
2D Zero padding | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: ZeroPad2d | |||
3D Zero padding | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: ZeroPad3d | |||
.. ----------------------------------------------------------- | |||
.. Pooling Layers | |||
.. ----------------------------------------------------------- | |||
Pooling Layers | |||
------------------------ | |||
Pool Layer (Expert API) | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
Pooling layer for any dimension and any pooling function.
.. autoclass:: PoolLayer | |||
1D Max pooling | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: MaxPool1d | |||
1D Mean pooling | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: MeanPool1d | |||
2D Max pooling | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: MaxPool2d | |||
2D Mean pooling | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: MeanPool2d | |||
3D Max pooling | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: MaxPool3d | |||
3D Mean pooling | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: MeanPool3d | |||
1D Global Max pooling | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: GlobalMaxPool1d | |||
1D Global Mean pooling | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: GlobalMeanPool1d | |||
2D Global Max pooling | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: GlobalMaxPool2d | |||
2D Global Mean pooling | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: GlobalMeanPool2d | |||
3D Global Max pooling | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: GlobalMaxPool3d | |||
3D Global Mean pooling | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: GlobalMeanPool3d | |||
2D Corner pooling | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: CornerPool2d | |||
.. ----------------------------------------------------------- | |||
.. Quantized Layers | |||
.. ----------------------------------------------------------- | |||
Quantized Nets | |||
------------------ | |||
This is an experimental API package for building quantized neural networks. We are using matrix multiplication rather than add-minus and bit-count operations at the moment, so these APIs will not speed up inference. For production, you can train a model with TensorLayer and deploy it in a customized C/C++ implementation (we may provide an extra C/C++ binary-net framework that can load models from TensorLayer).
Note that these experimental APIs may change in the future.
Sign | |||
^^^^^^^^^^^^^^ | |||
.. autoclass:: Sign | |||
Scale | |||
^^^^^^^^^^^^^^ | |||
.. autoclass:: Scale | |||
Binary Dense Layer | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: BinaryDense | |||
Binary (De)Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
BinaryConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: BinaryConv2d | |||
Ternary Dense Layer | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
TernaryDense | |||
""""""""""""""""""""" | |||
.. autoclass:: TernaryDense | |||
Ternary Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
TernaryConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: TernaryConv2d | |||
DoReFa Dense Layer
^^^^^^^^^^^^^^^^^^^^^^^^^^
DorefaDense
"""""""""""""""""""""
.. autoclass:: DorefaDense
DoReFa Convolutions | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
DorefaConv2d | |||
""""""""""""""""""""" | |||
.. autoclass:: DorefaConv2d | |||
.. ----------------------------------------------------------- | |||
.. Recurrent Layers | |||
.. ----------------------------------------------------------- | |||
Recurrent Layers | |||
--------------------- | |||
Common Recurrent layer | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
All recurrent layers can implement any type of RNN cell by feeding in a different cell function (LSTM, GRU, etc.).
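For example, an LSTM cell can be plugged into the generic ``RNN`` layer (a sketch following this pattern; the shapes, names and keyword flags are illustrative assumptions):

.. code-block:: python

    import tensorflow as tf
    import tensorlayer as tl

    # a batch of 8 sequences, 5 time steps, 10 features per step
    ni = tl.layers.Input([8, 5, 10], name='seq_input')
    rnn_out, lstm_state = tl.layers.RNN(
        cell=tf.keras.layers.LSTMCell(units=16),   # any cell function works
        in_channels=10,
        return_last_output=True,
        return_last_state=True,
        name='lstm_rnn',
    )(ni)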
RNN layer | |||
"""""""""""""""""""""""""" | |||
.. autoclass:: RNN | |||
RNN layer with Simple RNN Cell | |||
"""""""""""""""""""""""""""""""""" | |||
.. autoclass:: SimpleRNN | |||
RNN layer with GRU Cell | |||
"""""""""""""""""""""""""""""""""" | |||
.. autoclass:: GRURNN | |||
RNN layer with LSTM Cell | |||
"""""""""""""""""""""""""""""""""" | |||
.. autoclass:: LSTMRNN | |||
Bidirectional layer | |||
""""""""""""""""""""""""""""""""" | |||
.. autoclass:: BiRNN | |||
Advanced Ops for Dynamic RNN | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
These operations are usually used inside the dynamic RNN layer; they can
compute sequence lengths for different situations and get the last RNN outputs by indexing.
Compute Sequence length 1 | |||
"""""""""""""""""""""""""" | |||
.. autofunction:: retrieve_seq_length_op | |||
Compute Sequence length 2 | |||
""""""""""""""""""""""""""""" | |||
.. autofunction:: retrieve_seq_length_op2 | |||
Compute Sequence length 3 | |||
"""""""""""""""""""""""""""" | |||
.. autofunction:: retrieve_seq_length_op3 | |||
Compute mask of the target sequence | |||
""""""""""""""""""""""""""""""""""""""" | |||
.. autofunction:: target_mask_op | |||
.. ----------------------------------------------------------- | |||
.. Shape Layers | |||
.. ----------------------------------------------------------- | |||
Shape Layers | |||
------------ | |||
Flatten Layer | |||
^^^^^^^^^^^^^^^ | |||
.. autoclass:: Flatten | |||
Reshape Layer | |||
^^^^^^^^^^^^^^^ | |||
.. autoclass:: Reshape | |||
Transpose Layer | |||
^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Transpose | |||
Shuffle Layer | |||
^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Shuffle | |||
.. ----------------------------------------------------------- | |||
.. Spatial Transformer Layers | |||
.. ----------------------------------------------------------- | |||
Spatial Transformer | |||
----------------------- | |||
2D Affine Transformation | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: SpatialTransformer2dAffine | |||
2D Affine Transformation function | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: transformer | |||
Batch 2D Affine Transformation function | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: batch_transformer | |||
.. ----------------------------------------------------------- | |||
.. Stack Layers | |||
.. ----------------------------------------------------------- | |||
Stack Layer | |||
------------- | |||
Stack Layer | |||
^^^^^^^^^^^^^^ | |||
.. autoclass:: Stack | |||
Unstack Layer | |||
^^^^^^^^^^^^^^^ | |||
.. autoclass:: UnStack | |||
.. ----------------------------------------------------------- | |||
.. Helper Functions | |||
.. ----------------------------------------------------------- | |||
Helper Functions | |||
------------------------ | |||
Flatten tensor | |||
^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: flatten_reshape | |||
Initialize RNN state | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: initialize_rnn_state | |||
Remove repeated items in a list | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: list_remove_repeat | |||
@@ -0,0 +1,59 @@ | |||
API - Models | |||
================================ | |||
TensorLayer provides many pretrained models; you can easily use a whole pretrained model, or only a part of it, via these APIs.
.. automodule:: tensorlayer.models | |||
.. autosummary:: | |||
Model | |||
VGG16 | |||
VGG19 | |||
SqueezeNetV1 | |||
MobileNetV1 | |||
ResNet50 | |||
Seq2seq | |||
Seq2seqLuongAttention | |||
Base Model | |||
----------- | |||
.. autoclass:: Model | |||
VGG16 | |||
---------------------- | |||
.. autofunction:: VGG16 | |||
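A minimal usage sketch (``pretrained=True`` and ``eval()`` follow the TensorLayer 2.x model API; treat the exact arguments as assumptions and check the documentation above):

.. code-block:: python

    import tensorlayer as tl

    vgg = tl.models.VGG16(pretrained=True)  # downloads the ImageNet weights
    vgg.eval()                              # switch to inference mode
    # img: a [batch, 224, 224, 3] tensor preprocessed as VGG expects
    # logits = vgg(img)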
VGG19 | |||
---------------------- | |||
.. autofunction:: VGG19 | |||
SqueezeNetV1 | |||
---------------- | |||
.. autofunction:: SqueezeNetV1 | |||
MobileNetV1 | |||
---------------- | |||
.. autofunction:: MobileNetV1 | |||
ResNet50 | |||
---------------- | |||
.. autofunction:: ResNet50 | |||
Seq2seq | |||
------------------------ | |||
.. autoclass:: Seq2seq | |||
Seq2seq Luong Attention | |||
------------------------ | |||
.. autoclass:: Seq2seqLuongAttention |
@@ -0,0 +1,148 @@ | |||
API - Natural Language Processing | |||
================================== | |||
Natural Language Processing and Word Representation. | |||
.. automodule:: tensorlayer.nlp | |||
.. autosummary:: | |||
generate_skip_gram_batch | |||
sample | |||
sample_top | |||
SimpleVocabulary | |||
Vocabulary | |||
process_sentence | |||
create_vocab | |||
simple_read_words | |||
read_words | |||
read_analogies_file | |||
build_vocab | |||
build_reverse_dictionary | |||
build_words_dataset | |||
save_vocab | |||
words_to_word_ids | |||
word_ids_to_words | |||
basic_tokenizer | |||
create_vocabulary | |||
initialize_vocabulary | |||
sentence_to_token_ids | |||
data_to_token_ids | |||
moses_multi_bleu | |||
Iteration function for training embedding matrix | |||
------------------------------------------------- | |||
.. autofunction:: generate_skip_gram_batch | |||
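A sketch of drawing one mini-batch of (centre, context) pairs from a sequence of word ids (argument names follow the function documentation above; the returned ``data_index`` is fed back in for the next batch):

.. code-block:: python

    import tensorlayer as tl

    data = [1, 2, 3, 4, 5, 6, 7, 8, 9]  # a toy id sequence
    batch, labels, data_index = tl.nlp.generate_skip_gram_batch(
        data=data, batch_size=8, num_skips=2, skip_window=1, data_index=0)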
Sampling functions | |||
------------------- | |||
Simple sampling | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: sample | |||
Sampling from top k | |||
^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: sample_top | |||
Vector representations of words | |||
------------------------------- | |||
Simple vocabulary class | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: SimpleVocabulary | |||
Vocabulary class | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autoclass:: Vocabulary | |||
Process sentence | |||
^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: process_sentence | |||
Create vocabulary | |||
^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: create_vocab | |||
Read words from file | |||
---------------------- | |||
Simple read file | |||
^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: simple_read_words | |||
Read file | |||
^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: read_words | |||
Read analogy question file | |||
----------------------------- | |||
.. autofunction:: read_analogies_file | |||
Build vocabulary, word dictionary and word tokenization | |||
-------------------------------------------------------- | |||
Build dictionary from word to id | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: build_vocab | |||
Build dictionary from id to word | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: build_reverse_dictionary | |||
Build dictionaries for id to word etc | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: build_words_dataset | |||
Save vocabulary | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_vocab | |||
Convert words to IDs and IDs to words | |||
-------------------------------------------------------- | |||
These conversions can also be done with the ``Vocabulary`` class.
List of Words to IDs | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: words_to_word_ids | |||
List of IDs to Words | |||
^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: word_ids_to_words | |||
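A round-trip sketch, assuming the two dictionaries were built with ``build_vocab`` and ``build_reverse_dictionary`` above (toy dictionaries here, not real vocabularies):

.. code-block:: python

    import tensorlayer as tl

    word_to_id = {'hello': 0, 'world': 1}   # toy dictionaries
    id_to_word = {0: 'hello', 1: 'world'}
    ids = tl.nlp.words_to_word_ids(['hello', 'world'], word_to_id)  # [0, 1]
    words = tl.nlp.word_ids_to_words(ids, id_to_word)               # ['hello', 'world']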
Functions for translation | |||
--------------------------- | |||
Word Tokenization | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: basic_tokenizer | |||
Create or read vocabulary | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: create_vocabulary | |||
.. autofunction:: initialize_vocabulary | |||
Convert words to IDs and IDs to words | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: sentence_to_token_ids | |||
.. autofunction:: data_to_token_ids | |||
Metrics | |||
--------------------------- | |||
BLEU | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: moses_multi_bleu |
@@ -0,0 +1,19 @@ | |||
API - Optimizers | |||
================ | |||
.. automodule:: tensorlayer.optimizers | |||
TensorLayer provides a simple API and tools to ease research and development and to reduce the time to production.
We therefore provide the latest state-of-the-art optimizers that work with TensorFlow.
Optimizers List | |||
--------------- | |||
.. autosummary:: | |||
AMSGrad | |||
AMSGrad Optimizer | |||
----------------- | |||
.. autoclass:: AMSGrad | |||
:members: |
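A hedged usage sketch: AMSGrad is used like any other TensorFlow optimizer (the constructor arguments here are assumptions; see the class documentation above):

.. code-block:: python

    import tensorlayer as tl

    # Create the optimizer; learning_rate is an assumed keyword argument.
    optimizer = tl.optimizers.AMSGrad(learning_rate=0.001)
    # train_op = optimizer.minimize(cost)  # cost: your loss tensor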
@@ -0,0 +1,641 @@ | |||
API - Data Pre-Processing | |||
========================= | |||
.. automodule:: tensorlayer.prepro | |||
.. autosummary:: | |||
affine_rotation_matrix | |||
affine_horizontal_flip_matrix | |||
affine_vertical_flip_matrix | |||
affine_shift_matrix | |||
affine_shear_matrix | |||
affine_zoom_matrix | |||
affine_respective_zoom_matrix | |||
transform_matrix_offset_center | |||
affine_transform | |||
affine_transform_cv2 | |||
affine_transform_keypoints | |||
projective_transform_by_points | |||
rotation | |||
rotation_multi | |||
crop | |||
crop_multi | |||
flip_axis | |||
flip_axis_multi | |||
shift | |||
shift_multi | |||
shear | |||
shear_multi | |||
shear2 | |||
shear_multi2 | |||
swirl | |||
swirl_multi | |||
elastic_transform | |||
elastic_transform_multi | |||
zoom | |||
respective_zoom | |||
zoom_multi | |||
brightness | |||
brightness_multi | |||
illumination | |||
rgb_to_hsv | |||
hsv_to_rgb | |||
adjust_hue | |||
imresize | |||
pixel_value_scale | |||
samplewise_norm | |||
featurewise_norm | |||
channel_shift | |||
channel_shift_multi | |||
drop | |||
array_to_img | |||
find_contours | |||
pt2map | |||
binary_dilation | |||
dilation | |||
binary_erosion | |||
erosion | |||
obj_box_coord_rescale | |||
obj_box_coords_rescale | |||
obj_box_coord_scale_to_pixelunit | |||
obj_box_coord_centroid_to_upleft_butright | |||
obj_box_coord_upleft_butright_to_centroid | |||
obj_box_coord_centroid_to_upleft | |||
obj_box_coord_upleft_to_centroid | |||
parse_darknet_ann_str_to_list | |||
parse_darknet_ann_list_to_cls_box | |||
obj_box_horizontal_flip | |||
obj_box_imresize | |||
obj_box_crop | |||
obj_box_shift | |||
obj_box_zoom | |||
keypoint_random_crop | |||
keypoint_resize_random_crop | |||
keypoint_random_rotate | |||
keypoint_random_flip | |||
keypoint_random_resize | |||
keypoint_random_resize_shortestedge | |||
pad_sequences | |||
remove_pad_sequences | |||
process_sequences | |||
sequences_add_start_id | |||
sequences_add_end_id | |||
sequences_add_end_id_after_pad | |||
sequences_get_mask | |||
.. | |||
Threading | |||
------------ | |||
.. autofunction:: threading_data | |||
Affine Transform | |||
---------------- | |||
Python can be FAST | |||
^^^^^^^^^^^^^^^^^^ | |||
Image augmentation is a critical step in deep learning. | |||
Though TensorFlow has provided ``tf.image``, | |||
image augmentation often remains a key bottleneck.
``tf.image`` has three limitations: | |||
- Real-world visual tasks such as object detection, segmentation, and pose estimation | |||
must cope with image meta-data (e.g., coordinates). | |||
These data are beyond ``tf.image`` | |||
which processes images as tensors. | |||
- ``tf.image`` operators
break the pure Python programming experience (i.e., users have to
use ``tf.py_func`` in order to call image functions written in Python); however,
frequent use of ``tf.py_func`` slows down TensorFlow,
making it hard for users to balance flexibility and performance.
- The ``tf.image`` API is inflexible. Image operations are
performed in a fixed order, which makes them hard to jointly optimize. More importantly,
sequential image operations can significantly
reduce the quality of images, thus affecting training accuracy.
TensorLayer addresses these limitations by providing a | |||
high-performance image augmentation API in Python. | |||
This API is based on affine transformation and ``cv2.warpAffine``.
It allows you to combine multiple image processing functions into | |||
a single matrix operation. This combined operation | |||
is executed by the fast ``cv2`` library, offering a 78x performance improvement (observed in
`openpose-plus <https://github.com/tensorlayer/openpose-plus>`_ for example). | |||
The following example illustrates the rationale | |||
behind this tremendous speed up. | |||
Example | |||
^^^^^^^ | |||
The source code of complete examples can be found \ | |||
`here <https://github.com/tensorlayer/tensorlayer/tree/master/examples/data_process/tutorial_fast_affine_transform.py>`__. | |||
The following is a typical Python program that applies rotation, shifting, flipping, zooming and shearing to an image, | |||
.. code-block:: python | |||
image = tl.vis.read_image('tiger.jpeg') | |||
xx = tl.prepro.rotation(image, rg=-20, is_random=False) | |||
xx = tl.prepro.flip_axis(xx, axis=1, is_random=False) | |||
xx = tl.prepro.shear2(xx, shear=(0., -0.2), is_random=False) | |||
xx = tl.prepro.zoom(xx, zoom_range=0.8) | |||
xx = tl.prepro.shift(xx, wrg=-0.1, hrg=0, is_random=False) | |||
tl.vis.save_image(xx, '_result_slow.png') | |||
However, by leveraging affine transformation, image operations can be combined into one: | |||
.. code-block:: python | |||
# 1. Create required affine transformation matrices | |||
M_rotate = tl.prepro.affine_rotation_matrix(angle=20) | |||
M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1) | |||
h, w, _ = image.shape  # the shift matrix needs the image height and width
M_shift = tl.prepro.affine_shift_matrix(wrg=0.1, hrg=0, h=h, w=w)
M_shear = tl.prepro.affine_shear_matrix(x_shear=0.2, y_shear=0) | |||
M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=0.8) | |||
# 2. Combine matrices | |||
# NOTE: operations are applied in a reversed order (i.e., rotation is performed first) | |||
M_combined = M_shift.dot(M_zoom).dot(M_shear).dot(M_flip).dot(M_rotate) | |||
# 3. Convert the matrix from Cartesian coordinates (the origin in the middle of image) | |||
# to image coordinates (the origin on the top-left of image) | |||
transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h) | |||
# 4. Transform the image using a single operation | |||
result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster | |||
tl.vis.save_image(result, '_result_fast.png') | |||
The following figure illustrates the rationale behind combined affine transformation.
.. image:: ../images/affine_transform_why.jpg | |||
:width: 100 % | |||
:align: center | |||
Using combined affine transformation has two key benefits. First, it allows \
you to leverage a pure Python API to achieve orders of magnitude of speed-up in image augmentation, \
and thus prevent data pre-processing from becoming a bottleneck in training. \
Second, performing sequential image transformations requires multiple image interpolations. \
This produces low-quality input images. In contrast, a combined transformation performs the \
interpolation only once, and thus \
preserves the content of the image. The following figure illustrates these two benefits:
.. image:: ../images/affine_transform_comparison.jpg | |||
:width: 100 % | |||
:align: center | |||
The major reason combined affine transformation is fast is that it has lower computational complexity.
Assume we have ``k`` affine transformations ``T1, ..., Tk``, where each ``Ti`` can be represented by a 3x3 matrix.
The sequential transformation can be represented as ``y = Tk (... T1(x))``,
and its time complexity is ``O(k N)``, where ``N`` is the cost of applying one transformation to image ``x``.
``N`` is linear in the size of ``x``.
For the combined transformation ``y = (Tk ... T1) (x)``,
the time complexity is ``O(27(k - 1) + N) = max{O(27k), O(N)} = O(N)`` (assuming ``27k << N``), where ``27 = 3^3`` is the cost of combining two transformations.
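This identity is simply matrix associativity; here is a quick NumPy check for a single point (a hedged illustration, not TensorLayer code):

.. code-block:: python

    import numpy as np

    # Shift by (5, 3) and rotate by 20 degrees, in homogeneous coordinates.
    T1 = np.array([[1., 0., 5.],
                   [0., 1., 3.],
                   [0., 0., 1.]])
    a = np.deg2rad(20)
    T2 = np.array([[np.cos(a), -np.sin(a), 0.],
                   [np.sin(a),  np.cos(a), 0.],
                   [0., 0., 1.]])
    x = np.array([2., 1., 1.])  # the point (2, 1)
    # Applying T1 then T2 gives the same result as the combined matrix,
    # but the combined matrix is built once and applied in a single pass.
    assert np.allclose(T2 @ (T1 @ x), (T2 @ T1) @ x)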
Get rotation matrix | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_rotation_matrix | |||
Get horizontal flipping matrix | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_horizontal_flip_matrix | |||
Get vertical flipping matrix | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_vertical_flip_matrix | |||
Get shifting matrix | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_shift_matrix | |||
Get shearing matrix | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_shear_matrix | |||
Get zooming matrix | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_zoom_matrix | |||
Get respective zooming matrix | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_respective_zoom_matrix | |||
Cartesian to image coordinates | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: transform_matrix_offset_center | |||
.. | |||
Apply image transform | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_transform | |||
Apply image transform (OpenCV)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: affine_transform_cv2 | |||
Apply keypoint transform | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: affine_transform_keypoints | |||
Images | |||
----------- | |||
Projective transform by points | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: projective_transform_by_points | |||
Rotation | |||
^^^^^^^^^ | |||
.. autofunction:: rotation | |||
.. autofunction:: rotation_multi | |||
Crop | |||
^^^^^^^^^ | |||
.. autofunction:: crop | |||
.. autofunction:: crop_multi | |||
Flip | |||
^^^^^^^^^ | |||
.. autofunction:: flip_axis | |||
.. autofunction:: flip_axis_multi | |||
Shift | |||
^^^^^^^^^ | |||
.. autofunction:: shift | |||
.. autofunction:: shift_multi | |||
Shear | |||
^^^^^^^^^ | |||
.. autofunction:: shear | |||
.. autofunction:: shear_multi | |||
Shear V2 | |||
^^^^^^^^^^^ | |||
.. autofunction:: shear2 | |||
.. autofunction:: shear_multi2 | |||
Swirl | |||
^^^^^^^^^ | |||
.. autofunction:: swirl | |||
.. autofunction:: swirl_multi | |||
Elastic transform | |||
^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: elastic_transform | |||
.. autofunction:: elastic_transform_multi | |||
Zoom | |||
^^^^^^^^^ | |||
.. autofunction:: zoom | |||
.. autofunction:: zoom_multi | |||
Respective Zoom | |||
^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: respective_zoom | |||
Brightness | |||
^^^^^^^^^^^^ | |||
.. autofunction:: brightness | |||
.. autofunction:: brightness_multi | |||
Brightness, contrast and saturation | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: illumination | |||
RGB to HSV | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: rgb_to_hsv | |||
HSV to RGB | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: hsv_to_rgb | |||
Adjust Hue | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: adjust_hue | |||
Resize | |||
^^^^^^^^^^^^ | |||
.. autofunction:: imresize | |||
Pixel value scale | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: pixel_value_scale | |||
Normalization | |||
^^^^^^^^^^^^^^^ | |||
.. autofunction:: samplewise_norm | |||
.. autofunction:: featurewise_norm | |||
Channel shift | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: channel_shift | |||
.. autofunction:: channel_shift_multi | |||
Noise | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: drop | |||
Numpy and PIL | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: array_to_img | |||
Find contours | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: find_contours | |||
Points to Image | |||
^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: pt2map | |||
Binary dilation | |||
^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: binary_dilation | |||
Greyscale dilation | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: dilation | |||
Binary erosion | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: binary_erosion | |||
Greyscale erosion | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: erosion | |||
Object detection | |||
------------------- | |||
Tutorial for Image Aug | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
Here is an example of image augmentation on the VOC dataset.
.. code-block:: python | |||
import tensorlayer as tl | |||
## download VOC 2012 dataset | |||
imgs_file_list, _, _, _, classes, _, _,\ | |||
_, objs_info_list, _ = tl.files.load_voc_dataset(dataset="2012") | |||
## parse annotation and convert it into list format | |||
ann_list = [] | |||
for info in objs_info_list: | |||
ann = tl.prepro.parse_darknet_ann_str_to_list(info) | |||
c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann) | |||
ann_list.append([c, b]) | |||
# read and save one image | |||
idx = 2 # you can select your own image | |||
image = tl.vis.read_image(imgs_file_list[idx]) | |||
tl.vis.draw_boxes_and_labels_to_image(image, ann_list[idx][0], | |||
ann_list[idx][1], [], classes, True, save_name='_im_original.png') | |||
# left right flip | |||
im_flip, coords = tl.prepro.obj_box_horizontal_flip(image, | |||
ann_list[idx][1], is_rescale=True, is_center=True, is_random=False) | |||
tl.vis.draw_boxes_and_labels_to_image(im_flip, ann_list[idx][0], | |||
coords, [], classes, True, save_name='_im_flip.png') | |||
# resize | |||
im_resize, coords = tl.prepro.obj_box_imresize(image, | |||
coords=ann_list[idx][1], size=[300, 200], is_rescale=True) | |||
tl.vis.draw_boxes_and_labels_to_image(im_resize, ann_list[idx][0], | |||
coords, [], classes, True, save_name='_im_resize.png') | |||
# crop | |||
im_crop, clas, coords = tl.prepro.obj_box_crop(image, ann_list[idx][0], | |||
ann_list[idx][1], wrg=200, hrg=200, | |||
is_rescale=True, is_center=True, is_random=False) | |||
tl.vis.draw_boxes_and_labels_to_image(im_crop, clas, coords, [], | |||
classes, True, save_name='_im_crop.png') | |||
# shift | |||
im_shift, clas, coords = tl.prepro.obj_box_shift(image, ann_list[idx][0],
ann_list[idx][1], wrg=0.1, hrg=0.1,
is_rescale=True, is_center=True, is_random=False)
tl.vis.draw_boxes_and_labels_to_image(im_shift, clas, coords, [],
classes, True, save_name='_im_shift.png') | |||
# zoom | |||
im_zoom, clas, coords = tl.prepro.obj_box_zoom(image, ann_list[idx][0], | |||
ann_list[idx][1], zoom_range=(1.3, 0.7), | |||
is_rescale=True, is_center=True, is_random=False) | |||
tl.vis.draw_boxes_and_labels_to_image(im_zoom, clas, coords, [], | |||
classes, True, save_name='_im_zoom.png') | |||
In practice, you may want to use a threading method to process batches of images, as follows.
.. code-block:: python | |||
import tensorlayer as tl | |||
import random | |||
batch_size = 64 | |||
im_size = [416, 416] | |||
n_data = len(imgs_file_list) | |||
jitter = 0.2 | |||
def _data_pre_aug_fn(data): | |||
im, ann = data | |||
clas, coords = ann | |||
## change image brightness, contrast and saturation randomly | |||
im = tl.prepro.illumination(im, gamma=(0.5, 1.5), | |||
contrast=(0.5, 1.5), saturation=(0.5, 1.5), is_random=True) | |||
## flip randomly | |||
im, coords = tl.prepro.obj_box_horizontal_flip(im, coords, | |||
is_rescale=True, is_center=True, is_random=True) | |||
## randomly resize and crop the image; this has a similar effect to random zoom
tmp0 = random.randint(1, int(im_size[0]*jitter)) | |||
tmp1 = random.randint(1, int(im_size[1]*jitter)) | |||
im, coords = tl.prepro.obj_box_imresize(im, coords, | |||
[im_size[0]+tmp0, im_size[1]+tmp1], is_rescale=True, | |||
interp='bicubic') | |||
im, clas, coords = tl.prepro.obj_box_crop(im, clas, coords, | |||
wrg=im_size[1], hrg=im_size[0], is_rescale=True, | |||
is_center=True, is_random=True) | |||
## rescale value from [0, 255] to [-1, 1] (optional) | |||
im = im / 127.5 - 1 | |||
return im, [clas, coords] | |||
# randomly read a batch of image and the corresponding annotations | |||
idxs = tl.utils.get_random_int(min=0, max=n_data-1, number=batch_size)
b_im_path = [imgs_file_list[i] for i in idxs]
b_images = tl.prepro.threading_data(b_im_path, fn=tl.vis.read_image)
b_ann = [ann_list[i] for i in idxs]
# threading process | |||
data = tl.prepro.threading_data([_ for _ in zip(b_images, b_ann)], | |||
_data_pre_aug_fn) | |||
b_images2 = [d[0] for d in data] | |||
b_ann = [d[1] for d in data] | |||
# save all images | |||
for i in range(len(b_images)): | |||
tl.vis.draw_boxes_and_labels_to_image(b_images[i],
ann_list[idxs[i]][0], ann_list[idxs[i]][1], [],
classes, True, save_name='_bbox_vis_%d_original.png' % i)
tl.vis.draw_boxes_and_labels_to_image((b_images2[i]+1)*127.5, | |||
b_ann[i][0], b_ann[i][1], [], classes, True, | |||
save_name='_bbox_vis_%d.png' % i) | |||
Image Aug with TF Dataset API | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
- Example code for VOC `here <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_tf_dataset_voc.py>`__. | |||
Coordinate pixel unit to percentage | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_coord_rescale | |||
Coordinates pixel unit to percentage | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_coords_rescale | |||
Coordinate percentage to pixel unit | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_coord_scale_to_pixelunit | |||
Coordinate [x_center, y_center, w, h] to up-left bottom-right
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_coord_centroid_to_upleft_butright | |||
Coordinate up-left bottom-right to [x_center, y_center, w, h]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_coord_upleft_butright_to_centroid | |||
Coordinate [x_center, y_center, w, h] to up-left-width-height
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: obj_box_coord_centroid_to_upleft | |||
Coordinate up-left-width-height to [x_center, y_center, w, h]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: obj_box_coord_upleft_to_centroid | |||
Darknet format string to list | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: parse_darknet_ann_str_to_list | |||
Darknet format split class and coordinate | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: parse_darknet_ann_list_to_cls_box | |||
Image Aug - Flip | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_horizontal_flip | |||
Image Aug - Resize | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_imresize | |||
Image Aug - Crop | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_crop | |||
Image Aug - Shift | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_shift | |||
Image Aug - Zoom | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: obj_box_zoom | |||
Keypoints | |||
------------ | |||
Image Aug - Crop | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: keypoint_random_crop | |||
Image Aug - Resize then Crop | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: keypoint_resize_random_crop | |||
Image Aug - Rotate | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: keypoint_random_rotate | |||
Image Aug - Flip | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: keypoint_random_flip | |||
Image Aug - Resize | |||
^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: keypoint_random_resize | |||
Image Aug - Resize Shortest Edge | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: keypoint_random_resize_shortestedge | |||
Sequence | |||
--------- | |||
More related functions can be found in ``tensorlayer.nlp``. | |||
Padding | |||
^^^^^^^^^ | |||
.. autofunction:: pad_sequences | |||
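A quick sketch of padding a ragged batch to a common length (the keyword names mirror the function documentation above; treat them as assumptions):

.. code-block:: python

    import tensorlayer as tl

    sequences = [[1, 1, 1, 1, 1], [2, 2, 2], [3, 3]]
    padded = tl.prepro.pad_sequences(sequences, maxlen=None,
                                     padding='post', value=0.)
    # -> [[1 1 1 1 1], [2 2 2 0 0], [3 3 0 0 0]]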
Remove Padding | |||
^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: remove_pad_sequences | |||
Process | |||
^^^^^^^^^ | |||
.. autofunction:: process_sequences | |||
Add Start ID | |||
^^^^^^^^^^^^^^^ | |||
.. autofunction:: sequences_add_start_id | |||
Add End ID | |||
^^^^^^^^^^^^^^^ | |||
.. autofunction:: sequences_add_end_id | |||
Add End ID after pad | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: sequences_add_end_id_after_pad | |||
Get Mask | |||
^^^^^^^^^ | |||
.. autofunction:: sequences_get_mask |
@@ -0,0 +1,33 @@ | |||
API - Reinforcement Learning | |||
============================== | |||
Reinforcement Learning. | |||
.. automodule:: tensorlayer.rein | |||
.. autosummary:: | |||
discount_episode_rewards | |||
cross_entropy_reward_loss | |||
log_weight | |||
choice_action_by_probs | |||
Reward functions | |||
--------------------- | |||
.. autofunction:: discount_episode_rewards | |||
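A sketch of what the function computes: each step's return is the discounted sum of the rewards that follow it (the keyword names are assumptions; see the documentation above):

.. code-block:: python

    import numpy as np
    import tensorlayer as tl

    rewards = np.asarray([0., 0., 1.])
    returns = tl.rein.discount_episode_rewards(rewards, gamma=0.9)
    # -> [0.81, 0.9, 1.0]: the final reward discounted back through earlier steps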
Cost functions | |||
--------------------- | |||
Weighted Cross Entropy | |||
^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: cross_entropy_reward_loss | |||
Log weight | |||
^^^^^^^^^^^^^^ | |||
.. autofunction:: log_weight | |||
Sampling functions | |||
--------------------- | |||
.. autofunction:: choice_action_by_probs |
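A sketch of sampling an action from a probability vector (with no ``action_list`` the result is assumed to be an integer index; see the documentation above):

.. code-block:: python

    import tensorlayer as tl

    probs = [0.1, 0.7, 0.2]                         # e.g. a policy network output
    action = tl.rein.choice_action_by_probs(probs)  # most often returns 1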
@@ -0,0 +1,73 @@ | |||
API - Utility | |||
======================== | |||
.. automodule:: tensorlayer.utils | |||
.. autosummary:: | |||
fit | |||
test | |||
predict | |||
evaluation | |||
class_balancing_oversample | |||
get_random_int | |||
dict_to_one | |||
list_string_to_dict | |||
flatten_list | |||
exit_tensorflow | |||
open_tensorboard | |||
set_gpu_fraction | |||
Training, testing and predicting | |||
---------------------------------- | |||
Training | |||
^^^^^^^^^^^ | |||
.. autofunction:: fit | |||
Evaluation | |||
^^^^^^^^^^^^^ | |||
.. autofunction:: test | |||
Prediction | |||
^^^^^^^^^^^^ | |||
.. autofunction:: predict | |||
Evaluation functions | |||
--------------------- | |||
.. autofunction:: evaluation | |||
Class balancing functions | |||
---------------------------- | |||
.. autofunction:: class_balancing_oversample | |||
Random functions | |||
---------------------------- | |||
.. autofunction:: get_random_int | |||
Dictionary and list | |||
-------------------- | |||
Set all items in dictionary to one | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: dict_to_one | |||
Convert list of string to dictionary | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: list_string_to_dict | |||
Flatten a list | |||
^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: flatten_list | |||
Close TF session and associated processes | |||
----------------------------------------- | |||
.. autofunction:: exit_tensorflow | |||
Open TensorBoard | |||
---------------- | |||
.. autofunction:: open_tensorboard | |||
Set GPU functions | |||
----------------- | |||
.. autofunction:: set_gpu_fraction |
@@ -0,0 +1,76 @@ | |||
API - Visualization | |||
================================ | |||
TensorFlow provides `TensorBoard <https://www.tensorflow.org/get_started/summaries_and_tensorboard>`_ | |||
to visualize models, activations, etc. Here we provide more functions for data visualization.
.. automodule:: tensorlayer.visualize | |||
.. autosummary:: | |||
read_image | |||
read_images | |||
save_image | |||
save_images | |||
draw_boxes_and_labels_to_image | |||
draw_mpii_pose_to_image | |||
draw_weights | |||
CNN2d | |||
frame | |||
images2d | |||
tsne_embedding | |||
Save and read images | |||
---------------------- | |||
Read one image | |||
^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: read_image | |||
Read multiple images | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: read_images | |||
Save one image | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_image | |||
Save multiple images | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: save_images | |||
Save image for object detection | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: draw_boxes_and_labels_to_image | |||
Save image for pose estimation (MPII) | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: draw_mpii_pose_to_image | |||
Visualize model parameters | |||
------------------------------ | |||
Visualize CNN 2d filter | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: CNN2d | |||
Visualize weights | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: draw_weights | |||
Visualize images | |||
----------------- | |||
Image by matplotlib | |||
^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: frame | |||
Images by matplotlib | |||
^^^^^^^^^^^^^^^^^^^^^ | |||
.. autofunction:: images2d | |||
Visualize embeddings | |||
-------------------- | |||
.. autofunction:: tsne_embedding |
@@ -0,0 +1,184 @@ | |||
.. _contributing: | |||
=============== | |||
Contributing | |||
=============== | |||
TensorLayer 2.0 is a major ongoing research project at CFCS, Peking University; the first version was established at Imperial College London in 2016. The goal of the project is to develop a compositional language with which complex learning systems
can be built through the composition of neural network modules.
Numerous contributors come from various institutions and companies, such as Imperial College London, Tsinghua University, Carnegie Mellon University, Stanford, the University of Technology of Compiegne, Google, Microsoft and Bloomberg.
You can easily open a Pull Request (PR) on `GitHub`_, every little step counts and will be credited. | |||
As an open-source project, we highly welcome and value contributions! | |||
**If you are interested in working with us, please contact us at:** `tensorlayer@gmail.com <mailto:tensorlayer@gmail.com>`_.
.. image:: ../../img/join_slack.png | |||
:width: 30 % | |||
:align: center | |||
:target: https://join.slack.com/t/tensorlayer/shared_invite/enQtMjUyMjczMzU2Njg4LWI0MWU0MDFkOWY2YjQ4YjVhMzI5M2VlZmE4YTNhNGY1NjZhMzUwMmQ2MTc0YWRjMjQzMjdjMTg2MWQ2ZWJhYzc | |||
Project Maintainers | |||
-------------------------- | |||
The TensorLayer project was started by `Hao Dong <https://zsdonghao.github.io>`_ at Imperial College London in June 2016. | |||
For TensorLayer 2.x, it is now actively developed and maintained by the following people, each of whom has more than 50 contributions:
- **Hao Dong** (`@zsdonghao <https://github.com/zsdonghao>`_) - `<https://zsdonghao.github.io>`_ | |||
- **Jingqing Zhang** (`@JingqingZ <https://github.com/JingqingZ>`_) - `<https://jingqingz.github.io>`_ | |||
- **Rundi Wu** (`@ChrisWu1997 <https://github.com/ChrisWu1997>`_) - `<http://chriswu1997.github.io>`_ | |||
- **Ruihai Wu** (`@warshallrho <https://github.com/warshallrho>`_) - `<https://warshallrho.github.io/>`_ | |||
For TensorLayer 1.x, it was actively developed and maintained by the following people *(in alphabetical order)*: | |||
- **Akara Supratak** (`@akaraspt <https://github.com/akaraspt>`_) - `<https://akaraspt.github.io>`_ | |||
- **Fangde Liu** (`@fangde <https://github.com/fangde>`_) - `<http://fangde.github.io/>`_ | |||
- **Guo Li** (`@lgarithm <https://github.com/lgarithm>`_) - `<https://lgarithm.github.io>`_ | |||
- **Hao Dong** (`@zsdonghao <https://github.com/zsdonghao>`_) - `<https://zsdonghao.github.io>`_ | |||
- **Jonathan Dekhtiar** (`@DEKHTIARJonathan <https://github.com/DEKHTIARJonathan>`_) - `<https://www.jonathandekhtiar.eu>`_ | |||
- **Luo Mai** (`@luomai <https://github.com/luomai>`_) - `<http://www.doc.ic.ac.uk/~lm111/>`_ | |||
- **Simiao Yu** (`@nebulaV <https://github.com/nebulaV>`_) - `<https://nebulav.github.io>`_ | |||
Numerous other contributors can be found in the `Github Contribution Graph <https://github.com/tensorlayer/tensorlayer/graphs/contributors>`_. | |||
What to contribute | |||
------------------ | |||
Your method and example | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
If you have a new method or example in terms of Deep learning or Reinforcement learning, you are welcome to contribute. | |||
* Provide your layers or examples, so everyone can use them.
* Explain how it would work, and link to a scientific paper if applicable. | |||
* Keep the scope as narrow as possible, to make it easier to implement. | |||
Report bugs | |||
~~~~~~~~~~~ | |||
Report bugs on `GitHub`_; we normally fix them within 5 hours.
If you are reporting a bug, please include: | |||
* your TensorLayer, TensorFlow and Python version. | |||
* steps to reproduce the bug, ideally reduced to a few Python commands. | |||
* the results you obtain, and the results you expected instead. | |||
If you are unsure whether the behavior you experience is a bug, or if you are | |||
unsure whether it is related to TensorLayer or TensorFlow, please just ask on `our | |||
mailing list`_ first. | |||
Fix bugs | |||
~~~~~~~~ | |||
Look through the GitHub issues for bug reports. Anything tagged with "bug" is
open to whoever wants to implement it. If you discover a bug in TensorLayer that you can
fix yourself, by all means feel free to just implement the fix without reporting it
first.
Write documentation | |||
~~~~~~~~~~~~~~~~~~~ | |||
Whenever you find something not explained well, misleading, glossed over or | |||
just wrong, please update it! The *Edit on GitHub* link on the top right of | |||
every documentation page and the *[source]* link for every documented entity | |||
in the API reference will help you to quickly locate the origin of any text. | |||
How to contribute | |||
----------------- | |||
Edit on GitHub | |||
~~~~~~~~~~~~~~ | |||
As a very easy way of just fixing issues in the documentation, use the *Edit | |||
on GitHub* link on the top right of a documentation page or the *[source]* link | |||
of an entity in the API reference to open the corresponding source file in | |||
GitHub, then click the *Edit this file* link to edit the file in your browser | |||
and send us a Pull Request. All you need for this is a free GitHub account. | |||
For any more substantial changes, please follow the steps below to setup | |||
TensorLayer for development. | |||
Documentation | |||
~~~~~~~~~~~~~ | |||
The documentation is generated with `Sphinx | |||
<http://sphinx-doc.org/latest/index.html>`_. To build it locally, run the | |||
following commands: | |||
.. code:: bash | |||
pip install Sphinx | |||
sphinx-quickstart | |||
cd docs | |||
make html | |||
If you want to re-generate the whole docs, run the following commands: | |||
.. code:: bash
cd docs | |||
make clean | |||
make html | |||
To write the docs, we recommend you to install `Local RTD VM <http://docs.readthedocs.io/en/latest/custom_installs/local_rtd_vm.html>`_. | |||
Afterwards, open ``docs/_build/html/index.html`` to view the documentation as | |||
it would appear on `readthedocs <http://tensorlayer.readthedocs.org/>`_. If you | |||
changed a lot and seem to get misleading error messages or warnings, run | |||
``make clean html`` to force Sphinx to recreate all files from scratch. | |||
When writing docstrings, follow existing documentation as much as possible to | |||
ensure consistency throughout the library. For additional information on the | |||
syntax and conventions used, please refer to the following documents: | |||
* `reStructuredText Primer <http://sphinx-doc.org/rest.html>`_ | |||
* `Sphinx reST markup constructs <http://sphinx-doc.org/markup/index.html>`_ | |||
* `A Guide to NumPy/SciPy Documentation <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_ | |||
Testing | |||
~~~~~~~ | |||
TensorLayer has a code coverage of 100%, which has proven very helpful in the past, | |||
but also creates some duties: | |||
* Whenever you change any code, you should test whether it breaks existing | |||
features by just running the test scripts. | |||
* Every bug you fix indicates a missing test case, so a proposed bug fix should | |||
come with a new test that fails without your fix. | |||
Sending Pull Requests | |||
~~~~~~~~~~~~~~~~~~~~~ | |||
When you're satisfied with your addition, the tests pass and the documentation | |||
looks good without any markup errors, commit your changes to a new branch, push | |||
that branch to your fork and send us a Pull Request via GitHub's web interface. | |||
All these steps are nicely explained on GitHub: | |||
https://guides.github.com/introduction/flow/ | |||
When filing your Pull Request, please include a description of what it does, to | |||
help us reviewing it. If it is fixing an open issue, say, issue #123, add | |||
*Fixes #123*, *Resolves #123* or *Closes #123* to the description text, so | |||
GitHub will close it when your request is merged. | |||
.. _Release: https://github.com/tensorlayer/tensorlayer/releases | |||
.. _GitHub: https://github.com/tensorlayer/tensorlayer | |||
.. _our mailing list: hao.dong11@imperial.ac.uk |
@@ -0,0 +1,121 @@ | |||
.. _example: | |||
============ | |||
Examples | |||
============ | |||
We list some examples here, but more tutorials and applications can be found in `Github examples <https://github.com/tensorlayer/tensorlayer/tree/master/examples>`__ and `Awesome-TensorLayer <https://github.com/tensorlayer/awesome-tensorlayer>`_. | |||
Basics | |||
============ | |||
- Multi-layer perceptron (MNIST), simple usage. Classification task, see `tutorial_mnist_simple.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_simple.py>`__. | |||
- Multi-layer perceptron (MNIST), dynamic model. Classification with dropout using iterator, see `tutorial_mnist_mlp_dynamic.py method2 <https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py>`__. | |||
- Multi-layer perceptron (MNIST), static model. Classification with dropout using iterator, see `tutorial_mnist_mlp_static.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_mlp_static.py>`__. | |||
- Convolutional Network (CIFAR-10). Classification task, see `tutorial_cifar10_cnn_static.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_cifar10_cnn_static.py>`_. | |||
- TensorFlow Dataset API for object detection, see `here <https://github.com/tensorlayer/tensorlayer/blob/master/examples/data_process/tutorial_tf_dataset_voc.py>`__.
- Data augmentation with TFRecord. Effective way to load and pre-process data, see `tutorial_tfrecord*.py <https://github.com/tensorlayer/tensorlayer/tree/master/examples/data_process>`__ and `tutorial_cifar10_tfrecord.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/data_process/tutorial_tfrecord.py>`__. | |||
- Data augmentation with TensorLayer. See `tutorial_fast_affine_transform.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/data_process/tutorial_fast_affine_transform.py>`__ (for quick test only). | |||
Pretrained Models | |||
================== | |||
- VGG 16 (ImageNet). Classification task, see `tutorial_models_vgg16 <https://github.com/tensorlayer/tensorlayer/blob/master/examples/pretrained_cnn/tutorial_models_vgg16.py>`__. | |||
- VGG 19 (ImageNet). Classification task, see `tutorial_models_vgg19.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/pretrained_cnn/tutorial_vgg19.py>`__. | |||
- SqueezeNet (ImageNet). Model compression, see `tutorial_models_squeezenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/pretrained_cnn/tutorial_models_squeezenetv1.py>`__. | |||
- MobileNet (ImageNet). Model compression, see `tutorial_models_mobilenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/pretrained_cnn/tutorial_models_mobilenetv1.py>`__. | |||
- All pretrained models in `pretrained-models <https://github.com/tensorlayer/pretrained-models>`__. | |||
Vision | |||
================== | |||
- Arbitrary Style Transfer in Real-time with Adaptive Instance Normalization, see `examples <https://github.com/tensorlayer/adaptive-style-transfer>`__. | |||
- ArcFace: Additive Angular Margin Loss for Deep Face Recognition, see `InsightFace <https://github.com/auroua/InsightFace_TF>`__.
- BinaryNet. Model compression, see `mnist <https://github.com/tensorlayer/tensorlayer/blob/master/examples/quantized_net/tutorial_binarynet_mnist_cnn.py>`__ `cifar10 <https://github.com/tensorlayer/tensorlayer/blob/master/examples/quantized_net/tutorial_binarynet_cifar10_tfrecord.py>`__. | |||
- Ternary Weight Network. Model compression, see `mnist <https://github.com/tensorlayer/tensorlayer/blob/master/examples/quantized_net/tutorial_ternaryweight_mnist_cnn.py>`__ `cifar10 <https://github.com/tensorlayer/tensorlayer/blob/master/examples/quantized_net/tutorial_ternaryweight_cifar10_tfrecord.py>`__. | |||
- DoReFa-Net. Model compression, see `mnist <https://github.com/tensorlayer/tensorlayer/blob/master/examples/quantized_net/tutorial_dorefanet_mnist_cnn.py>`__ `cifar10 <https://github.com/tensorlayer/tensorlayer/blob/master/examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py>`__. | |||
- QuanCNN. Model compression, see `mnist <https://github.com/XJTUI-AIR-FALCON/tensorlayer/blob/master/examples/quantized_net/tutorial_quanconv_mnist.py>`__ `cifar10 <https://github.com/XJTUI-AIR-FALCON/tensorlayer/blob/master/examples/quantized_net/tutorial_quanconv_cifar10.py>`__.
- Wide ResNet (CIFAR) by `ritchieng <https://github.com/ritchieng/wideresnet-tensorlayer>`__. | |||
- `Spatial Transformer Networks <https://arxiv.org/abs/1506.02025>`__ by `zsdonghao <https://github.com/zsdonghao/Spatial-Transformer-Nets>`__. | |||
- `U-Net for brain tumor segmentation <https://github.com/zsdonghao/u-net-brain-tumor>`__ by `zsdonghao <https://github.com/zsdonghao/u-net-brain-tumor>`__. | |||
- Variational Autoencoder (VAE) for (CelebA) by `yzwxx <https://github.com/yzwxx/vae-celebA>`__. | |||
- Variational Autoencoder (VAE) for (MNIST) by `BUPTLdy <https://github.com/BUPTLdy/tl-vae>`__. | |||
- Image Captioning - Reimplementation of Google's `im2txt <https://github.com/tensorflow/models/tree/master/research/im2txt>`__ by `zsdonghao <https://github.com/zsdonghao/Image-Captioning>`__. | |||
Adversarial Learning | |||
======================== | |||
- DCGAN (CelebA). Generating images by `Deep Convolutional Generative Adversarial Networks <http://arxiv.org/abs/1511.06434>`__ by `zsdonghao <https://github.com/tensorlayer/dcgan>`__. | |||
- `Generative Adversarial Text to Image Synthesis <https://github.com/zsdonghao/text-to-image>`__ by `zsdonghao <https://github.com/zsdonghao/text-to-image>`__. | |||
- `Unsupervised Image to Image Translation with Generative Adversarial Networks <https://github.com/zsdonghao/Unsup-Im2Im>`__ by `zsdonghao <https://github.com/zsdonghao/Unsup-Im2Im>`__. | |||
- `Improved CycleGAN <https://github.com/luoxier/CycleGAN_Tensorlayer>`__ with resize-convolution by `luoxier <https://github.com/luoxier/CycleGAN_Tensorlayer>`__. | |||
- `Super Resolution GAN <https://arxiv.org/abs/1609.04802>`__ by `zsdonghao <https://github.com/tensorlayer/SRGAN>`__. | |||
- `BEGAN: Boundary Equilibrium Generative Adversarial Networks <http://arxiv.org/abs/1703.10717>`__ by `2wins <https://github.com/2wins/BEGAN-tensorlayer>`__. | |||
- `DAGAN: Fast Compressed Sensing MRI Reconstruction <https://github.com/nebulaV/DAGAN>`__ by `nebulaV <https://github.com/nebulaV/DAGAN>`__. | |||
Natural Language Processing | |||
============================== | |||
- Recurrent Neural Network (LSTM). Apply multiple LSTM to PTB dataset for language modeling, see `tutorial_ptb_lstm_state_is_tuple.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py>`__. | |||
- Word Embedding (Word2vec). Train a word embedding matrix, see `tutorial_word2vec_basic.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/text_word_embedding/tutorial_word2vec_basic.py>`__.
- Restore Embedding matrix. Restore a pre-train embedding matrix, see `tutorial_generate_text.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/text_generation/tutorial_generate_text.py>`__. | |||
- Text Generation. Generate new text scripts using an LSTM network, see `tutorial_generate_text.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/text_generation/tutorial_generate_text.py>`__.
- Chinese Text Anti-Spam by `pakrchen <https://github.com/pakrchen/text-antispam>`__. | |||
- `Chatbot in 200 lines of code <https://github.com/tensorlayer/seq2seq-chatbot>`__ for `Seq2Seq <http://tensorlayer.readthedocs.io/en/latest/modules/layers.html#simple-seq2seq>`__. | |||
- FastText Sentence Classification (IMDB), see `tutorial_imdb_fasttext.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/text_classification/tutorial_imdb_fasttext.py>`__ by `tomtung <https://github.com/tomtung>`__. | |||
Reinforcement Learning | |||
============================== | |||
- Policy Gradient / Network (Atari Ping Pong), see `tutorial_atari_pong.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/reinforcement_learning/tutorial_atari_pong.py>`__. | |||
- Deep Q-Network (Frozen lake), see `tutorial_frozenlake_dqn.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/reinforcement_learning/tutorial_frozenlake_dqn.py>`__. | |||
- Q-Table learning algorithm (Frozen lake), see `tutorial_frozenlake_q_table.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/reinforcement_learning/tutorial_frozenlake_q_table.py>`__. | |||
- Asynchronous Policy Gradient using TensorDB (Atari Ping Pong) by `nebulaV <https://github.com/akaraspt/tl_paper>`__. | |||
- AC for discrete action space (Cartpole), see `tutorial_cartpole_ac.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/reinforcement_learning/tutorial_cartpole_ac.py>`__. | |||
- A3C for continuous action space (Bipedal Walker), see `tutorial_bipedalwalker_a3c*.py <https://github.com/tensorlayer/tensorlayer/blob/master/examples/reinforcement_learning/tutorial_bipedalwalker_a3c_continuous_action.py>`__. | |||
- `DAGGER <https://www.cs.cmu.edu/%7Esross1/publications/Ross-AIStats11-NoRegret.pdf>`__ for (`Gym Torcs <https://github.com/ugo-nama-kun/gym_torcs>`__) by `zsdonghao <https://github.com/zsdonghao/Imitation-Learning-Dagger-Torcs>`__. | |||
- `TRPO <https://arxiv.org/abs/1502.05477>`__ for continuous and discrete action space by `jjkke88 <https://github.com/jjkke88/RL_toolbox>`__. | |||
Miscellaneous | |||
================= | |||
- `Sipeed <https://github.com/sipeed/Maix-EMC>`__ : Run TensorLayer on AI Chips | |||
.. | |||
- TensorDB by `fangde <https://github.com/fangde>`__ see `tl_paper <https://github.com/akaraspt/tl_paper>`__. | |||
- A simple web service - `TensorFlask <https://github.com/JoelKronander/TensorFlask>`__ by `JoelKronander <https://github.com/JoelKronander>`__. | |||
.. | |||
Applications | |||
============= | |||
There are some good applications implemented with TensorLayer.
You may be able to find useful examples for your project.
If you want to share your application, please contact tensorlayer@gmail.com.
1D CNN + LSTM for Biosignal | |||
--------------------------------- | |||
Author : `Akara Supratak <https://akaraspt.github.io>`__ | |||
Introduction | |||
^^^^^^^^^^^^ | |||
Implementation | |||
^^^^^^^^^^^^^^ | |||
Citation | |||
^^^^^^^^ | |||
.. _GitHub: https://github.com/tensorlayer/tensorlayer | |||
.. _Deeplearning Tutorial: http://deeplearning.stanford.edu/tutorial/ | |||
.. _Convolutional Neural Networks for Visual Recognition: http://cs231n.github.io/ | |||
.. _Neural Networks and Deep Learning: http://neuralnetworksanddeeplearning.com/ | |||
.. _TensorFlow tutorial: https://www.tensorflow.org/versions/r0.9/tutorials/index.html | |||
.. _Understand Deep Reinforcement Learning: http://karpathy.github.io/2016/05/31/rl/ | |||
.. _Understand Recurrent Neural Network: http://karpathy.github.io/2015/05/21/rnn-effectiveness/ | |||
.. _Understand LSTM Network: http://colah.github.io/posts/2015-08-Understanding-LSTMs/ | |||
.. _Word Representations: http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/ |
@@ -0,0 +1,79 @@ | |||
.. _faq: | |||
============ | |||
FAQ | |||
============ | |||
How to effectively learn TensorLayer | |||
===================================== | |||
No matter what stage you are at, we recommend you spend just 10 minutes
reading the source code of TensorLayer and the `Understand layer / Your layer <http://tensorlayer.readthedocs.io/en/stable/modules/layers.html>`__
page on this website; you will find the abstract methods are very simple for everyone.
Reading the source codes helps you to better understand TensorFlow and allows | |||
you to implement your own methods easily. For discussion, we recommend | |||
`Gitter <https://gitter.im/tensorlayer/Lobby#?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge>`__, | |||
`Help Wanted Issues <https://waffle.io/tensorlayer/tensorlayer>`__, | |||
`QQ group <https://github.com/tensorlayer/tensorlayer/blob/master/img/img_qq.png>`__ | |||
and `Wechat group <https://github.com/shorxp/tensorlayer-chinese/blob/master/docs/wechat_group.md>`__. | |||
Beginner | |||
----------- | |||
For people who are new to deep learning, the contributors provide a number of tutorials on this website; these tutorials will guide you through autoencoders, convolutional neural networks, recurrent neural networks, word embedding, deep reinforcement learning and more. If you already understand the basics of deep learning, we recommend you skip the tutorials and read the example code on `Github <https://github.com/tensorlayer/tensorlayer>`__, then implement an example from scratch.
Engineer | |||
------------ | |||
For people from industry, the contributors provide many format-consistent examples covering computer vision, natural language processing and reinforcement learning. Besides, many TensorFlow users have already implemented product-level examples including image captioning, semantic/instance segmentation, machine translation, chatbots and so on, which can be found online.
It is worth noting that `Tf-Slim <https://github.com/tensorflow/models/tree/master/slim#Pretrained>`__, a wrapper especially for computer vision, can be connected with TensorLayer seamlessly.
Therefore, you may be able to find examples that can be used in your project.
Researcher | |||
------------- | |||
For people from academia, TensorLayer was originally developed by PhD students who faced issues with other libraries when implementing novel algorithms. Installing TensorLayer in editable mode is recommended, so you can extend your methods inside TensorLayer.
For research related to image processing such as image captioning, visual QA and so on, you may find it very helpful to use the existing `Tf-Slim pre-trained models <https://github.com/tensorflow/models/tree/master/slim#Pretrained>`__ with TensorLayer (a special layer for connecting Tf-Slim is provided).
Install Master Version | |||
======================== | |||
To use all the new features of TensorLayer, you need to install the master version from Github.
Before that, make sure you have git installed.
.. code-block:: bash | |||
[stable version] pip install tensorlayer | |||
[master version] pip install git+https://github.com/tensorlayer/tensorlayer.git | |||
Editable Mode | |||
=============== | |||
- 1. Download the TensorLayer folder from Github.
- 2. Before editing the TensorLayer ``.py`` files:
- If your script and the TensorLayer folder are in the same folder, your script can access the new features as soon as you edit the ``.py`` files inside the TensorLayer folder.
- If your script and the TensorLayer folder are not in the same folder, run the following command in the folder containing ``setup.py`` before you edit the ``.py`` files inside the TensorLayer folder.
.. code-block:: bash | |||
pip install -e . | |||
Load Model | |||
=========== | |||
Note that ``tl.files.load_npz()`` can only load npz models saved by ``tl.files.save_npz()``.
If you have a model you want to load into your TensorLayer network, you can first assign your parameters to a list, in order,
then use ``tl.files.assign_params()`` to load the parameters into your TensorLayer model.
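A sketch of that flow (TensorLayer 1.x style, where a TensorFlow session is still required; the file names below are hypothetical):

.. code-block:: python

    import numpy as np
    import tensorlayer as tl

    # Collect your external weights as a list, in layer order.
    params = [np.load('W1.npy'), np.load('b1.npy')]  # hypothetical files
    # tl.files.assign_params(sess, params, network)  # network: your TL model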
.. _GitHub: https://github.com/tensorlayer/tensorlayer | |||
.. _Deeplearning Tutorial: http://deeplearning.stanford.edu/tutorial/ | |||
.. _Convolutional Neural Networks for Visual Recognition: http://cs231n.github.io/ | |||
.. _Neural Networks and Deep Learning: http://neuralnetworksanddeeplearning.com/ | |||
.. _TensorFlow tutorial: https://www.tensorflow.org/versions/r0.9/tutorials/index.html | |||
.. _Understand Deep Reinforcement Learning: http://karpathy.github.io/2016/05/31/rl/ | |||
.. _Understand Recurrent Neural Network: http://karpathy.github.io/2015/05/21/rnn-effectiveness/ | |||
.. _Understand LSTM Network: http://colah.github.io/posts/2015-08-Understanding-LSTMs/ | |||
.. _Word Representations: http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/ |
@@ -0,0 +1,76 @@ | |||
========================= | |||
Get Involved in Research | |||
========================= | |||
Ph.D. Position @ PKU
============================================================= | |||
Hi, I am `Hao Dong <https://zsdonghao.github.io/>`__, the founder of this project and a new faculty member in EECS, Peking University. I now have a few Ph.D. positions open each year for international students who would like to study AI. If you or your friends are interested, feel free to contact me.
PKU is a top-30 university in the global rankings. The application process is competitive, so applying early is recommended. For next year's application, please note that the deadline for the Chinese Government Scholarship is at the end of each year; please check this `link <http://www.isd.pku.edu.cn/info/1503/5676.htm>`__ for more details.
My homepage: `https://zsdonghao.github.io <https://zsdonghao.github.io/>`__ | |||
Contact: hao.dong [AT] pku.edu.cn | |||
Faculty Position @ PKU
============================================================= | |||
The Center on Frontiers of Computing Studies (CFCS), Peking University (PKU), China, is a new university initiative co-founded by Professors John Hopcroft (Turing Award winner) and Wen Gao (CAE, ACM/IEEE Fellow). The center aims at developing excellence on two fronts: research and education. On the research front, the center provides a world-class research environment where innovative and impactful research is the central aim, measured by professional reputation among world scholars, not by counting the number of publications or the amount of research funding. On the education front, the center is deeply involved in the Turing Class, an elite undergraduate program that draws the cream of the crop from the PKU undergraduate talent pool. New curricula and pedagogy are designed and practiced in this program, with the aim of cultivating a new generation of computer scientists and engineers who are solid in both theory and practice.
**Positions and Qualifications**
The center invites applications for tenured/tenure-track faculty positions. We are seeking applicants from all areas of Computer Science, spanning theoretical foundations, systems, software, and applications, with special interest in artificial intelligence and machine learning. We are especially interested in applicants conducting research at the frontiers of Computer Science with other disciplines, such as data science and engineering, as well as the mathematical, medical, physical, and social sciences.
Applicants are expected to have completed (or be completing) a Ph.D., to have demonstrated the ability to pursue a program of research, and to have a strong commitment to undergraduate and graduate teaching. A successful candidate will be expected to teach one to two courses at the undergraduate and graduate levels each semester, and to build and lead a team of undergraduate and graduate students in innovative research.
We are also seeking qualified candidates for postdoctoral positions. Candidates should have a Ph.D. in a relevant discipline, or expect a Ph.D. within a year, with a substantive record of research accomplishments and the ability to work collaboratively with faculty members in the center.
**To Apply** | |||
Applicants should send a full curriculum vitae, copies of 3-5 key publications, the names and contact information of 3-5 references, and a statement of research and teaching to CFCS_recruiting[at]pku[dot]edu[dot]cn. To expedite the process, please arrange to have the reference letters sent directly to the above email address.
Applications for a postdoctoral position should include a curriculum vitae, a brief statement of research, and the names and contact information of three to five references, and can be addressed directly to an individual faculty member.
We review applications monthly, immediately upon receipt of all application materials at the beginning of each month. However, applicants are strongly encouraged to submit complete applications sooner rather than later, as the positions are to be filled quickly.
Postdoc Position @ ICL
================================================== | |||
Data science is by nature at the core of all modern transdisciplinary scientific activities, as it involves the whole life cycle of data, from acquisition and exploration to analysis and communication of the results. Data science is not only concerned with the tools and methods to obtain, manage and analyse data: it is also about extracting value from data and translating it from asset to product.
Launched on 1 April 2014, the Data Science Institute (DSI) at Imperial College London aims to enhance Imperial's excellence in data-driven research across its faculties by fulfilling the objectives listed below.
The Data Science Institute is housed in purpose-built facilities in the heart of the Imperial College campus in South Kensington. This central location provides excellent access to collaborators across the College and across London.
- To act as a focal point for coordinating data science research at Imperial College by facilitating access to funding, engaging with global partners, and stimulating cross-disciplinary collaboration. | |||
- To develop data management and analysis technologies and services for supporting data driven research in the College. | |||
- To promote the training and education of a new generation of data scientists by developing and coordinating new degree courses, and by conducting public outreach programmes on data science.
- To advise the College on data strategy and policy by providing world-class data science expertise.
- To enable the translation of data science innovation through close collaboration with industry and support for commercialization.
If you are interested in working with us, please check our
`vacancies <https://www.imperial.ac.uk/data-science/get-involved/vacancies/>`__
and other ways to
`get involved <https://www.imperial.ac.uk/data-science/get-involved/>`__,
or feel free to
`contact us <https://www.imperial.ac.uk/data-science/get-involved/contact-us/>`__.
Software Engineer @ SurgicalAI.cn | |||
============================================================= | |||
SurgicalAI is a startup founded by data scientists and surgical robot experts from Imperial College. Our objective is to democratise surgery with AI. By combining 5G, AI and cloud computing, SurgicalAI is building a platform that enables junior surgeons to perform complex procedures. As one of the most impactful startups in this area, SurgicalAI is supported by NVIDIA, AWS and top surgeons around the world.
Currently based in Hangzhou, China, we are building digital solutions for cardiac surgery, such as TAVR and LAA, and for orthopedics, such as TKA and UNA. A demo can be found `here <http://demo5g.surgicalai.cn>`__.
We are actively looking for experts in robotic navigation, computer graphics and medical image analysis to join us in building a digitalized surgical service platform for the aging world.
Home Page: http://www.surgicalai.cn | |||
Demo Page: http://demo5g.surgicalai.cn | |||
Contact: liufangde@surgicalai.cn |
@@ -0,0 +1,217 @@ | |||
.. _getstartadvance: | |||
================== | |||
Advanced features | |||
================== | |||
Customizing layer | |||
================== | |||
Layers with weights | |||
---------------------- | |||
The fully-connected layer is `a = f(x*W + b)`. The simplest implementation is as follows; it can only support static models.
.. code-block:: python | |||
class Dense(Layer): | |||
"""The :class:`Dense` class is a fully connected layer. | |||
Parameters | |||
---------- | |||
n_units : int | |||
The number of units of this layer. | |||
act : activation function | |||
The activation function of this layer. | |||
name : None or str | |||
A unique layer name. If None, a unique name will be automatically generated. | |||
""" | |||
def __init__( | |||
self, | |||
n_units, # the number of units/channels of this layer | |||
act=None, # None: no activation, tf.nn.relu or 'relu': ReLU ... | |||
name=None, # the name of this layer (optional) | |||
): | |||
super(Dense, self).__init__(name, act=act) # auto naming, dense_1, dense_2 ... | |||
        self.n_units = n_units
        # default initializers, referenced later in build()
        self.W_init = tl.initializers.truncated_normal(stddev=0.1)
        self.b_init = tl.initializers.constant(value=0.0)
def build(self, inputs_shape): # initialize the model weights here | |||
shape = [inputs_shape[1], self.n_units] | |||
self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init) | |||
self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) | |||
def forward(self, inputs): # call function | |||
z = tf.matmul(inputs, self.W) + self.b | |||
if self.act: # is not None | |||
z = self.act(z) | |||
return z | |||
The full implementation is as follows; it supports both static and dynamic models, and allows users to control whether to use the bias and how to initialize the weights.
.. code-block:: python | |||
class Dense(Layer): | |||
"""The :class:`Dense` class is a fully connected layer. | |||
Parameters | |||
---------- | |||
n_units : int | |||
The number of units of this layer. | |||
act : activation function | |||
The activation function of this layer. | |||
W_init : initializer | |||
The initializer for the weight matrix. | |||
b_init : initializer or None | |||
The initializer for the bias vector. If None, skip biases. | |||
in_channels: int | |||
The number of channels of the previous layer. | |||
If None, it will be automatically detected when the layer is forwarded for the first time. | |||
name : None or str | |||
A unique layer name. If None, a unique name will be automatically generated. | |||
""" | |||
def __init__( | |||
self, | |||
n_units, | |||
act=None, | |||
W_init=tl.initializers.truncated_normal(stddev=0.1), | |||
b_init=tl.initializers.constant(value=0.0), | |||
in_channels=None, # the number of units/channels of the previous layer | |||
name=None, | |||
): | |||
# we feed activation function to the base layer, `None` denotes identity function | |||
# string (e.g., relu, sigmoid) will be converted into function. | |||
super(Dense, self).__init__(name, act=act) | |||
self.n_units = n_units | |||
self.W_init = W_init | |||
self.b_init = b_init | |||
self.in_channels = in_channels | |||
# in dynamic model, the number of input channel is given, we initialize the weights here | |||
if self.in_channels is not None: | |||
self.build(self.in_channels) | |||
self._built = True | |||
logging.info( | |||
"Dense %s: %d %s" % | |||
(self.name, self.n_units, self.act.__name__ if self.act is not None else 'No Activation') | |||
) | |||
def __repr__(self): # optional, for printing information | |||
actstr = self.act.__name__ if self.act is not None else 'No Activation' | |||
s = ('{classname}(n_units={n_units}, ' + actstr) | |||
if self.in_channels is not None: | |||
s += ', in_channels=\'{in_channels}\'' | |||
if self.name is not None: | |||
s += ', name=\'{name}\'' | |||
s += ')' | |||
return s.format(classname=self.__class__.__name__, **self.__dict__) | |||
def build(self, inputs_shape): # initialize the model weights here | |||
if self.in_channels: # if the number of input channel is given, use it | |||
shape = [self.in_channels, self.n_units] | |||
else: # otherwise, get it from static model | |||
self.in_channels = inputs_shape[1] | |||
shape = [inputs_shape[1], self.n_units] | |||
self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init) | |||
if self.b_init: # if b_init is None, no bias is applied | |||
self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) | |||
def forward(self, inputs): | |||
z = tf.matmul(inputs, self.W) | |||
if self.b_init: | |||
z = tf.add(z, self.b) | |||
if self.act: | |||
z = self.act(z) | |||
return z | |||
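A quick usage sketch of the layer above (sizes are illustrative): in a static model ``in_channels`` is detected automatically from the previous layer, while in a dynamic model it must be given explicitly so the weights can be built immediately.
.. code-block:: python
    # static model: in_channels is inferred when the layer is first forwarded
    ni = tl.layers.Input([None, 784])
    nn = Dense(n_units=800, act=tf.nn.relu)(ni)
    # dynamic model: in_channels is given, so build() runs inside __init__
    layer = Dense(n_units=800, act=tf.nn.relu, in_channels=784)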
Layers with train/test modes | |||
------------------------------ | |||
We use Dropout as an example here: | |||
.. code-block:: python | |||
class Dropout(Layer): | |||
""" | |||
The :class:`Dropout` class is a noise layer which randomly sets some
activations to zero according to a keeping probability.
Parameters | |||
---------- | |||
keep : float | |||
The keeping probability. | |||
The lower the keeping probability, the more activations are set to zero.
name : None or str | |||
A unique layer name. | |||
""" | |||
def __init__(self, keep, name=None): | |||
super(Dropout, self).__init__(name) | |||
self.keep = keep | |||
self.build() | |||
self._built = True | |||
logging.info("Dropout %s: keep: %f " % (self.name, self.keep)) | |||
def build(self, inputs_shape=None): | |||
pass # no weights in dropout layer | |||
def forward(self, inputs): | |||
if self.is_train: # this attribute is changed by Model.train() and Model.eval() described above | |||
outputs = tf.nn.dropout(inputs, rate=1 - (self.keep), name=self.name) | |||
else: | |||
outputs = inputs | |||
return outputs | |||
Pre-trained CNN | |||
================ | |||
Get entire CNN | |||
--------------- | |||
.. code-block:: python | |||
import tensorflow as tf | |||
import tensorlayer as tl | |||
import numpy as np | |||
from tensorlayer.models.imagenet_classes import class_names | |||
vgg = tl.models.vgg16(pretrained=True) | |||
img = tl.vis.read_image('data/tiger.jpeg') | |||
img = tl.prepro.imresize(img, (224, 224)).astype(np.float32) / 255 | |||
output = vgg(img, is_train=False) | |||
Get a part of CNN | |||
------------------ | |||
.. code-block:: python | |||
# get VGG without the last layer | |||
cnn = tl.models.vgg16(end_with='fc2_relu', mode='static').as_layer() | |||
# add one more layer and build a new model | |||
ni = tl.layers.Input([None, 224, 224, 3], name="inputs") | |||
nn = cnn(ni) | |||
nn = tl.layers.Dense(n_units=100, name='out')(nn) | |||
model = tl.models.Model(inputs=ni, outputs=nn) | |||
# train your own classifier (only update the last layer) | |||
train_weights = model.get_layer('out').all_weights | |||
Reuse CNN | |||
------------------ | |||
.. code-block:: python | |||
# in dynamic model, we can directly use the same model | |||
# in static model | |||
vgg_layer = tl.models.vgg16().as_layer() | |||
ni_1 = tl.layers.Input([None, 224, 224, 3]) | |||
ni_2 = tl.layers.Input([None, 224, 224, 3]) | |||
a_1 = vgg_layer(ni_1) | |||
a_2 = vgg_layer(ni_2) | |||
M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2]) | |||
@@ -0,0 +1,249 @@ | |||
.. _getstartmodel: | |||
=============== | |||
Define a model | |||
=============== | |||
TensorLayer provides two ways to define a model. | |||
A static model lets you build the model in a fluent way, while a dynamic model gives you full control over the forward process.
Static model | |||
=============== | |||
.. code-block:: python | |||
import tensorflow as tf | |||
from tensorlayer.layers import Input, Dropout, Dense | |||
from tensorlayer.models import Model | |||
def get_model(inputs_shape): | |||
ni = Input(inputs_shape) | |||
nn = Dropout(keep=0.8)(ni) | |||
nn = Dense(n_units=800, act=tf.nn.relu, name="dense1")(nn)  # "name" is optional
nn = Dropout(keep=0.8)(nn) | |||
nn = Dense(n_units=800, act=tf.nn.relu)(nn) | |||
nn = Dropout(keep=0.8)(nn) | |||
nn = Dense(n_units=10, act=None)(nn) | |||
M = Model(inputs=ni, outputs=nn, name="mlp")  # "name" is optional
return M | |||
MLP = get_model([None, 784]) | |||
MLP.eval() | |||
outputs = MLP(data) | |||
Dynamic model | |||
======================= | |||
In this case, you need to manually pass the output size of the previous layer to the new layer via ``in_channels``.
.. code-block:: python | |||
class CustomModel(Model): | |||
def __init__(self): | |||
super(CustomModel, self).__init__() | |||
self.dropout1 = Dropout(keep=0.8) | |||
self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784) | |||
self.dropout2 = Dropout(keep=0.8) | |||
self.dense2 = Dense(n_units=800, act=tf.nn.relu, in_channels=800) | |||
self.dropout3 = Dropout(keep=0.8) | |||
self.dense3 = Dense(n_units=10, act=None, in_channels=800) | |||
def forward(self, x, foo=False): | |||
z = self.dropout1(x) | |||
z = self.dense1(z) | |||
z = self.dropout2(z) | |||
z = self.dense2(z) | |||
z = self.dropout3(z) | |||
out = self.dense3(z) | |||
if foo: | |||
out = tf.nn.softmax(out) | |||
return out | |||
MLP = CustomModel() | |||
MLP.eval() | |||
outputs = MLP(data, foo=True) # controls the forward here | |||
outputs = MLP(data, foo=False) | |||
Switching train/test modes | |||
============================= | |||
.. code-block:: python | |||
# method 1: switch before forward | |||
Model.train() # enable dropout, batch norm moving avg ... | |||
output = Model(train_data) | |||
... # training code here | |||
Model.eval() # disable dropout, batch norm moving avg ... | |||
output = Model(test_data) | |||
... # testing code here | |||
# method 2: switch while forward | |||
output = Model(train_data, is_train=True) | |||
output = Model(test_data, is_train=False) | |||
Reuse weights | |||
======================= | |||
For a static model, call the layer multiple times during model creation:
.. code-block:: python | |||
# create siamese network | |||
def create_base_network(input_shape): | |||
'''Base network to be shared (eq. to feature extraction). | |||
''' | |||
input = Input(shape=input_shape) | |||
x = Flatten()(input) | |||
x = Dense(128, act=tf.nn.relu)(x) | |||
x = Dropout(0.9)(x) | |||
x = Dense(128, act=tf.nn.relu)(x) | |||
x = Dropout(0.9)(x) | |||
x = Dense(128, act=tf.nn.relu)(x) | |||
return Model(input, x) | |||
def get_siamese_network(input_shape): | |||
"""Create siamese network with shared base network as layer | |||
""" | |||
base_layer = create_base_network(input_shape).as_layer() # convert model as layer | |||
ni_1 = Input(input_shape) | |||
ni_2 = Input(input_shape) | |||
nn_1 = base_layer(ni_1) # call base_layer twice | |||
nn_2 = base_layer(ni_2) | |||
return Model(inputs=[ni_1, ni_2], outputs=[nn_1, nn_2]) | |||
siamese_net = get_siamese_network([None, 784]) | |||
For a dynamic model, call the layer multiple times in the forward function:
.. code-block:: python | |||
class MyModel(Model): | |||
def __init__(self): | |||
super(MyModel, self).__init__() | |||
self.dense_shared = Dense(n_units=800, act=tf.nn.relu, in_channels=784) | |||
self.dense1 = Dense(n_units=10, act=tf.nn.relu, in_channels=800) | |||
self.dense2 = Dense(n_units=10, act=tf.nn.relu, in_channels=800) | |||
self.cat = Concat() | |||
def forward(self, x): | |||
x1 = self.dense_shared(x) # call dense_shared twice | |||
x2 = self.dense_shared(x) | |||
x1 = self.dense1(x1) | |||
x2 = self.dense2(x2) | |||
out = self.cat([x1, x2]) | |||
return out | |||
model = MyModel() | |||
Print model information | |||
======================= | |||
.. code-block:: python | |||
print(MLP) # simply call print function | |||
# Model( | |||
# (_inputlayer): Input(shape=[None, 784], name='_inputlayer') | |||
# (dropout): Dropout(keep=0.8, name='dropout') | |||
# (dense): Dense(n_units=800, relu, in_channels='784', name='dense') | |||
# (dropout_1): Dropout(keep=0.8, name='dropout_1') | |||
# (dense_1): Dense(n_units=800, relu, in_channels='800', name='dense_1') | |||
# (dropout_2): Dropout(keep=0.8, name='dropout_2') | |||
# (dense_2): Dense(n_units=10, None, in_channels='800', name='dense_2') | |||
# ) | |||
import pprint | |||
pprint.pprint(MLP.config) # print the model architecture | |||
# {'inputs': '_inputlayer_1_node_0', | |||
# 'model_architecture': [{'args': {'dtype': tf.float32, | |||
# 'layer_type': 'normal', | |||
# 'name': '_inputlayer_1', | |||
# 'shape': [None, 784]}, | |||
# 'class': '_InputLayer', | |||
# 'prev_layer': None}, | |||
# {'args': {'keep': 0.8, | |||
# 'layer_type': 'normal', | |||
# 'name': 'dropout_1'}, | |||
# 'class': 'Dropout', | |||
# 'prev_layer': ['_inputlayer_1_node_0']}, | |||
# {'args': {'act': 'relu', | |||
# 'layer_type': 'normal', | |||
# 'n_units': 800, | |||
# 'name': 'dense_1'}, | |||
# 'class': 'Dense', | |||
# 'prev_layer': ['dropout_1_node_0']}, | |||
# {'args': {'keep': 0.8, | |||
# 'layer_type': 'normal', | |||
# 'name': 'dropout_2'}, | |||
# 'class': 'Dropout', | |||
# 'prev_layer': ['dense_1_node_0']}, | |||
# {'args': {'act': 'relu', | |||
# 'layer_type': 'normal', | |||
# 'n_units': 800, | |||
# 'name': 'dense_2'}, | |||
# 'class': 'Dense', | |||
# 'prev_layer': ['dropout_2_node_0']}, | |||
# {'args': {'keep': 0.8, | |||
# 'layer_type': 'normal', | |||
# 'name': 'dropout_3'}, | |||
# 'class': 'Dropout', | |||
# 'prev_layer': ['dense_2_node_0']}, | |||
# {'args': {'act': None, | |||
# 'layer_type': 'normal', | |||
# 'n_units': 10, | |||
# 'name': 'dense_3'}, | |||
# 'class': 'Dense', | |||
# 'prev_layer': ['dropout_3_node_0']}], | |||
# 'name': 'mlp', | |||
# 'outputs': 'dense_3_node_0', | |||
# 'version_info': {'backend': 'tensorflow', | |||
# 'backend_version': '2.0.0-alpha0', | |||
# 'save_date': None, | |||
# 'tensorlayer_version': '2.1.0', | |||
# 'training_device': 'gpu'}} | |||
Get specific weights | |||
======================= | |||
We can get specific weights by indexing or by layer name.
.. code-block:: python | |||
# indexing | |||
all_weights = MLP.all_weights | |||
some_weights = MLP.all_weights[1:3] | |||
# naming | |||
some_weights = MLP.get_layer('dense1').all_weights | |||
Save and restore model | |||
======================= | |||
We provide two ways to save and restore models:
Save weights only | |||
------------------ | |||
.. code-block:: python | |||
MLP.save_weights('model_weights.h5') # by default, file will be in hdf5 format | |||
MLP.load_weights('model_weights.h5') | |||
Save model architecture and weights (optional) | |||
----------------------------------------------- | |||
.. code-block:: python | |||
# When using Model.load(), there is no need to reimplement or declare the architecture of the model explicitly in code | |||
MLP.save('model.h5', save_weights=True) | |||
MLP = Model.load('model.h5', load_weights=True) | |||
@@ -0,0 +1,210 @@ | |||
.. _installation: | |||
============ | |||
Installation | |||
============ | |||
TensorLayer has some prerequisites that need to be installed first, including
`TensorFlow`_, numpy and matplotlib. For GPU
support, CUDA and cuDNN are required.
If you run into any trouble, please check the `TensorFlow installation
instructions <https://www.tensorflow.org/versions/master/get_started/os_setup.html>`_,
which cover installing TensorFlow on a range of operating systems including
macOS, Linux and Windows, or ask for help at `tensorlayer@gmail.com <tensorlayer@gmail.com>`_
or via the `FAQ <http://tensorlayer.readthedocs.io/en/latest/user/more.html>`_.
Install TensorFlow | |||
========================= | |||
.. code-block:: bash | |||
pip3 install tensorflow-gpu==2.0.0-beta1 # specific version (YOU SHOULD INSTALL THIS ONE NOW) | |||
pip3 install tensorflow-gpu # GPU version | |||
pip3 install tensorflow # CPU version | |||
The installation instructions on the `TensorFlow`_ website are very detailed.
However, a few things need to be considered. For example, `TensorFlow`_ officially supports GPU acceleration on Linux, macOS and Windows at present. For the ARM processor architecture, you need to build TensorFlow from source.
Install TensorLayer | |||
========================= | |||
For stable version: | |||
.. code-block:: bash | |||
pip3 install tensorlayer | |||
For the latest version, please install from GitHub:
.. code-block:: bash | |||
pip3 install git+https://github.com/tensorlayer/tensorlayer.git | |||
or | |||
pip3 install https://github.com/tensorlayer/tensorlayer/archive/master.zip | |||
For developers, clone the repository to your local machine and put it alongside your project scripts.
.. code-block:: bash | |||
git clone https://github.com/tensorlayer/tensorlayer.git | |||
Alternatively, you can build from the source. | |||
.. code-block:: bash | |||
# First clone the repository and change the current directory to the newly cloned repository | |||
git clone https://github.com/tensorlayer/tensorlayer.git | |||
cd tensorlayer | |||
# Install virtualenv if necessary | |||
pip install virtualenv | |||
# Then create a virtualenv called `venv` | |||
virtualenv venv | |||
# Activate the virtualenv | |||
## Linux: | |||
source venv/bin/activate | |||
## Windows: | |||
venv\Scripts\activate.bat | |||
# basic installation | |||
pip install . | |||
# ============= IF TENSORFLOW IS NOT ALREADY INSTALLED ============= # | |||
# for a machine **without** an NVIDIA GPU | |||
pip install -e ".[all_cpu_dev]" | |||
# for a machine **with** an NVIDIA GPU | |||
pip install -e ".[all_gpu_dev]" | |||
If you want to install TensorLayer 1.X, the simplest way is as follows; it will also install numpy and matplotlib automatically.
.. code-block:: bash | |||
[stable version] pip install tensorlayer==1.x.x | |||
However, if you want to modify or extend TensorLayer 1.X, you can download the repository from
`GitHub`_ and install it as follows.
.. code-block:: bash | |||
# from the root of the git tree:
pip install -e . | |||
This command runs ``setup.py`` to install TensorLayer. The ``-e`` stands for
editable: you can then edit the source code in the ``tensorlayer`` folder and
``import`` the edited TensorLayer.
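To confirm that the editable install picks up your local copy, one quick optional check:
.. code-block:: python
    import tensorlayer
    print(tensorlayer.__file__)  # should point into your cloned folder, not site-packages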
GPU support | |||
========================== | |||
Thanks to NVIDIA's support, training a fully connected network on a
GPU may be 10 to 20 times faster than training it on a CPU.
For convolutional networks, the speedup can be around 50 times.
This requires an NVIDIA GPU with CUDA and cuDNN support. | |||
CUDA | |||
---- | |||
The TensorFlow website also explains how to install CUDA and cuDNN; please see
`TensorFlow GPU Support <https://www.tensorflow.org/versions/master/get_started/os_setup.html#optional-install-cuda-gpus-on-linux>`_.
The latest CUDA can be downloaded and installed from the NVIDIA website:
- `CUDA download and install <https://developer.nvidia.com/cuda-downloads>`_ | |||
After installation, make sure ``/usr/local/cuda/bin`` is in your ``PATH`` (use ``echo $PATH`` to check), and
``nvcc --version`` works. Also ensure ``/usr/local/cuda/lib64`` is in your | |||
``LD_LIBRARY_PATH``, so the CUDA libraries can be found. | |||
If CUDA is set up correctly, the following command should print some GPU information on | |||
the terminal: | |||
.. code-block:: bash | |||
python -c "import tensorflow" | |||
cuDNN | |||
-------- | |||
Apart from CUDA, NVIDIA also provides a library of common neural network operations that especially
speeds up Convolutional Neural Networks (CNNs). Again, it can be obtained from
NVIDIA after registering as a developer (this can take a while).
The latest cuDNN can be downloaded and installed from the NVIDIA website:
- `cuDNN download and install <https://developer.nvidia.com/cudnn>`_ | |||
To install it, copy the ``*.h`` files to ``/usr/local/cuda/include`` and the | |||
``lib*`` files to ``/usr/local/cuda/lib64``. | |||
.. _TensorFlow: https://www.tensorflow.org/versions/master/get_started/os_setup.html | |||
.. _GitHub: https://github.com/tensorlayer/tensorlayer | |||
.. _TensorLayer: https://github.com/tensorlayer/tensorlayer/ | |||
Windows User | |||
============== | |||
TensorLayer is built on top of the Python version of TensorFlow, so please install Python first.
Note: We highly recommend installing Anaconda. The lowest supported Python version is 3.5.
`Anaconda download <https://www.continuum.io/downloads>`_ | |||
GPU support | |||
------------ | |||
Thanks to NVIDIA's support, training a fully connected network on a GPU may be 10 to 20 times faster than training it on a CPU; for convolutional networks, the speedup can be around 50 times. This requires an NVIDIA GPU with CUDA and cuDNN support.
1. Installing Microsoft Visual Studio | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
You should install Microsoft Visual Studio (VS) before installing CUDA. The lowest supported version is VS2010; we recommend installing VS2015 or VS2013. CUDA 7.5 supports VS2010, VS2012 and VS2013, while CUDA 8.0 also supports VS2015.
2. Installing CUDA | |||
^^^^^^^^^^^^^^^^^^^^^^^ | |||
The latest CUDA can be downloaded and installed from the NVIDIA website:
`CUDA download <https://developer.nvidia.com/CUDA-downloads>`_ | |||
We do not recommend modifying the default installation directory. | |||
3. Installing cuDNN | |||
^^^^^^^^^^^^^^^^^^^^^^ | |||
The NVIDIA CUDA® Deep Neural Network library (cuDNN) is a GPU-accelerated library of primitives for deep neural networks. The latest cuDNN can be downloaded and extracted from the NVIDIA website:
`cuDNN download <https://developer.nvidia.com/cuDNN>`_ | |||
After extracting cuDNN, you will get three folders (bin, lib, include). These folders should be copied into the CUDA installation directory. (The default installation directory is `C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0`.)
Installing TensorLayer | |||
------------------------ | |||
For TensorLayer, please refer to the steps mentioned above. | |||
.. code-block:: bash | |||
pip install tensorflow #CPU version | |||
pip install tensorflow-gpu #GPU version (GPU version and CPU version just choose one) | |||
pip install tensorlayer #Install tensorlayer | |||
Issue | |||
======= | |||
If you get the following output when importing tensorlayer, please read the `FAQ <http://tensorlayer.readthedocs.io/en/latest/user/more.html>`_.
.. code-block:: bash | |||
_tkinter.TclError: no display name and no $DISPLAY environment variable |
@@ -0,0 +1,43 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
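# Train an MLP on MNIST with the high-level tl.models.Model training loop,
# building the network from a LayerList of Dense layers.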
from tensorlayer.layers import LayerList | |||
from tensorlayer.layers import Dense | |||
import tensorlayer as tl | |||
import numpy as np | |||
layer_list = [] | |||
layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1')) | |||
layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2')) | |||
layer_list.append(Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3')) | |||
MLP = LayerList(layer_list) | |||
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) | |||
def generator_train(): | |||
inputs = X_train | |||
targets = y_train | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
yield (_input, np.array(_target)) | |||
n_epoch = 50 | |||
batch_size = 128 | |||
print_freq = 2 | |||
shuffle_buffer_size = 128 | |||
# train_weights = MLP.trainable_weights | |||
# print(train_weights) | |||
optimizer = tl.optimizers.Momentum(0.05, 0.9) | |||
train_ds = tl.dataflow.FromGenerator(
    generator_train, output_types=(tl.float32, tl.int32), column_names=['data', 'label']
)
train_ds = tl.dataflow.Shuffle(train_ds, shuffle_buffer_size)
train_ds = tl.dataflow.Batch(train_ds, batch_size)
model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) | |||
model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) | |||
model.save_weights('./model.npz', format='npz_dict') | |||
model.load_weights('./model.npz', format='npz_dict') |
@@ -0,0 +1,166 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
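# Train a channels-first CNN on CIFAR-10 with TensorLayer layers on the
# MindSpore backend (WithLossCell + GradOperation + Momentum); tf.data is
# used only for loading and augmenting the images.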
import time | |||
import numpy as np | |||
import multiprocessing | |||
import tensorflow as tf | |||
from tensorlayer.layers import Module | |||
import tensorlayer as tl | |||
from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d) | |||
from mindspore.nn import Momentum, WithLossCell | |||
from mindspore import ParameterTuple | |||
import mindspore.nn as nn | |||
import mindspore as ms | |||
from mindspore.ops import composite as C | |||
import mindspore.ops.operations as P | |||
# enable debug logging | |||
tl.logging.set_verbosity(tl.logging.DEBUG)
class CNN(Module): | |||
def __init__(self): | |||
super(CNN, self).__init__() | |||
self.conv1 = Conv2d(64, (5, 5), (2, 2), padding='SAME', b_init=None, name='conv1', in_channels=3, act=tl.ReLU, data_format='channels_first') | |||
self.bn = BatchNorm2d(num_features=64, act=tl.ReLU, data_format='channels_first') | |||
self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1', data_format='channels_first') | |||
self.conv2 = Conv2d(128, (5, 5), (2, 2), padding='SAME', act=tl.ReLU, b_init=None, name='conv2', in_channels=64, data_format='channels_first') | |||
self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2', data_format='channels_first') | |||
self.flatten = Flatten(name='flatten') | |||
self.dense1 = Dense(120, act=tl.ReLU, name='dense1relu', in_channels=4608) | |||
self.dense2 = Dense(84, act=tl.ReLU, name='dense2relu', in_channels=120) | |||
self.dense3 = Dense(10, act=None, name='output', in_channels=84) | |||
def forward(self, x): | |||
z = self.conv1(x) | |||
z = self.bn(z) | |||
z = self.maxpool1(z) | |||
z = self.conv2(z) | |||
z = self.maxpool2(z) | |||
z = self.flatten(z) | |||
z = self.dense1(z) | |||
z = self.dense2(z) | |||
z = self.dense3(z) | |||
return z | |||
# training settings | |||
batch_size = 128 | |||
n_epoch = 500 | |||
shuffle_buffer_size = 128 | |||
# prepare cifar10 data | |||
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) | |||
def generator_train(): | |||
inputs = X_train | |||
targets = y_train | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
yield _input, _target | |||
def generator_test(): | |||
inputs = X_test | |||
targets = y_test | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
# yield _input.encode('utf-8'), _target.encode('utf-8') | |||
yield _input, _target | |||
def _map_fn_train(img, target): | |||
# 1. Randomly crop a [height, width] section of the image. | |||
img = tf.image.random_crop(img, [24, 24, 3]) | |||
# 2. Randomly flip the image horizontally. | |||
img = tf.image.random_flip_left_right(img) | |||
# 3. Randomly change brightness. | |||
img = tf.image.random_brightness(img, max_delta=63) | |||
# 4. Randomly change contrast. | |||
img = tf.image.random_contrast(img, lower=0.2, upper=1.8) | |||
# 5. Subtract off the mean and divide by the variance of the pixels. | |||
img = tf.image.per_image_standardization(img) | |||
target = tf.reshape(target, ()) | |||
return img, target | |||
class GradWrap(Module): | |||
""" GradWrap definition """ | |||
def __init__(self, network): | |||
super(GradWrap, self).__init__(auto_prefix=False) | |||
self.network = network | |||
self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters())) | |||
def forward(self, x, label): | |||
return C.GradOperation(get_by_list=True)(self.network, self.weights)(x, label) | |||
# dataset API and augmentation | |||
train_ds = tf.data.Dataset.from_generator( | |||
generator_train, output_types=(tf.float32, tf.int32) | |||
) # , output_shapes=((24, 24, 3), (1))) | |||
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) | |||
# train_ds = train_ds.repeat(n_epoch) | |||
train_ds = train_ds.shuffle(shuffle_buffer_size) | |||
train_ds = train_ds.prefetch(buffer_size=4096) | |||
train_ds = train_ds.batch(batch_size) | |||
# get the network | |||
net = CNN() | |||
train_weights = net.trainable_weights | |||
# optimizer = Adam(train_weights, learning_rate=0.01) | |||
optimizer = Momentum(train_weights, 0.01, 0.5) | |||
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') | |||
net_with_criterion = WithLossCell(net, criterion) | |||
train_network = GradWrap(net_with_criterion) | |||
train_network.set_train() | |||
# print(train_weights) | |||
for epoch in range(n_epoch): | |||
start_time = time.time() | |||
train_network.set_train() | |||
train_loss, train_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in train_ds: | |||
X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32) | |||
y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32) | |||
        X_batch = tl.nhwc_to_nchw(X_batch)  # network uses channels_first; the 1-D labels need no layout conversion
output = net(X_batch) | |||
loss_output = criterion(output, y_batch) | |||
grads = train_network(X_batch, y_batch) | |||
success = optimizer(grads) | |||
loss = loss_output.asnumpy() | |||
train_loss += loss | |||
n_iter += 1 | |||
train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) | |||
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) | |||
print(" train loss: {}".format(train_loss / n_iter)) | |||
print(" train acc: {}".format(train_acc / n_iter)) | |||
print(" loss ", loss) | |||
# start_time = time.time() | |||
# train_loss, train_acc, n_iter = 0, 0, 0 | |||
# for X_batch, y_batch in train_ds: | |||
# net.set_train() | |||
# with tf.GradientTape() as tape: | |||
# # compute outputs | |||
# _logits = net(X_batch) | |||
# # compute loss and update model | |||
# _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') | |||
# grad = tape.gradient(_loss_ce, train_weights) | |||
# optimizer.apply_gradients(zip(grad, train_weights)) | |||
# train_loss += _loss_ce | |||
# train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
# n_iter += 1 | |||
# print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) | |||
# print(" train loss: {}".format(train_loss / n_iter)) | |||
# print(" train acc: {}".format(train_acc / n_iter)) |
@@ -0,0 +1,186 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
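# Train a CNN on CIFAR-10 with TensorLayer layers and a manual TensorFlow
# training loop (tf.GradientTape + Adam), including tf.data augmentation
# and periodic evaluation on the test set.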
import time | |||
import numpy as np | |||
import multiprocessing | |||
import tensorflow as tf | |||
from tensorlayer.layers import Module | |||
import tensorlayer as tl | |||
from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d) | |||
# enable debug logging | |||
tl.logging.set_verbosity(tl.logging.DEBUG)
# prepare cifar10 data | |||
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) | |||
class CNN(Module): | |||
def __init__(self): | |||
super(CNN, self).__init__() | |||
# weights init | |||
W_init = tl.initializers.truncated_normal(stddev=5e-2) | |||
W_init2 = tl.initializers.truncated_normal(stddev=0.04) | |||
b_init2 = tl.initializers.constant(value=0.1) | |||
self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3) | |||
self.bn = BatchNorm2d(num_features=64, act=tl.ReLU) | |||
self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1') | |||
self.conv2 = Conv2d( | |||
64, (5, 5), (1, 1), padding='SAME', act=tl.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64 | |||
) | |||
self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2') | |||
self.flatten = Flatten(name='flatten') | |||
self.dense1 = Dense(384, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304) | |||
self.dense2 = Dense(192, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384) | |||
self.dense3 = Dense(10, act=None, W_init=W_init2, name='output', in_channels=192) | |||
def forward(self, x): | |||
z = self.conv1(x) | |||
z = self.bn(z) | |||
z = self.maxpool1(z) | |||
z = self.conv2(z) | |||
z = self.maxpool2(z) | |||
z = self.flatten(z) | |||
z = self.dense1(z) | |||
z = self.dense2(z) | |||
z = self.dense3(z) | |||
return z | |||
# get the network | |||
net = CNN() | |||
# training settings | |||
batch_size = 128 | |||
n_epoch = 500 | |||
learning_rate = 0.0001 | |||
print_freq = 5 | |||
n_step_epoch = int(len(y_train) / batch_size) | |||
n_step = n_epoch * n_step_epoch | |||
shuffle_buffer_size = 128 | |||
train_weights = net.trainable_weights | |||
optimizer = tl.optimizers.Adam(learning_rate) | |||
# looking for decay learning rate? see https://github.com/tensorlayer/srgan/blob/master/train.py | |||
def generator_train(): | |||
inputs = X_train | |||
targets = y_train | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
# yield _input.encode('utf-8'), _target.encode('utf-8') | |||
yield _input, _target | |||
def generator_test(): | |||
inputs = X_test | |||
targets = y_test | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
# yield _input.encode('utf-8'), _target.encode('utf-8') | |||
yield _input, _target | |||
def _map_fn_train(img, target): | |||
# 1. Randomly crop a [height, width] section of the image. | |||
img = tf.image.random_crop(img, [24, 24, 3]) | |||
# 2. Randomly flip the image horizontally. | |||
img = tf.image.random_flip_left_right(img) | |||
# 3. Randomly change brightness. | |||
img = tf.image.random_brightness(img, max_delta=63) | |||
# 4. Randomly change contrast. | |||
img = tf.image.random_contrast(img, lower=0.2, upper=1.8) | |||
# 5. Subtract off the mean and divide by the variance of the pixels. | |||
img = tf.image.per_image_standardization(img) | |||
target = tf.reshape(target, ()) | |||
return img, target | |||
def _map_fn_test(img, target): | |||
# 1. Crop the central [height, width] of the image. | |||
img = tf.image.resize_with_pad(img, 24, 24) | |||
# 2. Subtract off the mean and divide by the variance of the pixels. | |||
img = tf.image.per_image_standardization(img) | |||
img = tf.reshape(img, (24, 24, 3)) | |||
target = tf.reshape(target, ()) | |||
return img, target | |||
# dataset API and augmentation | |||
train_ds = tf.data.Dataset.from_generator( | |||
generator_train, output_types=(tf.float32, tf.int32) | |||
) # , output_shapes=((24, 24, 3), (1))) | |||
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) | |||
# train_ds = train_ds.repeat(n_epoch) | |||
train_ds = train_ds.shuffle(shuffle_buffer_size) | |||
train_ds = train_ds.prefetch(buffer_size=4096) | |||
train_ds = train_ds.batch(batch_size) | |||
# value = train_ds.make_one_shot_iterator().get_next() | |||
test_ds = tf.data.Dataset.from_generator( | |||
generator_test, output_types=(tf.float32, tf.int32) | |||
) # , output_shapes=((24, 24, 3), (1))) | |||
# test_ds = test_ds.shuffle(shuffle_buffer_size) | |||
test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count()) | |||
# test_ds = test_ds.repeat(n_epoch) | |||
test_ds = test_ds.prefetch(buffer_size=4096) | |||
test_ds = test_ds.batch(batch_size) | |||
# value_test = test_ds.make_one_shot_iterator().get_next() | |||
for epoch in range(n_epoch): | |||
start_time = time.time() | |||
train_loss, train_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in train_ds: | |||
net.set_train() | |||
with tf.GradientTape() as tape: | |||
# compute outputs | |||
_logits = net(X_batch) | |||
# compute loss and update model | |||
_loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') | |||
grad = tape.gradient(_loss_ce, train_weights) | |||
optimizer.apply_gradients(zip(grad, train_weights)) | |||
train_loss += _loss_ce | |||
train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) | |||
print(" train loss: {}".format(train_loss / n_iter)) | |||
print(" train acc: {}".format(train_acc / n_iter)) | |||
# use training and evaluation sets to evaluate the model every print_freq epoch | |||
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: | |||
net.eval() | |||
val_loss, val_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in test_ds: | |||
_logits = net(X_batch) # is_train=False, disable dropout | |||
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') | |||
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print(" val loss: {}".format(val_loss / n_iter)) | |||
print(" val acc: {}".format(val_acc / n_iter)) | |||
# use testing data to evaluate the model | |||
net.eval() | |||
test_loss, test_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in test_ds: | |||
_logits = net(X_batch) | |||
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') | |||
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print(" test loss: {}".format(test_loss / n_iter)) | |||
print(" test acc: {}".format(test_acc / n_iter)) |
@@ -0,0 +1,100 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
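# Train an MLP on MNIST using TensorLayer layers with the Dragon backend
# (TL_BACKEND='dragon'); a small classifier wrapper supports both eager
# and graph execution.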
import os | |||
os.environ['TL_BACKEND'] = 'dragon' | |||
from tensorlayer.layers import Module | |||
from tensorlayer.layers import Dense | |||
import tensorlayer as tl | |||
import dragon as dg | |||
import time | |||
import argparse | |||
import numpy as np | |||
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) | |||
class CustomModel(Module): | |||
def __init__(self): | |||
super(CustomModel, self).__init__() | |||
self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784) | |||
self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) | |||
self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) | |||
def forward(self, x, foo=None): | |||
z = self.dense1(x) | |||
z = self.dense2(z) | |||
out = self.dense3(z) | |||
return out | |||
def parse_args(): | |||
"""Parse the arguments.""" | |||
    parser = argparse.ArgumentParser(description='Train an MNIST MLP')
parser.add_argument('--execution', default='EAGER_MODE', type=str, help='The execution mode') | |||
parser.add_argument('--seed', default=1337, type=int, help='The random seed') | |||
parser.add_argument('--cuda', default=-1, type=int, help='The cuda device to use') | |||
return parser.parse_args() | |||
class Classifier(object): | |||
"""The base classifier class.""" | |||
# TensorSpec for graph execution | |||
    image_spec = dg.Tensor([None, 784], 'float32')  # MNIST inputs are flattened to 784 features
label_spec = dg.Tensor([None], 'int64') | |||
def __init__(self, optimizer): | |||
super(Classifier, self).__init__() | |||
self.net = CustomModel() | |||
self.optimizer = optimizer | |||
self.params = self.net.trainable_weights | |||
def step(self, image, label): | |||
with dg.GradientTape() as tape: | |||
logit = self.net(image) | |||
# logit = dg.cast(logit, 'float64') | |||
logit = dg.cast(dg.math.argmax(logit, -1), 'int64') | |||
label = dg.cast(label, 'int64') | |||
# print("logit :\n", logit, label) | |||
# loss = dg.losses.smooth_l1_loss([logit, label]) | |||
loss = dg.math.sum(logit - label) # dg.losses.sparse_softmax_cross_entropy([logit, label]) | |||
accuracy = dg.math.mean(dg.math.equal([logit, label]).astype('float32')) | |||
grads = tape.gradient(loss, self.params) | |||
self.optimizer.apply_gradients(zip(self.params, grads)) | |||
return loss, accuracy, self.optimizer | |||
if __name__ == '__main__': | |||
args = parse_args() | |||
dg.logging.info('Called with args:\n' + str(args)) | |||
np.random.seed(args.seed) | |||
dg.autograph.set_execution(args.execution) | |||
dg.cuda.set_default_device(args.cuda) | |||
# Define the model | |||
model = Classifier(dg.optimizers.SGD(base_lr=0.01, momentum=0.9, weight_decay=1e-4)) | |||
# Compile for graph execution if necessary | |||
if args.execution == 'GRAPH_MODE': | |||
model.step = dg.function( | |||
func=model.step, | |||
input_signature=[model.image_spec, model.label_spec], | |||
) | |||
    # Main loop
    batch_size = 200
for i in range(50): | |||
for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): | |||
image = dg.EagerTensor(X_batch, copy=False) | |||
label = dg.EagerTensor(y_batch, copy=False, dtype='float32') | |||
loss, accuracy, _ = model.step(image, label) | |||
if i % 20 == 0: | |||
dg.logging.info( | |||
'Iteration %d, lr = %s, loss = %.5f, accuracy = %.3f' % | |||
(i, str(model.optimizer.base_lr), loss, accuracy) | |||
) |
@@ -0,0 +1,92 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
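# Train an MLP on MNIST using TensorLayer layers on the MindSpore backend,
# driving the update step manually with WithLossCell, GradOperation and
# Momentum.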
import mindspore.nn as nn | |||
import mindspore.ops.operations as P | |||
from mindspore.ops import composite as C | |||
from mindspore.common import dtype as mstype | |||
from mindspore import context, Tensor, ParameterTuple | |||
from mindspore.common.initializer import TruncatedNormal | |||
from mindspore.nn import SoftmaxCrossEntropyWithLogits, Momentum, WithLossCell | |||
import numpy as np | |||
import tensorlayer as tl | |||
import mindspore as ms | |||
import tensorflow as tf | |||
import time | |||
from tensorlayer.layers import Module | |||
from tensorlayer.layers import Dense | |||
class MLP(Module): | |||
def __init__(self): | |||
super(MLP, self).__init__() | |||
self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784) | |||
self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) | |||
self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) | |||
def forward(self, x): | |||
z = self.dense1(x) | |||
z = self.dense2(z) | |||
out = self.dense3(z) | |||
return out | |||
class GradWrap(Module): | |||
""" GradWrap definition """ | |||
def __init__(self, network): | |||
super(GradWrap, self).__init__(auto_prefix=False) | |||
self.network = network | |||
self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters())) | |||
def forward(self, x, label): | |||
return C.GradOperation(get_by_list=True)(self.network, self.weights)(x, label) | |||
def generator_train(): | |||
inputs = X_train | |||
targets = y_train | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
yield _input, _target | |||
net = MLP() | |||
train_weights = list(filter(lambda x: x.requires_grad, net.get_parameters())) | |||
optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.15, 0.8) | |||
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') | |||
net_with_criterion = WithLossCell(net, criterion) | |||
train_network = GradWrap(net_with_criterion) | |||
train_network.set_train() | |||
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) | |||
train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32)) | |||
shuffle_buffer_size = 128 | |||
batch_size = 128 | |||
train_ds = train_ds.shuffle(shuffle_buffer_size) | |||
train_ds = train_ds.batch(batch_size) | |||
n_epoch = 50 | |||
for epoch in range(n_epoch): | |||
start_time = time.time() | |||
train_network.set_train() | |||
train_loss, train_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in train_ds: | |||
X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32) | |||
y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32) | |||
output = net(X_batch) | |||
loss_output = criterion(output, y_batch) | |||
grads = train_network(X_batch, y_batch) | |||
success = optimizer(grads) | |||
loss = loss_output.asnumpy() | |||
train_loss += loss | |||
n_iter += 1 | |||
train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) | |||
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) | |||
print(" train loss: {}".format(train_loss / n_iter)) | |||
print(" train acc: {}".format(train_acc / n_iter)) | |||
print(" loss ", loss) |
@@ -0,0 +1,91 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
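# Train an MLP (Dropout + BatchNorm1d) on MNIST with a manual TensorFlow
# training loop using tf.GradientTape, evaluating on the validation and
# test sets.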
import numpy as np | |||
import time | |||
import tensorflow as tf | |||
import tensorlayer as tl | |||
from tensorlayer.layers import Module | |||
from tensorlayer.layers import Dense, Dropout, BatchNorm1d | |||
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) | |||
class CustomModel(Module): | |||
def __init__(self): | |||
super(CustomModel, self).__init__() | |||
self.dropout1 = Dropout(keep=0.8) | |||
self.dense1 = Dense(n_units=800, in_channels=784) | |||
self.batchnorm = BatchNorm1d(act=tl.ReLU, num_features=800) | |||
self.dropout2 = Dropout(keep=0.8) | |||
self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) | |||
self.dropout3 = Dropout(keep=0.8) | |||
self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) | |||
def forward(self, x, foo=None): | |||
z = self.dropout1(x) | |||
z = self.dense1(z) | |||
z = self.batchnorm(z) | |||
z = self.dropout2(z) | |||
z = self.dense2(z) | |||
z = self.dropout3(z) | |||
out = self.dense3(z) | |||
if foo is not None: | |||
out = tl.ops.relu(out) | |||
return out | |||
MLP = CustomModel() | |||
n_epoch = 50 | |||
batch_size = 500 | |||
print_freq = 5 | |||
train_weights = MLP.trainable_weights | |||
optimizer = tl.optimizers.Adam(lr=0.0001) | |||
for epoch in range(n_epoch): ## iterate the dataset n_epoch times | |||
start_time = time.time() | |||
## iterate over the entire training set once (shuffle the data via training) | |||
for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): | |||
MLP.set_train() # enable dropout | |||
with tf.GradientTape() as tape: | |||
## compute outputs | |||
_logits = MLP(X_batch) | |||
## compute loss and update model | |||
_loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') | |||
grad = tape.gradient(_loss, train_weights) | |||
optimizer.apply_gradients(zip(grad, train_weights)) | |||
## use training and evaluation sets to evaluate the model every print_freq epoch | |||
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: | |||
        MLP.eval()  # disable dropout while computing the evaluation metrics below
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) | |||
train_loss, train_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): | |||
_logits = MLP(X_batch) | |||
train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') | |||
train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print(" train loss: {}".format(train_loss / n_iter)) | |||
print(" train acc: {}".format(train_acc / n_iter)) | |||
val_loss, val_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): | |||
_logits = MLP(X_batch) # is_train=False, disable dropout | |||
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') | |||
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print(" val loss: {}".format(val_loss / n_iter)) | |||
print(" val acc: {}".format(val_acc / n_iter)) | |||
## use testing data to evaluate the model | |||
MLP.eval() | |||
test_loss, test_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): | |||
_logits = MLP(X_batch, foo=1) | |||
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') | |||
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print("   test foo=1 loss: {}".format(test_loss / n_iter))
print("   test foo=1 acc: {}".format(test_acc / n_iter))
@@ -0,0 +1,117 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
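# Reference implementation: the same MNIST MLP written in pure MindSpore
# (nn.Cell + nn.Dense); TensorLayer and tf.data are used only to load and
# batch the data.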
import numpy as np | |||
import mindspore.nn as nn | |||
import mindspore.ops.operations as P | |||
from mindspore.ops import composite as C | |||
from mindspore.common import dtype as mstype | |||
from mindspore import context, Tensor, ParameterTuple | |||
from mindspore.common.initializer import TruncatedNormal | |||
from mindspore.nn import Dense, WithLossCell, SoftmaxCrossEntropyWithLogits, Momentum | |||
import tensorlayer as tl | |||
import mindspore as ms | |||
import tensorflow as tf | |||
import time | |||
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") | |||
def fc_with_initialize(input_channels, out_channels): | |||
"""weight initial for fc layer""" | |||
weight = weight_variable() | |||
bias = weight_variable() | |||
return nn.Dense(input_channels, out_channels, weight, bias) | |||
def weight_variable(): | |||
"""weight initial""" | |||
return TruncatedNormal(0.02) | |||
class MLP(nn.Cell): | |||
""" | |||
Lenet network | |||
Args: | |||
num_class (int): Num classes. Default: 10. | |||
Returns: | |||
Tensor, output tensor | |||
Examples: | |||
>>> MLP(num_class=10) | |||
""" | |||
def __init__(self, num_class=10): | |||
super(MLP, self).__init__() | |||
self.num_class = num_class | |||
self.fc1 = fc_with_initialize(784, 800) | |||
self.fc2 = fc_with_initialize(800, 800) | |||
self.fc3 = fc_with_initialize(800, self.num_class) | |||
self.relu = nn.ReLU() | |||
def construct(self, x): | |||
x = self.fc1(x) | |||
x = self.relu(x) | |||
x = self.fc2(x) | |||
x = self.relu(x) | |||
x = self.fc3(x) | |||
return x | |||
class GradWrap(nn.Cell): | |||
""" GradWrap definition """ | |||
def __init__(self, network): | |||
super(GradWrap, self).__init__(auto_prefix=False) | |||
self.network = network | |||
self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters())) | |||
def construct(self, x, label): | |||
weights = self.weights | |||
return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label) | |||
def generator_train(): | |||
inputs = X_train | |||
targets = y_train | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
yield _input, _target | |||
net = MLP() | |||
optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9) | |||
criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) | |||
net_with_criterion = WithLossCell(net, criterion) | |||
train_network = GradWrap(net_with_criterion) | |||
train_network.set_train() | |||
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) | |||
train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32)) | |||
shuffle_buffer_size = 128 | |||
batch_size = 128 | |||
train_ds = train_ds.shuffle(shuffle_buffer_size) | |||
train_ds = train_ds.batch(batch_size) | |||
n_epoch = 50 | |||
for epoch in range(n_epoch): | |||
start_time = time.time() | |||
train_network.set_train() | |||
train_loss, train_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in train_ds: | |||
X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32) | |||
y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32) | |||
output = net(X_batch) | |||
loss_output = criterion(output, y_batch) | |||
grads = train_network(X_batch, y_batch) | |||
success = optimizer(grads) | |||
loss = loss_output.asnumpy() | |||
train_loss += loss | |||
n_iter += 1 | |||
# train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) | |||
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) | |||
print(" train loss: {}".format(train_loss / n_iter)) | |||
# print(" train acc: {}".format(train_acc / n_iter)) | |||
print(" triain weights ", train_network.trainable_params()[0].data) |
@@ -0,0 +1,68 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
import numpy as np | |||
import os | |||
os.environ['TL_BACKEND'] = 'tensorflow' | |||
# os.environ['TL_BACKEND'] = 'mindspore' | |||
import tensorlayer as tl | |||
from tensorlayer.layers import Module | |||
from tensorlayer.layers import Dense, Dropout | |||
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) | |||
class CustomModel(Module): | |||
def __init__(self): | |||
super(CustomModel, self).__init__() | |||
self.dropout1 = Dropout(keep=0.8) | |||
self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784) | |||
self.dropout2 = Dropout(keep=0.8) | |||
self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) | |||
self.dropout3 = Dropout(keep=0.8) | |||
self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) | |||
def forward(self, x, foo=None): | |||
z = self.dropout1(x) | |||
z = self.dense1(z) | |||
# z = self.bn(z) | |||
z = self.dropout2(z) | |||
z = self.dense2(z) | |||
z = self.dropout3(z) | |||
out = self.dense3(z) | |||
if foo is not None: | |||
out = tl.ops.relu(out) | |||
return out | |||
def generator_train(): | |||
inputs = X_train | |||
targets = y_train | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
yield (_input, np.array(_target)) | |||
MLP = CustomModel() | |||
n_epoch = 50 | |||
batch_size = 128 | |||
print_freq = 2 | |||
shuffle_buffer_size = 128 | |||
train_weights = MLP.trainable_weights | |||
optimizer = tl.optimizers.Momentum(0.05, 0.9) | |||
train_ds = tl.dataflow.FromGenerator( | |||
generator_train, output_types=(tl.float32, tl.int32), column_names=['data', 'label']
) | |||
train_ds = tl.dataflow.Shuffle(train_ds, shuffle_buffer_size)
train_ds = tl.dataflow.Batch(train_ds, batch_size)
model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) | |||
model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) | |||
model.save_weights('./model.npz', format='npz_dict') | |||
model.load_weights('./model.npz', format='npz_dict') |
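# --- Added sketch (not part of the original script): the test split can be wrapped
# with the same dataflow helpers used above, assuming a generator over X_test / y_test.
# def generator_test():
#     for _input, _target in zip(X_test, y_test):
#         yield (_input, np.array(_target))
# test_ds = tl.dataflow.FromGenerator(
#     generator_test, output_types=(tl.float32, tl.int32), column_names=['data', 'label']
# )
# test_ds = tl.dataflow.Batch(test_ds, batch_size)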
@@ -0,0 +1,211 @@ | |||
#!/usr/bin/env python3 | |||
# -*- coding: utf-8 -*- | |||
import time | |||
import numpy as np | |||
import multiprocessing | |||
import tensorflow as tf | |||
from tensorlayer.layers import Module, SequentialLayer | |||
import tensorlayer as tl | |||
from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d, Elementwise) | |||
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) | |||
class Block(Module): | |||
def __init__(self, in_channels): | |||
super(Block, self).__init__() | |||
self.dense1 = Dense(in_channels=in_channels, n_units=256) | |||
self.dense2 = Dense(in_channels=256, n_units=384) | |||
self.dense3 = Dense(in_channels=in_channels, n_units=384) | |||
self.concat = Elementwise(combine_fn=tl.ops.add) | |||
def forward(self, inputs): | |||
z = self.dense1(inputs) | |||
z1 = self.dense2(z) | |||
z2 = self.dense3(inputs) | |||
out = self.concat([z1, z2]) | |||
return out | |||
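# Note (added comment): Block is a two-branch residual-style unit -- dense1 -> dense2
# on the main path, dense3 as a parallel projection of the input, merged by
# element-wise addition via Elementwise(combine_fn=tl.ops.add).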
class CNN(Module): | |||
def __init__(self): | |||
super(CNN, self).__init__() | |||
# weights init | |||
W_init = tl.initializers.truncated_normal(stddev=5e-2) | |||
W_init2 = tl.initializers.truncated_normal(stddev=0.04) | |||
b_init2 = tl.initializers.constant(value=0.1) | |||
self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3) | |||
self.bn = BatchNorm2d(num_features=64, act=tl.ReLU) | |||
self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1') | |||
self.conv2 = Conv2d( | |||
64, (5, 5), (1, 1), padding='SAME', act=tl.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64 | |||
) | |||
self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2') | |||
self.flatten = Flatten(name='flatten') | |||
self.dense1 = Dense(384, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304) | |||
self.dense_add = self.make_layer(in_channel=384) | |||
self.dense2 = Dense(192, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384) | |||
self.dense3 = Dense(10, act=None, W_init=W_init2, name='output', in_channels=192) | |||
def forward(self, x): | |||
z = self.conv1(x) | |||
z = self.bn(z) | |||
z = self.maxpool1(z) | |||
z = self.conv2(z) | |||
z = self.maxpool2(z) | |||
z = self.flatten(z) | |||
z = self.dense1(z) | |||
z = self.dense_add(z) | |||
z = self.dense2(z) | |||
z = self.dense3(z) | |||
return z | |||
def make_layer(self, in_channel): | |||
layers = [] | |||
_block = Block(in_channel) | |||
layers.append(_block) | |||
for _ in range(1, 3): | |||
range_block = Block(in_channel) | |||
layers.append(range_block) | |||
return SequentialLayer(layers) | |||
# get the network | |||
net = CNN() | |||
# training settings | |||
batch_size = 128 | |||
n_epoch = 500 | |||
learning_rate = 0.0001 | |||
print_freq = 5 | |||
n_step_epoch = int(len(y_train) / batch_size) | |||
n_step = n_epoch * n_step_epoch | |||
shuffle_buffer_size = 128 | |||
train_weights = net.trainable_weights | |||
optimizer = tl.optimizers.Adam(learning_rate) | |||
def generator_train(): | |||
inputs = X_train | |||
targets = y_train | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
# yield _input.encode('utf-8'), _target.encode('utf-8') | |||
yield _input, _target | |||
def generator_test(): | |||
inputs = X_test | |||
targets = y_test | |||
if len(inputs) != len(targets): | |||
raise AssertionError("The length of inputs and targets should be equal") | |||
for _input, _target in zip(inputs, targets): | |||
# yield _input.encode('utf-8'), _target.encode('utf-8') | |||
yield _input, _target | |||
def _map_fn_train(img, target): | |||
# 1. Randomly crop a [height, width] section of the image. | |||
img = tf.image.random_crop(img, [24, 24, 3]) | |||
# 2. Randomly flip the image horizontally. | |||
img = tf.image.random_flip_left_right(img) | |||
# 3. Randomly change brightness. | |||
img = tf.image.random_brightness(img, max_delta=63) | |||
# 4. Randomly change contrast. | |||
img = tf.image.random_contrast(img, lower=0.2, upper=1.8) | |||
# 5. Subtract off the mean and divide by the variance of the pixels. | |||
img = tf.image.per_image_standardization(img) | |||
target = tf.reshape(target, ()) | |||
return img, target | |||
def _map_fn_test(img, target): | |||
# 1. Crop the central [height, width] of the image. | |||
img = tf.image.resize_with_pad(img, 24, 24) | |||
# 2. Subtract off the mean and divide by the variance of the pixels. | |||
img = tf.image.per_image_standardization(img) | |||
img = tf.reshape(img, (24, 24, 3)) | |||
target = tf.reshape(target, ()) | |||
return img, target | |||
# dataset API and augmentation | |||
train_ds = tf.data.Dataset.from_generator( | |||
generator_train, output_types=(tf.float32, tf.int32) | |||
) # , output_shapes=((24, 24, 3), (1))) | |||
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) | |||
# train_ds = train_ds.repeat(n_epoch) | |||
train_ds = train_ds.shuffle(shuffle_buffer_size) | |||
train_ds = train_ds.prefetch(buffer_size=4096) | |||
train_ds = train_ds.batch(batch_size) | |||
# value = train_ds.make_one_shot_iterator().get_next() | |||
test_ds = tf.data.Dataset.from_generator( | |||
generator_test, output_types=(tf.float32, tf.int32) | |||
) # , output_shapes=((24, 24, 3), (1))) | |||
# test_ds = test_ds.shuffle(shuffle_buffer_size) | |||
test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count()) | |||
# test_ds = test_ds.repeat(n_epoch) | |||
test_ds = test_ds.prefetch(buffer_size=4096) | |||
test_ds = test_ds.batch(batch_size) | |||
# value_test = test_ds.make_one_shot_iterator().get_next() | |||
for epoch in range(n_epoch): | |||
start_time = time.time() | |||
train_loss, train_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in train_ds: | |||
net.set_train() | |||
with tf.GradientTape() as tape: | |||
# compute outputs | |||
_logits = net(X_batch) | |||
# compute loss and update model | |||
_loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') | |||
grad = tape.gradient(_loss_ce, train_weights) | |||
optimizer.apply_gradients(zip(grad, train_weights)) | |||
train_loss += _loss_ce | |||
train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) | |||
print(" train loss: {}".format(train_loss / n_iter)) | |||
print(" train acc: {}".format(train_acc / n_iter)) | |||
# evaluate the model every print_freq epochs (test_ds doubles as the validation set here)
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: | |||
net.eval() | |||
val_loss, val_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in test_ds: | |||
_logits = net(X_batch) # is_train=False, disable dropout | |||
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') | |||
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print(" val loss: {}".format(val_loss / n_iter)) | |||
print(" val acc: {}".format(val_acc / n_iter)) | |||
# use testing data to evaluate the model | |||
net.eval() | |||
test_loss, test_acc, n_iter = 0, 0, 0 | |||
for X_batch, y_batch in test_ds: | |||
_logits = net(X_batch) | |||
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') | |||
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) | |||
n_iter += 1 | |||
print(" test loss: {}".format(test_loss / n_iter)) | |||
print(" test acc: {}".format(test_acc / n_iter)) |
@@ -0,0 +1,57 @@ | |||
#! /usr/bin/python | |||
# -*- coding: utf-8 -*- | |||
import os | |||
os.environ['TL_BACKEND'] = 'paddle' | |||
import paddle.nn.functional as F | |||
from paddle.vision.transforms import Compose, Normalize | |||
import paddle | |||
import tensorlayer as tl | |||
from tensorlayer.layers import Module | |||
from tensorlayer.layers import Dense, Flatten | |||
transform = Compose([Normalize(mean=[127.5], | |||
std=[127.5], | |||
data_format='CHW')]) | |||
print('downloading and loading the MNIST training and test data')
train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform) | |||
test_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform) | |||
print('load finished') | |||
class MLP(Module): | |||
def __init__(self): | |||
super(MLP, self).__init__() | |||
self.linear1 = Dense(n_units=120, in_channels=784, act=tl.ReLU) | |||
self.linear2 = Dense(n_units=84, in_channels=120, act=tl.ReLU) | |||
self.linear3 = Dense(n_units=10, in_channels=84) | |||
self.flatten = Flatten() | |||
def forward(self, x): | |||
x = self.flatten(x) | |||
x = self.linear1(x) | |||
x = self.linear2(x) | |||
x = self.linear3(x) | |||
return x | |||
train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True) | |||
def train(model): | |||
model.train() | |||
epochs = 2 | |||
optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.trainable_weights) | |||
for epoch in range(epochs): | |||
for batch_id, data in enumerate(train_loader()): | |||
x_data = data[0] | |||
y_data = data[1] | |||
predicts = model(x_data) | |||
loss = tl.cost.mean_squared_error(predicts, y_data) | |||
acc = paddle.metric.accuracy(predicts, y_data) | |||
loss.backward() | |||
if batch_id % 5 == 0: | |||
print("epoch: {}, batch_id: {}, loss is: {}, acc is: {}".format(epoch, batch_id, loss.numpy(), acc.numpy())) | |||
optim.step() | |||
optim.clear_grad() | |||
model = MLP() | |||
train(model) | |||
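# --- Added sketch (not part of the original script): evaluate on the already-loaded
# test_dataset, reusing the same DataLoader / metric APIs used above.
# test_loader = paddle.io.DataLoader(test_dataset, batch_size=64)
# model.eval()
# acc_sum, n = 0.0, 0
# for x_data, y_data in test_loader():
#     acc_sum += float(paddle.metric.accuracy(model(x_data), y_data))
#     n += 1
# print("test acc is: {}".format(acc_sum / n))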
@@ -0,0 +1,287 @@ | |||
#! /usr/bin/python | |||
# -*- coding: utf-8 -*- | |||
import tensorflow as tf | |||
import colorsys, random, cv2 | |||
import numpy as np | |||
from tensorlayer.visualize import save_image | |||
def decode_tf(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1, 1, 1]): | |||
batch_size = tf.shape(conv_output)[0] | |||
conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS)) | |||
conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS), axis=-1) | |||
xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size)) | |||
xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2] | |||
xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [batch_size, 1, 1, 3, 1]) | |||
xy_grid = tf.cast(xy_grid, tf.float32) | |||
pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \ | |||
STRIDES[i] | |||
pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i]) | |||
pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1) | |||
pred_conf = tf.sigmoid(conv_raw_conf) | |||
pred_prob = tf.sigmoid(conv_raw_prob) | |||
pred_prob = pred_conf * pred_prob | |||
pred_prob = tf.reshape(pred_prob, (batch_size, -1, NUM_CLASS)) | |||
pred_xywh = tf.reshape(pred_xywh, (batch_size, -1, 4)) | |||
return pred_xywh, pred_prob | |||
def decode(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE=[1, 1, 1]): | |||
return decode_tf(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=i, XYSCALE=XYSCALE) | |||
def filter_boxes(box_xywh, scores, score_threshold=0.4, input_shape=tf.constant([416, 416])): | |||
scores_max = tf.math.reduce_max(scores, axis=-1) | |||
mask = scores_max >= score_threshold | |||
class_boxes = tf.boolean_mask(box_xywh, mask) | |||
pred_conf = tf.boolean_mask(scores, mask) | |||
class_boxes = tf.reshape(class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]]) | |||
pred_conf = tf.reshape(pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]]) | |||
box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1) | |||
input_shape = tf.cast(input_shape, dtype=tf.float32) | |||
box_yx = box_xy[..., ::-1] | |||
box_hw = box_wh[..., ::-1] | |||
box_mins = (box_yx - (box_hw / 2.)) / input_shape | |||
box_maxes = (box_yx + (box_hw / 2.)) / input_shape | |||
boxes = tf.concat( | |||
[ | |||
box_mins[..., 0:1], # y_min | |||
box_mins[..., 1:2], # x_min | |||
box_maxes[..., 0:1], # y_max | |||
box_maxes[..., 1:2] # x_max | |||
], | |||
axis=-1 | |||
) | |||
# return tf.concat([boxes, pred_conf], axis=-1) | |||
return (boxes, pred_conf) | |||
def read_class_names(class_file_name): | |||
names = {} | |||
with open(class_file_name, 'r') as data: | |||
for ID, name in enumerate(data): | |||
names[ID] = name.strip('\n') | |||
return names | |||
def draw_bbox(image, bboxes, show_label=True): | |||
classes = read_class_names('model/coco.names') | |||
num_classes = len(classes) | |||
image_h, image_w, _ = image.shape | |||
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)] | |||
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples)) | |||
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors)) | |||
random.seed(0) | |||
random.shuffle(colors) | |||
random.seed(None) | |||
out_boxes, out_scores, out_classes, num_boxes = bboxes | |||
for i in range(num_boxes[0]): | |||
if int(out_classes[0][i]) < 0 or int(out_classes[0][i]) >= num_classes: continue
coor = out_boxes[0][i] | |||
coor[0] = int(coor[0] * image_h) | |||
coor[2] = int(coor[2] * image_h) | |||
coor[1] = int(coor[1] * image_w) | |||
coor[3] = int(coor[3] * image_w) | |||
fontScale = 0.5 | |||
score = out_scores[0][i] | |||
class_ind = int(out_classes[0][i]) | |||
bbox_color = colors[class_ind] | |||
bbox_thick = int(0.6 * (image_h + image_w) / 600) | |||
c1, c2 = (int(coor[1]), int(coor[0])), (int(coor[3]), int(coor[2]))
cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
if show_label: | |||
bbox_mess = '%s: %.2f' % (classes[class_ind], score) | |||
t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0] | |||
c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3) | |||
cv2.rectangle(image, c1, (int(c3[0]), int(c3[1])), bbox_color, -1)  # filled
cv2.putText(
image, bbox_mess, (c1[0], int(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 0, 0),
bbox_thick // 2, lineType=cv2.LINE_AA
)
return image | |||
def get_anchors(anchors_path, tiny=False): | |||
anchors = np.array(anchors_path) | |||
if tiny: | |||
return anchors.reshape(2, 3, 2) | |||
else: | |||
return anchors.reshape(3, 3, 2) | |||
def decode_train(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1, 1, 1]): | |||
conv_output = tf.reshape(conv_output, (tf.shape(conv_output)[0], output_size, output_size, 3, 5 + NUM_CLASS)) | |||
conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS), axis=-1) | |||
xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size)) | |||
xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2] | |||
xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [tf.shape(conv_output)[0], 1, 1, 3, 1]) | |||
xy_grid = tf.cast(xy_grid, tf.float32) | |||
pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \ | |||
STRIDES[i] | |||
pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i]) | |||
pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1) | |||
pred_conf = tf.sigmoid(conv_raw_conf) | |||
pred_prob = tf.sigmoid(conv_raw_prob) | |||
return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1) | |||
def yolo4_input_processing(original_image): | |||
image_data = cv2.resize(original_image, (416, 416)) | |||
image_data = image_data / 255. | |||
images_data = np.asarray([image_data]).astype(np.float32)  # batch of one image
batch_data = tf.constant(images_data)
return batch_data | |||
def yolo4_output_processing(feature_maps): | |||
STRIDES = [8, 16, 32] | |||
ANCHORS = get_anchors([12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]) | |||
NUM_CLASS = 80 | |||
XYSCALE = [1.2, 1.1, 1.05] | |||
iou_threshold = 0.45 | |||
score_threshold = 0.25 | |||
bbox_tensors = [] | |||
prob_tensors = [] | |||
score_thres = 0.2 | |||
for i, fm in enumerate(feature_maps): | |||
if i == 0: | |||
output_tensors = decode(fm, 416 // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE) | |||
elif i == 1: | |||
output_tensors = decode(fm, 416 // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE) | |||
else: | |||
output_tensors = decode(fm, 416 // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE) | |||
bbox_tensors.append(output_tensors[0]) | |||
prob_tensors.append(output_tensors[1]) | |||
pred_bbox = tf.concat(bbox_tensors, axis=1) | |||
pred_prob = tf.concat(prob_tensors, axis=1) | |||
boxes, pred_conf = filter_boxes( | |||
pred_bbox, pred_prob, score_threshold=score_thres, input_shape=tf.constant([416, 416]) | |||
) | |||
pred = {'concat': tf.concat([boxes, pred_conf], axis=-1)} | |||
for key, value in pred.items(): | |||
boxes = value[:, :, 0:4] | |||
pred_conf = value[:, :, 4:] | |||
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression( | |||
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)), | |||
scores=tf.reshape(pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])), | |||
max_output_size_per_class=50, max_total_size=50, iou_threshold=iou_threshold, score_threshold=score_threshold | |||
) | |||
output = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()] | |||
return output | |||
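# Note (added comment): the returned list is [boxes, scores, classes, valid_detections];
# boxes have shape (batch, max_total_size, 4) in normalized [y_min, x_min, y_max, x_max]
# order, matching the layout produced by filter_boxes above.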
def result_to_json(image, pred_bbox): | |||
image_h, image_w, _ = image.shape | |||
out_boxes, out_scores, out_classes, num_boxes = pred_bbox | |||
class_names = {} | |||
json_result = [] | |||
with open('model/coco.names', 'r') as data: | |||
for ID, name in enumerate(data): | |||
class_names[ID] = name.strip('\n') | |||
nums_class = len(class_names) | |||
for i in range(num_boxes[0]): | |||
if int(out_classes[0][i]) < 0 or int(out_classes[0][i]) >= nums_class: continue
coor = out_boxes[0][i] | |||
coor[0] = int(coor[0] * image_h) | |||
coor[2] = int(coor[2] * image_h) | |||
coor[1] = int(coor[1] * image_w) | |||
coor[3] = int(coor[3] * image_w) | |||
score = float(out_scores[0][i]) | |||
class_ind = int(out_classes[0][i]) | |||
bbox = np.array([coor[1], coor[0], coor[3], coor[2]]).tolist() # [x1,y1,x2,y2] | |||
json_result.append({'image': None, 'category_id': class_ind, 'bbox': bbox, 'score': score}) | |||
return json_result | |||
def draw_boxes_and_labels_to_image_with_json(image, json_result, class_list, save_name=None): | |||
"""Draw bboxes and class labels on image. Return the image with bboxes. | |||
Parameters | |||
----------- | |||
image : numpy.array | |||
The RGB image [height, width, channel]. | |||
json_result : list of dict | |||
The object detection result with json format. | |||
class_list : list of str
For converting class IDs to name strings on the image.
save_name : None or str
The name of the image file (e.g. image.png); if None, the image is not saved.
Returns
-------
numpy.array
The image with bboxes and labels drawn on it.
References | |||
----------- | |||
- OpenCV rectangle and putText. | |||
- `scikit-image <http://scikit-image.org/docs/dev/api/skimage.draw.html#skimage.draw.rectangle>`__. | |||
""" | |||
image_h, image_w, _ = image.shape | |||
num_classes = len(class_list) | |||
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)] | |||
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples)) | |||
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors)) | |||
random.seed(0) | |||
random.shuffle(colors) | |||
random.seed(None) | |||
bbox_thick = int(0.6 * (image_h + image_w) / 600) | |||
fontScale = 0.5 | |||
for bbox_info in json_result: | |||
image_name = bbox_info['image'] | |||
category_id = bbox_info['category_id'] | |||
if category_id < 0 or category_id >= num_classes: continue
bbox = bbox_info['bbox'] # the order of coordinates is [x1, y1, x2, y2]
score = bbox_info['score'] | |||
bbox_color = colors[category_id] | |||
c1, c2 = (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])) | |||
cv2.rectangle(image, c1, c2, bbox_color, bbox_thick) | |||
bbox_mess = '%s: %.2f' % (class_list[category_id], score) | |||
t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0] | |||
c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3) | |||
cv2.rectangle(image, c1, (int(c3[0]), int(c3[1])), bbox_color, -1)
cv2.putText(
image, bbox_mess, (c1[0], int(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 0, 0),
bbox_thick // 2, lineType=cv2.LINE_AA
)
if save_name is not None: | |||
save_image(image, save_name) | |||
return image |
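# --- Added sketch (not part of the original file): a minimal end-to-end use of the
# helpers above, assuming a trained `model` that returns the three YOLOv4 feature maps
# and an RGB input array `original_image`.
# batch_data = yolo4_input_processing(original_image)
# feature_maps = model(batch_data)
# pred_bbox = yolo4_output_processing(feature_maps)
# json_result = result_to_json(original_image, pred_bbox)
# class_list = list(read_class_names('model/coco.names').values())
# image = draw_boxes_and_labels_to_image_with_json(original_image, json_result, class_list, 'result.png')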
@@ -0,0 +1,80 @@ | |||
person | |||
bicycle | |||
car | |||
motorbike | |||
aeroplane | |||
bus | |||
train | |||
truck | |||
boat | |||
traffic light | |||
fire hydrant | |||
stop sign | |||
parking meter | |||
bench | |||
bird | |||
cat | |||
dog | |||
horse | |||
sheep | |||
cow | |||
elephant | |||
bear | |||
zebra | |||
giraffe | |||
backpack | |||
umbrella | |||
handbag | |||
tie | |||
suitcase | |||
frisbee | |||
skis | |||
snowboard | |||
sports ball | |||
kite | |||
baseball bat | |||
baseball glove | |||
skateboard | |||
surfboard | |||
tennis racket | |||
bottle | |||
wine glass | |||
cup | |||
fork | |||
knife | |||
spoon | |||
bowl | |||
banana | |||
apple | |||
sandwich | |||
orange | |||
broccoli | |||
carrot | |||
hot dog | |||
pizza | |||
donut | |||
cake | |||
chair | |||
sofa | |||
potted plant | |||
bed | |||
dining table | |||
toilet | |||
tvmonitor | |||
laptop | |||
mouse | |||
remote | |||
keyboard | |||
cell phone | |||
microwave | |||
oven | |||
toaster | |||
sink | |||
refrigerator | |||
book | |||
clock | |||
vase | |||
scissors | |||
teddy bear | |||
hair drier | |||
toothbrush |
@@ -0,0 +1,541 @@ | |||
conv2d_1/filters:0 | |||
batchnorm2d_1/beta:0 | |||
batchnorm2d_1/gamma:0 | |||
batchnorm2d_1/moving_mean:0 | |||
batchnorm2d_1/moving_var:0 | |||
conv2d_2/filters:0 | |||
batchnorm2d_2/beta:0 | |||
batchnorm2d_2/gamma:0 | |||
batchnorm2d_2/moving_mean:0 | |||
batchnorm2d_2/moving_var:0 | |||
conv_rote_block_1/filters:0 | |||
conv2d_3/filters:0 | |||
batchnorm2d_3/beta:0 | |||
batchnorm2d_3/gamma:0 | |||
batchnorm2d_3/moving_mean:0 | |||
batchnorm2d_3/moving_var:0 | |||
batchnorm2d_4/beta:0 | |||
batchnorm2d_4/gamma:0 | |||
batchnorm2d_4/moving_mean:0 | |||
batchnorm2d_4/moving_var:0 | |||
conv2d_4/filters:0 | |||
batchnorm2d_5/beta:0 | |||
batchnorm2d_5/gamma:0 | |||
batchnorm2d_5/moving_mean:0 | |||
batchnorm2d_5/moving_var:0 | |||
conv2d_5/filters:0 | |||
batchnorm2d_6/beta:0 | |||
batchnorm2d_6/gamma:0 | |||
batchnorm2d_6/moving_mean:0 | |||
batchnorm2d_6/moving_var:0 | |||
conv2d_6/filters:0 | |||
batchnorm2d_7/beta:0 | |||
batchnorm2d_7/gamma:0 | |||
batchnorm2d_7/moving_mean:0 | |||
batchnorm2d_7/moving_var:0 | |||
conv2d_7/filters:0 | |||
batchnorm2d_8/beta:0 | |||
batchnorm2d_8/gamma:0 | |||
batchnorm2d_8/moving_mean:0 | |||
batchnorm2d_8/moving_var:0 | |||
conv2d_8/filters:0 | |||
batchnorm2d_9/beta:0 | |||
batchnorm2d_9/gamma:0 | |||
batchnorm2d_9/moving_mean:0 | |||
batchnorm2d_9/moving_var:0 | |||
conv_rote_block_2/filters:0 | |||
conv2d_9/filters:0 | |||
batchnorm2d_10/beta:0 | |||
batchnorm2d_10/gamma:0 | |||
batchnorm2d_10/moving_mean:0 | |||
batchnorm2d_10/moving_var:0 | |||
batchnorm2d_11/beta:0 | |||
batchnorm2d_11/gamma:0 | |||
batchnorm2d_11/moving_mean:0 | |||
batchnorm2d_11/moving_var:0 | |||
conv2d_10/filters:0 | |||
batchnorm2d_12/beta:0 | |||
batchnorm2d_12/gamma:0 | |||
batchnorm2d_12/moving_mean:0 | |||
batchnorm2d_12/moving_var:0 | |||
conv2d_11/filters:0 | |||
batchnorm2d_13/beta:0 | |||
batchnorm2d_13/gamma:0 | |||
batchnorm2d_13/moving_mean:0 | |||
batchnorm2d_13/moving_var:0 | |||
conv2d_12/filters:0 | |||
batchnorm2d_14/beta:0 | |||
batchnorm2d_14/gamma:0 | |||
batchnorm2d_14/moving_mean:0 | |||
batchnorm2d_14/moving_var:0 | |||
conv2d_13/filters:0 | |||
batchnorm2d_15/beta:0 | |||
batchnorm2d_15/gamma:0 | |||
batchnorm2d_15/moving_mean:0 | |||
batchnorm2d_15/moving_var:0 | |||
conv2d_14/filters:0 | |||
batchnorm2d_16/beta:0 | |||
batchnorm2d_16/gamma:0 | |||
batchnorm2d_16/moving_mean:0 | |||
batchnorm2d_16/moving_var:0 | |||
conv2d_15/filters:0 | |||
batchnorm2d_17/beta:0 | |||
batchnorm2d_17/gamma:0 | |||
batchnorm2d_17/moving_mean:0 | |||
batchnorm2d_17/moving_var:0 | |||
conv2d_16/filters:0 | |||
batchnorm2d_18/beta:0 | |||
batchnorm2d_18/gamma:0 | |||
batchnorm2d_18/moving_mean:0 | |||
batchnorm2d_18/moving_var:0 | |||
conv_rote_block_3/filters:0 | |||
conv2d_17/filters:0 | |||
batchnorm2d_19/beta:0 | |||
batchnorm2d_19/gamma:0 | |||
batchnorm2d_19/moving_mean:0 | |||
batchnorm2d_19/moving_var:0 | |||
batchnorm2d_20/beta:0 | |||
batchnorm2d_20/gamma:0 | |||
batchnorm2d_20/moving_mean:0 | |||
batchnorm2d_20/moving_var:0 | |||
conv2d_18/filters:0 | |||
batchnorm2d_21/beta:0 | |||
batchnorm2d_21/gamma:0 | |||
batchnorm2d_21/moving_mean:0 | |||
batchnorm2d_21/moving_var:0 | |||
conv2d_19/filters:0 | |||
batchnorm2d_22/beta:0 | |||
batchnorm2d_22/gamma:0 | |||
batchnorm2d_22/moving_mean:0 | |||
batchnorm2d_22/moving_var:0 | |||
conv2d_20/filters:0 | |||
batchnorm2d_23/beta:0 | |||
batchnorm2d_23/gamma:0 | |||
batchnorm2d_23/moving_mean:0 | |||
batchnorm2d_23/moving_var:0 | |||
conv2d_21/filters:0 | |||
batchnorm2d_24/beta:0 | |||
batchnorm2d_24/gamma:0 | |||
batchnorm2d_24/moving_mean:0 | |||
batchnorm2d_24/moving_var:0 | |||
conv2d_22/filters:0 | |||
batchnorm2d_25/beta:0 | |||
batchnorm2d_25/gamma:0 | |||
batchnorm2d_25/moving_mean:0 | |||
batchnorm2d_25/moving_var:0 | |||
conv2d_23/filters:0 | |||
batchnorm2d_26/beta:0 | |||
batchnorm2d_26/gamma:0 | |||
batchnorm2d_26/moving_mean:0 | |||
batchnorm2d_26/moving_var:0 | |||
conv2d_24/filters:0 | |||
batchnorm2d_27/beta:0 | |||
batchnorm2d_27/gamma:0 | |||
batchnorm2d_27/moving_mean:0 | |||
batchnorm2d_27/moving_var:0 | |||
conv2d_25/filters:0 | |||
batchnorm2d_28/beta:0 | |||
batchnorm2d_28/gamma:0 | |||
batchnorm2d_28/moving_mean:0 | |||
batchnorm2d_28/moving_var:0 | |||
conv2d_26/filters:0 | |||
batchnorm2d_29/beta:0 | |||
batchnorm2d_29/gamma:0 | |||
batchnorm2d_29/moving_mean:0 | |||
batchnorm2d_29/moving_var:0 | |||
conv2d_27/filters:0 | |||
batchnorm2d_30/beta:0 | |||
batchnorm2d_30/gamma:0 | |||
batchnorm2d_30/moving_mean:0 | |||
batchnorm2d_30/moving_var:0 | |||
conv2d_28/filters:0 | |||
batchnorm2d_31/beta:0 | |||
batchnorm2d_31/gamma:0 | |||
batchnorm2d_31/moving_mean:0 | |||
batchnorm2d_31/moving_var:0 | |||
conv2d_29/filters:0 | |||
batchnorm2d_32/beta:0 | |||
batchnorm2d_32/gamma:0 | |||
batchnorm2d_32/moving_mean:0 | |||
batchnorm2d_32/moving_var:0 | |||
conv2d_30/filters:0 | |||
batchnorm2d_33/beta:0 | |||
batchnorm2d_33/gamma:0 | |||
batchnorm2d_33/moving_mean:0 | |||
batchnorm2d_33/moving_var:0 | |||
conv2d_31/filters:0 | |||
batchnorm2d_34/beta:0 | |||
batchnorm2d_34/gamma:0 | |||
batchnorm2d_34/moving_mean:0 | |||
batchnorm2d_34/moving_var:0 | |||
conv2d_32/filters:0 | |||
batchnorm2d_35/beta:0 | |||
batchnorm2d_35/gamma:0 | |||
batchnorm2d_35/moving_mean:0 | |||
batchnorm2d_35/moving_var:0 | |||
conv2d_33/filters:0 | |||
batchnorm2d_36/beta:0 | |||
batchnorm2d_36/gamma:0 | |||
batchnorm2d_36/moving_mean:0 | |||
batchnorm2d_36/moving_var:0 | |||
conv2d_34/filters:0 | |||
batchnorm2d_37/beta:0 | |||
batchnorm2d_37/gamma:0 | |||
batchnorm2d_37/moving_mean:0 | |||
batchnorm2d_37/moving_var:0 | |||
conv2d_35/filters:0 | |||
batchnorm2d_38/beta:0 | |||
batchnorm2d_38/gamma:0 | |||
batchnorm2d_38/moving_mean:0 | |||
batchnorm2d_38/moving_var:0 | |||
conv_yolo_2/filters:0 | |||
batchnorm2d_87/beta:0 | |||
batchnorm2d_87/gamma:0 | |||
batchnorm2d_87/moving_mean:0 | |||
batchnorm2d_87/moving_var:0 | |||
conv2d_36/filters:0 | |||
batchnorm2d_39/beta:0 | |||
batchnorm2d_39/gamma:0 | |||
batchnorm2d_39/moving_mean:0 | |||
batchnorm2d_39/moving_var:0 | |||
conv_rote_block_4/filters:0 | |||
conv2d_37/filters:0 | |||
batchnorm2d_40/beta:0 | |||
batchnorm2d_40/gamma:0 | |||
batchnorm2d_40/moving_mean:0 | |||
batchnorm2d_40/moving_var:0 | |||
batchnorm2d_41/beta:0 | |||
batchnorm2d_41/gamma:0 | |||
batchnorm2d_41/moving_mean:0 | |||
batchnorm2d_41/moving_var:0 | |||
conv2d_38/filters:0 | |||
batchnorm2d_42/beta:0 | |||
batchnorm2d_42/gamma:0 | |||
batchnorm2d_42/moving_mean:0 | |||
batchnorm2d_42/moving_var:0 | |||
conv2d_39/filters:0 | |||
batchnorm2d_43/beta:0 | |||
batchnorm2d_43/gamma:0 | |||
batchnorm2d_43/moving_mean:0 | |||
batchnorm2d_43/moving_var:0 | |||
conv2d_40/filters:0 | |||
batchnorm2d_44/beta:0 | |||
batchnorm2d_44/gamma:0 | |||
batchnorm2d_44/moving_mean:0 | |||
batchnorm2d_44/moving_var:0 | |||
conv2d_41/filters:0 | |||
batchnorm2d_45/beta:0 | |||
batchnorm2d_45/gamma:0 | |||
batchnorm2d_45/moving_mean:0 | |||
batchnorm2d_45/moving_var:0 | |||
conv2d_42/filters:0 | |||
batchnorm2d_46/beta:0 | |||
batchnorm2d_46/gamma:0 | |||
batchnorm2d_46/moving_mean:0 | |||
batchnorm2d_46/moving_var:0 | |||
conv2d_43/filters:0 | |||
batchnorm2d_47/beta:0 | |||
batchnorm2d_47/gamma:0 | |||
batchnorm2d_47/moving_mean:0 | |||
batchnorm2d_47/moving_var:0 | |||
conv2d_44/filters:0 | |||
batchnorm2d_48/beta:0 | |||
batchnorm2d_48/gamma:0 | |||
batchnorm2d_48/moving_mean:0 | |||
batchnorm2d_48/moving_var:0 | |||
conv2d_45/filters:0 | |||
batchnorm2d_49/beta:0 | |||
batchnorm2d_49/gamma:0 | |||
batchnorm2d_49/moving_mean:0 | |||
batchnorm2d_49/moving_var:0 | |||
conv2d_46/filters:0 | |||
batchnorm2d_50/beta:0 | |||
batchnorm2d_50/gamma:0 | |||
batchnorm2d_50/moving_mean:0 | |||
batchnorm2d_50/moving_var:0 | |||
conv2d_47/filters:0 | |||
batchnorm2d_51/beta:0 | |||
batchnorm2d_51/gamma:0 | |||
batchnorm2d_51/moving_mean:0 | |||
batchnorm2d_51/moving_var:0 | |||
conv2d_48/filters:0 | |||
batchnorm2d_52/beta:0 | |||
batchnorm2d_52/gamma:0 | |||
batchnorm2d_52/moving_mean:0 | |||
batchnorm2d_52/moving_var:0 | |||
conv2d_49/filters:0 | |||
batchnorm2d_53/beta:0 | |||
batchnorm2d_53/gamma:0 | |||
batchnorm2d_53/moving_mean:0 | |||
batchnorm2d_53/moving_var:0 | |||
conv2d_50/filters:0 | |||
batchnorm2d_54/beta:0 | |||
batchnorm2d_54/gamma:0 | |||
batchnorm2d_54/moving_mean:0 | |||
batchnorm2d_54/moving_var:0 | |||
conv2d_51/filters:0 | |||
batchnorm2d_55/beta:0 | |||
batchnorm2d_55/gamma:0 | |||
batchnorm2d_55/moving_mean:0 | |||
batchnorm2d_55/moving_var:0 | |||
conv2d_52/filters:0 | |||
batchnorm2d_56/beta:0 | |||
batchnorm2d_56/gamma:0 | |||
batchnorm2d_56/moving_mean:0 | |||
batchnorm2d_56/moving_var:0 | |||
conv2d_53/filters:0 | |||
batchnorm2d_57/beta:0 | |||
batchnorm2d_57/gamma:0 | |||
batchnorm2d_57/moving_mean:0 | |||
batchnorm2d_57/moving_var:0 | |||
conv2d_54/filters:0 | |||
batchnorm2d_58/beta:0 | |||
batchnorm2d_58/gamma:0 | |||
batchnorm2d_58/moving_mean:0 | |||
batchnorm2d_58/moving_var:0 | |||
conv2d_55/filters:0 | |||
batchnorm2d_59/beta:0 | |||
batchnorm2d_59/gamma:0 | |||
batchnorm2d_59/moving_mean:0 | |||
batchnorm2d_59/moving_var:0 | |||
conv_yolo_1/filters:0 | |||
batchnorm2d_80/beta:0 | |||
batchnorm2d_80/gamma:0 | |||
batchnorm2d_80/moving_mean:0 | |||
batchnorm2d_80/moving_var:0 | |||
conv2d_56/filters:0 | |||
batchnorm2d_60/beta:0 | |||
batchnorm2d_60/gamma:0 | |||
batchnorm2d_60/moving_mean:0 | |||
batchnorm2d_60/moving_var:0 | |||
conv_rote_block_5/filters:0 | |||
conv2d_57/filters:0 | |||
batchnorm2d_61/beta:0 | |||
batchnorm2d_61/gamma:0 | |||
batchnorm2d_61/moving_mean:0 | |||
batchnorm2d_61/moving_var:0 | |||
batchnorm2d_62/beta:0 | |||
batchnorm2d_62/gamma:0 | |||
batchnorm2d_62/moving_mean:0 | |||
batchnorm2d_62/moving_var:0 | |||
conv2d_58/filters:0 | |||
batchnorm2d_63/beta:0 | |||
batchnorm2d_63/gamma:0 | |||
batchnorm2d_63/moving_mean:0 | |||
batchnorm2d_63/moving_var:0 | |||
conv2d_59/filters:0 | |||
batchnorm2d_64/beta:0 | |||
batchnorm2d_64/gamma:0 | |||
batchnorm2d_64/moving_mean:0 | |||
batchnorm2d_64/moving_var:0 | |||
conv2d_60/filters:0 | |||
batchnorm2d_65/beta:0 | |||
batchnorm2d_65/gamma:0 | |||
batchnorm2d_65/moving_mean:0 | |||
batchnorm2d_65/moving_var:0 | |||
conv2d_61/filters:0 | |||
batchnorm2d_66/beta:0 | |||
batchnorm2d_66/gamma:0 | |||
batchnorm2d_66/moving_mean:0 | |||
batchnorm2d_66/moving_var:0 | |||
conv2d_62/filters:0 | |||
batchnorm2d_67/beta:0 | |||
batchnorm2d_67/gamma:0 | |||
batchnorm2d_67/moving_mean:0 | |||
batchnorm2d_67/moving_var:0 | |||
conv2d_63/filters:0 | |||
batchnorm2d_68/beta:0 | |||
batchnorm2d_68/gamma:0 | |||
batchnorm2d_68/moving_mean:0 | |||
batchnorm2d_68/moving_var:0 | |||
conv2d_64/filters:0 | |||
batchnorm2d_69/beta:0 | |||
batchnorm2d_69/gamma:0 | |||
batchnorm2d_69/moving_mean:0 | |||
batchnorm2d_69/moving_var:0 | |||
conv2d_65/filters:0 | |||
batchnorm2d_70/beta:0 | |||
batchnorm2d_70/gamma:0 | |||
batchnorm2d_70/moving_mean:0 | |||
batchnorm2d_70/moving_var:0 | |||
conv2d_66/filters:0 | |||
batchnorm2d_71/beta:0 | |||
batchnorm2d_71/gamma:0 | |||
batchnorm2d_71/moving_mean:0 | |||
batchnorm2d_71/moving_var:0 | |||
conv2d_67/filters:0 | |||
batchnorm2d_72/beta:0 | |||
batchnorm2d_72/gamma:0 | |||
batchnorm2d_72/moving_mean:0 | |||
batchnorm2d_72/moving_var:0 | |||
conv2d_68/filters:0 | |||
batchnorm2d_73/beta:0 | |||
batchnorm2d_73/gamma:0 | |||
batchnorm2d_73/moving_mean:0 | |||
batchnorm2d_73/moving_var:0 | |||
conv2d_69/filters:0 | |||
batchnorm2d_74/beta:0 | |||
batchnorm2d_74/gamma:0 | |||
batchnorm2d_74/moving_mean:0 | |||
batchnorm2d_74/moving_var:0 | |||
conv2d_70/filters:0 | |||
batchnorm2d_75/beta:0 | |||
batchnorm2d_75/gamma:0 | |||
batchnorm2d_75/moving_mean:0 | |||
batchnorm2d_75/moving_var:0 | |||
conv2d_71/filters:0 | |||
batchnorm2d_76/beta:0 | |||
batchnorm2d_76/gamma:0 | |||
batchnorm2d_76/moving_mean:0 | |||
batchnorm2d_76/moving_var:0 | |||
conv2d_72/filters:0 | |||
batchnorm2d_77/beta:0 | |||
batchnorm2d_77/gamma:0 | |||
batchnorm2d_77/moving_mean:0 | |||
batchnorm2d_77/moving_var:0 | |||
conv2d_73/filters:0 | |||
batchnorm2d_78/beta:0 | |||
batchnorm2d_78/gamma:0 | |||
batchnorm2d_78/moving_mean:0 | |||
batchnorm2d_78/moving_var:0 | |||
conv2d_74/filters:0 | |||
batchnorm2d_79/beta:0 | |||
batchnorm2d_79/gamma:0 | |||
batchnorm2d_79/moving_mean:0 | |||
batchnorm2d_79/moving_var:0 | |||
conv2d_75/filters:0 | |||
batchnorm2d_81/beta:0 | |||
batchnorm2d_81/gamma:0 | |||
batchnorm2d_81/moving_mean:0 | |||
batchnorm2d_81/moving_var:0 | |||
conv2d_76/filters:0 | |||
batchnorm2d_82/beta:0 | |||
batchnorm2d_82/gamma:0 | |||
batchnorm2d_82/moving_mean:0 | |||
batchnorm2d_82/moving_var:0 | |||
conv2d_77/filters:0 | |||
batchnorm2d_83/beta:0 | |||
batchnorm2d_83/gamma:0 | |||
batchnorm2d_83/moving_mean:0 | |||
batchnorm2d_83/moving_var:0 | |||
conv2d_78/filters:0 | |||
batchnorm2d_84/beta:0 | |||
batchnorm2d_84/gamma:0 | |||
batchnorm2d_84/moving_mean:0 | |||
batchnorm2d_84/moving_var:0 | |||
conv2d_79/filters:0 | |||
batchnorm2d_85/beta:0 | |||
batchnorm2d_85/gamma:0 | |||
batchnorm2d_85/moving_mean:0 | |||
batchnorm2d_85/moving_var:0 | |||
conv2d_80/filters:0 | |||
batchnorm2d_86/beta:0 | |||
batchnorm2d_86/gamma:0 | |||
batchnorm2d_86/moving_mean:0 | |||
batchnorm2d_86/moving_var:0 | |||
conv2d_81/filters:0 | |||
batchnorm2d_88/beta:0 | |||
batchnorm2d_88/gamma:0 | |||
batchnorm2d_88/moving_mean:0 | |||
batchnorm2d_88/moving_var:0 | |||
conv2d_82/filters:0 | |||
batchnorm2d_89/beta:0 | |||
batchnorm2d_89/gamma:0 | |||
batchnorm2d_89/moving_mean:0 | |||
batchnorm2d_89/moving_var:0 | |||
conv2d_83/filters:0 | |||
batchnorm2d_90/beta:0 | |||
batchnorm2d_90/gamma:0 | |||
batchnorm2d_90/moving_mean:0 | |||
batchnorm2d_90/moving_var:0 | |||
conv2d_84/filters:0 | |||
batchnorm2d_91/beta:0 | |||
batchnorm2d_91/gamma:0 | |||
batchnorm2d_91/moving_mean:0 | |||
batchnorm2d_91/moving_var:0 | |||
conv2d_85/filters:0 | |||
batchnorm2d_92/beta:0 | |||
batchnorm2d_92/gamma:0 | |||
batchnorm2d_92/moving_mean:0 | |||
batchnorm2d_92/moving_var:0 | |||
conv_route_1/filters:0 | |||
batchnorm2d_93/beta:0 | |||
batchnorm2d_93/gamma:0 | |||
batchnorm2d_93/moving_mean:0 | |||
batchnorm2d_93/moving_var:0 | |||
conv_route_2/filters:0 | |||
conv2d_86/filters:0 | |||
conv2d_86/biases:0 | |||
batchnorm2d_94/beta:0 | |||
batchnorm2d_94/gamma:0 | |||
batchnorm2d_94/moving_mean:0 | |||
batchnorm2d_94/moving_var:0 | |||
conv2d_87/filters:0 | |||
batchnorm2d_95/beta:0 | |||
batchnorm2d_95/gamma:0 | |||
batchnorm2d_95/moving_mean:0 | |||
batchnorm2d_95/moving_var:0 | |||
conv2d_88/filters:0 | |||
batchnorm2d_96/beta:0 | |||
batchnorm2d_96/gamma:0 | |||
batchnorm2d_96/moving_mean:0 | |||
batchnorm2d_96/moving_var:0 | |||
conv2d_89/filters:0 | |||
batchnorm2d_97/beta:0 | |||
batchnorm2d_97/gamma:0 | |||
batchnorm2d_97/moving_mean:0 | |||
batchnorm2d_97/moving_var:0 | |||
conv2d_90/filters:0 | |||
batchnorm2d_98/beta:0 | |||
batchnorm2d_98/gamma:0 | |||
batchnorm2d_98/moving_mean:0 | |||
batchnorm2d_98/moving_var:0 | |||
conv2d_91/filters:0 | |||
batchnorm2d_99/beta:0 | |||
batchnorm2d_99/gamma:0 | |||
batchnorm2d_99/moving_mean:0 | |||
batchnorm2d_99/moving_var:0 | |||
conv_route_3/filters:0 | |||
batchnorm2d_100/beta:0 | |||
batchnorm2d_100/gamma:0 | |||
batchnorm2d_100/moving_mean:0 | |||
batchnorm2d_100/moving_var:0 | |||
conv_route_4/filters:0 | |||
conv2d_92/filters:0 | |||
conv2d_92/biases:0 | |||
batchnorm2d_101/beta:0 | |||
batchnorm2d_101/gamma:0 | |||
batchnorm2d_101/moving_mean:0 | |||
batchnorm2d_101/moving_var:0 | |||
conv2d_93/filters:0 | |||
batchnorm2d_102/beta:0 | |||
batchnorm2d_102/gamma:0 | |||
batchnorm2d_102/moving_mean:0 | |||
batchnorm2d_102/moving_var:0 | |||
conv2d_94/filters:0 | |||
batchnorm2d_103/beta:0 | |||
batchnorm2d_103/gamma:0 | |||
batchnorm2d_103/moving_mean:0 | |||
batchnorm2d_103/moving_var:0 | |||
conv2d_95/filters:0 | |||
batchnorm2d_104/beta:0 | |||
batchnorm2d_104/gamma:0 | |||
batchnorm2d_104/moving_mean:0 | |||
batchnorm2d_104/moving_var:0 | |||
conv2d_96/filters:0 | |||
batchnorm2d_105/beta:0 | |||
batchnorm2d_105/gamma:0 | |||
batchnorm2d_105/moving_mean:0 | |||
batchnorm2d_105/moving_var:0 | |||
conv2d_97/filters:0 | |||
batchnorm2d_106/beta:0 | |||
batchnorm2d_106/gamma:0 | |||
batchnorm2d_106/moving_mean:0 | |||
batchnorm2d_106/moving_var:0 | |||
conv2d_98/filters:0 | |||
batchnorm2d_107/beta:0 | |||
batchnorm2d_107/gamma:0 | |||
batchnorm2d_107/moving_mean:0 | |||
batchnorm2d_107/moving_var:0 | |||
conv2d_99/filters:0 | |||
conv2d_99/biases:0 |
@@ -0,0 +1,541 @@ | |||
conv2d_1/filters:0 | |||
batchnorm2d_1/beta:0 | |||
batchnorm2d_1/gamma:0 | |||
batchnorm2d_1/moving_mean:0 | |||
batchnorm2d_1/moving_var:0 | |||
conv2d_2/filters:0 | |||
batchnorm2d_2/beta:0 | |||
batchnorm2d_2/gamma:0 | |||
batchnorm2d_2/moving_mean:0 | |||
batchnorm2d_2/moving_var:0 | |||
conv_rote_block_1/filters:0 | |||
batchnorm2d_3/beta:0 | |||
batchnorm2d_3/gamma:0 | |||
batchnorm2d_3/moving_mean:0 | |||
batchnorm2d_3/moving_var:0 | |||
conv2d_3/filters:0 | |||
batchnorm2d_4/beta:0 | |||
batchnorm2d_4/gamma:0 | |||
batchnorm2d_4/moving_mean:0 | |||
batchnorm2d_4/moving_var:0 | |||
conv2d_4/filters:0 | |||
batchnorm2d_5/beta:0 | |||
batchnorm2d_5/gamma:0 | |||
batchnorm2d_5/moving_mean:0 | |||
batchnorm2d_5/moving_var:0 | |||
conv2d_5/filters:0 | |||
batchnorm2d_6/beta:0 | |||
batchnorm2d_6/gamma:0 | |||
batchnorm2d_6/moving_mean:0 | |||
batchnorm2d_6/moving_var:0 | |||
conv2d_6/filters:0 | |||
batchnorm2d_7/beta:0 | |||
batchnorm2d_7/gamma:0 | |||
batchnorm2d_7/moving_mean:0 | |||
batchnorm2d_7/moving_var:0 | |||
conv2d_7/filters:0 | |||
batchnorm2d_8/beta:0 | |||
batchnorm2d_8/gamma:0 | |||
batchnorm2d_8/moving_mean:0 | |||
batchnorm2d_8/moving_var:0 | |||
conv2d_8/filters:0 | |||
batchnorm2d_9/beta:0 | |||
batchnorm2d_9/gamma:0 | |||
batchnorm2d_9/moving_mean:0 | |||
batchnorm2d_9/moving_var:0 | |||
conv_rote_block_2/filters:0 | |||
batchnorm2d_10/beta:0 | |||
batchnorm2d_10/gamma:0 | |||
batchnorm2d_10/moving_mean:0 | |||
batchnorm2d_10/moving_var:0 | |||
conv2d_9/filters:0 | |||
batchnorm2d_11/beta:0 | |||
batchnorm2d_11/gamma:0 | |||
batchnorm2d_11/moving_mean:0 | |||
batchnorm2d_11/moving_var:0 | |||
conv2d_10/filters:0 | |||
batchnorm2d_12/beta:0 | |||
batchnorm2d_12/gamma:0 | |||
batchnorm2d_12/moving_mean:0 | |||
batchnorm2d_12/moving_var:0 | |||
conv2d_11/filters:0 | |||
batchnorm2d_13/beta:0 | |||
batchnorm2d_13/gamma:0 | |||
batchnorm2d_13/moving_mean:0 | |||
batchnorm2d_13/moving_var:0 | |||
conv2d_12/filters:0 | |||
batchnorm2d_14/beta:0 | |||
batchnorm2d_14/gamma:0 | |||
batchnorm2d_14/moving_mean:0 | |||
batchnorm2d_14/moving_var:0 | |||
conv2d_13/filters:0 | |||
batchnorm2d_15/beta:0 | |||
batchnorm2d_15/gamma:0 | |||
batchnorm2d_15/moving_mean:0 | |||
batchnorm2d_15/moving_var:0 | |||
conv2d_14/filters:0 | |||
batchnorm2d_16/beta:0 | |||
batchnorm2d_16/gamma:0 | |||
batchnorm2d_16/moving_mean:0 | |||
batchnorm2d_16/moving_var:0 | |||
conv2d_15/filters:0 | |||
batchnorm2d_17/beta:0 | |||
batchnorm2d_17/gamma:0 | |||
batchnorm2d_17/moving_mean:0 | |||
batchnorm2d_17/moving_var:0 | |||
conv2d_16/filters:0 | |||
batchnorm2d_18/beta:0 | |||
batchnorm2d_18/gamma:0 | |||
batchnorm2d_18/moving_mean:0 | |||
batchnorm2d_18/moving_var:0 | |||
conv_rote_block_3/filters:0 | |||
batchnorm2d_19/beta:0 | |||
batchnorm2d_19/gamma:0 | |||
batchnorm2d_19/moving_mean:0 | |||
batchnorm2d_19/moving_var:0 | |||
conv2d_17/filters:0 | |||
batchnorm2d_20/beta:0 | |||
batchnorm2d_20/gamma:0 | |||
batchnorm2d_20/moving_mean:0 | |||
batchnorm2d_20/moving_var:0 | |||
conv2d_18/filters:0 | |||
batchnorm2d_21/beta:0 | |||
batchnorm2d_21/gamma:0 | |||
batchnorm2d_21/moving_mean:0 | |||
batchnorm2d_21/moving_var:0 | |||
conv2d_19/filters:0 | |||
batchnorm2d_22/beta:0 | |||
batchnorm2d_22/gamma:0 | |||
batchnorm2d_22/moving_mean:0 | |||
batchnorm2d_22/moving_var:0 | |||
conv2d_20/filters:0 | |||
batchnorm2d_23/beta:0 | |||
batchnorm2d_23/gamma:0 | |||
batchnorm2d_23/moving_mean:0 | |||
batchnorm2d_23/moving_var:0 | |||
conv2d_21/filters:0 | |||
batchnorm2d_24/beta:0 | |||
batchnorm2d_24/gamma:0 | |||
batchnorm2d_24/moving_mean:0 | |||
batchnorm2d_24/moving_var:0 | |||
conv2d_22/filters:0 | |||
batchnorm2d_25/beta:0 | |||
batchnorm2d_25/gamma:0 | |||
batchnorm2d_25/moving_mean:0 | |||
batchnorm2d_25/moving_var:0 | |||
conv2d_23/filters:0 | |||
batchnorm2d_26/beta:0 | |||
batchnorm2d_26/gamma:0 | |||
batchnorm2d_26/moving_mean:0 | |||
batchnorm2d_26/moving_var:0 | |||
conv2d_24/filters:0 | |||
batchnorm2d_27/beta:0 | |||
batchnorm2d_27/gamma:0 | |||
batchnorm2d_27/moving_mean:0 | |||
batchnorm2d_27/moving_var:0 | |||
conv2d_25/filters:0 | |||
batchnorm2d_28/beta:0 | |||
batchnorm2d_28/gamma:0 | |||
batchnorm2d_28/moving_mean:0 | |||
batchnorm2d_28/moving_var:0 | |||
conv2d_26/filters:0 | |||
batchnorm2d_29/beta:0 | |||
batchnorm2d_29/gamma:0 | |||
batchnorm2d_29/moving_mean:0 | |||
batchnorm2d_29/moving_var:0 | |||
conv2d_27/filters:0 | |||
batchnorm2d_30/beta:0 | |||
batchnorm2d_30/gamma:0 | |||
batchnorm2d_30/moving_mean:0 | |||
batchnorm2d_30/moving_var:0 | |||
conv2d_28/filters:0 | |||
batchnorm2d_31/beta:0 | |||
batchnorm2d_31/gamma:0 | |||
batchnorm2d_31/moving_mean:0 | |||
batchnorm2d_31/moving_var:0 | |||
conv2d_29/filters:0 | |||
batchnorm2d_32/beta:0 | |||
batchnorm2d_32/gamma:0 | |||
batchnorm2d_32/moving_mean:0 | |||
batchnorm2d_32/moving_var:0 | |||
conv2d_30/filters:0 | |||
batchnorm2d_33/beta:0 | |||
batchnorm2d_33/gamma:0 | |||
batchnorm2d_33/moving_mean:0 | |||
batchnorm2d_33/moving_var:0 | |||
conv2d_31/filters:0 | |||
batchnorm2d_34/beta:0 | |||
batchnorm2d_34/gamma:0 | |||
batchnorm2d_34/moving_mean:0 | |||
batchnorm2d_34/moving_var:0 | |||
conv2d_32/filters:0 | |||
batchnorm2d_35/beta:0 | |||
batchnorm2d_35/gamma:0 | |||
batchnorm2d_35/moving_mean:0 | |||
batchnorm2d_35/moving_var:0 | |||
conv2d_33/filters:0 | |||
batchnorm2d_36/beta:0 | |||
batchnorm2d_36/gamma:0 | |||
batchnorm2d_36/moving_mean:0 | |||
batchnorm2d_36/moving_var:0 | |||
conv2d_34/filters:0 | |||
batchnorm2d_37/beta:0 | |||
batchnorm2d_37/gamma:0 | |||
batchnorm2d_37/moving_mean:0 | |||
batchnorm2d_37/moving_var:0 | |||
conv2d_35/filters:0 | |||
batchnorm2d_38/beta:0 | |||
batchnorm2d_38/gamma:0 | |||
batchnorm2d_38/moving_mean:0 | |||
batchnorm2d_38/moving_var:0 | |||
conv2d_36/filters:0 | |||
batchnorm2d_39/beta:0 | |||
batchnorm2d_39/gamma:0 | |||
batchnorm2d_39/moving_mean:0 | |||
batchnorm2d_39/moving_var:0 | |||
conv_rote_block_4/filters:0 | |||
batchnorm2d_40/beta:0 | |||
batchnorm2d_40/gamma:0 | |||
batchnorm2d_40/moving_mean:0 | |||
batchnorm2d_40/moving_var:0 | |||
conv2d_37/filters:0 | |||
batchnorm2d_41/beta:0 | |||
batchnorm2d_41/gamma:0 | |||
batchnorm2d_41/moving_mean:0 | |||
batchnorm2d_41/moving_var:0 | |||
conv2d_38/filters:0 | |||
batchnorm2d_42/beta:0 | |||
batchnorm2d_42/gamma:0 | |||
batchnorm2d_42/moving_mean:0 | |||
batchnorm2d_42/moving_var:0 | |||
conv2d_39/filters:0 | |||
batchnorm2d_43/beta:0 | |||
batchnorm2d_43/gamma:0 | |||
batchnorm2d_43/moving_mean:0 | |||
batchnorm2d_43/moving_var:0 | |||
conv2d_40/filters:0 | |||
batchnorm2d_44/beta:0 | |||
batchnorm2d_44/gamma:0 | |||
batchnorm2d_44/moving_mean:0 | |||
batchnorm2d_44/moving_var:0 | |||
conv2d_41/filters:0 | |||
batchnorm2d_45/beta:0 | |||
batchnorm2d_45/gamma:0 | |||
batchnorm2d_45/moving_mean:0 | |||
batchnorm2d_45/moving_var:0 | |||
conv2d_42/filters:0 | |||
batchnorm2d_46/beta:0 | |||
batchnorm2d_46/gamma:0 | |||
batchnorm2d_46/moving_mean:0 | |||
batchnorm2d_46/moving_var:0 | |||
conv2d_43/filters:0 | |||
batchnorm2d_47/beta:0 | |||
batchnorm2d_47/gamma:0 | |||
batchnorm2d_47/moving_mean:0 | |||
batchnorm2d_47/moving_var:0 | |||
conv2d_44/filters:0 | |||
batchnorm2d_48/beta:0 | |||
batchnorm2d_48/gamma:0 | |||
batchnorm2d_48/moving_mean:0 | |||
batchnorm2d_48/moving_var:0 | |||
conv2d_45/filters:0 | |||
batchnorm2d_49/beta:0 | |||
batchnorm2d_49/gamma:0 | |||
batchnorm2d_49/moving_mean:0 | |||
batchnorm2d_49/moving_var:0 | |||
conv2d_46/filters:0 | |||
batchnorm2d_50/beta:0 | |||
batchnorm2d_50/gamma:0 | |||
batchnorm2d_50/moving_mean:0 | |||
batchnorm2d_50/moving_var:0 | |||
conv2d_47/filters:0 | |||
batchnorm2d_51/beta:0 | |||
batchnorm2d_51/gamma:0 | |||
batchnorm2d_51/moving_mean:0 | |||
batchnorm2d_51/moving_var:0 | |||
conv2d_48/filters:0 | |||
batchnorm2d_52/beta:0 | |||
batchnorm2d_52/gamma:0 | |||
batchnorm2d_52/moving_mean:0 | |||
batchnorm2d_52/moving_var:0 | |||
conv2d_49/filters:0 | |||
batchnorm2d_53/beta:0 | |||
batchnorm2d_53/gamma:0 | |||
batchnorm2d_53/moving_mean:0 | |||
batchnorm2d_53/moving_var:0 | |||
conv2d_50/filters:0 | |||
batchnorm2d_54/beta:0 | |||
batchnorm2d_54/gamma:0 | |||
batchnorm2d_54/moving_mean:0 | |||
batchnorm2d_54/moving_var:0 | |||
conv2d_51/filters:0 | |||
batchnorm2d_55/beta:0 | |||
batchnorm2d_55/gamma:0 | |||
batchnorm2d_55/moving_mean:0 | |||
batchnorm2d_55/moving_var:0 | |||
conv2d_52/filters:0 | |||
batchnorm2d_56/beta:0 | |||
batchnorm2d_56/gamma:0 | |||
batchnorm2d_56/moving_mean:0 | |||
batchnorm2d_56/moving_var:0 | |||
conv2d_53/filters:0 | |||
batchnorm2d_57/beta:0 | |||
batchnorm2d_57/gamma:0 | |||
batchnorm2d_57/moving_mean:0 | |||
batchnorm2d_57/moving_var:0 | |||
conv2d_54/filters:0 | |||
batchnorm2d_58/beta:0 | |||
batchnorm2d_58/gamma:0 | |||
batchnorm2d_58/moving_mean:0 | |||
batchnorm2d_58/moving_var:0 | |||
conv2d_55/filters:0 | |||
batchnorm2d_59/beta:0 | |||
batchnorm2d_59/gamma:0 | |||
batchnorm2d_59/moving_mean:0 | |||
batchnorm2d_59/moving_var:0 | |||
conv2d_56/filters:0 | |||
batchnorm2d_60/beta:0 | |||
batchnorm2d_60/gamma:0 | |||
batchnorm2d_60/moving_mean:0 | |||
batchnorm2d_60/moving_var:0 | |||
conv_rote_block_5/filters:0 | |||
batchnorm2d_61/beta:0 | |||
batchnorm2d_61/gamma:0 | |||
batchnorm2d_61/moving_mean:0 | |||
batchnorm2d_61/moving_var:0 | |||
conv2d_57/filters:0 | |||
batchnorm2d_62/beta:0 | |||
batchnorm2d_62/gamma:0 | |||
batchnorm2d_62/moving_mean:0 | |||
batchnorm2d_62/moving_var:0 | |||
conv2d_58/filters:0 | |||
batchnorm2d_63/beta:0 | |||
batchnorm2d_63/gamma:0 | |||
batchnorm2d_63/moving_mean:0 | |||
batchnorm2d_63/moving_var:0 | |||
conv2d_59/filters:0 | |||
batchnorm2d_64/beta:0 | |||
batchnorm2d_64/gamma:0 | |||
batchnorm2d_64/moving_mean:0 | |||
batchnorm2d_64/moving_var:0 | |||
conv2d_60/filters:0 | |||
batchnorm2d_65/beta:0 | |||
batchnorm2d_65/gamma:0 | |||
batchnorm2d_65/moving_mean:0 | |||
batchnorm2d_65/moving_var:0 | |||
conv2d_61/filters:0 | |||
batchnorm2d_66/beta:0 | |||
batchnorm2d_66/gamma:0 | |||
batchnorm2d_66/moving_mean:0 | |||
batchnorm2d_66/moving_var:0 | |||
conv2d_62/filters:0 | |||
batchnorm2d_67/beta:0 | |||
batchnorm2d_67/gamma:0 | |||
batchnorm2d_67/moving_mean:0 | |||
batchnorm2d_67/moving_var:0 | |||
conv2d_63/filters:0 | |||
batchnorm2d_68/beta:0 | |||
batchnorm2d_68/gamma:0 | |||
batchnorm2d_68/moving_mean:0 | |||
batchnorm2d_68/moving_var:0 | |||
conv2d_64/filters:0 | |||
batchnorm2d_69/beta:0 | |||
batchnorm2d_69/gamma:0 | |||
batchnorm2d_69/moving_mean:0 | |||
batchnorm2d_69/moving_var:0 | |||
conv2d_65/filters:0 | |||
batchnorm2d_70/beta:0 | |||
batchnorm2d_70/gamma:0 | |||
batchnorm2d_70/moving_mean:0 | |||
batchnorm2d_70/moving_var:0 | |||
conv2d_66/filters:0 | |||
batchnorm2d_71/beta:0 | |||
batchnorm2d_71/gamma:0 | |||
batchnorm2d_71/moving_mean:0 | |||
batchnorm2d_71/moving_var:0 | |||
conv2d_67/filters:0 | |||
batchnorm2d_72/beta:0 | |||
batchnorm2d_72/gamma:0 | |||
batchnorm2d_72/moving_mean:0 | |||
batchnorm2d_72/moving_var:0 | |||
conv2d_68/filters:0 | |||
batchnorm2d_73/beta:0 | |||
batchnorm2d_73/gamma:0 | |||
batchnorm2d_73/moving_mean:0 | |||
batchnorm2d_73/moving_var:0 | |||
conv2d_69/filters:0 | |||
batchnorm2d_74/beta:0 | |||
batchnorm2d_74/gamma:0 | |||
batchnorm2d_74/moving_mean:0 | |||
batchnorm2d_74/moving_var:0 | |||
conv2d_70/filters:0 | |||
batchnorm2d_75/beta:0 | |||
batchnorm2d_75/gamma:0 | |||
batchnorm2d_75/moving_mean:0 | |||
batchnorm2d_75/moving_var:0 | |||
conv2d_71/filters:0 | |||
batchnorm2d_76/beta:0 | |||
batchnorm2d_76/gamma:0 | |||
batchnorm2d_76/moving_mean:0 | |||
batchnorm2d_76/moving_var:0 | |||
conv2d_72/filters:0 | |||
batchnorm2d_77/beta:0 | |||
batchnorm2d_77/gamma:0 | |||
batchnorm2d_77/moving_mean:0 | |||
batchnorm2d_77/moving_var:0 | |||
conv2d_73/filters:0 | |||
batchnorm2d_78/beta:0 | |||
batchnorm2d_78/gamma:0 | |||
batchnorm2d_78/moving_mean:0 | |||
batchnorm2d_78/moving_var:0 | |||
conv2d_74/filters:0 | |||
batchnorm2d_79/beta:0 | |||
batchnorm2d_79/gamma:0 | |||
batchnorm2d_79/moving_mean:0 | |||
batchnorm2d_79/moving_var:0 | |||
conv_yolo_1/filters:0 | |||
batchnorm2d_80/beta:0 | |||
batchnorm2d_80/gamma:0 | |||
batchnorm2d_80/moving_mean:0 | |||
batchnorm2d_80/moving_var:0 | |||
conv2d_75/filters:0 | |||
batchnorm2d_81/beta:0 | |||
batchnorm2d_81/gamma:0 | |||
batchnorm2d_81/moving_mean:0 | |||
batchnorm2d_81/moving_var:0 | |||
conv2d_76/filters:0 | |||
batchnorm2d_82/beta:0 | |||
batchnorm2d_82/gamma:0 | |||
batchnorm2d_82/moving_mean:0 | |||
batchnorm2d_82/moving_var:0 | |||
conv2d_77/filters:0 | |||
batchnorm2d_83/beta:0 | |||
batchnorm2d_83/gamma:0 | |||
batchnorm2d_83/moving_mean:0 | |||
batchnorm2d_83/moving_var:0 | |||
conv2d_78/filters:0 | |||
batchnorm2d_84/beta:0 | |||
batchnorm2d_84/gamma:0 | |||
batchnorm2d_84/moving_mean:0 | |||
batchnorm2d_84/moving_var:0 | |||
conv2d_79/filters:0 | |||
batchnorm2d_85/beta:0 | |||
batchnorm2d_85/gamma:0 | |||
batchnorm2d_85/moving_mean:0 | |||
batchnorm2d_85/moving_var:0 | |||
conv2d_80/filters:0 | |||
batchnorm2d_86/beta:0 | |||
batchnorm2d_86/gamma:0 | |||
batchnorm2d_86/moving_mean:0 | |||
batchnorm2d_86/moving_var:0 | |||
conv_yolo_2/filters:0 | |||
batchnorm2d_87/beta:0 | |||
batchnorm2d_87/gamma:0 | |||
batchnorm2d_87/moving_mean:0 | |||
batchnorm2d_87/moving_var:0 | |||
conv2d_81/filters:0 | |||
batchnorm2d_88/beta:0 | |||
batchnorm2d_88/gamma:0 | |||
batchnorm2d_88/moving_mean:0 | |||
batchnorm2d_88/moving_var:0 | |||
conv2d_82/filters:0 | |||
batchnorm2d_89/beta:0 | |||
batchnorm2d_89/gamma:0 | |||
batchnorm2d_89/moving_mean:0 | |||
batchnorm2d_89/moving_var:0 | |||
conv2d_83/filters:0 | |||
batchnorm2d_90/beta:0 | |||
batchnorm2d_90/gamma:0 | |||
batchnorm2d_90/moving_mean:0 | |||
batchnorm2d_90/moving_var:0 | |||
conv2d_84/filters:0 | |||
batchnorm2d_91/beta:0 | |||
batchnorm2d_91/gamma:0 | |||
batchnorm2d_91/moving_mean:0 | |||
batchnorm2d_91/moving_var:0 | |||
conv2d_85/filters:0 | |||
batchnorm2d_92/beta:0 | |||
batchnorm2d_92/gamma:0 | |||
batchnorm2d_92/moving_mean:0 | |||
batchnorm2d_92/moving_var:0 | |||
conv_route_1/filters:0 | |||
batchnorm2d_93/beta:0 | |||
batchnorm2d_93/gamma:0 | |||
batchnorm2d_93/moving_mean:0 | |||
batchnorm2d_93/moving_var:0 | |||
conv2d_86/filters:0 | |||
conv2d_86/biases:0 | |||
conv_route_2/filters:0 | |||
batchnorm2d_94/beta:0 | |||
batchnorm2d_94/gamma:0 | |||
batchnorm2d_94/moving_mean:0 | |||
batchnorm2d_94/moving_var:0 | |||
conv2d_87/filters:0 | |||
batchnorm2d_95/beta:0 | |||
batchnorm2d_95/gamma:0 | |||
batchnorm2d_95/moving_mean:0 | |||
batchnorm2d_95/moving_var:0 | |||
conv2d_88/filters:0 | |||
batchnorm2d_96/beta:0 | |||
batchnorm2d_96/gamma:0 | |||
batchnorm2d_96/moving_mean:0 | |||
batchnorm2d_96/moving_var:0 | |||
conv2d_89/filters:0 | |||
batchnorm2d_97/beta:0 | |||
batchnorm2d_97/gamma:0 | |||
batchnorm2d_97/moving_mean:0 | |||
batchnorm2d_97/moving_var:0 | |||
conv2d_90/filters:0 | |||
batchnorm2d_98/beta:0 | |||
batchnorm2d_98/gamma:0 | |||
batchnorm2d_98/moving_mean:0 | |||
batchnorm2d_98/moving_var:0 | |||
conv2d_91/filters:0 | |||
batchnorm2d_99/beta:0 | |||
batchnorm2d_99/gamma:0 | |||
batchnorm2d_99/moving_mean:0 | |||
batchnorm2d_99/moving_var:0 | |||
conv_route_3/filters:0 | |||
batchnorm2d_100/beta:0 | |||
batchnorm2d_100/gamma:0 | |||
batchnorm2d_100/moving_mean:0 | |||
batchnorm2d_100/moving_var:0 | |||
conv2d_92/filters:0 | |||
conv2d_92/biases:0 | |||
conv_route_4/filters:0 | |||
batchnorm2d_101/beta:0 | |||
batchnorm2d_101/gamma:0 | |||
batchnorm2d_101/moving_mean:0 | |||
batchnorm2d_101/moving_var:0 | |||
conv2d_93/filters:0 | |||
batchnorm2d_102/beta:0 | |||
batchnorm2d_102/gamma:0 | |||
batchnorm2d_102/moving_mean:0 | |||
batchnorm2d_102/moving_var:0 | |||
conv2d_94/filters:0 | |||
batchnorm2d_103/beta:0 | |||
batchnorm2d_103/gamma:0 | |||
batchnorm2d_103/moving_mean:0 | |||
batchnorm2d_103/moving_var:0 | |||
conv2d_95/filters:0 | |||
batchnorm2d_104/beta:0 | |||
batchnorm2d_104/gamma:0 | |||
batchnorm2d_104/moving_mean:0 | |||
batchnorm2d_104/moving_var:0 | |||
conv2d_96/filters:0 | |||
batchnorm2d_105/beta:0 | |||
batchnorm2d_105/gamma:0 | |||
batchnorm2d_105/moving_mean:0 | |||
batchnorm2d_105/moving_var:0 | |||
conv2d_97/filters:0 | |||
batchnorm2d_106/beta:0 | |||
batchnorm2d_106/gamma:0 | |||
batchnorm2d_106/moving_mean:0 | |||
batchnorm2d_106/moving_var:0 | |||
conv2d_98/filters:0 | |||
batchnorm2d_107/beta:0 | |||
batchnorm2d_107/gamma:0 | |||
batchnorm2d_107/moving_mean:0 | |||
batchnorm2d_107/moving_var:0 | |||
conv2d_99/filters:0 | |||
conv2d_99/biases:0 |
@@ -0,0 +1,541 @@ | |||
layer_with_weights-0/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-1/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-1/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-1/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-1/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-2/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-3/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-3/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-3/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-3/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-11/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-13/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-13/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-13/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-13/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-4/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-5/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-5/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-5/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-5/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-6/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-7/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-7/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-7/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-7/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-8/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-9/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-9/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-9/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-9/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-10/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-12/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-12/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-12/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-12/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-14/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-15/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-15/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-15/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-15/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-16/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-17/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-17/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-17/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-17/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-29/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-31/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-31/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-31/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-31/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-18/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-19/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-19/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-19/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-19/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-20/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-21/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-21/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-21/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-21/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-22/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-23/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-23/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-23/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-23/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-24/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-25/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-25/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-25/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-25/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-26/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-27/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-27/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-27/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-27/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-28/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-30/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-30/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-30/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-30/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-32/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-33/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-33/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-33/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-33/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-34/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-35/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-35/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-35/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-35/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-71/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-73/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-73/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-73/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-73/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-36/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-37/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-37/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-37/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-37/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-38/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-39/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-39/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-39/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-39/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-40/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-41/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-41/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-41/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-41/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-42/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-43/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-43/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-43/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-43/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-44/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-45/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-45/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-45/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-45/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-46/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-47/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-47/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-47/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-47/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-48/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-49/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-49/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-49/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-49/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-50/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-51/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-51/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-51/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-51/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-52/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-53/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-53/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-53/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-53/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-54/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-55/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-55/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-55/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-55/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-56/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-57/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-57/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-57/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-57/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-58/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-59/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-59/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-59/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-59/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-60/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-61/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-61/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-61/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-61/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-62/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-63/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-63/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-63/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-63/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-64/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-65/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-65/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-65/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-65/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-66/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-67/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-67/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-67/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-67/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-68/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-69/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-69/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-69/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-69/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-70/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-72/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-72/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-72/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-72/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-74/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-75/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-75/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-75/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-75/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-76/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-77/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-77/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-77/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-77/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-113/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-115/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-115/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-115/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-115/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-78/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-79/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-79/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-79/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-79/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-80/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-81/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-81/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-81/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-81/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-82/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-83/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-83/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-83/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-83/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-84/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-85/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-85/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-85/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-85/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-86/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-87/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-87/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-87/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-87/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-88/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-89/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-89/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-89/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-89/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-90/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-91/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-91/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-91/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-91/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-92/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-93/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-93/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-93/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-93/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-94/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-95/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-95/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-95/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-95/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-96/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-97/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-97/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-97/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-97/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-98/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-99/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-99/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-99/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-99/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-100/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-101/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-101/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-101/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-101/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-102/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-103/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-103/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-103/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-103/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-104/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-105/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-105/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-105/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-105/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-106/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-107/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-107/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-107/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-107/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-108/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-109/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-109/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-109/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-109/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-110/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-111/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-111/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-111/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-111/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-112/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-114/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-114/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-114/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-114/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-116/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-117/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-117/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-117/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-117/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-118/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-119/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-119/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-119/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-119/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-139/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-141/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-141/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-141/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-141/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-120/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-121/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-121/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-121/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-121/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-122/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-123/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-123/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-123/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-123/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-124/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-125/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-125/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-125/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-125/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-126/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-127/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-127/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-127/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-127/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-128/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-129/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-129/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-129/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-129/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-130/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-131/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-131/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-131/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-131/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-132/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-133/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-133/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-133/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-133/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-134/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-135/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-135/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-135/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-135/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-136/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-137/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-137/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-137/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-137/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-138/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-140/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-140/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-140/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-140/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-142/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-143/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-143/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-143/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-143/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-144/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-145/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-145/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-145/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-145/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-146/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-147/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-147/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-147/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-147/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-148/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-149/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-149/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-149/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-149/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-150/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-151/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-151/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-151/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-151/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-152/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-153/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-153/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-153/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-153/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-154/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-155/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-155/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-155/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-155/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-156/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-158/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-158/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-158/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-158/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-157/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-159/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-159/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-159/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-159/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-160/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-161/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-161/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-161/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-161/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-162/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-163/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-163/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-163/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-163/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-164/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-165/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-165/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-165/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-165/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-166/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-167/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-167/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-167/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-167/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-168/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-169/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-169/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-169/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-169/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-170/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-172/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-172/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-172/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-172/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-171/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-173/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-173/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-173/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-173/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-174/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-175/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-175/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-175/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-175/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-176/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-177/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-177/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-177/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-177/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-178/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-179/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-179/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-179/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-179/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-180/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-181/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-181/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-181/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-181/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-182/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-183/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-183/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-183/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-183/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-208/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-211/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-211/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-211/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-211/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-214/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-214/bias/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-184/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-185/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-185/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-185/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-185/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-186/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-187/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-187/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-187/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-187/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-188/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-189/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-189/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-189/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-189/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-190/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-191/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-191/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-191/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-191/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-192/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-193/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-193/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-193/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-193/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-194/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-195/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-195/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-195/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-195/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-209/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-212/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-212/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-212/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-212/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-215/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-215/bias/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-196/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-197/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-197/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-197/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-197/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-198/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-199/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-199/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-199/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-199/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-200/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-201/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-201/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-201/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-201/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-202/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-203/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-203/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-203/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-203/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-204/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-205/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-205/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-205/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-205/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-206/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-207/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-207/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-207/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-207/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-210/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-213/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-213/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-213/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-213/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-216/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-216/bias/.ATTRIBUTES/VARIABLE_VALUE |
@@ -0,0 +1,541 @@ | |||
layer_with_weights-0/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-1/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-1/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-1/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-1/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-2/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-3/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-3/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-3/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-3/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-11/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-4/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-13/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-13/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-13/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-13/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-5/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-5/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-5/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-5/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-6/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-7/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-7/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-7/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-7/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-8/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-9/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-9/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-9/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-9/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-10/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-12/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-12/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-12/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-12/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-14/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-15/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-15/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-15/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-15/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-16/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-17/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-17/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-17/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-17/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-29/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-18/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-31/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-31/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-31/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-31/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-19/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-19/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-19/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-19/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-20/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-21/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-21/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-21/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-21/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-22/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-23/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-23/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-23/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-23/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-24/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-25/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-25/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-25/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-25/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-26/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-27/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-27/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-27/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-27/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-28/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-30/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-30/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-30/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-30/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-32/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-33/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-33/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-33/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-33/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-34/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-35/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-35/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-35/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-35/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-71/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-36/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-73/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-73/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-73/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-73/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-37/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-37/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-37/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-37/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-38/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-39/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-39/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-39/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-39/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-40/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-41/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-41/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-41/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-41/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-42/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-43/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-43/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-43/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-43/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-44/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-45/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-45/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-45/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-45/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-46/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-47/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-47/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-47/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-47/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-48/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-49/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-49/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-49/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-49/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-50/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-51/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-51/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-51/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-51/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-52/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-53/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-53/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-53/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-53/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-54/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-55/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-55/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-55/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-55/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-56/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-57/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-57/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-57/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-57/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-58/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-59/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-59/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-59/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-59/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-60/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-61/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-61/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-61/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-61/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-62/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-63/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-63/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-63/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-63/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-64/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-65/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-65/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-65/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-65/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-66/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-67/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-67/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-67/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-67/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-68/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-69/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-69/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-69/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-69/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-70/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-72/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-72/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-72/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-72/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-74/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-75/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-75/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-75/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-75/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-171/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-173/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-173/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-173/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-173/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-76/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-77/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-77/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-77/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-77/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-113/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-78/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-115/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-115/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-115/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-115/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-79/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-79/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-79/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-79/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-80/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-81/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-81/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-81/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-81/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-82/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-83/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-83/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-83/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-83/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-84/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-85/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-85/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-85/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-85/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-86/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-87/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-87/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-87/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-87/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-88/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-89/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-89/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-89/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-89/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-90/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-91/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-91/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-91/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-91/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-92/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-93/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-93/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-93/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-93/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-94/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-95/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-95/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-95/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-95/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-96/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-97/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-97/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-97/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-97/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-98/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-99/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-99/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-99/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-99/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-100/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-101/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-101/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-101/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-101/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-102/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-103/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-103/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-103/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-103/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-104/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-105/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-105/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-105/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-105/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-106/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-107/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-107/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-107/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-107/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-108/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-109/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-109/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-109/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-109/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-110/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-111/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-111/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-111/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-111/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-112/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-114/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-114/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-114/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-114/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-116/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-117/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-117/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-117/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-117/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-157/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-159/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-159/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-159/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-159/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-118/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-119/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-119/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-119/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-119/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-139/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-120/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-141/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-141/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-141/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-141/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-121/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-121/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-121/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-121/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-122/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-123/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-123/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-123/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-123/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-124/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-125/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-125/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-125/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-125/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-126/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-127/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-127/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-127/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-127/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-128/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-129/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-129/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-129/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-129/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-130/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-131/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-131/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-131/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-131/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-132/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-133/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-133/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-133/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-133/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-134/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-135/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-135/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-135/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-135/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-136/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-137/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-137/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-137/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-137/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-138/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-140/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-140/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-140/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-140/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-142/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-143/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-143/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-143/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-143/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-144/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-145/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-145/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-145/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-145/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-146/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-147/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-147/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-147/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-147/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-148/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-149/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-149/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-149/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-149/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-150/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-151/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-151/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-151/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-151/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-152/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-153/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-153/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-153/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-153/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-154/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-155/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-155/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-155/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-155/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-156/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-158/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-158/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-158/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-158/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-160/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-161/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-161/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-161/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-161/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-162/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-163/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-163/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-163/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-163/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-164/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-165/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-165/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-165/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-165/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-166/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-167/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-167/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-167/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-167/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-168/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-169/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-169/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-169/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-169/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-170/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-172/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-172/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-172/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-172/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-174/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-175/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-175/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-175/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-175/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-176/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-177/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-177/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-177/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-177/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-178/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-179/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-179/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-179/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-179/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-180/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-181/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-181/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-181/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-181/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-182/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-183/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-183/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-183/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-183/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-208/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-211/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-211/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-211/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-211/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-184/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-214/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-214/bias/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-185/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-185/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-185/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-185/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-186/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-187/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-187/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-187/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-187/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-188/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-189/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-189/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-189/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-189/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-190/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-191/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-191/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-191/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-191/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-192/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-193/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-193/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-193/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-193/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-194/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-195/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-195/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-195/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-195/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-209/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-212/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-212/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-212/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-212/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-196/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-215/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-215/bias/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-197/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-197/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-197/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-197/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-198/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-199/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-199/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-199/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-199/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-200/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-201/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-201/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-201/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-201/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-202/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-203/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-203/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-203/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-203/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-204/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-205/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-205/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-205/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-205/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-206/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-207/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-207/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-207/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-207/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-210/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-213/beta/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-213/gamma/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-213/moving_mean/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-213/moving_variance/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-216/kernel/.ATTRIBUTES/VARIABLE_VALUE | |||
layer_with_weights-216/bias/.ATTRIBUTES/VARIABLE_VALUE |
@@ -0,0 +1,32 @@ | |||
#! /usr/bin/python | |||
# -*- coding: utf-8 -*- | |||
""" | |||
ResNet50 for ImageNet using TL models | |||
""" | |||
import time | |||
import numpy as np | |||
import tensorlayer as tl | |||
from examples.model_zoo.imagenet_classes import class_names | |||
from examples.model_zoo.resnet import ResNet50 | |||
tl.logging.set_verbosity(tl.logging.DEBUG) | |||
# get the whole model with pretrained weights
resnet = ResNet50(pretrained=True)
resnet.set_eval()
img1 = tl.vis.read_image('data/tiger.jpeg')
img1 = tl.prepro.imresize(img1, (224, 224))[:, :, ::-1]  # resize and convert RGB to BGR
img1 = img1 - np.array([103.939, 116.779, 123.68]).reshape((1, 1, 3))  # subtract the ImageNet mean (BGR order)
img1 = img1.astype(np.float32)[np.newaxis, ...]  # add the batch dimension
start_time = time.time() | |||
output = resnet(img1) | |||
prob = tl.ops.softmax(output)[0].numpy() | |||
print(" End time : %.5ss" % (time.time() - start_time)) | |||
preds = (np.argsort(prob)[::-1])[0:5] | |||
for p in preds: | |||
print(class_names[p], prob[p]) |
@@ -0,0 +1,29 @@ | |||
#! /usr/bin/python | |||
# -*- coding: utf-8 -*- | |||
"""VGG-16 for ImageNet using TL models.""" | |||
import time | |||
import numpy as np | |||
import tensorflow as tf | |||
import tensorlayer as tl | |||
from examples.model_zoo.imagenet_classes import class_names | |||
from examples.model_zoo.vgg import vgg16 | |||
tl.logging.set_verbosity(tl.logging.DEBUG) | |||
# get the whole model | |||
vgg = vgg16(pretrained=True) | |||
vgg.set_eval() | |||
img = tl.vis.read_image('data/tiger.jpeg')
img = tl.prepro.imresize(img, (224, 224)).astype(np.float32) / 255  # scale to [0, 1]; the model rescales internally
start_time = time.time() | |||
output = vgg(img) | |||
probs = tf.nn.softmax(output)[0].numpy() | |||
print(" End time : %.5ss" % (time.time() - start_time)) | |||
preds = (np.argsort(probs)[::-1])[0:5] | |||
for p in preds: | |||
print(class_names[p], probs[p]) |
@@ -0,0 +1,28 @@ | |||
import numpy as np | |||
import cv2 | |||
from PIL import Image | |||
from examples.model_zoo.common import yolo4_input_processing, yolo4_output_processing, \ | |||
result_to_json, read_class_names, draw_boxes_and_labels_to_image_with_json | |||
from examples.model_zoo.yolo import YOLOv4 | |||
import tensorlayer as tl | |||
tl.logging.set_verbosity(tl.logging.DEBUG) | |||
INPUT_SIZE = 416 | |||
image_path = './data/kite.jpg' | |||
class_names = read_class_names('./model/coco.names') | |||
original_image = cv2.imread(image_path)  # OpenCV loads images in BGR order
image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)  # convert to RGB for drawing
model = YOLOv4(NUM_CLASS=80, pretrained=True) | |||
model.set_eval() | |||
batch_data = yolo4_input_processing(original_image)  # preprocess to the 416x416 network input
feature_maps = model(batch_data)  # predictions at three scales
pred_bbox = yolo4_output_processing(feature_maps)  # decode predictions into boxes
json_result = result_to_json(image, pred_bbox) | |||
image = draw_boxes_and_labels_to_image_with_json(image, json_result, class_names) | |||
image = Image.fromarray(image.astype(np.uint8)) | |||
image.show() |
@@ -0,0 +1,225 @@ | |||
#! /usr/bin/python | |||
# -*- coding: utf-8 -*- | |||
"""ResNet for ImageNet. | |||
# Reference: | |||
- [Deep Residual Learning for Image Recognition]( | |||
https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award) | |||
""" | |||
import os | |||
import tensorlayer as tl | |||
from tensorlayer import logging | |||
from tensorlayer.files import (assign_weights, maybe_download_and_extract) | |||
from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Elementwise, GlobalMeanPool2d, Input, MaxPool2d) | |||
from tensorlayer.layers import Module, SequentialLayer | |||
__all__ = [ | |||
'ResNet50', | |||
] | |||
block_names = ['2a', '2b', '2c', '3a', '3b', '3c', '3d', '4a', '4b', '4c', '4d', '4e', '4f', '5a', '5b', '5c' | |||
] + ['avg_pool', 'fc1000'] | |||
block_filters = [[64, 64, 256], [128, 128, 512], [256, 256, 1024], [512, 512, 2048]] | |||
in_channels_conv = [64, 256, 512, 1024] | |||
in_channels_identity = [256, 512, 1024, 2048] | |||
henorm = tl.initializers.he_normal() | |||
class identity_block(Module): | |||
"""The identity block where there is no conv layer at shortcut. | |||
Parameters | |||
---------- | |||
input : tf tensor | |||
Input tensor from above layer. | |||
kernel_size : int | |||
The kernel size of middle conv layer at main path. | |||
n_filters : list of integers | |||
The numbers of filters for 3 conv layer at main path. | |||
stage : int | |||
Current stage label. | |||
block : str | |||
Current block label. | |||
Returns | |||
------- | |||
Output tensor of this block. | |||
""" | |||
def __init__(self, kernel_size, n_filters, stage, block): | |||
super(identity_block, self).__init__() | |||
filters1, filters2, filters3 = n_filters | |||
_in_channels = in_channels_identity[stage-2] | |||
conv_name_base = 'res' + str(stage) + block + '_branch' | |||
bn_name_base = 'bn' + str(stage) + block + '_branch' | |||
self.conv1 = Conv2d(filters1, (1, 1), W_init=henorm, name=conv_name_base + '2a', in_channels=_in_channels) | |||
self.bn1 = BatchNorm(name=bn_name_base + '2a', act='relu', num_features=filters1) | |||
ks = (kernel_size, kernel_size) | |||
self.conv2 = Conv2d(filters2, ks, padding='SAME', W_init=henorm, name=conv_name_base + '2b', in_channels=filters1) | |||
self.bn2 = BatchNorm(name=bn_name_base + '2b', act='relu', num_features=filters2) | |||
self.conv3 = Conv2d(filters3, (1, 1), W_init=henorm, name=conv_name_base + '2c', in_channels=filters2) | |||
self.bn3 = BatchNorm(name=bn_name_base + '2c', num_features=filters3) | |||
self.add = Elementwise(tl.add, act='relu') | |||
def forward(self, inputs): | |||
output = self.conv1(inputs) | |||
output = self.bn1(output) | |||
output = self.conv2(output) | |||
output = self.bn2(output) | |||
output = self.conv3(output) | |||
output = self.bn3(output) | |||
result = self.add([output, inputs]) | |||
return result | |||
class conv_block(Module): | |||
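    """The conv block, which has a conv layer on the shortcut path to match
    dimensions. Takes the same arguments as ``identity_block``, plus ``strides``
    for the first conv layer and the shortcut conv layer.
    """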
def __init__(self, kernel_size, n_filters, stage, block, strides=(2, 2)): | |||
super(conv_block, self).__init__() | |||
filters1, filters2, filters3 = n_filters | |||
_in_channels = in_channels_conv[stage-2] | |||
conv_name_base = 'res' + str(stage) + block + '_branch' | |||
bn_name_base = 'bn' + str(stage) + block + '_branch' | |||
self.conv1 = Conv2d(filters1, (1, 1), strides=strides, W_init=henorm, name=conv_name_base + '2a', in_channels=_in_channels) | |||
self.bn1 = BatchNorm(name=bn_name_base + '2a', act='relu', num_features=filters1) | |||
ks = (kernel_size, kernel_size) | |||
self.conv2 = Conv2d(filters2, ks, padding='SAME', W_init=henorm, name=conv_name_base + '2b', in_channels=filters1) | |||
self.bn2 = BatchNorm(name=bn_name_base + '2b', act='relu', num_features=filters2) | |||
self.conv3 = Conv2d(filters3, (1, 1), W_init=henorm, name=conv_name_base + '2c', in_channels=filters2) | |||
self.bn3 = BatchNorm(name=bn_name_base + '2c', num_features=filters3) | |||
self.shortcut_conv = Conv2d(filters3, (1, 1), strides=strides, W_init=henorm, name=conv_name_base + '1', in_channels=_in_channels) | |||
self.shortcut_bn = BatchNorm(name=bn_name_base + '1', num_features=filters3) | |||
self.add = Elementwise(tl.add, act='relu') | |||
def forward(self, inputs): | |||
output = self.conv1(inputs) | |||
output = self.bn1(output) | |||
output = self.conv2(output) | |||
output = self.bn2(output) | |||
output = self.conv3(output) | |||
output = self.bn3(output) | |||
shortcut = self.shortcut_conv(inputs) | |||
shortcut = self.shortcut_bn(shortcut) | |||
result = self.add([output, shortcut]) | |||
return result | |||
class ResNet50_model(Module): | |||
def __init__(self, end_with='fc1000', n_classes=1000): | |||
super(ResNet50_model, self).__init__() | |||
self.end_with = end_with | |||
self.n_classes = n_classes | |||
self.conv1 = Conv2d(64, (7, 7), in_channels=3, strides=(2, 2), padding='SAME', W_init=henorm, name='conv1') | |||
self.bn_conv1 = BatchNorm(name='bn_conv1', act="relu", num_features=64) | |||
self.max_pool1 = MaxPool2d((3, 3), strides=(2, 2), name='max_pool1') | |||
self.res_layer = self.make_layer() | |||
def forward(self, inputs): | |||
z = self.conv1(inputs) | |||
z = self.bn_conv1(z) | |||
z = self.max_pool1(z) | |||
z = self.res_layer(z) | |||
return z | |||
def make_layer(self): | |||
layer_list = [] | |||
for i, block_name in enumerate(block_names): | |||
if len(block_name) == 2: | |||
stage = int(block_name[0]) | |||
block = block_name[1] | |||
if block == 'a': | |||
strides = (1, 1) if stage == 2 else (2, 2) | |||
layer_list.append(conv_block(3, block_filters[stage - 2], stage=stage, block=block, strides=strides)) | |||
else: | |||
layer_list.append(identity_block(3, block_filters[stage - 2], stage=stage, block=block)) | |||
elif block_name == 'avg_pool': | |||
layer_list.append(GlobalMeanPool2d(name='avg_pool')) | |||
elif block_name == 'fc1000': | |||
layer_list.append(Dense(self.n_classes, name='fc1000', in_channels=2048)) | |||
if block_name == self.end_with: | |||
break | |||
return SequentialLayer(layer_list) | |||
def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000): | |||
"""Pre-trained MobileNetV1 model (static mode). Input shape [?, 224, 224, 3]. | |||
To use pretrained model, input should be in BGR format and subtracted from ImageNet mean [103.939, 116.779, 123.68]. | |||
Parameters | |||
---------- | |||
pretrained : boolean | |||
Whether to load pretrained weights. Default False. | |||
    end_with : str
        The end point of the model: one of the block names ['2a', '2b', ... '5c'],
        'avg_pool' or 'fc1000'. Default ``fc1000``, i.e. the whole model.
    n_classes : int
        Number of classes in final prediction.
Examples | |||
--------- | |||
Classify ImageNet classes, see `tutorial_models_resnet50.py` | |||
TODO Modify the usage example according to the model storage location | |||
>>> # get the whole model with pretrained weights | |||
>>> resnet = tl.models.ResNet50(pretrained=True) | |||
>>> # use for inferencing | |||
>>> output = resnet(img1, is_train=False) | |||
>>> prob = tf.nn.softmax(output)[0].numpy() | |||
Extract the features before fc layer | |||
>>> resnet = tl.models.ResNet50(pretrained=True, end_with='5c') | |||
>>> output = resnet(img1, is_train=False) | |||
Returns | |||
------- | |||
ResNet50 model. | |||
""" | |||
network = ResNet50_model(end_with=end_with, n_classes=n_classes) | |||
if pretrained: | |||
restore_params(network) | |||
return network | |||
def restore_params(network, path='models'): | |||
logging.info("Restore pre-trained parameters") | |||
maybe_download_and_extract( | |||
'resnet50_weights_tf_dim_ordering_tf_kernels.h5', | |||
path, | |||
'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/', | |||
    )
    try:
        import h5py
    except ImportError:
        raise ImportError('h5py is required to load the pretrained weights, please install it first')
f = h5py.File(os.path.join(path, 'resnet50_weights_tf_dim_ordering_tf_kernels.h5'), 'r') | |||
for layer in network.all_layers: | |||
if len(layer.all_weights) == 0: | |||
continue | |||
w_names = list(f[layer.name]) | |||
params = [f[layer.name][n][:] for n in w_names] | |||
# if 'bn' in layer.name: | |||
# params = [x.reshape(1, 1, 1, -1) for x in params] | |||
assign_weights(params, layer) | |||
del params | |||
f.close() |
@@ -0,0 +1,347 @@ | |||
#! /usr/bin/python | |||
# -*- coding: utf-8 -*- | |||
""" | |||
VGG for ImageNet. | |||
Introduction | |||
---------------- | |||
VGG is a convolutional neural network model proposed by K. Simonyan and A. Zisserman | |||
from the University of Oxford in the paper "Very Deep Convolutional Networks for | |||
Large-Scale Image Recognition" . The model achieves 92.7% top-5 test accuracy in ImageNet, | |||
which is a dataset of over 14 million images belonging to 1000 classes. | |||
Download Pre-trained Model | |||
---------------------------- | |||
- Model weights in this example - vgg16_weights.npz : http://www.cs.toronto.edu/~frossard/post/vgg16/ | |||
- Model weights in this example - vgg19.npy : https://media.githubusercontent.com/media/tensorlayer/pretrained-models/master/models/ | |||
- Caffe VGG 16 model : https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md | |||
- Tool to convert the Caffe models to TensorFlow's : https://github.com/ethereon/caffe-tensorflow | |||
Note | |||
------ | |||
- For a simplified CNN layer, see "Convolutional layer (Simplified)"
on the Read the Docs website.
- When feeding other images to the model, be sure to properly resize or crop them
beforehand. Distorted images might end up being misclassified. One way of safely
feeding images of multiple sizes is center cropping (see the sketch below this docstring).
""" | |||
import os | |||
import numpy as np | |||
import tensorlayer as tl | |||
from tensorlayer import logging | |||
from tensorlayer.files import assign_weights, maybe_download_and_extract | |||
from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Flatten, Input, SequentialLayer, MaxPool2d) | |||
from tensorlayer.layers import Module | |||
__all__ = [ | |||
'VGG', | |||
'vgg16', | |||
'vgg19', | |||
'VGG16', | |||
'VGG19', | |||
# 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', | |||
# 'vgg19_bn', 'vgg19', | |||
] | |||
layer_names = [ | |||
['conv1_1', 'conv1_2'], 'pool1', ['conv2_1', 'conv2_2'], 'pool2', | |||
['conv3_1', 'conv3_2', 'conv3_3', 'conv3_4'], 'pool3', ['conv4_1', 'conv4_2', 'conv4_3', 'conv4_4'], 'pool4', | |||
['conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'], 'pool5', 'flatten', 'fc1_relu', 'fc2_relu', 'outputs' | |||
] | |||
cfg = { | |||
'A': [[64], 'M', [128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'], | |||
'B': [[64, 64], 'M', [128, 128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'], | |||
'D': | |||
[ | |||
[64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M', [512, 512, 512], 'M', [512, 512, 512], 'M', 'F', | |||
'fc1', 'fc2', 'O' | |||
], | |||
'E': | |||
[ | |||
[64, 64], 'M', [128, 128], 'M', [256, 256, 256, 256], 'M', [512, 512, 512, 512], 'M', [512, 512, 512, 512], | |||
'M', 'F', 'fc1', 'fc2', 'O' | |||
], | |||
} | |||
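# cfg encoding: each list of ints is a group of 3x3 conv layers (values = filter
# counts); 'M' = max pool, 'F' = flatten, 'fc1'/'fc2' = 4096-unit dense layers,
# 'O' = the 1000-way output layer.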
mapped_cfg = { | |||
'vgg11': 'A', | |||
'vgg11_bn': 'A', | |||
'vgg13': 'B', | |||
'vgg13_bn': 'B', | |||
'vgg16': 'D', | |||
'vgg16_bn': 'D', | |||
'vgg19': 'E', | |||
'vgg19_bn': 'E' | |||
} | |||
model_urls = { | |||
'vgg16': 'http://www.cs.toronto.edu/~frossard/vgg16/', | |||
'vgg19': 'https://media.githubusercontent.com/media/tensorlayer/pretrained-models/master/models/' | |||
} | |||
model_saved_name = {'vgg16': 'vgg16_weights.npz', 'vgg19': 'vgg19.npy'} | |||
class VGG(Module): | |||
def __init__(self, layer_type, batch_norm=False, end_with='outputs', name=None): | |||
super(VGG, self).__init__(name=name) | |||
self.end_with = end_with | |||
config = cfg[mapped_cfg[layer_type]] | |||
self.make_layer = make_layers(config, batch_norm, end_with) | |||
def forward(self, inputs): | |||
""" | |||
inputs : tensor | |||
Shape [None, 224, 224, 3], value range [0, 1]. | |||
""" | |||
        inputs = inputs * 255 - np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3])  # rescale to [0, 255] and subtract the ImageNet RGB mean
out = self.make_layer(inputs) | |||
return out | |||
def make_layers(config, batch_norm=False, end_with='outputs'): | |||
layer_list = [] | |||
is_end = False | |||
for layer_group_idx, layer_group in enumerate(config): | |||
if isinstance(layer_group, list): | |||
for idx, layer in enumerate(layer_group): | |||
layer_name = layer_names[layer_group_idx][idx] | |||
n_filter = layer | |||
if idx == 0: | |||
if layer_group_idx > 0: | |||
in_channels = config[layer_group_idx - 2][-1] | |||
else: | |||
in_channels = 3 | |||
else: | |||
in_channels = layer_group[idx - 1] | |||
layer_list.append( | |||
Conv2d( | |||
n_filter=n_filter, filter_size=(3, 3), strides=(1, 1), act=tl.ReLU, padding='SAME', | |||
in_channels=in_channels, name=layer_name | |||
) | |||
) | |||
if batch_norm: | |||
layer_list.append(BatchNorm(num_features=n_filter)) | |||
if layer_name == end_with: | |||
is_end = True | |||
break | |||
else: | |||
layer_name = layer_names[layer_group_idx] | |||
if layer_group == 'M': | |||
layer_list.append(MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name=layer_name)) | |||
elif layer_group == 'O': | |||
layer_list.append(Dense(n_units=1000, in_channels=4096, name=layer_name)) | |||
elif layer_group == 'F': | |||
layer_list.append(Flatten(name='flatten')) | |||
elif layer_group == 'fc1': | |||
layer_list.append(Dense(n_units=4096, act=tl.ReLU, in_channels=512 * 7 * 7, name=layer_name)) | |||
elif layer_group == 'fc2': | |||
layer_list.append(Dense(n_units=4096, act=tl.ReLU, in_channels=4096, name=layer_name)) | |||
if layer_name == end_with: | |||
is_end = True | |||
if is_end: | |||
break | |||
return SequentialLayer(layer_list) | |||
def restore_model(model, layer_type): | |||
logging.info("Restore pre-trained weights") | |||
# download weights | |||
maybe_download_and_extract(model_saved_name[layer_type], 'model', model_urls[layer_type]) | |||
weights = [] | |||
if layer_type == 'vgg16': | |||
npz = np.load(os.path.join('model', model_saved_name[layer_type]), allow_pickle=True) | |||
# get weight list | |||
for val in sorted(npz.items()): | |||
logging.info(" Loading weights %s in %s" % (str(val[1].shape), val[0])) | |||
weights.append(val[1]) | |||
if len(model.all_weights) == len(weights): | |||
break | |||
elif layer_type == 'vgg19': | |||
npz = np.load(os.path.join('model', model_saved_name[layer_type]), allow_pickle=True, encoding='latin1').item() | |||
# get weight list | |||
for val in sorted(npz.items()): | |||
logging.info(" Loading %s in %s" % (str(val[1][0].shape), val[0])) | |||
logging.info(" Loading %s in %s" % (str(val[1][1].shape), val[0])) | |||
weights.extend(val[1]) | |||
if len(model.all_weights) == len(weights): | |||
break | |||
# assign weight values | |||
assign_weights(weights, model) | |||
del weights | |||
def vgg16(pretrained=False, end_with='outputs', mode='dynamic', name=None): | |||
"""Pre-trained VGG16 model. | |||
Parameters | |||
------------ | |||
pretrained : boolean | |||
Whether to load pretrained weights. Default False. | |||
    end_with : str
        The end point of the model. Default ``outputs``, i.e. the whole model.
    mode : str
        Model building mode, 'dynamic' or 'static'. Default 'dynamic'.
name : None or str | |||
A unique layer name. | |||
Examples | |||
--------- | |||
Classify ImageNet classes with VGG16, see `tutorial_models_vgg.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_vgg.py>`__ | |||
With TensorLayer | |||
TODO Modify the usage example according to the model storage location | |||
>>> # get the whole model, without pre-trained VGG parameters | |||
>>> vgg = tl.models.vgg16() | |||
>>> # get the whole model, restore pre-trained VGG parameters | |||
>>> vgg = tl.models.vgg16(pretrained=True) | |||
>>> # use for inferencing | |||
>>> output = vgg(img, is_train=False) | |||
>>> probs = tf.nn.softmax(output)[0].numpy() | |||
Extract features with VGG16 and Train a classifier with 100 classes | |||
>>> # get VGG without the last layer | |||
>>> cnn = tl.models.vgg16(end_with='fc2_relu', mode='static').as_layer() | |||
>>> # add one more layer and build a new model | |||
>>> ni = Input([None, 224, 224, 3], name="inputs") | |||
>>> nn = cnn(ni) | |||
>>> nn = tl.layers.Dense(n_units=100, name='out')(nn) | |||
>>> model = tl.models.Model(inputs=ni, outputs=nn) | |||
>>> # train your own classifier (only update the last layer) | |||
>>> train_params = model.get_layer('out').trainable_weights | |||
Reuse model | |||
>>> # in dynamic model, we can directly use the same model | |||
>>> # in static model | |||
>>> vgg_layer = tl.models.vgg16().as_layer() | |||
>>> ni_1 = tl.layers.Input([None, 224, 244, 3]) | |||
>>> ni_2 = tl.layers.Input([None, 224, 244, 3]) | |||
>>> a_1 = vgg_layer(ni_1) | |||
>>> a_2 = vgg_layer(ni_2) | |||
>>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2]) | |||
""" | |||
if mode == 'dynamic': | |||
model = VGG(layer_type='vgg16', batch_norm=False, end_with=end_with, name=name) | |||
elif mode == 'static': | |||
raise NotImplementedError | |||
else: | |||
raise Exception("No such mode %s" % mode) | |||
if pretrained: | |||
restore_model(model, layer_type='vgg16') | |||
return model | |||
def vgg19(pretrained=False, end_with='outputs', mode='dynamic', name=None): | |||
"""Pre-trained VGG19 model. | |||
Parameters | |||
------------ | |||
pretrained : boolean | |||
Whether to load pretrained weights. Default False. | |||
    end_with : str
        The end point of the model. Default ``outputs``, i.e. the whole model.
    mode : str
        Model building mode, 'dynamic' or 'static'. Default 'dynamic'.
name : None or str | |||
A unique layer name. | |||
Examples | |||
--------- | |||
Classify ImageNet classes with VGG19, see `tutorial_models_vgg.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_vgg.py>`__ | |||
With TensorLayer | |||
>>> # get the whole model, without pre-trained VGG parameters | |||
>>> vgg = tl.models.vgg19() | |||
>>> # get the whole model, restore pre-trained VGG parameters | |||
>>> vgg = tl.models.vgg19(pretrained=True) | |||
>>> # use for inferencing | |||
>>> output = vgg(img, is_train=False) | |||
>>> probs = tf.nn.softmax(output)[0].numpy() | |||
Extract features with VGG19 and Train a classifier with 100 classes | |||
>>> # get VGG without the last layer | |||
>>> cnn = tl.models.vgg19(end_with='fc2_relu', mode='static').as_layer() | |||
>>> # add one more layer and build a new model | |||
>>> ni = Input([None, 224, 224, 3], name="inputs") | |||
>>> nn = cnn(ni) | |||
>>> nn = tl.layers.Dense(n_units=100, name='out')(nn) | |||
>>> model = tl.models.Model(inputs=ni, outputs=nn) | |||
>>> # train your own classifier (only update the last layer) | |||
>>> train_params = model.get_layer('out').trainable_weights | |||
Reuse model | |||
>>> # in dynamic model, we can directly use the same model | |||
>>> # in static model | |||
>>> vgg_layer = tl.models.vgg19().as_layer() | |||
>>> ni_1 = tl.layers.Input([None, 224, 244, 3]) | |||
>>> ni_2 = tl.layers.Input([None, 224, 244, 3]) | |||
>>> a_1 = vgg_layer(ni_1) | |||
>>> a_2 = vgg_layer(ni_2) | |||
>>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2]) | |||
""" | |||
if mode == 'dynamic': | |||
model = VGG(layer_type='vgg19', batch_norm=False, end_with=end_with, name=name) | |||
elif mode == 'static': | |||
raise NotImplementedError | |||
else: | |||
raise Exception("No such mode %s" % mode) | |||
if pretrained: | |||
restore_model(model, layer_type='vgg19') | |||
return model | |||
VGG16 = vgg16 | |||
VGG19 = vgg19 | |||
# models without pretrained parameters | |||
# def vgg11(pretrained=False, end_with='outputs'): | |||
# model = VGG(layer_type='vgg11', batch_norm=False, end_with=end_with) | |||
# if pretrained: | |||
# model.restore_weights() | |||
# return model | |||
# | |||
# | |||
# def vgg11_bn(pretrained=False, end_with='outputs'): | |||
# model = VGG(layer_type='vgg11_bn', batch_norm=True, end_with=end_with) | |||
# if pretrained: | |||
# model.restore_weights() | |||
# return model | |||
# | |||
# | |||
# def vgg13(pretrained=False, end_with='outputs'): | |||
# model = VGG(layer_type='vgg13', batch_norm=False, end_with=end_with) | |||
# if pretrained: | |||
# model.restore_weights() | |||
# return model | |||
# | |||
# | |||
# def vgg13_bn(pretrained=False, end_with='outputs'): | |||
# model = VGG(layer_type='vgg13_bn', batch_norm=True, end_with=end_with) | |||
# if pretrained: | |||
# model.restore_weights() | |||
# return model | |||
# | |||
# | |||
# def vgg16_bn(pretrained=False, end_with='outputs'): | |||
# model = VGG(layer_type='vgg16_bn', batch_norm=True, end_with=end_with) | |||
# if pretrained: | |||
# model.restore_weights() | |||
# return model | |||
# | |||
# | |||
# def vgg19_bn(pretrained=False, end_with='outputs'): | |||
# model = VGG(layer_type='vgg19_bn', batch_norm=True, end_with=end_with) | |||
# if pretrained: | |||
# model.restore_weights() | |||
# return model |
@@ -0,0 +1,376 @@ | |||
#! /usr/bin/python | |||
# -*- coding: utf-8 -*- | |||
"""YOLOv4 for MS-COCO. | |||
# Reference: | |||
- [tensorflow-yolov4-tflite]( | |||
https://github.com/hunglc007/tensorflow-yolov4-tflite) | |||
""" | |||
import numpy as np | |||
import tensorlayer as tl | |||
from tensorlayer.layers.activation import Mish | |||
from tensorlayer.layers import Conv2d, MaxPool2d, BatchNorm2d, ZeroPad2d, UpSampling2d, Concat, Elementwise | |||
from tensorlayer.layers import Module, SequentialLayer | |||
from tensorlayer import logging | |||
INPUT_SIZE = 416 | |||
weights_url = {'link': 'https://pan.baidu.com/s/1MC1dmEwpxsdgHO1MZ8fYRQ', 'password': 'idsz'} | |||
class Convolutional(Module): | |||
""" | |||
Create Convolution layer | |||
Because it is only a stack of reference layers, there is no build, so self._built=True | |||
""" | |||
def __init__(self, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky',name=None): | |||
super(Convolutional, self).__init__() | |||
self.act = activate | |||
self.act_type = activate_type | |||
self.downsample = downsample | |||
self.bn = bn | |||
self._built = True | |||
if downsample: | |||
padding = 'VALID' | |||
strides = 2 | |||
else: | |||
strides = 1 | |||
padding = 'SAME' | |||
if bn: | |||
b_init = None | |||
else: | |||
b_init = tl.initializers.constant(value=0.0) | |||
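        # for downsampling, pad top-left by one pixel and use a stride-2 VALID conv
        # (darknet-style 'same' downsampling)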
self.zeropad = ZeroPad2d(((1, 0), (1, 0))) | |||
self.conv = Conv2d(n_filter=filters_shape[-1], in_channels=filters_shape[2], filter_size=(filters_shape[0], filters_shape[1]), | |||
strides=(strides, strides),padding=padding, b_init=b_init, name=name) | |||
if bn: | |||
            if activate:
if activate_type == 'leaky': | |||
self.batchnorm2d = BatchNorm2d(act='leaky_relu0.1', num_features=filters_shape[-1]) | |||
elif activate_type == 'mish': | |||
self.batchnorm2d = BatchNorm2d(act=Mish, num_features=filters_shape[-1]) | |||
else: | |||
self.batchnorm2d = BatchNorm2d(act=None, num_features=filters_shape[-1]) | |||
    def forward(self, inputs):
        if self.downsample:
            inputs = self.zeropad(inputs)
        output = self.conv(inputs)
        if self.bn:
            output = self.batchnorm2d(output)
        return output
class residual_block(Module): | |||
def __init__(self, input_channel, filter_num1, filter_num2, activate_type='leaky'): | |||
super(residual_block, self).__init__() | |||
self.conv1 = Convolutional(filters_shape=(1, 1, input_channel, filter_num1), activate_type=activate_type) | |||
self.conv2 = Convolutional(filters_shape=(3, 3, filter_num1, filter_num2), activate_type=activate_type) | |||
self.add = Elementwise(tl.add) | |||
def forward(self, inputs): | |||
output = self.conv1(inputs) | |||
output = self.conv2(output) | |||
output = self.add([inputs, output]) | |||
return output | |||
def residual_block_num(num, input_channel, filter_num1, filter_num2, activate_type='leaky'): | |||
residual_list = [] | |||
for i in range(num): | |||
residual_list.append(residual_block(input_channel, filter_num1, filter_num2, activate_type=activate_type)) | |||
return SequentialLayer(residual_list) | |||
class cspdarknet53(Module): | |||
def __init__(self): | |||
super(cspdarknet53, self).__init__() | |||
self._built = True | |||
self.conv1_1 = Convolutional((3, 3, 3, 32), activate_type='mish') | |||
self.conv1_2 = Convolutional((3, 3, 32, 64), downsample=True, activate_type='mish') | |||
self.conv1_3 = Convolutional((1, 1, 64, 64), activate_type='mish', name='conv_rote_block_1') | |||
self.conv1_4 = Convolutional((1, 1, 64, 64), activate_type='mish') | |||
self.residual_1 = residual_block_num(1, 64, 32, 64, activate_type="mish") | |||
self.conv2_1 = Convolutional((1, 1, 64, 64), activate_type='mish') | |||
self.concat = Concat() | |||
self.conv2_2 = Convolutional((1, 1, 128, 64), activate_type='mish') | |||
self.conv2_3 = Convolutional((3, 3, 64, 128), downsample=True, activate_type='mish') | |||
self.conv2_4 = Convolutional((1, 1, 128, 64), activate_type='mish', name='conv_rote_block_2') | |||
self.conv2_5 = Convolutional((1, 1, 128, 64), activate_type='mish') | |||
self.residual_2 = residual_block_num(2, 64, 64, 64, activate_type='mish') | |||
self.conv3_1 = Convolutional((1, 1, 64, 64), activate_type='mish') | |||
self.conv3_2 = Convolutional((1, 1, 128, 128), activate_type='mish') | |||
self.conv3_3 = Convolutional((3, 3, 128, 256), downsample=True, activate_type='mish') | |||
self.conv3_4 = Convolutional((1, 1, 256, 128), activate_type='mish', name='conv_rote_block_3') | |||
self.conv3_5 = Convolutional((1, 1, 256, 128), activate_type='mish') | |||
self.residual_3 = residual_block_num(8, 128, 128, 128, activate_type="mish") | |||
self.conv4_1 = Convolutional((1, 1, 128, 128), activate_type='mish') | |||
self.conv4_2 = Convolutional((1, 1, 256, 256), activate_type='mish') | |||
self.conv4_3 = Convolutional((3, 3, 256, 512), downsample=True, activate_type='mish') | |||
self.conv4_4 = Convolutional((1, 1, 512, 256), activate_type='mish', name='conv_rote_block_4') | |||
self.conv4_5 = Convolutional((1, 1, 512, 256), activate_type='mish') | |||
self.residual_4 = residual_block_num(8, 256, 256, 256, activate_type="mish") | |||
self.conv5_1 = Convolutional((1, 1, 256, 256), activate_type='mish') | |||
self.conv5_2 = Convolutional((1, 1, 512, 512), activate_type='mish') | |||
self.conv5_3 = Convolutional((3, 3, 512, 1024), downsample=True, activate_type='mish') | |||
self.conv5_4 = Convolutional((1, 1, 1024, 512), activate_type='mish', name='conv_rote_block_5') | |||
self.conv5_5 = Convolutional((1, 1, 1024, 512), activate_type='mish') | |||
self.residual_5 = residual_block_num(4, 512, 512, 512, activate_type="mish") | |||
self.conv6_1 = Convolutional((1, 1, 512, 512), activate_type='mish') | |||
self.conv6_2 = Convolutional((1, 1, 1024, 1024), activate_type='mish') | |||
self.conv6_3 = Convolutional((1, 1, 1024, 512)) | |||
self.conv6_4 = Convolutional((3, 3, 512, 1024)) | |||
self.conv6_5 = Convolutional((1, 1, 1024, 512)) | |||
self.maxpool1 = MaxPool2d(filter_size=(13, 13), strides=(1, 1)) | |||
self.maxpool2 = MaxPool2d(filter_size=(9, 9), strides=(1, 1)) | |||
self.maxpool3 = MaxPool2d(filter_size=(5, 5), strides=(1, 1)) | |||
self.conv7_1 = Convolutional((1, 1, 2048, 512)) | |||
self.conv7_2 = Convolutional((3, 3, 512, 1024)) | |||
self.conv7_3 = Convolutional((1, 1, 1024, 512)) | |||
def forward(self, input_data): | |||
input_data = self.conv1_1(input_data) | |||
input_data = self.conv1_2(input_data) | |||
route = input_data | |||
route = self.conv1_3(route) | |||
input_data = self.conv1_4(input_data) | |||
input_data = self.residual_1(input_data) | |||
input_data = self.conv2_1(input_data) | |||
input_data = self.concat([input_data, route]) | |||
input_data = self.conv2_2(input_data) | |||
input_data = self.conv2_3(input_data) | |||
route = input_data | |||
route = self.conv2_4(route) | |||
input_data = self.conv2_5(input_data) | |||
input_data = self.residual_2(input_data) | |||
input_data = self.conv3_1(input_data) | |||
input_data = self.concat([input_data, route]) | |||
input_data = self.conv3_2(input_data) | |||
input_data = self.conv3_3(input_data) | |||
route = input_data | |||
route = self.conv3_4(route) | |||
input_data = self.conv3_5(input_data) | |||
input_data = self.residual_3(input_data) | |||
input_data = self.conv4_1(input_data) | |||
input_data = self.concat([input_data, route]) | |||
input_data = self.conv4_2(input_data) | |||
route_1 = input_data | |||
input_data = self.conv4_3(input_data) | |||
route = input_data | |||
route = self.conv4_4(route) | |||
input_data = self.conv4_5(input_data) | |||
input_data = self.residual_4(input_data) | |||
input_data = self.conv5_1(input_data) | |||
input_data = self.concat([input_data, route]) | |||
input_data = self.conv5_2(input_data) | |||
route_2 = input_data | |||
input_data = self.conv5_3(input_data) | |||
route = input_data | |||
route = self.conv5_4(route) | |||
input_data = self.conv5_5(input_data) | |||
input_data = self.residual_5(input_data) | |||
input_data = self.conv6_1(input_data) | |||
input_data = self.concat([input_data, route]) | |||
input_data = self.conv6_2(input_data) | |||
input_data = self.conv6_3(input_data) | |||
input_data = self.conv6_4(input_data) | |||
input_data = self.conv6_5(input_data) | |||
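        # SPP block: max-pool the same feature map at three scales (13/9/5, stride 1)
        # and concatenate with the original features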
maxpool1 = self.maxpool1(input_data) | |||
maxpool2 = self.maxpool2(input_data) | |||
maxpool3 = self.maxpool3(input_data) | |||
input_data = self.concat([maxpool1, maxpool2, maxpool3, input_data]) | |||
input_data = self.conv7_1(input_data) | |||
input_data = self.conv7_2(input_data) | |||
input_data = self.conv7_3(input_data) | |||
return route_1, route_2, input_data | |||
class YOLOv4_model(Module): | |||
def __init__(self, NUM_CLASS): | |||
super(YOLOv4_model, self).__init__() | |||
        self.cspdarknet = cspdarknet53()
self.conv1_1 = Convolutional((1, 1, 512, 256)) | |||
        self.upsample = UpSampling2d(scale=2)
self.conv1_2 = Convolutional((1, 1, 512, 256), name='conv_yolo_1') | |||
self.concat = Concat() | |||
self.conv2_1 = Convolutional((1, 1, 512, 256)) | |||
self.conv2_2 = Convolutional((3, 3, 256, 512)) | |||
self.conv2_3 = Convolutional((1, 1, 512, 256)) | |||
self.conv2_4 = Convolutional((3, 3, 256, 512)) | |||
self.conv2_5 = Convolutional((1, 1, 512, 256)) | |||
self.conv3_1 = Convolutional((1, 1, 256, 128)) | |||
self.conv3_2 = Convolutional((1, 1, 256, 128), name='conv_yolo_2') | |||
self.conv4_1 = Convolutional((1, 1, 256, 128)) | |||
self.conv4_2 = Convolutional((3, 3, 128, 256)) | |||
self.conv4_3 = Convolutional((1, 1, 256, 128)) | |||
self.conv4_4 = Convolutional((3, 3, 128, 256)) | |||
self.conv4_5 = Convolutional((1, 1, 256, 128)) | |||
self.conv5_1 = Convolutional((3, 3, 128, 256), name='conv_route_1') | |||
self.conv5_2 = Convolutional((1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False) | |||
self.conv6_1 = Convolutional((3, 3, 128, 256), downsample=True, name='conv_route_2') | |||
self.conv6_2 = Convolutional((1, 1, 512, 256)) | |||
self.conv6_3 = Convolutional((3, 3, 256, 512)) | |||
self.conv6_4 = Convolutional((1, 1, 512, 256)) | |||
self.conv6_5 = Convolutional((3, 3, 256, 512)) | |||
self.conv6_6 = Convolutional((1, 1, 512, 256)) | |||
self.conv7_1 = Convolutional((3, 3, 256, 512), name='conv_route_3') | |||
self.conv7_2 = Convolutional((1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False) | |||
self.conv7_3 = Convolutional((3, 3, 256, 512), downsample=True, name='conv_route_4') | |||
self.conv8_1 = Convolutional((1, 1, 1024, 512)) | |||
self.conv8_2 = Convolutional((3, 3, 512, 1024)) | |||
self.conv8_3 = Convolutional((1, 1, 1024, 512)) | |||
self.conv8_4 = Convolutional((3, 3, 512, 1024)) | |||
self.conv8_5 = Convolutional((1, 1, 1024, 512)) | |||
self.conv9_1 = Convolutional((3, 3, 512, 1024)) | |||
self.conv9_2 = Convolutional((1, 1, 1024, 3 * (NUM_CLASS + 5)), activate=False, bn=False) | |||
def forward(self, inputs): | |||
        route_1, route_2, conv = self.cspdarknet(inputs)
route = conv | |||
conv = self.conv1_1(conv) | |||
        conv = self.upsample(conv)
route_2 = self.conv1_2(route_2) | |||
conv = self.concat([route_2, conv]) | |||
conv = self.conv2_1(conv) | |||
conv = self.conv2_2(conv) | |||
conv = self.conv2_3(conv) | |||
conv = self.conv2_4(conv) | |||
conv = self.conv2_5(conv) | |||
route_2 = conv | |||
conv = self.conv3_1(conv) | |||
        conv = self.upsample(conv)
route_1 = self.conv3_2(route_1) | |||
conv = self.concat([route_1, conv]) | |||
conv = self.conv4_1(conv) | |||
conv = self.conv4_2(conv) | |||
conv = self.conv4_3(conv) | |||
conv = self.conv4_4(conv) | |||
conv = self.conv4_5(conv) | |||
route_1 = conv | |||
conv = self.conv5_1(conv) | |||
conv_sbbox = self.conv5_2(conv) | |||
conv = self.conv6_1(route_1) | |||
conv = self.concat([conv, route_2]) | |||
conv = self.conv6_2(conv) | |||
conv = self.conv6_3(conv) | |||
conv = self.conv6_4(conv) | |||
conv = self.conv6_5(conv) | |||
conv = self.conv6_6(conv) | |||
route_2 = conv | |||
conv = self.conv7_1(conv) | |||
conv_mbbox = self.conv7_2(conv) | |||
conv = self.conv7_3(route_2) | |||
conv = self.concat([conv, route]) | |||
conv = self.conv8_1(conv) | |||
conv = self.conv8_2(conv) | |||
conv = self.conv8_3(conv) | |||
conv = self.conv8_4(conv) | |||
conv = self.conv8_5(conv) | |||
conv = self.conv9_1(conv) | |||
conv_lbbox = self.conv9_2(conv) | |||
return conv_sbbox, conv_mbbox, conv_lbbox | |||
def YOLOv4(NUM_CLASS, pretrained=False): | |||
"""Pre-trained YOLOv4 model. | |||
Parameters | |||
------------ | |||
NUM_CLASS : int | |||
Number of classes in final prediction. | |||
pretrained : boolean | |||
Whether to load pretrained weights. Default False. | |||
Examples | |||
--------- | |||
Object Detection with YOLOv4, see `computer_vision.py | |||
<https://github.com/tensorlayer/tensorlayer/blob/master/tensorlayer/app/computer_vision.py>`__ | |||
With TensorLayer | |||
>>> # get the whole model, without pre-trained YOLOv4 parameters | |||
>>> yolov4 = tl.app.YOLOv4(NUM_CLASS=80, pretrained=False) | |||
>>> # get the whole model, restore pre-trained YOLOv4 parameters | |||
>>> yolov4 = tl.app.YOLOv4(NUM_CLASS=80, pretrained=True) | |||
>>> # use for inferencing | |||
>>> output = yolov4(img, is_train=False) | |||
""" | |||
network = YOLOv4_model(NUM_CLASS=NUM_CLASS) | |||
if pretrained: | |||
restore_params(network, model_path='model/yolov4_model.npz') | |||
return network | |||
def restore_params(network, model_path='models.npz'):
    logging.info("Restore pre-trained weights")
    try:
        npz = np.load(model_path, allow_pickle=True)
    except OSError:
        print("Download the model file first and place it in the /model folder")
        print("Weights download:", weights_url['link'], "password:", weights_url['password'])
        return
    txt_path = 'model/yolov4_weights3_config.txt'  # maps weight order to names in the npz file
    with open(txt_path, "r") as f:
        lines = f.readlines()
    for i in range(len(lines)):
        network.all_weights[i].assign(npz[lines[i].strip()])
        logging.info(" Loading weights %s in %s" % (network.all_weights[i].shape, network.all_weights[i].name))
def tl2_weights_to_tl3_weights(
    weights_2_path='model/weights_2.txt', weights_3_path='model/weights_3.txt',
    txt_path='model/yolov4_weights_config.txt'
):
    # print the TensorLayer3 weight names in TensorLayer2 order, using the
    # name-mapping config file
    with open(weights_2_path, "r") as f1, open(weights_3_path, "r") as f2, open(txt_path, "r") as f3:
        line1 = f1.readlines()
        line2 = f2.readlines()
        line3 = f3.readlines()
    _dicts = {}
    for i in range(len(line1)):
        _dicts[line1[i].strip()] = line3[i].strip()
    for j in range(len(line2)):
        print(_dicts[line2[j].strip()])
@@ -0,0 +1,19 @@ | |||
# Medium Guidelines
## Publication Avatar | |||
 | |||
## Publication Logo (appears on posts) | |||
 | |||
 | |||
Add a publication logo, which appears at the top of all your publication's stories. | |||
It is 72px tall and can have a maximum width of 600px. | |||
## Publication homepage images | |||
Under Homepage and settings > Layout, you can select a header size, upload a logo and add a background image (large header size only). | |||