
Move features that are not yet ready for release into the legacy folder

tags/v0.4.10
ChenXin · 5 years ago · commit 1c9a0b5875
20 changed files with 23 additions and 24 deletions

  1. fastNLP/core/callback.py            +4  -2
  2. fastNLP/models/enas_trainer.py      +2  -2
  3. legacy/api/README.md                +0  -0
  4. legacy/api/__init__.py              +0  -0
  5. legacy/api/api.py                   +4  -4
  6. legacy/api/converter.py             +0  -0
  7. legacy/api/examples.py              +0  -0
  8. legacy/api/pipeline.py              +0  -0
  9. legacy/api/processor.py             +6  -9
 10. legacy/api/utils.py                 +1  -1
 11. legacy/automl/__init__.py           +0  -0
 12. legacy/automl/enas_controller.py    +0  -0
 13. legacy/automl/enas_model.py         +0  -0
 14. legacy/automl/enas_trainer.py       +6  -6
 15. legacy/automl/enas_utils.py         +0  -0
 16. legacy/component/__init__.py        +0  -0
 17. legacy/component/bert_tokenizer.py  +0  -0
 18. legacy/test/api/test_pipeline.py    +0  -0
 19. legacy/test/api/test_processor.py   +0  -0
 20. legacy/test/automl/test_enas.py     +0  -0

fastNLP/core/callback.py  +4 -2

@@ -66,8 +66,9 @@ from ..io.model_io import ModelSaver, ModelLoader
 
 try:
     from tensorboardX import SummaryWriter
+    tensorboardX_flag = True
 except:
-    pass
+    tensorboardX_flag = False
 
 
 class Callback(object):
@@ -581,7 +582,8 @@ class TensorboardCallback(Callback):
             path = os.path.join("./", 'tensorboard_logs_{}'.format(self.trainer.start_time))
         else:
             path = os.path.join(save_dir, 'tensorboard_logs_{}'.format(self.trainer.start_time))
-        self._summary_writer = SummaryWriter(path)
+        if tensorboardX_flag:
+            self._summary_writer = SummaryWriter(path)
 
     def on_batch_begin(self, batch_x, batch_y, indices):
         if "model" in self.options and self.graph_added is False:
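The two hunks above use a common optional-dependency pattern: record at import time whether tensorboardX is available in a module-level flag, then guard every use of SummaryWriter with that flag. A minimal, self-contained sketch of the same idea (the ScalarLogger class and its method names are illustrative, not fastNLP API):

import os

try:
    from tensorboardX import SummaryWriter
    tensorboardX_flag = True
except ImportError:
    # tensorboardX is optional; remember that it is missing.
    tensorboardX_flag = False


class ScalarLogger:
    """Degrades to a no-op when tensorboardX is not installed."""

    def __init__(self, save_dir="./tensorboard_logs"):
        self._summary_writer = None
        if tensorboardX_flag:
            # Only create the writer when the import succeeded.
            self._summary_writer = SummaryWriter(os.path.join(save_dir))

    def log(self, name, value, step):
        if self._summary_writer is not None:
            self._summary_writer.add_scalar(name, value, step)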


fastNLP/models/enas_trainer.py  +2 -2

@@ -78,7 +78,7 @@ class ENASTrainer(Trainer):
             results['seconds'] = 0.
             return results
         try:
-            if torch.cuda.is_available() and self.use_cuda:
+            if torch.cuda.is_available() and "cuda" in self.device:
                 self.model = self.model.cuda()
             self._model_device = self.model.parameters().__next__().device
             self._mode(self.model, is_test=False)
@@ -337,7 +337,7 @@ class ENASTrainer(Trainer):
 
         # policy loss
         loss = -log_probs*utils.get_variable(adv,
-                                             self.use_cuda,
+                                             'cuda' in self.device,
                                              requires_grad=False)
 
         loss = loss.sum()  # or loss.mean()
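Both hunks replace the boolean self.use_cuda flag with a check on the trainer's device string, so a single device argument controls placement. A hedged sketch of the same check outside fastNLP (place_on_device is an illustrative helper, not project API):

import torch

def place_on_device(model, device="cpu"):
    # Move the model only if a CUDA device was requested and CUDA is actually available.
    if torch.cuda.is_available() and "cuda" in device:
        model = model.to(device)
    return model

model = place_on_device(torch.nn.Linear(8, 2),
                        device="cuda:0" if torch.cuda.is_available() else "cpu")
print(next(model.parameters()).device)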


fastNLP/api/README.md → legacy/api/README.md


fastNLP/api/__init__.py → legacy/api/__init__.py


fastNLP/api/api.py → legacy/api/api.py

@@ -5,13 +5,13 @@ import torch
 warnings.filterwarnings('ignore')
 import os
 
-from ..core.dataset import DataSet
+from fastNLP.core.dataset import DataSet
 from .utils import load_url
 from .processor import ModelProcessor
-from ..io.dataset_loader import _cut_long_sentence, ConllLoader
-from ..core.instance import Instance
+from fastNLP.io.dataset_loader import _cut_long_sentence, ConllLoader
+from fastNLP.core.instance import Instance
 from ..api.pipeline import Pipeline
-from ..core.metrics import SpanFPreRecMetric
+from fastNLP.core.metrics import SpanFPreRecMetric
 from .processor import IndexerProcessor
 
 # TODO add pretrain urls
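The moved modules also switch from package-relative imports to absolute ones, because legacy/ sits outside the fastNLP package and a relative path such as ..core can no longer be resolved from there. A small sketch of the distinction, assuming fastNLP is importable:

# While api.py lived at fastNLP/api/api.py, a relative import was valid:
#     from ..core.dataset import DataSet
# After the move to legacy/api/api.py the file is outside the package,
# so it must name the installed package explicitly:
from fastNLP.core.dataset import DataSet

print(DataSet)  # resolves as long as fastNLP is on the import path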

fastNLP/api/converter.py → legacy/api/converter.py


fastNLP/api/examples.py → legacy/api/examples.py


fastNLP/api/pipeline.py → legacy/api/pipeline.py


fastNLP/api/processor.py → legacy/api/processor.py

@@ -3,10 +3,10 @@ from collections import defaultdict
 
 import torch
 
-from ..core.batch import Batch
-from ..core.dataset import DataSet
-from ..core.sampler import SequentialSampler
-from ..core.vocabulary import Vocabulary
+from fastNLP.core.batch import Batch
+from fastNLP.core.dataset import DataSet
+from fastNLP.core.sampler import SequentialSampler
+from fastNLP.core.vocabulary import Vocabulary
 
 
 class Processor(object):
@@ -232,7 +232,7 @@ class SeqLenProcessor(Processor):
         return dataset
 
 
-from ..core.utils import _build_args
+from fastNLP.core.utils import _build_args
 
 
 class ModelProcessor(Processor):
@@ -257,10 +257,7 @@ class ModelProcessor(Processor):
         data_iterator = Batch(dataset, batch_size=self.batch_size, sampler=SequentialSampler())
 
         batch_output = defaultdict(list)
-        if hasattr(self.model, "predict"):
-            predict_func = self.model.predict
-        else:
-            predict_func = self.model.forward
+        predict_func = self.model.forward
         with torch.no_grad():
             for batch_x, _ in data_iterator:
                 refined_batch_x = _build_args(predict_func, **batch_x)
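The last hunk removes the predict-method dispatch and always calls model.forward. The removed lines are themselves a reusable pattern: prefer an inference-specific entry point when the model defines one, otherwise fall back to forward. A hedged stand-alone sketch with a toy model (ToyModel is illustrative, not fastNLP code):

import torch

class ToyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        return {"pred": self.linear(x)}

    def predict(self, x):
        # Inference-only path: argmax instead of raw logits.
        return {"pred": self.forward(x)["pred"].argmax(dim=-1)}

model = ToyModel()
# The dispatch this commit removed from ModelProcessor:
predict_func = model.predict if hasattr(model, "predict") else model.forward
with torch.no_grad():
    out = predict_func(torch.randn(3, 4))
print(out["pred"].shape)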

fastNLP/api/utils.py → legacy/api/utils.py

@@ -22,7 +22,7 @@ except ImportError:
 try:
     from tqdm.auto import tqdm
 except:
-    from ..core.utils import _pseudo_tqdm as tqdm
+    from fastNLP.core.utils import _pseudo_tqdm as tqdm
 
 # matches bfd8deac from resnet18-bfd8deac.pth
 HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
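The context around this change also shows the tqdm fallback that both moved trainers rely on: if tqdm cannot be imported, a minimal stand-in with the same call surface is used instead. A sketch of such a stand-in (this class is illustrative, not fastNLP's _pseudo_tqdm):

class _FakeTqdm:
    """Minimal tqdm substitute: same calls, no progress bar."""

    def __init__(self, iterable=None, total=None, **kwargs):
        self.iterable = iterable if iterable is not None else []

    def __iter__(self):
        return iter(self.iterable)

    def update(self, n=1):
        pass

    def close(self):
        pass

try:
    from tqdm.auto import tqdm
except ImportError:
    tqdm = _FakeTqdm

for _ in tqdm(range(3), total=3):
    pass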

fastNLP/automl/__init__.py → legacy/automl/__init__.py


fastNLP/automl/enas_controller.py → legacy/automl/enas_controller.py


fastNLP/automl/enas_model.py → legacy/automl/enas_model.py


fastNLP/automl/enas_trainer.py → legacy/automl/enas_trainer.py

@@ -11,15 +11,15 @@ import torch
 try:
     from tqdm.auto import tqdm
 except:
-    from ..core.utils import _pseudo_tqdm as tqdm
+    from fastNLP.core.utils import _pseudo_tqdm as tqdm
 
-from ..core.batch import Batch
-from ..core.callback import CallbackException
-from ..core.dataset import DataSet
-from ..core.utils import _move_dict_value_to_device
+from fastNLP.core.batch import Batch
+from fastNLP.core.callback import CallbackException
+from fastNLP.core.dataset import DataSet
+from fastNLP.core.utils import _move_dict_value_to_device
 import fastNLP
 from . import enas_utils as utils
-from ..core.utils import _build_args
+from fastNLP.core.utils import _build_args
 
 from torch.optim import Adam


fastNLP/automl/enas_utils.py → legacy/automl/enas_utils.py


fastNLP/component/__init__.py → legacy/component/__init__.py


fastNLP/component/bert_tokenizer.py → legacy/component/bert_tokenizer.py


test/api/test_pipeline.py → legacy/test/api/test_pipeline.py


test/api/test_processor.py → legacy/test/api/test_processor.py


test/automl/test_enas.py → legacy/test/automl/test_enas.py

