
merge with master

Branch: master · 智丞 · 3 years ago · commit 6741bc5afd
7 changed files with 18 additions and 17 deletions:

1. modelscope/models/audio/tts/am/sambert_hifi_16k.py (+1, -1)
2. modelscope/models/audio/tts/frontend/generic_text_to_speech_frontend.py (+0, -1)
3. modelscope/pipelines/nlp/fill_mask_pipeline.py (+2, -0)
4. requirements/audio.txt (+10, -10)
5. tests/pipelines/test_image_matting.py (+1, -1)
6. tests/pipelines/test_text_classification.py (+1, -1)
7. tests/pydatasets/test_py_dataset.py (+3, -3)

modelscope/models/audio/tts/am/sambert_hifi_16k.py (+1, -1)

@@ -18,7 +18,7 @@ __all__ = ['SambertNetHifi16k']


def multi_label_symbol_to_sequence(my_classes, my_symbol):
-    one_hot = MultiLabelBinarizer(my_classes)
+    one_hot = MultiLabelBinarizer(classes=my_classes)
    tokens = my_symbol.strip().split(' ')
    sequences = []
    for token in tokens:
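Note on the hunk above: in newer scikit-learn releases the classes argument of MultiLabelBinarizer is keyword-only, so the old positional call can break; the fix passes it by name. A minimal sketch of the intended usage, with made-up class and symbol data:

# Illustrative only; the class list and label tuples are invented for the example.
from sklearn.preprocessing import MultiLabelBinarizer

my_classes = ['a', 'b', 'c']
one_hot = MultiLabelBinarizer(classes=my_classes)  # keyword form, as in the new line
sequences = one_hot.fit_transform([('a',), ('b', 'c')])
print(sequences)  # multi-hot rows over my_classes: [[1 0 0], [0 1 1]]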


modelscope/models/audio/tts/frontend/generic_text_to_speech_frontend.py (+0, -1)

@@ -20,7 +20,6 @@ class GenericTtsFrontend(Model):
    def __init__(self, model_dir='.', lang_type='pinyin', *args, **kwargs):
        super().__init__(model_dir, *args, **kwargs)
        import ttsfrd
-
        frontend = ttsfrd.TtsFrontendEngine()
        zip_file = os.path.join(model_dir, 'resource.zip')
        self._res_path = os.path.join(model_dir, 'resource')


modelscope/pipelines/nlp/fill_mask_pipeline.py (+2, -0)

@@ -39,6 +39,7 @@ class FillMaskPipeline(Pipeline):
        fill_mask_model.eval()
        super().__init__(
            model=fill_mask_model, preprocessor=preprocessor, **kwargs)
+
        self.preprocessor = preprocessor
        self.tokenizer = preprocessor.tokenizer
        self.mask_id = {'veco': 250001, 'sbert': 103}
@@ -94,6 +95,7 @@ class FillMaskPipeline(Pipeline):
        pred_strings = []
        for ids in rst_ids:  # batch
            # TODO vocab size is not stable
+
            if self.model.config.vocab_size == 21128:  # zh bert
                pred_string = self.tokenizer.convert_ids_to_tokens(ids)
                pred_string = ''.join(pred_string)
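For context on the second hunk above: the pipeline chooses how to join predicted tokens from the model's vocabulary size (21128 identifies the Chinese BERT vocab, whose tokens are concatenated without spaces). A hedged sketch of that branch, assuming a HuggingFace-style tokenizer; everything except the 21128 check is illustrative, not the pipeline's actual code:

# Sketch only: turn predicted ids into a string, branching on vocab size.
def ids_to_string(tokenizer, ids, vocab_size):
    tokens = tokenizer.convert_ids_to_tokens(ids)
    if vocab_size == 21128:  # zh bert, as checked in the diff
        return ''.join(tokens)  # Chinese tokens are concatenated directly
    return ' '.join(tokens)  # simplified fallback for other vocabularies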


requirements/audio.txt (+10, -10)

@@ -1,25 +1,25 @@
#tts
-h5py==2.10.0
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.1-cp36-cp36m-linux_x86_64.whl; python_version=='3.6'
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.1-cp37-cp37m-linux_x86_64.whl; python_version=='3.7'
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.1-cp38-cp38-linux_x86_64.whl; python_version=='3.8'
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.1-cp39-cp39-linux_x86_64.whl; python_version=='3.9'
-https://swap.oss-cn-hangzhou.aliyuncs.com/Jiaqi%2Fmaas%2Ftts%2Frequirements%2Fpytorch_wavelets-1.3.0-py3-none-any.whl?Expires=1685688388&OSSAccessKeyId=LTAI4Ffebq4d9jTVDwiSbY4L&Signature=jcQbg5EZ%2Bdys3%2F4BRn3srrKLdIg%3D
+h5py
+https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/pytorch_wavelets-1.3.0-py3-none-any.whl
+https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.2-cp36-cp36m-linux_x86_64.whl; python_version=='3.6'
+https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.2-cp37-cp37m-linux_x86_64.whl; python_version=='3.7'
+https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.2-cp38-cp38-linux_x86_64.whl; python_version=='3.8'
+https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.2-cp39-cp39-linux_x86_64.whl; python_version=='3.9'
inflect
-keras==2.2.4
+keras
librosa
lxml
matplotlib
nara_wpe
-numpy==1.18.*
+numpy
protobuf>3,<=3.20
ptflops
PyWavelets>=1.0.0
-scikit-learn==0.23.2
+scikit-learn
sox
tensorboard
tensorflow==1.15.*
-torch==1.10.*
+torch
torchaudio
torchvision
tqdm
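Aside on the lines kept above: the `; python_version=='3.x'` suffixes are PEP 508 environment markers, so pip installs only the ttsfrd wheel that matches the running interpreter. A small illustration of how such a marker evaluates, using the packaging library (the marker string mirrors the file; the rest is example code):

# Evaluates a PEP 508 marker against the current interpreter.
from packaging.markers import Marker

marker = Marker("python_version == '3.8'")
print(marker.evaluate())  # True only on Python 3.8, so only that wheel is selected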


tests/pipelines/test_image_matting.py (+1, -1)

@@ -60,7 +60,7 @@ class ImageMattingTest(unittest.TestCase):
        cv2.imwrite('result.png', result['output_png'])
        print(f'Output written to {osp.abspath("result.png")}')

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_with_modelscope_dataset(self):
        dataset = PyDataset.load('beans', split='train', target='image')
        img_matting = pipeline(Tasks.image_matting, model=self.model_id)
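This hunk, like the matching ones in test_text_classification.py and test_py_dataset.py below, raises the gate on dataset-backed tests from level 1 to level 2. A hedged sketch of how such a level gate is commonly wired; the TEST_LEVEL variable name and its default are assumptions, not taken from this diff:

# Assumed convention only; modelscope's real test_level() helper may differ.
import os
import unittest

def test_level():
    return int(os.environ.get('TEST_LEVEL', 0))  # assumed env var and default

class ExampleTest(unittest.TestCase):

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_with_modelscope_dataset(self):
        self.assertTrue(True)  # runs only when TEST_LEVEL >= 2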


tests/pipelines/test_text_classification.py (+1, -1)

@@ -87,7 +87,7 @@ class SequenceClassificationTest(unittest.TestCase):
        result = text_classification(dataset)
        self.printDataset(result)

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_with_modelscope_dataset(self):
        text_classification = pipeline(task=Tasks.text_classification)
        # loaded from modelscope dataset


tests/pydatasets/test_py_dataset.py (+3, -3)

@@ -33,7 +33,7 @@ class ImgPreprocessor(Preprocessor):

class PyDatasetTest(unittest.TestCase):

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_ds_basic(self):
        ms_ds_full = PyDataset.load('squad')
        ms_ds_full_hf = hfdata.load_dataset('squad')
@@ -49,7 +49,7 @@ class PyDatasetTest(unittest.TestCase):
        print(next(iter(ms_ds_train)))
        print(next(iter(ms_image_train)))

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    @require_torch
    def test_to_torch_dataset_text(self):
        model_id = 'damo/bert-base-sst2'
@@ -64,7 +64,7 @@ class PyDatasetTest(unittest.TestCase):
        dataloader = torch.utils.data.DataLoader(pt_dataset, batch_size=5)
        print(next(iter(dataloader)))

-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
+    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    @require_tf
    def test_to_tf_dataset_text(self):
        import tensorflow as tf
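Related background for the torch hunk above: these tests compare PyDataset against the plain Hugging Face datasets route. A minimal, hedged illustration of that reference route only; it does not use the PyDataset API, and the slice and column choices are mine:

# Reference path via Hugging Face datasets; illustrative, not the test itself.
import torch
from datasets import load_dataset

squad = load_dataset('squad', split='train').select(range(10))
squad = squad.remove_columns(['answers'])  # keep flat string columns for default collation
loader = torch.utils.data.DataLoader(squad, batch_size=5)
print(next(iter(loader))['question'])  # a batch of 5 question strings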

