
export.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""export mindir."""
import json
import argparse
from os.path import join
from warnings import warn

import numpy as np
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export

from hparams import hparams, hparams_debug_string
from wavenet_vocoder import WaveNet
from wavenet_vocoder.util import is_mulaw_quantize, is_scalar_input
from src.loss import PredictNet

parser = argparse.ArgumentParser(description='WaveNet export')
parser.add_argument('--preset', type=str, default='', help='Path of preset parameters (json).')
parser.add_argument('--checkpoint_dir', type=str, default='./',
                    help='Directory where hparams.json is written.')
parser.add_argument('--speaker_id', type=str, default='',
                    help='Use a specific speaker of the data in case of a multi-speaker dataset.')
parser.add_argument('--pretrain_ckpt', type=str, default='', help='Pretrained checkpoint path.')
args = parser.parse_args()

if __name__ == '__main__':
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
    speaker_id = int(args.speaker_id) if args.speaker_id != '' else None
    if args.preset != '':
        with open(args.preset) as f:
            hparams.parse_json(f.read())
    assert hparams.name == "wavenet_vocoder"
    print(hparams_debug_string())
    fs = hparams.sample_rate

    # Dump the resolved hyper-parameters next to the checkpoints for reference.
    output_json_path = join(args.checkpoint_dir, "hparams.json")
    with open(output_json_path, "w") as f:
        json.dump(hparams.values(), f, indent=2)

    if is_mulaw_quantize(hparams.input_type):
        if hparams.out_channels != hparams.quantize_channels:
            raise RuntimeError(
                "out_channels must equal quantize_channels if input_type is 'mulaw-quantize'")
    if hparams.upsample_conditional_features and hparams.cin_channels < 0:
        s = "Upsample conv layers were specified while local conditioning is disabled. "
        s += "Notice that the upsample conv layers will never be used."
        warn(s)

    upsample_params = hparams.upsample_params
    upsample_params["cin_channels"] = hparams.cin_channels
    upsample_params["cin_pad"] = hparams.cin_pad
    model = WaveNet(
        out_channels=hparams.out_channels,
        layers=hparams.layers,
        stacks=hparams.stacks,
        residual_channels=hparams.residual_channels,
        gate_channels=hparams.gate_channels,
        skip_out_channels=hparams.skip_out_channels,
        cin_channels=hparams.cin_channels,
        gin_channels=hparams.gin_channels,
        n_speakers=hparams.n_speakers,
        dropout=hparams.dropout,
        kernel_size=hparams.kernel_size,
        cin_pad=hparams.cin_pad,
        upsample_conditional_features=hparams.upsample_conditional_features,
        upsample_params=upsample_params,
        scalar_input=is_scalar_input(hparams.input_type),
        output_distribution=hparams.output_distribution,
    )
    Net = PredictNet(model)
    Net.set_train(False)
    receptive_field = model.receptive_field
    print("Receptive field (samples / ms): {} / {}".format(receptive_field, receptive_field / fs * 1000))

    param_dict = load_checkpoint(args.pretrain_ckpt)
    load_param_into_net(model, param_dict)
    print('Successfully loaded the pre-trained model')

    # Dummy inputs that fix the exported graph's shapes: waveform, local
    # conditioning (mel-spectrogram frames) and global conditioning (speaker ids).
    x = np.array(np.random.random((2, 256, 10240)), dtype=np.float32)
    c = np.array(np.random.random((2, 80, 44)), dtype=np.float32)
    g = np.array([0, 0], dtype=np.int64)
    export(Net, Tensor(x), Tensor(c), Tensor(g), file_name="WaveNet", file_format='MINDIR')
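
As a quick sanity check, the exported file can be loaded back through MindSpore's MindIR path. The snippet below is a minimal sketch, not part of this repository: it assumes a MindSpore build where mindspore.load and nn.GraphCell are available (1.1 or later), and it reuses the same dummy input shapes that export() was traced with above.

# Minimal sketch (assumption: mindspore.load / nn.GraphCell are available).
# Loads the WaveNet.mindir produced above and runs one forward pass with
# inputs shaped like the export dummies.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor

graph = ms.load("WaveNet.mindir")   # file written by export() above
net = nn.GraphCell(graph)

x = Tensor(np.random.random((2, 256, 10240)).astype(np.float32))  # waveform
c = Tensor(np.random.random((2, 80, 44)).astype(np.float32))      # mel conditioning
g = Tensor(np.array([0, 0], dtype=np.int64))                      # speaker ids
out = net(x, c, g)
print(out)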