evaluate.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Evaluation: generate audio from mel-spectrograms with a trained WaveNet vocoder."""
import os
from os.path import join
import argparse
import glob

import numpy as np
from scipy.io import wavfile
from tqdm import tqdm
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset.engine as de
from nnmnkwii import preprocessing as P
from nnmnkwii.datasets import FileSourceDataset

import audio
from hparams import hparams, hparams_debug_string
from wavenet_vocoder import WaveNet
from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_scalar_input
from src.dataset import RawAudioDataSource, MelSpecDataSource, DualDataset
parser = argparse.ArgumentParser(description='TTS evaluation')
parser.add_argument('--data_path', type=str, required=True, default='',
                    help='Directory containing the preprocessed features.')
parser.add_argument('--preset', type=str, required=True, default='', help='Path of preset parameters (json).')
parser.add_argument('--pretrain_ckpt', type=str, default='', help='Pretrained checkpoint path.')
parser.add_argument('--is_numpy', action="store_false", default=True,
                    help='Use numpy for incremental inference (default); pass this flag to disable it.')
parser.add_argument('--output_path', type=str, default='./out_wave/', help='Path to save generated audio.')
parser.add_argument('--speaker_id', type=str, default='',
                    help='Use a specific speaker of the data in case of a multi-speaker dataset.')
args = parser.parse_args()
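
# Example invocation (the paths below are hypothetical and shown only for illustration):
#   python evaluate.py --data_path ./dump/testset --preset ./wavenet_hparams.json \
#       --pretrain_ckpt ./ckpt/wavenet.ckpt --output_path ./out_wave/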


def get_data_loader(hparam, data_dir):
    """
    Build the test data loader.
    """
    wav_paths = glob.glob(os.path.join(data_dir, "*-wave.npy"))
    if wav_paths:
        X = FileSourceDataset(RawAudioDataSource(data_dir,
                                                 hop_size=audio.get_hop_size(),
                                                 max_steps=None, cin_pad=hparam.cin_pad))
    else:
        X = None
    C = FileSourceDataset(MelSpecDataSource(data_dir,
                                            hop_size=audio.get_hop_size(),
                                            max_steps=None, cin_pad=hparam.cin_pad))
    length_x = np.array(C.file_data_source.lengths)
    if C[0].shape[-1] != hparam.cin_channels:
        raise RuntimeError("Invalid cin_channels {}. Expected to be {}.".format(C[0].shape[-1],
                                                                                hparam.cin_channels))
    dataset = DualDataset(X, C, length_x, batch_size=hparam.batch_size, hparams=hparam)
    data_loader = de.GeneratorDataset(dataset, ["x_batch", "y_batch", "c_batch", "g_batch", "input_lengths", "mask"])
    return data_loader, dataset


def batch_wavegen(hparam, net, c_input=None, g_input=None, tqdm_=None, is_numpy=True):
    """
    Generate audio for a batch of conditioning features.
    """
    assert c_input is not None
    B = c_input.shape[0]
    net.set_train(False)
    if hparam.upsample_conditional_features:
        length = (c_input.shape[-1] - hparam.cin_pad * 2) * audio.get_hop_size()
    else:
        # already duplicated
        length = c_input.shape[-1]
    y_hat = net.incremental_forward(c=c_input, g=g_input, T=length, tqdm=tqdm_, softmax=True, quantize=True,
                                    log_scale_min=hparam.log_scale_min, is_numpy=is_numpy)
    if is_mulaw_quantize(hparam.input_type):
        # needs to be float since mulaw_inv returns in range of [-1, 1]
        y_hat = np.reshape(np.argmax(y_hat, 1), (B, -1))
        y_hat = y_hat.astype(np.float32)
        for k in range(B):
            y_hat[k] = P.inv_mulaw_quantize(y_hat[k], hparam.quantize_channels - 1)
    elif is_mulaw(hparam.input_type):
        y_hat = np.reshape(y_hat, (B, -1))
        for k in range(B):
            y_hat[k] = P.inv_mulaw(y_hat[k], hparam.quantize_channels - 1)
    else:
        y_hat = np.reshape(y_hat, (B, -1))
    if hparam.postprocess is not None and hparam.postprocess not in ["", "none"]:
        for k in range(B):
            y_hat[k] = getattr(audio, hparam.postprocess)(y_hat[k])
    if hparam.global_gain_scale > 0:
        for k in range(B):
            y_hat[k] /= hparam.global_gain_scale
    return y_hat
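
# For reference, nnmnkwii's inv_mulaw applies the standard inverse mu-law
# companding transform, x = sign(y) * ((1 + mu)^|y| - 1) / mu, here with
# mu = quantize_channels - 1, mapping the compressed signal back to [-1, 1].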


def to_int16(x_):
    """
    Convert a float32 waveform in [-1, 1] to int16.
    """
    if x_.dtype == np.int16:
        return x_
    assert x_.dtype == np.float32
    assert x_.min() >= -1 and x_.max() <= 1.0
    return (x_ * 32767).astype(np.int16)
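
# Example (hypothetical values): to_int16(np.array([0.0, 0.5, -1.0], dtype=np.float32))
# returns array([0, 16383, -32767], dtype=int16); 32767 is the largest int16 value.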


def get_reference_file(hparam, dataset_source, idx):
    """
    Collect reference audio/feature file paths for one batch, starting at idx.
    """
    reference_files = []
    reference_feats = []
    for _ in range(hparam.batch_size):
        if hasattr(dataset_source, "X"):
            reference_files.append(dataset_source.X.collected_files[idx][0])
        if hasattr(dataset_source, "Mel"):
            reference_feats.append(dataset_source.Mel.collected_files[idx][0])
        else:
            reference_feats.append(dataset_source.collected_files[idx][0])
        idx += 1
    return reference_files, reference_feats, idx


def get_saved_audio_name(has_ref_file_, ref_file, ref_feat, g_fp):
    """Get the paths used to save the generated and reference audio."""
    if has_ref_file_:
        target_audio_path = ref_file
        name = os.path.splitext(os.path.basename(target_audio_path))[0].replace("-wave", "")
    else:
        target_feat_path = ref_feat
        name = os.path.splitext(os.path.basename(target_feat_path))[0].replace("-feats", "")
    # Paths
    if g_fp is None:
        dst_wav_path_ = join(args.output_path, "{}_gen.wav".format(name))
        target_wav_path_ = join(args.output_path, "{}_ref.wav".format(name))
    else:
        dst_wav_path_ = join(args.output_path, "speaker{}_{}_gen.wav".format(g_fp, name))
        target_wav_path_ = join(args.output_path, "speaker{}_{}_ref.wav".format(g_fp, name))
    return dst_wav_path_, target_wav_path_


def save_ref_audio(hparam, ref, length, target_wav_path_):
    """
    Save the reference audio.
    """
    if is_mulaw_quantize(hparam.input_type):
        ref = np.reshape(np.argmax(ref, 0), (-1))[:length]
        ref = ref.astype(np.float32)
    else:
        ref = np.reshape(ref, (-1))[:length]

    if is_mulaw_quantize(hparam.input_type):
        ref = P.inv_mulaw_quantize(ref, hparam.quantize_channels - 1)
    elif is_mulaw(hparam.input_type):
        ref = P.inv_mulaw(ref, hparam.quantize_channels - 1)

    if hparam.postprocess is not None and hparam.postprocess not in ["", "none"]:
        ref = getattr(audio, hparam.postprocess)(ref)
    if hparam.global_gain_scale > 0:
        ref /= hparam.global_gain_scale
    ref = np.clip(ref, -1.0, 1.0)
    wavfile.write(target_wav_path_, hparam.sample_rate, to_int16(ref))


if __name__ == '__main__':
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU', save_graphs=False)
    speaker_id = int(args.speaker_id) if args.speaker_id != '' else None
    if args.preset is not None:
        with open(args.preset) as f:
            hparams.parse_json(f.read())
    assert hparams.name == "wavenet_vocoder"
    print(hparams_debug_string())
    fs = hparams.sample_rate
    hparams.batch_size = 10
    hparams.max_time_sec = None
    hparams.max_time_steps = None
    data_loaders, source_dataset = get_data_loader(hparam=hparams, data_dir=args.data_path)
    upsample_params = hparams.upsample_params
    upsample_params["cin_channels"] = hparams.cin_channels
    upsample_params["cin_pad"] = hparams.cin_pad
    model = WaveNet(
        out_channels=hparams.out_channels,
        layers=hparams.layers,
        stacks=hparams.stacks,
        residual_channels=hparams.residual_channels,
        gate_channels=hparams.gate_channels,
        skip_out_channels=hparams.skip_out_channels,
        cin_channels=hparams.cin_channels,
        gin_channels=hparams.gin_channels,
        n_speakers=hparams.n_speakers,
        dropout=hparams.dropout,
        kernel_size=hparams.kernel_size,
        cin_pad=hparams.cin_pad,
        upsample_conditional_features=hparams.upsample_conditional_features,
        upsample_params=upsample_params,
        scalar_input=is_scalar_input(hparams.input_type),
        output_distribution=hparams.output_distribution,
    )
    param_dict = load_checkpoint(args.pretrain_ckpt)
    load_param_into_net(model, param_dict)
    print('Successfully loaded the pre-trained model')
    os.makedirs(args.output_path, exist_ok=True)

    cin_pad = hparams.cin_pad
    file_idx = 0
    for data in data_loaders.create_dict_iterator():
        x, y, c, g, input_lengths = (data['x_batch'], data['y_batch'], data['c_batch'],
                                     data['g_batch'], data['input_lengths'])
        if cin_pad > 0:
            # Pad only the time axis of the conditioning features with edge values;
            # padding every axis would also alter the batch and channel dimensions.
            c = c.asnumpy()
            c = np.pad(c, pad_width=((0, 0), (0, 0), (cin_pad, cin_pad)), mode="edge")
            c = Tensor(c)
        ref_files, ref_feats, file_idx = get_reference_file(hparams, source_dataset, file_idx)
        # Generate from the (possibly padded) conditioning features
        y_hats = batch_wavegen(hparams, model, c_input=c, tqdm_=tqdm, is_numpy=args.is_numpy)
        x = x.asnumpy()
        input_lengths = input_lengths.asnumpy()
        # Save each utterance
        has_ref_file = bool(ref_files)
        for i, (ref_, gen_, length_) in enumerate(zip(x, y_hats, input_lengths)):
            dst_wav_path, target_wav_path = get_saved_audio_name(
                has_ref_file_=has_ref_file,
                ref_file=ref_files[i] if has_ref_file else None,
                ref_feat=ref_feats[i], g_fp=g)
            save_ref_audio(hparams, ref_, length_, target_wav_path)
            gen = gen_[:length_]
            gen = np.clip(gen, -1.0, 1.0)
            wavfile.write(dst_wav_path, hparams.sample_rate, to_int16(gen))
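
# Each utterance yields a pair of files under --output_path: "<name>_gen.wav"
# (the generated waveform) and "<name>_ref.wav" (the reference audio), with a
# "speaker<g>_" prefix when a global conditioning input is present.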