-
Notifications
You must be signed in to change notification settings - Fork 26
/
Copy pathinference.py
106 lines (84 loc) · 4.42 KB
/
inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import argparse
import os
import torch
import torchaudio
import text
import utils.make_html as html
from utils import progbar, read_lines_from_file
# default:
# python inference.py --list data/infer_text.txt --out_dir samples/results --model fastpitch --checkpoint pretrained/fastpitch_ar_adv.pth --batch_size 2 --denoise 0
# Examples:
# python inference.py --list data/infer_text.txt --out_dir samples/res_tc2_adv0 --model tacotron2 --checkpoint pretrained/tacotron2_ar_adv.pth --batch_size 2
# python inference.py --list data/infer_text.txt --out_dir samples/res_tc2_adv1 --model tacotron2 --checkpoint pretrained/tacotron2_ar_adv.pth --batch_size 2 --denoise 0.005
# python inference.py --list data/infer_text.txt --out_dir samples/res_fp_adv0 --model fastpitch --checkpoint pretrained/fastpitch_ar_adv.pth --batch_size 2
# python inference.py --list data/infer_text.txt --out_dir samples/res_fp_adv1 --model fastpitch --checkpoint pretrained/fastpitch_ar_adv.pth --batch_size 2 --denoise 0.005
# python inference.py --list data/infer_text.txt --out_dir samples/res_fp_adv2 --model fastpitch --checkpoint pretrained/fastpitch_ar_adv.pth --batch_size 2 --denoise 0.005 --vocoder_sd pretrained/hifigan-asc-v1/g_02500000 --vocoder_config pretrained/hifigan-asc-v1/config.json
def infer(args):
    """Synthesize speech for every line of ``args.list`` and write results.

    Saves one WAV per input line under ``{args.out_dir}/wavs/`` and builds an
    ``index.html`` listing each sample with its Arabic text and phonemes.

    Args:
        args: parsed CLI namespace (see ``main``) carrying ``model``,
            ``checkpoint``, ``vocoder_sd``, ``vocoder_config``, ``list``,
            ``out_dir``, ``batch_size``, ``denoise``, ``speed`` and ``cpu``.

    Raises:
        ValueError: if ``args.model`` is neither ``'fastpitch'`` nor
            ``'tacotron2'``.
    """
    use_cuda_if_available = not args.cpu
    device = torch.device(
        'cuda' if torch.cuda.is_available() and use_cuda_if_available else 'cpu')

    # Import lazily so only the requested model family's dependencies load.
    if args.model == 'fastpitch':
        from models.fastpitch import FastPitch2Wave
        model = FastPitch2Wave(args.checkpoint,
                               vocoder_sd=args.vocoder_sd,
                               vocoder_config=args.vocoder_config)
    elif args.model == 'tacotron2':
        from models.tacotron2 import Tacotron2Wave
        model = Tacotron2Wave(args.checkpoint,
                              vocoder_sd=args.vocoder_sd,
                              vocoder_config=args.vocoder_config)
    else:
        # BUG FIX: the original did `raise "model type not supported"`;
        # raising a string is a TypeError in Python 3, masking the real error.
        raise ValueError(f"model type not supported: {args.model!r}")

    model = model.to(device)
    model.eval()

    # exist_ok replaces the racy exists()/makedirs() check of the original.
    os.makedirs(f"{args.out_dir}/wavs", exist_ok=True)

    static_lines = read_lines_from_file(args.list)
    # Chunk the input lines into fixed-size batches for model.tts.
    static_batches = [static_lines[k:k + args.batch_size]
                      for k in range(0, len(static_lines), args.batch_size)]

    idx = 0
    with open(os.path.join(args.out_dir, 'index.html'), 'w', encoding='utf-8') as f:
        f.write(html.make_html_start())
        for batch in progbar(static_batches):
            # Run TTS on the whole batch at once.
            wav_list = model.tts(batch,
                                 batch_size=args.batch_size,
                                 denoise=args.denoise,
                                 speed=args.speed)
            # Save each waveform (mono, 22.05 kHz) and append an HTML entry.
            for (text_line, wav) in zip(batch, wav_list):
                torchaudio.save(f'{args.out_dir}/wavs/static{idx}.wav',
                                wav.unsqueeze(0),
                                22_050)
                # Round-trip through Buckwalter to normalize the Arabic text,
                # then derive a simplified phoneme string for display.
                text_buckw = text.arabic_to_buckwalter(text_line)
                text_arabic = text.buckwalter_to_arabic(text_buckw)
                t_phon = text.buckwalter_to_phonemes(text_buckw)
                t_phon = text.simplify_phonemes(
                    t_phon.replace(' ', '').replace('+', ' '))
                f.write(html.make_sample_entry2(
                    f'wavs/static{idx}.wav',
                    text_arabic,
                    f"{idx}) {t_phon}"))
                idx += 1
        f.write(html.make_volume_script(0.5))
        f.write(html.make_html_end())

    print(f"Saved files to: {args.out_dir}")
def main():
    """Parse the command-line options and run inference with them."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--list', type=str, default='./data/infer_text.txt')
    parser.add_argument('--model', type=str, default='fastpitch')
    parser.add_argument('--checkpoint', type=str,
                        default='pretrained/fastpitch_ar_adv.pth')
    parser.add_argument('--vocoder_sd', type=str, default=None)
    parser.add_argument('--vocoder_config', type=str, default=None)
    parser.add_argument('--out_dir', type=str, default='samples/results')
    parser.add_argument('--speed', type=float, default=1.0)
    parser.add_argument('--denoise', type=float, default=0)
    parser.add_argument('--batch_size', type=int, default=2)
    parser.add_argument('--cpu', action='store_true')
    # Hand the parsed namespace straight to the inference routine.
    infer(parser.parse_args())
# Script entry point: only run inference when executed directly, not on import.
if __name__ == '__main__':
    main()