Skip to content

Commit 7e56299

Browse files
committed
merge the develop
2 parents b327368 + bd0d69c commit 7e56299

File tree

22 files changed

+1361
-26
lines changed

22 files changed

+1361
-26
lines changed

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,6 +178,7 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision
178178
- 🧩 *Cascaded models application*: as an extension of the typical traditional audio tasks, we combine the workflows of the aforementioned tasks with other fields like Natural language processing (NLP) and Computer Vision (CV).
179179

180180
### Recent Update
181+
- 🔥 2023.04.06: Add [subtitle file (.srt format) generation example](./demos/streaming_asr_server).
181182
- 🔥 2023.03.14: Add SVS(Singing Voice Synthesis) examples with Opencpop dataset, including [DiffSinger](./examples/opencpop/svs1), [PWGAN](./examples/opencpop/voc1) and [HiFiGAN](./examples/opencpop/voc5), the effect is continuously optimized.
182183
- 👑 2023.03.09: Add [Wav2vec2ASR-zh](./examples/aishell/asr3).
183184
- 🎉 2023.03.07: Add [TTS ARM Linux C++ Demo (with C++ Chinese Text Frontend)](./demos/TTSArmLinux).

README_cn.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -183,6 +183,7 @@
183183
- 🧩 级联模型应用: 作为传统语音任务的扩展,我们结合了自然语言处理、计算机视觉等任务,实现更接近实际需求的产业级应用。
184184

185185
### 近期更新
186+
- 👑 2023.04.06: 新增 [srt格式字幕生成功能](./demos/streaming_asr_server)
186187
- 🔥 2023.03.14: 新增基于 Opencpop 数据集的 SVS (歌唱合成) 示例,包含 [DiffSinger](./examples/opencpop/svs1)、[PWGAN](./examples/opencpop/voc1) 和 [HiFiGAN](./examples/opencpop/voc5),效果持续优化中。
187188
- 👑 2023.03.09: 新增 [Wav2vec2ASR-zh](./examples/aishell/asr3)
188189
- 🎉 2023.03.07: 新增 [TTS ARM Linux C++ 部署示例 (包含 C++ 中文文本前端模块)](./demos/TTSArmLinux)

demos/streaming_asr_server/README.md

Lines changed: 351 additions & 0 deletions
Large diffs are not rendered by default.

demos/streaming_asr_server/README_cn.md

Lines changed: 351 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 162 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,162 @@
1+
#!/usr/bin/python
2+
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
3+
#
4+
# Licensed under the Apache License, Version 2.0 (the "License");
5+
# you may not use this file except in compliance with the License.
6+
# You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
# calc avg RTF(NOT Accurate): grep -rn RTF log.txt | awk '{print $NF}' | awk -F "=" '{sum += $NF} END {print "all time",sum, "audio num", NR, "RTF", sum/NR}'
16+
# python3 websocket_client.py --server_ip 127.0.0.1 --port 8290 --punc.server_ip 127.0.0.1 --punc.port 8190 --wavfile ./zh.wav
17+
# python3 websocket_client.py --server_ip 127.0.0.1 --port 8290 --wavfile ./zh.wav
18+
import argparse
19+
import asyncio
20+
import codecs
21+
import os
22+
from pydub import AudioSegment
23+
import re
24+
25+
from paddlespeech.cli.log import logger
26+
from paddlespeech.server.utils.audio_handler import ASRWsAudioHandler
27+
28+
def convert_to_wav(input_file):
29+
# Load audio file
30+
audio = AudioSegment.from_file(input_file)
31+
32+
# Set parameters for audio file
33+
audio = audio.set_channels(1)
34+
audio = audio.set_frame_rate(16000)
35+
36+
# Create output filename
37+
output_file = os.path.splitext(input_file)[0] + ".wav"
38+
39+
# Export audio file as WAV
40+
audio.export(output_file, format="wav")
41+
42+
logger.info(f"{input_file} converted to {output_file}")
43+
44+
def format_time(sec):
45+
# Convert seconds to SRT format (HH:MM:SS,ms)
46+
hours = int(sec/3600)
47+
minutes = int((sec%3600)/60)
48+
seconds = int(sec%60)
49+
milliseconds = int((sec%1)*1000)
50+
return f'{hours:02d}:{minutes:02d}:{seconds:02d},{milliseconds:03d}'
51+
52+
def results2srt(results, srt_file):
53+
"""convert results from paddlespeech to srt format for subtitle
54+
Args:
55+
results (dict): results from paddlespeech
56+
"""
57+
# times contains start and end time of each word
58+
times = results['times']
59+
# result contains the whole sentence including punctuation
60+
result = results['result']
61+
# split result into several sencences by ',' and '。'
62+
sentences = re.split(',|。', result)[:-1]
63+
# print("sentences: ", sentences)
64+
# generate relative time for each sentence in sentences
65+
relative_times = []
66+
word_i = 0
67+
for sentence in sentences:
68+
relative_times.append([])
69+
for word in sentence:
70+
if relative_times[-1] == []:
71+
relative_times[-1].append(times[word_i]['bg'])
72+
if len(relative_times[-1]) == 1:
73+
relative_times[-1].append(times[word_i]['ed'])
74+
else:
75+
relative_times[-1][1] = times[word_i]['ed']
76+
word_i += 1
77+
# print("relative_times: ", relative_times)
78+
# generate srt file according to relative_times and sentences
79+
with open(srt_file, 'w') as f:
80+
for i in range(len(sentences)):
81+
# Write index number
82+
f.write(str(i+1)+'\n')
83+
84+
# Write start and end times
85+
start = format_time(relative_times[i][0])
86+
end = format_time(relative_times[i][1])
87+
f.write(start + ' --> ' + end + '\n')
88+
89+
# Write text
90+
f.write(sentences[i]+'\n\n')
91+
logger.info(f"results saved to {srt_file}")
92+
93+
def main(args):
94+
logger.info("asr websocket client start")
95+
handler = ASRWsAudioHandler(
96+
args.server_ip,
97+
args.port,
98+
endpoint=args.endpoint,
99+
punc_server_ip=args.punc_server_ip,
100+
punc_server_port=args.punc_server_port)
101+
loop = asyncio.get_event_loop()
102+
103+
# check if the wav file is mp3 format
104+
# if so, convert it to wav format using convert_to_wav function
105+
if args.wavfile and os.path.exists(args.wavfile):
106+
if args.wavfile.endswith(".mp3"):
107+
convert_to_wav(args.wavfile)
108+
args.wavfile = args.wavfile.replace(".mp3", ".wav")
109+
110+
# support to process single audio file
111+
if args.wavfile and os.path.exists(args.wavfile):
112+
logger.info(f"start to process the wav file: {args.wavfile}")
113+
result = loop.run_until_complete(handler.run(args.wavfile))
114+
# result = result["result"]
115+
# logger.info(f"asr websocket client finished : {result}")
116+
results2srt(result, args.wavfile.replace(".wav", ".srt"))
117+
118+
# support to process batch audios from wav.scp
119+
if args.wavscp and os.path.exists(args.wavscp):
120+
logger.info(f"start to process the wavscp: {args.wavscp}")
121+
with codecs.open(args.wavscp, 'r', encoding='utf-8') as f,\
122+
codecs.open("result.txt", 'w', encoding='utf-8') as w:
123+
for line in f:
124+
utt_name, utt_path = line.strip().split()
125+
result = loop.run_until_complete(handler.run(utt_path))
126+
result = result["result"]
127+
w.write(f"{utt_name} {result}\n")
128+
129+
130+
if __name__ == "__main__":
131+
logger.info("Start to do streaming asr client")
132+
parser = argparse.ArgumentParser()
133+
parser.add_argument(
134+
'--server_ip', type=str, default='127.0.0.1', help='server ip')
135+
parser.add_argument('--port', type=int, default=8090, help='server port')
136+
parser.add_argument(
137+
'--punc.server_ip',
138+
type=str,
139+
default=None,
140+
dest="punc_server_ip",
141+
help='Punctuation server ip')
142+
parser.add_argument(
143+
'--punc.port',
144+
type=int,
145+
default=8091,
146+
dest="punc_server_port",
147+
help='Punctuation server port')
148+
parser.add_argument(
149+
"--endpoint",
150+
type=str,
151+
default="/paddlespeech/asr/streaming",
152+
help="ASR websocket endpoint")
153+
parser.add_argument(
154+
"--wavfile",
155+
action="store",
156+
help="wav file path ",
157+
default="./16_audio.wav")
158+
parser.add_argument(
159+
"--wavscp", type=str, default=None, help="The batch audios dict text")
160+
args = parser.parse_args()
161+
162+
main(args)

examples/aishell/asr0/local/train.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
#!/bin/bash
22

3-
if [ $# -lt 2 ] && [ $# -gt 3 ];then
3+
if [ $# -lt 2 ] || [ $# -gt 3 ];then
44
echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name ips(optional)"
55
exit -1
66
fi

examples/aishell/asr1/local/train.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ if [ ${seed} != 0 ]; then
1717
echo "using seed $seed & FLAGS_cudnn_deterministic=True ..."
1818
fi
1919

20-
if [ $# -lt 2 ] && [ $# -gt 3 ];then
20+
if [ $# -lt 2 ] || [ $# -gt 3 ];then
2121
echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name ips(optional)"
2222
exit -1
2323
fi

examples/aishell/asr3/local/train.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
#!/bin/bash
22

3-
if [ $# -lt 2 ] && [ $# -gt 3 ];then
3+
if [ $# -lt 2 ] || [ $# -gt 3 ];then
44
echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name ips(optional)"
55
exit -1
66
fi

examples/vctk/vc3/conf/default.yaml

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,23 @@
11
###########################################################
22
# FEATURE EXTRACTION SETTING #
33
###########################################################
4-
# 其实没用上,其实用的是 16000
5-
sr: 24000
4+
# 源码 load 的时候用的 24k, 提取 mel 用的 16k, 后续 load 和提取 mel 都要改成 24k
5+
fs: 16000
66
n_fft: 2048
7-
win_length: 1200
8-
hop_length: 300
7+
n_shift: 300
8+
win_length: 1200 # Window length.(in samples) 50ms
9+
# If set to null, it will be the same as fft_size.
10+
window: "hann" # Window function.
11+
12+
fmin: 0 # Minimum frequency of Mel basis.
13+
fmax: 8000 # Maximum frequency of Mel basis. sr // 2
914
n_mels: 80
15+
# only for StarGANv2 VC
16+
norm: # None here
17+
htk: True
18+
power: 2.0
19+
20+
1021
###########################################################
1122
# MODEL SETTING #
1223
###########################################################

examples/vctk/vc3/local/preprocess.sh

Lines changed: 23 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,32 @@ stop_stage=100
66
config_path=$1
77

88
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
9+
# extract features
10+
echo "Extract features ..."
11+
python3 ${BIN_DIR}/preprocess.py \
12+
--dataset=vctk \
13+
--rootdir=~/datasets/VCTK-Corpus-0.92/ \
14+
--dumpdir=dump \
15+
--config=${config_path} \
16+
--num-cpu=20
917

1018
fi
1119

1220
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
13-
14-
fi
15-
16-
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
21+
echo "Normalize ..."
22+
python3 ${BIN_DIR}/normalize.py \
23+
--metadata=dump/train/raw/metadata.jsonl \
24+
--dumpdir=dump/train/norm \
25+
--speaker-dict=dump/speaker_id_map.txt
26+
27+
python3 ${BIN_DIR}/normalize.py \
28+
--metadata=dump/dev/raw/metadata.jsonl \
29+
--dumpdir=dump/dev/norm \
30+
--speaker-dict=dump/speaker_id_map.txt
31+
32+
python3 ${BIN_DIR}/normalize.py \
33+
--metadata=dump/test/raw/metadata.jsonl \
34+
--dumpdir=dump/test/norm \
35+
--speaker-dict=dump/speaker_id_map.txt
1736

1837
fi

0 commit comments

Comments
 (0)