
Commit 3fb8196

Implement "*Is recording a voice message...*" for TTS oobabooga#303
1 parent 0dab2c5 commit 3fb8196

3 files changed: 9 insertions(+), 4 deletions(-)


extensions/silero_tts/script.py

Lines changed: 2 additions & 0 deletions
@@ -81,6 +81,7 @@ def input_modifier(string):
     if (shared.args.chat or shared.args.cai_chat) and len(shared.history['internal']) > 0:
         shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>','controls>')]
 
+    shared.processing_message = "*Is recording a voice message...*"
     return string
 
 def output_modifier(string):
@@ -119,6 +120,7 @@ def output_modifier(string):
     if params['show_text']:
         string += f'\n\n{original_string}'
 
+    shared.processing_message = "*Is typing...*"
     return string
 
 def bot_prefix_modifier(string):
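
The same hook works for any extension: point shared.processing_message at a custom placeholder in input_modifier and put the default back in output_modifier. A minimal sketch of a hypothetical extension reusing it (the message text below is illustrative, not part of this commit):

import modules.shared as shared

def input_modifier(string):
    # Shown in the chat window while the reply is being generated
    shared.processing_message = "*Is doing something slow...*"  # hypothetical message
    return string

def output_modifier(string):
    # Restore the default placeholder for the next turn
    shared.processing_message = "*Is typing...*"
    return string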

modules/chat.py

Lines changed: 6 additions & 4 deletions
@@ -126,8 +126,9 @@ def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical
     else:
         prompt = custom_generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size)
 
+    # Yield *Is typing...*
     if not regenerate:
-        yield shared.history['visible']+[[visible_text, '*Is typing...*']]
+        yield shared.history['visible']+[[visible_text, shared.processing_message]]
 
     # Generate
     reply = ''
@@ -168,7 +169,8 @@ def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typ
     prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=True)
 
     reply = ''
-    yield '*Is typing...*'
+    # Yield *Is typing...*
+    yield shared.processing_message
     for i in range(chat_generation_attempts):
         for reply in generate_reply(prompt+reply, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name2}:"):
             reply, next_character_found = extract_message_from_reply(prompt, reply, name1, name2, check, impersonate=True)
@@ -187,8 +189,8 @@ def regenerate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typi
     else:
         last_visible = shared.history['visible'].pop()
         last_internal = shared.history['internal'].pop()
-
-        yield generate_chat_output(shared.history['visible']+[[last_visible[0], '*Is typing...*']], name1, name2, shared.character)
+        # Yield '*Is typing...*'
+        yield generate_chat_output(shared.history['visible']+[[last_visible[0], shared.processing_message]], name1, name2, shared.character)
         for _history in chatbot_wrapper(last_internal[0], max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, chat_generation_attempts, regenerate=True):
             if shared.args.cai_chat:
                 shared.history['visible'][-1] = [last_visible[0], _history[-1][1]]

modules/shared.py

Lines changed: 1 addition & 0 deletions
@@ -11,6 +11,7 @@
 history = {'internal': [], 'visible': []}
 character = 'None'
 stop_everything = False
+processing_message = '*Is typing...*'
 
 # UI elements (buttons, sliders, HTML, etc)
 gradio = {}
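
Taken together, the three files route the interim "typing" indicator through a single runtime setting. A rough walk-through of one TTS chat turn (paraphrased for clarity, not the actual call path):

import modules.shared as shared

visible_text = "user message"  # what the user just typed (placeholder for this sketch)

# 1. Default value, defined once in modules/shared.py
shared.processing_message = '*Is typing...*'

# 2. The silero_tts input_modifier runs on the incoming message and swaps the placeholder
shared.processing_message = "*Is recording a voice message...*"

# 3. chatbot_wrapper yields the interim row, so the UI shows the TTS-specific text
interim = shared.history['visible'] + [[visible_text, shared.processing_message]]

# 4. The silero_tts output_modifier restores the default once the audio reply is built
shared.processing_message = "*Is typing...*"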
