gorkemgoknar committed
Commit 7baef9b
1 Parent(s): e689a3a

Update app.py

Files changed (1)
  1. app.py +9 -31
app.py CHANGED
@@ -5,6 +5,8 @@ from transformers import AutoConfig
 from transformers import GPT2Tokenizer, GPT2LMHeadModel
 from itertools import chain
 
+import os
+
 import tempfile
 from typing import Optional
 from TTS.config import load_config
@@ -99,34 +101,9 @@ def get_chat_response(name,history=[], input_txt = "Hello , what is your name?")
 
 MODEL_NAME= "tts_models/multilingual/multi-dataset/your_tts"
 
-def tts(text: str, speaker_idx: str=None):
-    if len(text) > MAX_TXT_LEN:
-        text = text[:MAX_TXT_LEN]
-        print(f"Input text was cutoff since it went over the {MAX_TXT_LEN} character limit.")
-    print(text, model_name)
-    # download model
-    model_path, config_path, model_item = manager.download_model(f"tts_models/{MODEL_NAME}")
-    vocoder_name: Optional[str] = model_item["default_vocoder"]
-    # download vocoder
-    vocoder_path = None
-    vocoder_config_path = None
-    if vocoder_name is not None:
-        vocoder_path, vocoder_config_path, _ = manager.download_model(vocoder_name)
-    # init synthesizer
-    synthesizer = Synthesizer(
-        model_path, config_path, None, None, vocoder_path, vocoder_config_path,
-    )
-    # synthesize
-    if synthesizer is None:
-        raise NameError("model not found")
-    wavs = synthesizer.tts(text, speaker_idx)
-    # return output
-    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
-        synthesizer.save_wav(wavs, fp)
-    return fp.name
-
-
-def greet(character,message,history,voice):
+
+
+def greet(character,audio,message,history,voice):
 
     #gradios set_state/get_state had problems on embedded html!
     history = history or {"character": character, "message_history" : [] }
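
Note: the deleted tts() helper above drove Coqui's lower-level Synthesizer with a speaker_idx, while the commit instead clones the user's recorded voice by shelling out to the tts CLI with --speaker_wav (next hunk). For comparison, a minimal in-process sketch of that cloning flow, assuming the installed Coqui TTS version exposes the high-level TTS.api wrapper; the helper name and language code here are illustrative, not part of the repo:

# Sketch only: high-level Coqui TTS API (assumes TTS.api exists in the installed version).
from TTS.api import TTS

def clone_and_speak(text: str, speaker_wav: str, out_path: str = "tts_output.wav") -> str:
    # your_tts supports reference-audio cloning via speaker_wav and needs a language code.
    tts = TTS("tts_models/multilingual/multi-dataset/your_tts")
    tts.tts_to_file(text=text, speaker_wav=speaker_wav, language="en", file_path=out_path)
    return out_path
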
@@ -139,8 +116,9 @@ def greet(character,message,history,voice)
 
 
     response = get_chat_response(character,history=history["message_history"],input_txt=message)
-    voice = tts(response)
-
+    os.system('tts --text "'+response+'" --model_name tts_models/multilingual/multi-dataset/your_tts --speaker_wav '+audio+' --language_idx "es"')
+    voice = "tts_output.wav"
+
     history["message_history"].append((message, response))
 
 
@@ -177,7 +155,7 @@ article = "<p style='text-align: center'><a href='https://www.linkedin.com/pulse
 
 history = {"character": "None", "message_history" : [] }
 interface= gr.Interface(fn=greet,
-                        inputs=[gr.Audio(source="microphone", type="filepath"),gr.inputs.Dropdown(personality_choices) ,"text", "state"],
+                        inputs=[gr.inputs.Dropdown(personality_choices),gr.Audio(source="microphone", type="filepath") ,"text", "state"],
                         outputs=["html","state",gr.Audio(type="filepath")],
 
                         css=css, title=title, description=description,article=article )
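
gr.Interface feeds input components to fn positionally, so with the new greet(character, audio, ...) signature the Dropdown goes first and the microphone Audio second. A standalone sketch of that mapping, using the same older Gradio input API the repo uses and hypothetical dropdown choices (demo_fn stands in for greet and is not part of the repo):

# Sketch only: positional mapping between Interface inputs and the wrapped function's parameters.
import gradio as gr

def demo_fn(character, audio, message, history):
    # hypothetical stand-in for greet(); just echoes what each slot received
    history = history or []
    history.append(message)
    return f"{character} heard '{message}' (speaker wav: {audio})", history

demo = gr.Interface(
    fn=demo_fn,
    inputs=[
        gr.inputs.Dropdown(["Gandalf", "Batman"]),       # -> character (placeholder choices)
        gr.Audio(source="microphone", type="filepath"),  # -> audio (reference wav for cloning)
        "text",                                          # -> message
        "state",                                         # -> history
    ],
    outputs=["text", "state"],
)
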
 