Spaces:
Build error
gorkemgoknar committed on
Commit 0735709 • 1 Parent(s): 94fbe21
Update app.py
app.py CHANGED
@@ -5,6 +5,14 @@ from transformers import AutoConfig
 from transformers import GPT2Tokenizer, GPT2LMHeadModel
 from itertools import chain
 
+import tempfile
+from typing import Optional
+from TTS.config import load_config
+import gradio as gr
+import numpy as np
+from TTS.utils.manage import ModelManager
+from TTS.utils.synthesizer import Synthesizer
+
 
 config = AutoConfig.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
 model = GPT2LMHeadModel.from_pretrained('gorkemgoknar/gpt2chatbotenglish', config=config)
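Note: the tts() helper added in the next hunk refers to a ModelManager instance called manager and a MAX_TXT_LEN character limit, neither of which is defined in this commit's hunks; presumably they live elsewhere in app.py. A minimal sketch of that assumed setup, following the usual Coqui TTS pattern (the models-file location and the 100-character cap are assumptions, not taken from this commit):

# Assumed setup for names that tts() uses; not part of this commit's hunks.
from pathlib import Path
from TTS.utils.manage import ModelManager

MAX_TXT_LEN = 100  # assumed cap on the text passed to the synthesizer
# .models.json is the model catalogue that ships with the Coqui TTS package;
# the Space is assumed to keep a copy next to app.py.
manager = ModelManager(Path(__file__).parent / ".models.json")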
@@ -89,7 +97,33 @@ def get_chat_response(name,history=[], input_txt = "Hello , what is your name?")
 | Brother | Allnut | Rose | Qui-Gon | Jar Jar
 '''
 
-
+MODEL_NAME = "tts_models/multilingual/multi-dataset/your_tts"
+
+def tts(text: str, speaker_idx: str = None):
+    if len(text) > MAX_TXT_LEN:
+        text = text[:MAX_TXT_LEN]
+        print(f"Input text was cut off since it went over the {MAX_TXT_LEN} character limit.")
+    print(text, MODEL_NAME)
+    # download the TTS model (MODEL_NAME is already the full model id)
+    model_path, config_path, model_item = manager.download_model(MODEL_NAME)
+    vocoder_name: Optional[str] = model_item["default_vocoder"]
+    # download its default vocoder, if one is defined
+    vocoder_path = None
+    vocoder_config_path = None
+    if vocoder_name is not None:
+        vocoder_path, vocoder_config_path, _ = manager.download_model(vocoder_name)
+    # init synthesizer
+    synthesizer = Synthesizer(
+        model_path, config_path, None, None, vocoder_path, vocoder_config_path,
+    )
+    # synthesize
+    if synthesizer is None:
+        raise NameError("model not found")
+    wavs = synthesizer.tts(text, speaker_idx)
+    # write the audio to a temporary .wav file and return its path
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+        synthesizer.save_wav(wavs, fp)
+    return fp.name
 
 
 def greet(character,message,history):
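The tts() function downloads the your_tts model and its default vocoder through the ModelManager, synthesizes the (possibly truncated) text with a Synthesizer, writes the result to a temporary .wav file, and returns that file's path. A hedged usage example; the speaker id below is an assumption, since your_tts is a multi-speaker model and the commit itself never passes one:

# Example call; "female-en-5" is assumed to be a valid your_tts speaker id.
wav_path = tts("Nice to meet you. Where are we?", speaker_idx="female-en-5")
print("Synthesized audio written to", wav_path)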
@@ -105,6 +139,7 @@ def greet(character,message,history):
 
 
     response = get_chat_response(character,history=history["message_history"],input_txt=message)
+    tts(response)
 
     history["message_history"].append((message, response))
 
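Note that greet() calls tts(response) only for its side effect and discards the returned .wav path, so the synthesized audio never reaches the Gradio outputs. A sketch of how the path could be threaded back to the interface; the rest of greet() and the Interface wiring are outside this diff, so the return shape and the gr.Audio output are assumptions:

# Assumed variant, not from this commit: keep the wav path so it can feed a
# gr.Audio output component alongside the text reply.
def greet(character, message, history):
    response = get_chat_response(character, history=history["message_history"], input_txt=message)
    wav_path = tts(response)
    history["message_history"].append((message, response))
    return response, wav_path, history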