add gradio tts & app.py which make gradio and api same route
- Dockerfile +5 -3
- app.py +10 -0
- interface.py +105 -5
- llm/__pycache__/basemodel.cpython-311.pyc +0 -0
- llm/__pycache__/llm.cpython-311.pyc +0 -0
- llm/__pycache__/prompt.cpython-311.pyc +0 -0
- main.py +26 -17
Dockerfile
CHANGED
@@ -12,6 +12,8 @@ RUN pip install --no-cache-dir --upgrade -r requirements.txt
 COPY --chown=user . /app
 
 EXPOSE 7826 8000 8001
-CMD python3 interface.py & \
-    uvicorn main:app --host 0.0.0.0 --port 8000 & \
-    uvicorn tts.tts:app --host 0.0.0.0 --port 8001
+# CMD python3 interface.py & \
+#     uvicorn main:app --host 0.0.0.0 --port 8000 & \
+#     uvicorn tts.tts:app --host 0.0.0.0 --port 8001
+
+CMD python3 app.py
app.py
ADDED
@@ -0,0 +1,10 @@
+import uvicorn
+from main import app
+from interface import create_gradio_interface
+import gradio as gr
+
+if __name__ == "__main__":
+    gr_interface = create_gradio_interface()
+    gr_interface.queue(default_concurrency_limit=1)
+    app = gr.mount_gradio_app(app, gr_interface, path="/")
+    uvicorn.run(app, host='0.0.0.0', port=7860, workers=1)
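With app.py as the single entrypoint, the FastAPI routes from main.py and the Gradio UI are served by one uvicorn process on port 7860, which is what the commit title means by "same route". A minimal sketch of how this could be checked, assuming the server is running locally; routes registered on the FastAPI app (such as /history and /docs) are matched before the Gradio mount at "/":

import requests

# assumes `python3 app.py` is running locally on port 7860
print(requests.get("http://localhost:7860/docs").status_code)     # FastAPI Swagger UI
print(requests.get("http://localhost:7860/history").status_code)  # API route defined in main.py
print(requests.get("http://localhost:7860/").status_code)         # Gradio chat + TTS front end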
interface.py
CHANGED
@@ -1,7 +1,9 @@
+import os
 import gradio as gr
 import requests
 
-
+PORT = 7860
+API_BASE_URL = f"http://localhost:{PORT}"
 
 # Function: Get nurse response
 def get_nurse_response(user_input, model_name, chat_history):
@@ -71,7 +73,97 @@ def view_ehr_details(view):
     except requests.exceptions.RequestException as e:
         return f"Error: {str(e)}"
 
-#
+# --------------------------------------------------------------------------------------------
+def call_botnoi_tts(text, speaker, volume, speed):
+    url = f"{API_BASE_URL}/tts/generate_voice_botnoi/"
+    payload = {
+        "text": text,
+        "speaker": speaker,
+        "volume": volume,
+        "speed": speed,
+        "token": os.getenv("BOTNOI_API_TOKEN")
+    }
+
+    response = requests.post(url, json=payload)
+    if response.status_code == 200:
+        return response.content, "output.mp3"
+    else:
+        return f"Error: {response.status_code} - {response.json().get('detail', 'Unknown error')}", None
+
+# Helper function to call VAJA9 API
+def call_vaja9_tts(text, speaker, phrase_break, audiovisual):
+    url = f"{API_BASE_URL}/tts/generate_voice_vaja9/"
+    payload = {
+        "text": text,
+        "speaker": speaker,
+        "phrase_break": phrase_break,
+        "audiovisual": audiovisual
+    }
+
+    response = requests.post(url, json=payload)
+    if response.status_code == 200:
+        return response.content, "output.wav"
+    else:
+        return f"Error: {response.status_code} - {response.json().get('detail', 'Unknown error')}", None
+# --------------------------------------------------------------------------------------------
+
+def gradio_tts_interface():
+    with gr.Tabs() as tabs:
+        # Tab for Botnoi TTS API
+        with gr.TabItem("Botnoi TTS"):
+            gr.Markdown("### Generate Voice with Botnoi API")
+
+            botnoi_text = gr.Textbox(label="Text", placeholder="Enter text to synthesize")
+            botnoi_speaker = gr.Textbox(label="Speaker ID", value="52", placeholder="Default: 52")
+            botnoi_volume = gr.Slider(label="Volume", minimum=0, maximum=100, value=100)
+            botnoi_speed = gr.Slider(label="Speed", minimum=0.5, maximum=2.0, step=0.1, value=1.0)
+
+            botnoi_generate = gr.Button("Generate Audio")
+            botnoi_output = gr.Audio(label="Generated Audio")
+            botnoi_error = gr.Textbox(label="Error", interactive=False, visible=False)
+
+            def generate_botnoi_voice(text, speaker, volume, speed):
+                result, file_name = call_botnoi_tts(text, speaker, volume, speed)
+                if file_name:
+                    return gr.update(value=result), ""
+                else:
+                    return None, result
+
+            botnoi_generate.click(generate_botnoi_voice,
+                                  inputs=[botnoi_text, botnoi_speaker, botnoi_volume, botnoi_speed],
+                                  outputs=[botnoi_output, botnoi_error])
+
+        # Tab for VAJA9 TTS API
+        with gr.TabItem("VAJA9 TTS"):
+            gr.Markdown("### Generate Voice with VAJA9 API")
+
+            vaja9_text = gr.Textbox(label="Text", placeholder="Enter text to synthesize")
+            vaja9_speaker = gr.Radio(label="Speaker", choices=["0 - Male", "1 - Female", "2 - Boy", "3 - Girl"], value="1 - Female")
+            vaja9_phrase_break = gr.Radio(label="Phrase Break", choices=["0 - Auto", "1 - None"], value="0 - Auto")
+            vaja9_audiovisual = gr.Radio(label="Audiovisual", choices=["0 - Audio", "1 - Audio + Visual"], value="0 - Audio")
+
+            vaja9_generate = gr.Button("Generate Audio")
+            vaja9_output = gr.Audio(label="Generated Audio")
+            vaja9_error = gr.Textbox(label="Error", interactive=False, visible=False)
+
+            def generate_vaja9_voice(text, speaker, phrase_break, audiovisual):
+                speaker_id = int(speaker.split(" - ")[0])
+                phrase_break_id = int(phrase_break.split(" - ")[0])
+                audiovisual_id = int(audiovisual.split(" - ")[0])
+
+                result, file_name = call_vaja9_tts(text, speaker_id, phrase_break_id, audiovisual_id)
+                if file_name:
+                    return gr.update(value=result), ""
+                else:
+                    return None, result
+
+            vaja9_generate.click(generate_vaja9_voice,
+                                 inputs=[vaja9_text, vaja9_speaker, vaja9_phrase_break, vaja9_audiovisual],
+                                 outputs=[vaja9_output, vaja9_error])
+    return tabs
+
+# --------------------------------------------------------------------------------------------
+# Chatbot Interface
 def create_gradio_interface():
     with gr.Blocks() as interface:
         # Title and description
@@ -176,7 +268,6 @@ def create_gradio_interface():
             inputs=[],
             outputs=chat_history_output,
         )
-
         send_button.click(
             fn=view_ehr_details,
             inputs=[gr.Textbox(value="details", visible=False)],
@@ -189,15 +280,24 @@
             outputs=ehr_prompt_output
         )
 
+        gr.Markdown(
+            """
+            ---
+            """
+        )
+        # TTS --------------------------------------------------------------------------------------------
+        gr.Markdown("# Text-to-Speech (TTS) API Test Interface")
+        tts_interface = gradio_tts_interface()
+
         # Footer
         gr.Markdown(
             """
            ---
-
+            Built With ❤️ by **[Piang](https://github.com/microhum)** 🚀
            Powered by Typhoon v1.5x and OpenThaiGPT Models.
            """
        )
-
+
    return interface

# Run the Gradio Interface
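The new call_botnoi_tts / call_vaja9_tts helpers return raw audio bytes plus a suggested file name on success, or an error string and None on failure. A hedged usage sketch outside the Gradio UI, assuming the server from app.py is running so that API_BASE_URL resolves and that BOTNOI_API_TOKEN is set in the environment; the Thai sample text is illustrative only:

from interface import call_botnoi_tts

# success path returns (audio bytes, "output.mp3"); failure returns (error string, None)
audio, filename = call_botnoi_tts("สวัสดีค่ะ", speaker="52", volume=100, speed=1.0)
if filename:
    with open(filename, "wb") as f:
        f.write(audio)
else:
    print(audio)  # error message reported by the TTS endpoint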
llm/__pycache__/basemodel.cpython-311.pyc
CHANGED
Binary files a/llm/__pycache__/basemodel.cpython-311.pyc and b/llm/__pycache__/basemodel.cpython-311.pyc differ

llm/__pycache__/llm.cpython-311.pyc
CHANGED
Binary files a/llm/__pycache__/llm.cpython-311.pyc and b/llm/__pycache__/llm.cpython-311.pyc differ

llm/__pycache__/prompt.cpython-311.pyc
CHANGED
Binary files a/llm/__pycache__/prompt.cpython-311.pyc and b/llm/__pycache__/prompt.cpython-311.pyc differ

main.py
CHANGED
@@ -1,10 +1,13 @@
+import os
 from typing import Optional
+import requests
 import uvicorn
 from llm.basemodel import EHRModel
 from llm.llm import VirtualNurseLLM
-from fastapi import FastAPI
+from fastapi import FastAPI, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import HTMLResponse
+from fastapi.responses import FileResponse, HTMLResponse
+from pythainlp.tokenize import sent_tokenize
 from pydantic import BaseModel
 from llm.models import model_list, get_model
 import time
@@ -23,6 +26,7 @@ nurse_llm = VirtualNurseLLM(
 # api_key="dummy"
 # )
 
+
 app = FastAPI()
 
 app.add_middleware(
@@ -51,21 +55,21 @@ class EHRData(BaseModel):
 class ChatHistory(BaseModel):
     chat_history: list
 
-@app.get("/", response_class=HTMLResponse)
-def read_index():
-    return """
-    <!DOCTYPE html>
-    <html>
-        <head>
-            <title>MALI_NURSE API</title>
-        </head>
-        <body>
-            <h1>Welcome to MALI_NURSE API</h1>
-            <p>This is the index page. Use the link below to access the API docs:</p>
-            <a href="/docs">Go to Swagger Docs UI</a>
-        </body>
-    </html>
-    """
+# @app.get("/", response_class=HTMLResponse)
+# def read_index():
+#     return """
+#     <!DOCTYPE html>
+#     <html>
+#         <head>
+#             <title>MALI_NURSE API</title>
+#         </head>
+#         <body>
+#             <h1>Welcome to MALI_NURSE API</h1>
+#             <p>This is the index page. Use the link below to access the API docs:</p>
+#             <a href="/docs">Go to Swagger Docs UI</a>
+#         </body>
+#     </html>
+#     """
 
 @app.get("/history")
 def get_chat_history():
@@ -124,5 +128,10 @@ def nurse_response(user_input: UserInput):
 
     return NurseResponse(nurse_response=response)
 
+# TTS
+from tts.tts import app as tts_app
+
+app.mount("/tts", tts_app)
+
 if __name__ == "__main__":
     uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
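app.mount("/tts", tts_app) exposes every route of the tts.tts sub-application under the /tts prefix, which is why interface.py posts to /tts/generate_voice_botnoi/ and /tts/generate_voice_vaja9/. A hedged sketch of calling the mounted endpoint directly, assuming main.py is run standalone on port 8000 and that the request body matches the payload built in interface.py (the endpoint's actual schema lives in tts/tts.py, which is not part of this diff):

import os
import requests

# path and payload fields mirror call_botnoi_tts in interface.py;
# use port 7860 instead if the server was started via app.py
resp = requests.post(
    "http://localhost:8000/tts/generate_voice_botnoi/",
    json={
        "text": "ทดสอบเสียง",
        "speaker": "52",
        "volume": 100,
        "speed": 1.0,
        "token": os.getenv("BOTNOI_API_TOKEN"),
    },
)
if resp.status_code == 200:
    with open("output.mp3", "wb") as f:
        f.write(resp.content)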