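"""JARVIS: a voice/text assistant demo built with Gradio.

Pipeline: microphone audio -> speech-to-text (streaming_stt_nemo) -> LLM reply
(Hugging Face InferenceClient) -> text-to-speech (edge-tts). A second,
text-only interface calls the LLM directly.
"""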
import os
import random
import tempfile

import edge_tts
import gradio as gr
from huggingface_hub import InferenceClient
from streaming_stt_nemo import Model

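# Speech-to-text setup: one streaming_stt_nemo model is loaded per language
# and cached in `engines` (only English here).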
default_lang = "en"

engines = { default_lang: Model(default_lang) }

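# Transcribe a recorded audio file to text with the cached model.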
def transcribe(audio):
    lang = "en"
    model = engines[lang]
    text = model.stt_file(audio)[0]
    return text

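# Optional Hugging Face API token from the environment; passed to
# InferenceClient below so gated models such as Meta-Llama-3 can be queried.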
HF_TOKEN = os.environ.get("HF_TOKEN", None)

def client_fn(model):
    """Return an InferenceClient for the chosen model, defaulting to Phi-3."""
    if "Mixtral" in model:
        repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    elif "Llama" in model:
        repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
    elif "Mistral" in model:
        repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
    else:
        repo_id = "microsoft/Phi-3-mini-4k-instruct"
    return InferenceClient(repo_id, token=HF_TOKEN)

def randomize_seed_fn(seed: int) -> int:
    # The incoming seed is ignored: a fresh random seed is drawn on every call
    # so repeated requests do not produce identical generations.
    return random.randint(0, 999999)

system_instructions1 = """
[SYSTEM] Answer as JARVIS, the AI assistant made by Tony Stark.
Keep the conversation friendly, short, clear, and concise.
Avoid unnecessary introductions and answer the user's questions directly.
Respond in a normal, conversational manner while being friendly and helpful.
[USER]
"""

def models(text, model="Mixtral 8x7B", seed=42):
    # Draw a fresh random seed so each reply is sampled differently.
    seed = int(randomize_seed_fn(seed))

    client = client_fn(model)

    generate_kwargs = dict(
        max_new_tokens=300,
        seed=seed,
    )

    # Wrap the user text in the JARVIS prompt template and stream tokens back.
    formatted_prompt = system_instructions1 + text + "[JARVIS]"
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True,
        return_full_text=False)
    output = ""
    for response in stream:
        if response.token.text != "</s>":  # skip the end-of-sequence token
            output += response.token.text

    return output

async def respond(audio, model, seed):
    user = transcribe(audio)
    reply = models(user, model, seed)
    communicate = edge_tts.Communicate(reply)
    # edge-tts produces MP3 audio by default, so use a matching suffix.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    yield tmp_path

DESCRIPTION = """# <center><b>JARVIS⚡</b></center>
### <center>A personal assistant of Tony Stark for YOU</center>
### <center>Voice chat with your personal assistant</center>
"""

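# UI: a voice-chat interface (speech in and out) and a text-chat interface.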
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)

    # Voice chat: a recording is transcribed, answered, and spoken back.
    with gr.Row():
        select = gr.Dropdown(
            ["Mixtral 8x7B", "Llama 3 8B", "Mistral 7B v0.3", "Phi 3 mini"],
            value="Mistral 7B v0.3",
            label="Model",
        )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=999999,
            step=1,
            value=0,
            visible=False,
        )
        voice_input = gr.Audio(label="User", sources="microphone", type="filepath")
        voice_output = gr.Audio(
            label="AI",
            type="filepath",
            interactive=False,
            autoplay=True,
            elem_classes="audio",
        )
        # live=True reruns the pipeline whenever a new recording arrives;
        # batching is dropped because respond() handles one request at a time.
        gr.Interface(
            fn=respond,
            inputs=[voice_input, select, seed],
            outputs=[voice_output],
            live=True,
        )
        
    # Text chat: type a message and read the model's reply.
    with gr.Row():
        select_text = gr.Dropdown(
            ["Mixtral 8x7B", "Llama 3 8B", "Mistral 7B v0.3", "Phi 3 mini"],
            value="Mistral 7B v0.3",
            label="Model",
        )
        seed_text = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=999999,
            step=1,
            value=0,
            visible=False,
        )
        text_input = gr.Textbox(label="User")
        text_output = gr.Textbox(label="AI", interactive=False)
        gr.Interface(
            fn=models,
            inputs=[text_input, select_text, seed_text],
            outputs=[text_output],
            live=True,
        )

if __name__ == "__main__":
    demo.queue(max_size=200).launch()