import gradio as gr
from gpt4all import GPT4All
from huggingface_hub import hf_hub_download

title = "DiarizationLM GGUF inference on CPU"

description = """
DiarizationLM GGUF inference on CPU: corrects speaker attribution in a
diarized transcript using the DiarizationLM-13b-Fisher-v1 model.
"""

model_path = "models"
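# Fetch the GGUF checkpoint from the Hugging Face Hub into ./models.
# The commented-out Q4_K_M file is a smaller quantized alternative to BF16.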
# model_name = "model-unsloth.Q4_K_M.gguf"
model_name = "model-unsloth.BF16.gguf"
hf_hub_download(repo_id="google/DiarizationLM-13b-Fisher-v1", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)

print("Start the model init process")
# Load the GGUF weights on CPU via GPT4All; allow_download=False since the
# checkpoint was already fetched from the Hub above.
model = GPT4All(model_name, model_path, allow_download=False, device="cpu")
print("Finish the model init process")

# DiarizationLM is prompted completion-style: the diarized hypothesis text
# followed by " --> ", with no system prompt or chat-session wrapping.
model.config["promptTemplate"] = "{0} --> "
model.config["systemPrompt"] = ""
model._is_chat_session_activated = False  # bypass GPT4All's chat templating

# Upper bound on tokens generated per reply.
max_new_tokens = 2048

print("Finish the model config process")

def generater(message, history, temperature, top_p, top_k):
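    """Stream tokens from the model, yielding the accumulated reply so the
    Gradio chat window updates incrementally."""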
    prompt = model.config["promptTemplate"].format(message)
    outputs = []
    for token in model.generate(prompt=prompt, temp=temperature, top_k=top_k, top_p=top_p, max_tokens=max_new_tokens, streaming=True):
        outputs.append(token)
        yield "".join(outputs)
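
# Illustrative exchange (the reply text is hypothetical; actual output
# depends on the model):
#   message: "<speaker:1> Hello, how are you doing <speaker:2> today? I am doing well."
#   prompt : "<speaker:1> Hello, how are you doing <speaker:2> today? I am doing well. --> "
#   reply  : "<speaker:1> Hello, how are you doing today? <speaker:2> I am doing well."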


def vote(data: gr.LikeData):
    # Placeholder for the chatbot's like/dislike callback; currently a no-op.
    # Extend this to log user feedback if needed.
    pass

print("Create chatbot")
chatbot = gr.Chatbot()
print("Created chatbot")

print("Add additional_inputs")
additional_inputs = [
    gr.Slider(
        label="temperature",
        value=0.0,
        minimum=0.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
    ),
    gr.Slider(
        label="top_p",
        value=1.0,
        minimum=0.0,
        maximum=1.0,
        step=0.01,
        interactive=True,
        info="0.1 means only the tokens comprising the top 10% probability mass are considered. Suggest set to 1 and use temperature. 1 means 100% and will disable it",
    ),
    gr.Slider(
        label="top_k",
        value=50,
        minimum=0,
        maximum=1000,
        step=1,
        interactive=True,
        info="limits candidate tokens to a fixed number after sorting by probability. Setting it higher than the vocabulary size deactivates this limit.",
    )
]
print("Added additional_inputs")

# Chat UI: the streaming generater is the callback; the sliders defined above
# appear under "Additional inputs".
iface = gr.ChatInterface(
    fn=generater,
    title=title,
    description=description,
    chatbot=chatbot,
    additional_inputs=additional_inputs,
    examples=[
        ["<speaker:1> Hello, how are you doing <speaker:2> today? I am doing well."],
    ],
)

print("Added iface")

# Render the ChatInterface inside a Blocks app so the like/dislike callback
# can be attached to the shared chatbot component.
with gr.Blocks() as demo:
    chatbot.like(vote, None, None)
    iface.render()

print("Rendered iface")

if __name__ == "__main__":
    # Allow at most 3 queued requests to keep the CPU-only Space responsive.
    demo.queue(max_size=3).launch()