DeepMount00 committed
Commit efe44f0
1 Parent(s): 9b5b16c

Upload app.py

Files changed (1)
  1. app.py +104 -0
app.py ADDED
@@ -0,0 +1,104 @@
+ import spaces
+ import torch
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Instruction-tuned model this Space demonstrates (see DESCRIPTION below)
+ model_id = "DeepMount00/Llama-3-8b-Ita"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto").eval()
+
+ DESCRIPTION = '''
+ <div>
+ <h1 style="text-align: center;">Meta Llama3 8B Ita</h1>
+ <p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/DeepMount00/Llama-3-8b-Ita"><b>Llama-3-8b-Ita</b></a>.</p>
+ </div>
+ '''
+ PLACEHOLDER = """
+ <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+ <img src="https://cdn-avatars.huggingface.co/v1/production/uploads/64f1bf6a8b550e875926a590/9IXg0qMUF0OV2cWPT8cZn.jpeg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.50; ">
+ <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">DeepMount00 llama3</h1>
+ <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Chiedimi qualsiasi cosa...</p>
+ </div>
+ """
+
+
+ css = """
+ h1 {
+   text-align: center;
+   display: block;
+ }
+ """
+
+ @spaces.GPU(duration=120)
+ def chat_llama3_8b(message: str, history: list, temperature: float, max_new_tokens: int) -> str:
+     # Build the conversation history in the format expected by the chat template
+     conversation = []
+     for user, assistant in history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
+     # Prepare the model inputs
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
+
+     # Text-generation parameters
+     do_sample = temperature > 0  # sample unless the temperature is 0
+     real_temperature = max(temperature, 0.001)  # avoid temperature 0, which disables sampling
+
+     # Generate a response from the model
+     generated_ids = model.generate(
+         input_ids=input_ids,
+         max_new_tokens=max_new_tokens,
+         do_sample=do_sample,
+         temperature=real_temperature,
+         eos_token_id=tokenizer.eos_token_id
+     )
+
+     # Decode the generated tokens and keep only the text that follows the prompt
+     decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
+     prompt_end_index = decoded[0].find(message)
+     final_response = decoded[0][prompt_end_index + len(message):] if prompt_end_index != -1 else decoded[0]
+
+     # Drop the leading "assistant" role marker left by the chat template
+     return final_response.strip().removeprefix("assistant").strip()
+
+
+ # Gradio block
+ chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
+
+ with gr.Blocks(fill_height=True, css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
+     gr.ChatInterface(
+         fn=chat_llama3_8b,
+         chatbot=chatbot,
+         fill_height=True,
+         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+         additional_inputs=[
+             gr.Slider(minimum=0,
+                       maximum=1,
+                       step=0.1,
+                       value=0.001,
+                       label="Temperature",
+                       render=False),
+             gr.Slider(minimum=128,
+                       maximum=4096,
+                       step=1,
+                       value=512,
+                       label="Max new tokens",
+                       render=False),
+         ],
+         examples=[
+             ['Quanto è alta la torre di Pisa?'],
+             ["Se un mattone pesa 1kg più mezzo mattone, quanto pesa il mattone? rispondi impostando l'equazione"],
+             ['Quanto fa 9.000 * 9.000?'],
+             ['Scrivi una funzione python che calcola i primi n numeri di fibonacci'],
+             ['Inventa tre indovinelli tutti diversi con le relative risposte in formato json']
+         ],
+         cache_examples=False,
+     )
+
+
+ if __name__ == "__main__":
+     demo.launch()
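
For reference, a minimal smoke test of chat_llama3_8b outside the Gradio UI (a sketch, not part of the committed file): it assumes app.py is importable, the model weights load successfully, and a GPU is available, and it reuses one of the demo's example prompts together with the default slider values.

# Hypothetical smoke test; assumes app.py imported cleanly and a GPU is present.
from app import chat_llama3_8b

history = []  # ChatInterface passes prior (user, assistant) turns here
reply = chat_llama3_8b(
    "Quanto è alta la torre di Pisa?",  # one of the demo's example prompts
    history,
    temperature=0.001,   # default of the Temperature slider
    max_new_tokens=512,  # default of the "Max new tokens" slider
)
print(reply)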