Upload 2 files
app.py
ADDED
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import torch
import re
import time
import gradio as gr

# load the base model and tokenizer, then wrap the model with its PEFT adapter
tokenizer = AutoTokenizer.from_pretrained("osiria/primo")
model = AutoModelForCausalLM.from_pretrained("osiria/primo")
model = PeftModel.from_pretrained(model, "osiria/primo")

device = torch.device("cuda")
model.to(device)
model.eval()


class Prime:

    def __init__(self, tokenizer, model):
        self.tokenizer = tokenizer
        self.model = model

    def _check_sublist(self, lst, sub_lst, sep=" "):
        # serialize both token lists and check whether sub_lst occurs inside lst
        lst = sep.join(map(str, lst))
        sub_lst = sep.join(map(str, sub_lst))
        return sub_lst in lst

    def _exclude_sublist(self, lst, sub_lst, sep=" "):
        # remove every occurrence of sub_lst from lst, preserving the element type
        l_type = type(lst[0])
        lst = sep.join(map(str, lst))
        sub_lst = sep.join(map(str, sub_lst))
        lst = re.sub(r"\s+", " ", lst.replace(sub_lst, "")).strip().split(sep)
        return list(map(l_type, lst))

    def generate(self, prompt, message="", sep=" [AI]", max_tokens=100, excluded=[[40, 19]],
                 lookback=5, resample_tokens=[27793], replace_tokens={11302: 23318},
                 stop_tokens=[239], sample=False, top_k=5):

        if message:
            prompt = message + ". " + prompt
        # normalize curly quotes to their ASCII equivalents
        prompt = prompt.replace("“", '"').replace("”", '"').replace("’", "'")
        if not sample:
            top_k = 2
        tokens = self.tokenizer.encode("[HUMAN] " + prompt + sep)
        tokens_generated = []
        checkpoint = 0
        while tokens[-1] not in stop_tokens and len(tokens_generated) < max_tokens:
            output = self.model.forward(input_ids=torch.tensor([tokens]).to(device)).logits[0, -1]
            output = torch.softmax(output, dim=0)
            # top_k >= 2 is assumed: the fallbacks below read candidates.indices[1]
            candidates = torch.topk(output, k=top_k)
            if sample:
                # sample the next token from the top-k candidates
                indices = candidates.indices
                scores = candidates.values
                next_token = indices[torch.multinomial(scores, 1)[0].item()]
            else:
                # greedy decoding: take the most likely candidate
                next_token = candidates.indices[0]
            next_token = next_token.item()
            sub_tokens = tokens_generated[-lookback:] + [next_token]
            if next_token in resample_tokens:
                # blacklisted token: fall back to the runner-up candidate
                next_token = candidates.indices[1].item()
            if len(tokens_generated) >= (lookback + 1) and next_token in tokens_generated[-2:]:
                # the token just repeated itself: use the runner-up instead
                next_token = candidates.indices[1].item()
            elif len(tokens_generated) >= lookback and self._check_sublist(tokens_generated, sub_tokens):
                # a longer n-gram is repeating: cut back to the last checkpoint,
                # or switch to sampling to break out of the loop
                if checkpoint:
                    tokens = tokens[:checkpoint]
                    break
                else:
                    next_token = candidates.indices[1].item()
                    sample = True
            if next_token in replace_tokens:
                next_token = replace_tokens[next_token]
            tokens = tokens + [next_token]
            tokens_generated = tokens_generated + [next_token]
            if next_token == 5:
                # remember a truncation point after token id 5
                checkpoint = len(tokens)
        for ex_lst in excluded:
            tokens = self._exclude_sublist(tokens, ex_lst)
        output = self.tokenizer.decode(tokens, skip_special_tokens=True)
        output = output.split(sep)[-1].strip()
        output = output[0].upper() + output[1:]
        if output[-1] == self.tokenizer.decode(stop_tokens[0]):
            output = output[:-1]
        # turn inline numbered items ("1.", "2.", ...) into bullet points
        output = re.sub(r" \d\.", "\n•", output)
        return output


prime = Prime(tokenizer=tokenizer, model=model)


def process_input(user_input, max_tokens, sample, top_k, message):
    # bridge between the Gradio callbacks and Prime.generate
    return prime.generate(prompt=user_input, message=message,
                          max_tokens=max_tokens, sample=sample,
                          top_k=top_k)


header = '''--------------------------------------------------------------------------------------------------
<style>
.vertical-text {
  writing-mode: vertical-lr;
  text-orientation: upright;
  background-color: red;
}
</style>
<center>
<body>
<span class="vertical-text" style="background-color:lightgreen;border-radius: 3px;padding: 3px;">β</span>
<span class="vertical-text" style="background-color:orange;border-radius: 3px;padding: 3px;">ββ</span>
<span class="vertical-text" style="background-color:lightblue;border-radius: 3px;padding: 3px;">βββββ</span>
<span class="vertical-text" style="background-color:tomato;border-radius: 3px;padding: 3px;">βββββ</span>
<span class="vertical-text" style="background-color:lightgrey;border-radius: 3px;padding: 3px;">ββ</span>
<span class="vertical-text" style="background-color:#CF9FFF;border-radius: 3px;padding: 3px;">β</span>
</body>
</center>
<br>
<center><img src="file/prime.png" width="100"></center>
'''

with gr.Blocks(title="primo", css="footer {visibility: hidden}",
               theme=gr.themes.Default(text_size="md", spacing_size="md")) as interface:

    gr.Markdown(header)
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("<b>options</b>")
            max_tokens = gr.Slider(1, 250, value=150, label="max tokens", info="choose a limit between 1 and 250")
            sample = gr.Checkbox(label="sampling")
            top_k = gr.Slider(1, 5, value=1, label="creativity", info="choose a level between 1 and 5")
            message = gr.Textbox(label="system message", value="")
            clear = gr.Button("clear chat")
        with gr.Column(scale=8):
            chatbot = gr.Chatbot(label="prime").style(height=600)
            msg = gr.Textbox(label="query")

            def user(user_message, history):
                # lock the input box and append the user turn to the chat history
                return gr.update(value="", interactive=False), history + [[user_message, None]]

            def bot(history, message, max_tokens, sample, top_k):
                bot_message = process_input(history[-1][0], message=message, max_tokens=max_tokens,
                                            sample=sample, top_k=top_k)
                history[-1][1] = ""
                # stream the reply one character at a time for a typing effect
                for character in bot_message:
                    history[-1][1] += character
                    time.sleep(0.05)
                    yield history

            response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
                bot, [chatbot, message, max_tokens, sample, top_k], chatbot
            )
            response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
            clear.click(lambda: None, None, chatbot, queue=False)
        with gr.Column(scale=1):
            gr.Markdown("<b>warning</b>")
            gr.Markdown("the model might behave erratically when presented with prompts which are too far away from its pre-training or fine-tuning and, because of the probabilistic nature of its generation mechanism, it might occasionally produce biased or offensive content with respect to gender, race, ideologies, and political or religious beliefs<br><br>these limitations imply that the model and its outputs should be used with caution, and should not be involved in situations that require the generated text to be fair or true")

interface.queue()
interface.launch()
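For reference, here is a minimal sketch of how Prime.generate can be exercised directly, without the Gradio interface. The prompt text is a hypothetical example; the model, tokenizer, and Prime class are the ones defined in app.py above.

# standalone usage sketch (hypothetical prompt; objects defined in app.py)
prime = Prime(tokenizer=tokenizer, model=model)

# greedy decoding: sample=False internally restricts top_k to 2 and always
# takes the most likely candidate (the runner-up is kept for the fallbacks)
print(prime.generate(prompt="Chi ha scritto la Divina Commedia?"))

# top-k sampling for more varied output
print(prime.generate(prompt="Chi ha scritto la Divina Commedia?", sample=True, top_k=5))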
app_it.py
ADDED
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import torch
import re
import time
import gradio as gr

# load the base model and tokenizer, then wrap the model with its PEFT adapter
tokenizer = AutoTokenizer.from_pretrained("osiria/primo")
model = AutoModelForCausalLM.from_pretrained("osiria/primo")
model = PeftModel.from_pretrained(model, "osiria/primo")

device = torch.device("cuda")
model.to(device)
model.eval()


class Prime:

    def __init__(self, tokenizer, model):
        self.tokenizer = tokenizer
        self.model = model

    def _check_sublist(self, lst, sub_lst, sep=" "):
        # serialize both token lists and check whether sub_lst occurs inside lst
        lst = sep.join(map(str, lst))
        sub_lst = sep.join(map(str, sub_lst))
        return sub_lst in lst

    def _exclude_sublist(self, lst, sub_lst, sep=" "):
        # remove every occurrence of sub_lst from lst, preserving the element type
        l_type = type(lst[0])
        lst = sep.join(map(str, lst))
        sub_lst = sep.join(map(str, sub_lst))
        lst = re.sub(r"\s+", " ", lst.replace(sub_lst, "")).strip().split(sep)
        return list(map(l_type, lst))

    def generate(self, prompt, message="", sep=" [AI]", max_tokens=100, excluded=[[40, 19]],
                 lookback=5, resample_tokens=[27793], replace_tokens={11302: 23318},
                 stop_tokens=[239], sample=False, top_k=5):

        # debug logging of the incoming request
        print("[HUMAN] " + prompt + sep)
        print(message, max_tokens, sample, top_k)
        if message:
            prompt = message + ". " + prompt
        # normalize curly quotes to their ASCII equivalents
        prompt = prompt.replace("“", '"').replace("”", '"').replace("’", "'")
        if not sample:
            top_k = 2
        tokens = self.tokenizer.encode("[HUMAN] " + prompt + sep)
        tokens_generated = []
        checkpoint = 0
        while tokens[-1] not in stop_tokens and len(tokens_generated) < max_tokens:
            output = self.model.forward(input_ids=torch.tensor([tokens]).to(device)).logits[0, -1]
            output = torch.softmax(output, dim=0)
            # top_k >= 2 is assumed: the fallbacks below read candidates.indices[1]
            candidates = torch.topk(output, k=top_k)
            if sample:
                # sample the next token from the top-k candidates
                indices = candidates.indices
                scores = candidates.values
                next_token = indices[torch.multinomial(scores, 1)[0].item()]
            else:
                # greedy decoding: take the most likely candidate
                next_token = candidates.indices[0]
            next_token = next_token.item()
            sub_tokens = tokens_generated[-lookback:] + [next_token]
            if next_token in resample_tokens:
                # blacklisted token: fall back to the runner-up candidate
                next_token = candidates.indices[1].item()
            if len(tokens_generated) >= (lookback + 1) and next_token in tokens_generated[-2:]:
                # the token just repeated itself: use the runner-up instead
                next_token = candidates.indices[1].item()
            elif len(tokens_generated) >= lookback and self._check_sublist(tokens_generated, sub_tokens):
                # a longer n-gram is repeating: cut back to the last checkpoint,
                # or switch to sampling to break out of the loop
                if checkpoint:
                    tokens = tokens[:checkpoint]
                    break
                else:
                    next_token = candidates.indices[1].item()
                    sample = True
            if next_token in replace_tokens:
                next_token = replace_tokens[next_token]
            tokens = tokens + [next_token]
            tokens_generated = tokens_generated + [next_token]
            if next_token == 5:
                # remember a truncation point after token id 5
                checkpoint = len(tokens)
        for ex_lst in excluded:
            tokens = self._exclude_sublist(tokens, ex_lst)
        output = self.tokenizer.decode(tokens, skip_special_tokens=True)
        output = output.split(sep)[-1].strip()
        output = output[0].upper() + output[1:]
        if output[-1] == self.tokenizer.decode(stop_tokens[0]):
            output = output[:-1]
        # turn inline numbered items ("1.", "2.", ...) into bullet points
        output = re.sub(r" \d\.", "\n•", output)
        return output


prime = Prime(tokenizer=tokenizer, model=model)


def process_input(user_input, max_tokens, sample, top_k, message):
    # bridge between the Gradio callbacks and Prime.generate
    return prime.generate(prompt=user_input, message=message,
                          max_tokens=max_tokens, sample=sample,
                          top_k=top_k)


header = '''--------------------------------------------------------------------------------------------------
<style>
.vertical-text {
  writing-mode: vertical-lr;
  text-orientation: upright;
  background-color: red;
}
</style>
<center>
<body>
<span class="vertical-text" style="background-color:lightgreen;border-radius: 3px;padding: 3px;">β</span>
<span class="vertical-text" style="background-color:orange;border-radius: 3px;padding: 3px;">ββ</span>
<span class="vertical-text" style="background-color:lightblue;border-radius: 3px;padding: 3px;">βββββ</span>
<span class="vertical-text" style="background-color:tomato;border-radius: 3px;padding: 3px;">βββββ</span>
<span class="vertical-text" style="background-color:lightgrey;border-radius: 3px;padding: 3px;">ββ</span>
<span class="vertical-text" style="background-color:#CF9FFF;border-radius: 3px;padding: 3px;">β</span>
</body>
</center>
<br>
<center><img src="file/primo.png" width="100"></center>
'''

with gr.Blocks(title="primo", css="footer {visibility: hidden}",
               theme=gr.themes.Default(text_size="md", spacing_size="md")) as interface:

    gr.Markdown(header)
    with gr.Row():
        with gr.Column(scale=1):
            # Italian UI: "options", token limit, sampling, creativity, system message, clear chat
            gr.Markdown("<b>opzioni</b>")
            max_tokens = gr.Slider(1, 250, value=150, label="massimo numero di token", info="scegli un limite tra 1 e 250")
            sample = gr.Checkbox(label="campionamento")
            top_k = gr.Slider(1, 5, value=1, label="creatività", info="scegli un livello tra 1 e 5")
            message = gr.Textbox(label="messaggio di sistema", value="")
            clear = gr.Button("pulisci conversazione")
        with gr.Column(scale=8):
            chatbot = gr.Chatbot(label="prime").style(height=600)
            msg = gr.Textbox(label="richiesta")

            def user(user_message, history):
                # lock the input box and append the user turn to the chat history
                return gr.update(value="", interactive=False), history + [[user_message, None]]

            def bot(history, message, max_tokens, sample, top_k):
                bot_message = process_input(history[-1][0], message=message, max_tokens=max_tokens,
                                            sample=sample, top_k=top_k)
                history[-1][1] = ""
                # stream the reply one character at a time for a typing effect
                for character in bot_message:
                    history[-1][1] += character
                    time.sleep(0.05)
                    yield history

            response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
                bot, [chatbot, message, max_tokens, sample, top_k], chatbot
            )
            response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
            clear.click(lambda: None, None, chatbot, queue=False)
        with gr.Column(scale=1):
            # Italian disclaimer: same caveats as the English version in app.py
            gr.Markdown("<b>attenzione</b>")
            gr.Markdown("il modello potrebbe comportarsi in maniera imprevista nel caso in cui riceva prompt troppo lontani dal suo pre-training o fine-tuning e, per via della natura probabilistica del meccanismo di generazione, potrebbe occasionalmente produrre contenuti distorti o offensivi in relazione a tematiche come il genere, le etnie, le ideologie, e le convinzioni politiche o religiose<br><br>per via di queste limitazioni, il modello e i suoi output dovrebbero essere usati con cautela, e non dovrebbero essere coinvolti in contesti che richiedono che il testo generato sia corretto o veritiero")

interface.queue()
interface.launch()
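app_it.py duplicates app.py almost line for line; the two files differ only in the debug print calls, the logo filename, and the localized UI strings. A possible follow-up refactor (a sketch under that assumption, not part of this upload) would keep a single app module and select the locale at startup:

# hypothetical refactor sketch: one app file, locale-driven UI strings
import os

UI_STRINGS = {
    "en": {"options": "options", "max_tokens": "max tokens", "sampling": "sampling",
           "creativity": "creativity", "system_message": "system message",
           "clear": "clear chat", "query": "query", "logo": "file/prime.png"},
    "it": {"options": "opzioni", "max_tokens": "massimo numero di token", "sampling": "campionamento",
           "creativity": "creatività", "system_message": "messaggio di sistema",
           "clear": "pulisci conversazione", "query": "richiesta", "logo": "file/primo.png"},
}

# PRIMO_LANG is a hypothetical environment variable, defaulting to English
strings = UI_STRINGS[os.environ.get("PRIMO_LANG", "en")]

The Blocks layout would then read every label from strings instead of hard-coding the literals in two parallel files.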