Update app.py
app.py CHANGED

@@ -12,54 +12,54 @@ import time
 login(token=os.environ["HF_TOKEN"])
 
 # Hierarchical structure of the models
-
+models_hierarchy = {
     "meta-llama": {
-        "Llama-2": ["
-        "Llama-3": ["8B", "3.
+        "Llama-2": ["7b", "13b", "70b"],
+        "Llama-3": ["8B", "3.2-3B", "3.1-8B"]
     },
     "mistralai": {
         "Mistral": ["7B-v0.1", "7B-v0.3"],
         "Mixtral": ["8x7B-v0.1"]
     },
     "google": {
-        "
+        "gemma": ["2b", "9b", "27b"]
     },
     "croissantllm": {
         "CroissantLLM": ["Base"]
     }
 }
 
-#
+# Supported languages per model
 models_and_languages = {
-    "meta-llama/Llama-2-
-    "meta-llama/Llama-2-
-    "meta-llama/Llama-2-
-    "meta-llama/Llama-3-8B": ["en"],
-    "meta-llama/Llama-3-
-    "meta-llama/Llama-3-
+    "meta-llama/Llama-2-7b-hf": ["en"],
+    "meta-llama/Llama-2-13b-hf": ["en"],
+    "meta-llama/Llama-2-70b-hf": ["en"],
+    "meta-llama/Meta-Llama-3-8B": ["en"],
+    "meta-llama/Llama-3.2-3B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
+    "meta-llama/Llama-3.1-8B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
     "mistralai/Mistral-7B-v0.1": ["en"],
     "mistralai/Mixtral-8x7B-v0.1": ["en", "fr", "it", "de", "es"],
     "mistralai/Mistral-7B-v0.3": ["en"],
-    "google/
-    "google/
-    "google/
+    "google/gemma-2-2b": ["en"],
+    "google/gemma-2-9b": ["en"],
+    "google/gemma-2-27b": ["en"],
     "croissantllm/CroissantLLMBase": ["en", "fr"]
 }
 
 # Recommended parameters for each model
 model_parameters = {
-    "meta-llama/Llama-2-
-    "meta-llama/Llama-2-
-    "meta-llama/Llama-2-
-    "meta-llama/Llama-3-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
-    "meta-llama/Llama-3-
-    "meta-llama/Llama-3-
+    "meta-llama/Llama-2-13b-hf": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
+    "meta-llama/Llama-2-7b-hf": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
+    "meta-llama/Llama-2-70b-hf": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
+    "meta-llama/Meta-Llama-3-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
+    "meta-llama/Llama-3.2-3B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
+    "meta-llama/Llama-3.1-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
     "mistralai/Mistral-7B-v0.1": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
     "mistralai/Mixtral-8x7B-v0.1": {"temperature": 0.8, "top_p": 0.95, "top_k": 50},
     "mistralai/Mistral-7B-v0.3": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
-    "google/
-    "google/
-    "google/
+    "google/gemma-2-2b": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
+    "google/gemma-2-9b": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
+    "google/gemma-2-27b": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
     "croissantllm/CroissantLLMBase": {"temperature": 0.8, "top_p": 0.92, "top_k": 50}
 }
 
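
The three tables above are keyed consistently: models_hierarchy feeds the cascading dropdowns, while the full Hub id (company, model name, and variation joined together) indexes models_and_languages and model_parameters. A minimal lookup sketch under that assumption; full_id and the lookups below are illustrative, not lines from the file:

full_id = "mistralai/Mistral-7B-v0.1"   # f"{company}/{model_name}-{variation}"
langs = models_and_languages[full_id]   # ["en"]
params = model_parameters[full_id]      # {"temperature": 0.7, "top_p": 0.9, "top_k": 50}
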
@@ -69,14 +69,17 @@ tokenizer = None
 selected_language = None
 
 def update_model_choices(company):
-    return gr.Dropdown(choices=list(
+    return gr.Dropdown(choices=list(models_hierarchy[company].keys()), value=None)
 
 def update_variation_choices(company, model_name):
-    return gr.Dropdown(choices=
+    return gr.Dropdown(choices=models_hierarchy[company][model_name], value=None)
 
 def load_model(company, model_name, variation, progress=gr.Progress()):
     global model, tokenizer
+
     full_model_name = f"{company}/{model_name}-{variation}"
+    if full_model_name not in models_and_languages:
+        full_model_name = f"{company}/{model_name}{variation}"
 
     try:
         progress(0, desc="Chargement du tokenizer")
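
The added fallback covers Hub ids that join model and variation without a hyphen; for example "CroissantLLM" plus "Base" only resolves to croissantllm/CroissantLLMBase on the second try. The diff does not show how update_model_choices and update_variation_choices are attached to the dropdowns; a plausible wiring sketch, assuming the dropdown components defined in the Accordion below (these .change calls are an assumption, not part of this commit):

# Hypothetical event wiring; not shown in this diff.
company_dropdown.change(update_model_choices,
                        inputs=company_dropdown, outputs=model_dropdown)
model_dropdown.change(update_variation_choices,
                      inputs=[company_dropdown, model_dropdown],
                      outputs=variation_dropdown)
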
@@ -116,14 +119,13 @@ def load_model(company, model_name, variation, progress=gr.Progress()):
     except Exception as e:
         return f"Erreur lors du chargement du modèle : {str(e)}", gr.Dropdown(visible=False), None, None, None
 
-# The rest of the code remains
-# ...
+# The rest of the code is unchanged...
 
 with gr.Blocks() as demo:
     gr.Markdown("# LLM&BIAS")
 
     with gr.Accordion("Sélection du modèle"):
-        company_dropdown = gr.Dropdown(choices=list(
+        company_dropdown = gr.Dropdown(choices=list(models_hierarchy.keys()), label="Choisissez une société")
         model_dropdown = gr.Dropdown(label="Choisissez un modèle", choices=[])
         variation_dropdown = gr.Dropdown(label="Choisissez une variation", choices=[])
         load_button = gr.Button("Charger le modèle")