Woziii committed on
Commit
63dd69c
·
verified ·
1 Parent(s): 55f3d52

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -28
app.py CHANGED
@@ -12,54 +12,54 @@ import time
12
  login(token=os.environ["HF_TOKEN"])
13
 
14
  # Structure hiérarchique des modèles
15
- model_hierarchy = {
16
  "meta-llama": {
17
- "Llama-2": ["7B", "13B", "70B"],
18
- "Llama-3": ["8B", "3.2B", "3.1B"]
19
  },
20
  "mistralai": {
21
  "Mistral": ["7B-v0.1", "7B-v0.3"],
22
  "Mixtral": ["8x7B-v0.1"]
23
  },
24
  "google": {
25
- "Gemma": ["2B", "9B", "27B"]
26
  },
27
  "croissantllm": {
28
  "CroissantLLM": ["Base"]
29
  }
30
  }
31
 
32
- # Mise à jour de la liste des modèles et leurs langues supportées
33
  models_and_languages = {
34
- "meta-llama/Llama-2-7B": ["en"],
35
- "meta-llama/Llama-2-13B": ["en"],
36
- "meta-llama/Llama-2-70B": ["en"],
37
- "meta-llama/Llama-3-8B": ["en"],
38
- "meta-llama/Llama-3-3.2B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
39
- "meta-llama/Llama-3-3.1B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
40
  "mistralai/Mistral-7B-v0.1": ["en"],
41
  "mistralai/Mixtral-8x7B-v0.1": ["en", "fr", "it", "de", "es"],
42
  "mistralai/Mistral-7B-v0.3": ["en"],
43
- "google/Gemma-2B": ["en"],
44
- "google/Gemma-9B": ["en"],
45
- "google/Gemma-27B": ["en"],
46
  "croissantllm/CroissantLLMBase": ["en", "fr"]
47
  }
48
 
49
  # Paramètres recommandés pour chaque modèle
50
  model_parameters = {
51
- "meta-llama/Llama-2-7B": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
52
- "meta-llama/Llama-2-13B": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
53
- "meta-llama/Llama-2-70B": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
54
- "meta-llama/Llama-3-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
55
- "meta-llama/Llama-3-3.2B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
56
- "meta-llama/Llama-3-3.1B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
57
  "mistralai/Mistral-7B-v0.1": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
58
  "mistralai/Mixtral-8x7B-v0.1": {"temperature": 0.8, "top_p": 0.95, "top_k": 50},
59
  "mistralai/Mistral-7B-v0.3": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
60
- "google/Gemma-2B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
61
- "google/Gemma-9B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
62
- "google/Gemma-27B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
63
  "croissantllm/CroissantLLMBase": {"temperature": 0.8, "top_p": 0.92, "top_k": 50}
64
  }
65
 
@@ -69,14 +69,17 @@ tokenizer = None
69
  selected_language = None
70
 
71
  def update_model_choices(company):
72
- return gr.Dropdown(choices=list(model_hierarchy[company].keys()), value=None)
73
 
74
  def update_variation_choices(company, model_name):
75
- return gr.Dropdown(choices=model_hierarchy[company][model_name], value=None)
76
 
77
  def load_model(company, model_name, variation, progress=gr.Progress()):
78
  global model, tokenizer
 
79
  full_model_name = f"{company}/{model_name}-{variation}"
 
 
80
 
81
  try:
82
  progress(0, desc="Chargement du tokenizer")
@@ -116,14 +119,13 @@ def load_model(company, model_name, variation, progress=gr.Progress()):
116
  except Exception as e:
117
  return f"Erreur lors du chargement du modèle : {str(e)}", gr.Dropdown(visible=False), None, None, None
118
 
119
- # Le reste du code reste inchangé
120
- # ...
121
 
122
  with gr.Blocks() as demo:
123
  gr.Markdown("# LLM&BIAS")
124
 
125
  with gr.Accordion("Sélection du modèle"):
126
- company_dropdown = gr.Dropdown(choices=list(model_hierarchy.keys()), label="Choisissez une société")
127
  model_dropdown = gr.Dropdown(label="Choisissez un modèle", choices=[])
128
  variation_dropdown = gr.Dropdown(label="Choisissez une variation", choices=[])
129
  load_button = gr.Button("Charger le modèle")
 
12
  login(token=os.environ["HF_TOKEN"])
13
 
14
# Hierarchical structure of available models: company -> family -> variations.
# load_model() joins a selection as f"{company}/{family}-{variation}" (with a
# no-hyphen fallback f"{company}/{family}{variation}"), so every entry here
# must resolve to a key of models_and_languages / model_parameters.
# NOTE: the previous variations ("7b", "8B", "3.2-3B", "gemma" + "2b") built
# repo ids such as "meta-llama/Llama-2-7b" or "google/gemma-2b" that matched
# no key in either dict, making those models unloadable — fixed below.
models_hierarchy = {
    "meta-llama": {
        # "-hf" suffix so "meta-llama/Llama-2-7b-hf" etc. match the dicts
        "Llama-2": ["7b-hf", "13b-hf", "70b-hf"],
        "Meta-Llama-3": ["8B"],
        "Llama-3.2": ["3B"],
        "Llama-3.1": ["8B"],
    },
    "mistralai": {
        "Mistral": ["7B-v0.1", "7B-v0.3"],
        "Mixtral": ["8x7B-v0.1"],
    },
    "google": {
        # family "gemma-2" so "google/gemma-2-2b" etc. match the dicts
        "gemma-2": ["2b", "9b", "27b"],
    },
    "croissantllm": {
        # "CroissantLLM-Base" is not a repo id; load_model()'s no-hyphen
        # fallback resolves this to "croissantllm/CroissantLLMBase"
        "CroissantLLM": ["Base"],
    },
}
31
 
32
# Supported prompt languages per model repository id (ISO 639-1 codes).
models_and_languages = {
    "meta-llama/Llama-2-7b-hf": ["en"],
    "meta-llama/Llama-2-13b-hf": ["en"],
    "meta-llama/Llama-2-70b-hf": ["en"],
    "meta-llama/Meta-Llama-3-8B": ["en"],
    "meta-llama/Llama-3.2-3B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
    "meta-llama/Llama-3.1-8B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
    "mistralai/Mistral-7B-v0.1": ["en"],
    "mistralai/Mixtral-8x7B-v0.1": ["en", "fr", "it", "de", "es"],
    "mistralai/Mistral-7B-v0.3": ["en"],
    "google/gemma-2-2b": ["en"],
    "google/gemma-2-9b": ["en"],
    "google/gemma-2-27b": ["en"],
    "croissantllm/CroissantLLMBase": ["en", "fr"],
}
48
 
49
# Recommended sampling parameters (temperature / top_p / top_k) per model.
model_parameters = {
    "meta-llama/Llama-2-13b-hf": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
    "meta-llama/Llama-2-7b-hf": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
    "meta-llama/Llama-2-70b-hf": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
    "meta-llama/Meta-Llama-3-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
    "meta-llama/Llama-3.2-3B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
    "meta-llama/Llama-3.1-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
    "mistralai/Mistral-7B-v0.1": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
    "mistralai/Mixtral-8x7B-v0.1": {"temperature": 0.8, "top_p": 0.95, "top_k": 50},
    "mistralai/Mistral-7B-v0.3": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
    "google/gemma-2-2b": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
    "google/gemma-2-9b": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
    "google/gemma-2-27b": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
    "croissantllm/CroissantLLMBase": {"temperature": 0.8, "top_p": 0.92, "top_k": 50},
}
65
 
 
69
  selected_language = None
70
 
71
def update_model_choices(company):
    """Return a refreshed model dropdown listing the families of *company*."""
    families = models_hierarchy[company]
    return gr.Dropdown(choices=list(families.keys()), value=None)
73
 
74
def update_variation_choices(company, model_name):
    """Return a refreshed variation dropdown for the chosen model family."""
    variations = models_hierarchy[company][model_name]
    return gr.Dropdown(choices=variations, value=None)
76
 
77
  def load_model(company, model_name, variation, progress=gr.Progress()):
78
  global model, tokenizer
79
+
80
  full_model_name = f"{company}/{model_name}-{variation}"
81
+ if full_model_name not in models_and_languages:
82
+ full_model_name = f"{company}/{model_name}{variation}"
83
 
84
  try:
85
  progress(0, desc="Chargement du tokenizer")
 
119
  except Exception as e:
120
  return f"Erreur lors du chargement du modèle : {str(e)}", gr.Dropdown(visible=False), None, None, None
121
 
122
+ # Le reste du code reste inchangé...
 
123
 
124
  with gr.Blocks() as demo:
125
  gr.Markdown("# LLM&BIAS")
126
 
127
  with gr.Accordion("Sélection du modèle"):
128
+ company_dropdown = gr.Dropdown(choices=list(models_hierarchy.keys()), label="Choisissez une société")
129
  model_dropdown = gr.Dropdown(label="Choisissez un modèle", choices=[])
130
  variation_dropdown = gr.Dropdown(label="Choisissez une variation", choices=[])
131
  load_button = gr.Button("Charger le modèle")