Woziii committed
Commit 2759f98 · 1 Parent(s): 76c425f

Update app.py

Files changed (1): app.py (+7 -6)
app.py CHANGED
@@ -6,7 +6,9 @@ import matplotlib.pyplot as plt
 import numpy as np
 from huggingface_hub import login
 import os
+
 login(token=os.environ["HF_TOKEN"])
+
 # Liste des modèles
 models = [
     "meta-llama/Llama-2-13b", "meta-llama/Llama-2-7b", "meta-llama/Llama-2-70b",
@@ -23,14 +25,13 @@ tokenizer = None
 def load_model(model_name):
     global model, tokenizer
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    return f"Modèle {model_name} chargé avec succès."
+    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cpu")
+    return f"Modèle {model_name} chargé avec succès sur CPU."
 
-@spaces.GPU(duration=300)
 def generate_text(input_text, temperature, top_p, top_k):
     global model, tokenizer
 
-    inputs = tokenizer(input_text, return_tensors="pt").to("cuda")
+    inputs = tokenizer(input_text, return_tensors="pt")
 
     with torch.no_grad():
         outputs = model.generate(
@@ -46,8 +47,8 @@ def generate_text(input_text, temperature, top_p, top_k):
     generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)
 
     # Extraire les attentions et les logits
-    attentions = outputs.attentions[-1][0][-1].cpu().numpy()
-    logits = outputs.scores[-1][0].cpu()
+    attentions = outputs.attentions[-1][0][-1].numpy()
+    logits = outputs.scores[-1][0]
 
     # Visualiser l'attention
     plt.figure(figsize=(10, 10))
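Note: the hunks above truncate the model.generate(...) call, but the later references to outputs.sequences, outputs.scores, and outputs.attentions imply it is invoked with return_dict_in_generate=True, output_scores=True, and output_attentions=True. A minimal sketch of the CPU-only generate_text after this commit, with max_new_tokens as an assumed cap that is not visible in the diff:

    import torch

    def generate_text(input_text, temperature, top_p, top_k):
        global model, tokenizer

        # Tokenize on CPU; no .to("cuda") now that the Space runs without a GPU.
        inputs = tokenizer(input_text, return_tensors="pt")

        with torch.no_grad():
            # return_dict_in_generate + output_scores + output_attentions are
            # what make outputs.sequences, outputs.scores, and
            # outputs.attentions available further down.
            outputs = model.generate(
                **inputs,
                max_new_tokens=50,  # assumed cap; the diff truncates this call
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                return_dict_in_generate=True,
                output_scores=True,
                output_attentions=True,
            )

        generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)
        return generated_text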
 
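On the extraction side, outputs.attentions is a tuple with one entry per generated step; each entry is itself a tuple of per-layer tensors of shape (batch, num_heads, query_len, key_len), so the commit's outputs.attentions[-1][0][-1] picks the last step, the first layer, and the (single) batch item. Since everything now stays on CPU, the .cpu() calls can indeed be dropped before .numpy(). A sketch of one way to plot the attention and inspect the last-step logits; the first-step/last-layer choice, head averaging, and top-5 readout are illustrative assumptions, not the commit's exact code:

    import matplotlib.pyplot as plt
    import torch

    # First generated step, last layer: attention over the full prompt,
    # shape (batch, num_heads, prompt_len, prompt_len).
    step_attn = outputs.attentions[0][-1]

    # Drop the batch dim and average over heads for a single square heat map;
    # tensors are already on CPU, so .numpy() works without .cpu().
    attn_map = step_attn[0].mean(dim=0).numpy()

    plt.figure(figsize=(10, 10))
    plt.imshow(attn_map, cmap="viridis")
    plt.colorbar()
    plt.xlabel("Key position")
    plt.ylabel("Query position")
    plt.title("Attention (last layer, head average)")

    # Logits for the last generated token -> top-5 next-token candidates.
    logits = outputs.scores[-1][0]
    probs = torch.softmax(logits, dim=-1)
    top_probs, top_ids = probs.topk(5)
    for p, i in zip(top_probs.tolist(), top_ids.tolist()):
        print(f"{tokenizer.decode([i])!r}: {p:.3f}")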