Update app.py
app.py
CHANGED
@@ -1,54 +1,26 @@
-from transformers import pipeline
 import gradio as gr
-import
-import spaces
-
-# Load the GPT model from Hugging Face
-model_id = "deepseek-ai/deepseek-llm-7b-chat"
-pipe = pipeline("text-generation", model=model_id)
-
-# Instruction the model follows for every chat turn
-system_message = """Classify the text into neutral, negative or positive.
-Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen.
-Sentiment:
-"""
+from transformers import pipeline
 
-
-
-def generate_response(user_message, history):
-    try:
-        # Initialize history if it is None
-        if history is None:
-            history = []
+# Load the sentiment-classification pipeline
+sentiment_analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
 
-
-
-
-
-
-
-
-
-        # Append the new user message
-        conversation_text += f"User: {user_message}\n"
-
-        # Generate a response
-        result = pipe(conversation_text, max_new_tokens=150)
-        response = result[0]['generated_text'].split("User: ")[-1].strip()  # Extract the generated reply
-
-        # Update the history
-        history.append((user_message, response))
-        return history, response
-    except Exception as e:
-        # On error, return the history unchanged and an error message
-        return history, f"Error: {str(e)}"
 
-#
-
-fn=
-inputs=
-outputs=
-
+# Create the Gradio interface
+interface = gr.Interface(
+    fn=analyze_sentiment,
+    inputs="text",
+    outputs="text",
+    title="Analyse des Sentiments",
+    description="Entrez un texte pour analyser son sentiment (positif, négatif ou neutre)."
 )
 
-
+# Launch the interface in a Hugging Face Space
+if __name__ == "__main__":
+    interface.launch()
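For reference, a minimal sketch (not part of the commit) of what the sentiment-analysis pipeline used above returns, assuming the standard transformers pipeline output of a list of dicts with 'label' and 'score' keys; the sample text is arbitrary:

from transformers import pipeline

# Same checkpoint as in app.py; the model is downloaded on first run.
sentiment_analyzer = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)

# The pipeline returns one dict per input text, e.g.
# [{'label': 'POSITIVE', 'score': 0.99}]
result = sentiment_analyzer("This movie is definitely one of my favorite movies of its kind.")
print(f"Sentiment: {result[0]['label']}, Score: {result[0]['score']:.2f}")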