import os
import requests
import gradio as gr

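# Hugging Face Inference API configuration: the access token is read from the
# TOKEN environment variable (typically stored as a secret on the hosting environment).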
api_token = os.environ.get("TOKEN")
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {api_token}"}



def query(payload):
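    # POST the payload to the Inference API and return the decoded JSON body.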
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

def analyze_sentiment(text):
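    # Build a Llama 3 instruct-formatted prompt; the system prompt asks the model
    # to reply with the markers "positive1" or "negative1".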
    output = query({
        "inputs": f'''<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You are a feeling analyser and you'll say only "positive1" if I'm feeling positive and "negative1" if I'm feeling sad
<|eot_id|>
<|start_header_id|>user<|end_header_id|>
{text}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>

'''
    })

    if isinstance(output, list) and len(output) > 0:
        # The API echoes the prompt in generated_text, so each marker word already
        # appears once; a count of 2 or more means the model actually produced it.
        response = output[0].get('generated_text', '').strip().lower()

        positive_count = response.count('positive')
        negative_count = response.count('negative')

        if positive_count >= 2:
            return 'positive'
        elif negative_count >= 2:
            return 'negative'
        else:
            return f"Error: ambiguous response - '{response}'"

    # A non-list response means the API returned an error payload
    # (for example while the model is still loading); surface it as text.
    return f"Error: unexpected API response - '{output}'"


# Minimal Gradio interface: free-text input, the sentiment label (or error message) as text output.
demo = gr.Interface(
    fn=analyze_sentiment,
    inputs="text",
    outputs="text"
)

demo.launch()
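
# Usage sketch (assumed filename app.py; needs the requests and gradio packages
# plus a valid Hugging Face access token):
#   export TOKEN=<your token>
#   python app.py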