File size: 2,433 Bytes
e50a51c
75f1b92
7af13f4
da4611f
48193db
7af13f4
 
 
 
75f1b92
7af13f4
 
0997d43
 
75f1b92
0997d43
7af13f4
48193db
7af13f4
 
0997d43
75f1b92
 
0997d43
75f1b92
da4611f
0997d43
7af13f4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e50a51c
 
19964c7
e50a51c
19964c7
 
 
 
 
 
7af13f4
 
 
 
d3d7595
7af13f4
06b445f
a14bbf4
48193db
75f1b92
 
81d1362
 
03b5ddd
81d1362
e50a51c
8a3503b
 
 
 
 
06b445f
8a3503b
7af13f4
8a3503b
d3aaf9a
e50a51c
d3aaf9a
75f1b92
8a3503b
 
 
 
 
75f1b92
8a3503b
7af13f4
8a3503b
e50a51c
d3aaf9a
7af13f4
326066b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import sklearn
from os import O_ACCMODE
import gradio as gr
import joblib
from transformers import pipeline
import requests.exceptions
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.repocard import metadata_load


app = gr.Blocks()

model_id_1 = "juliensimon/distilbert-amazon-shoe-reviews"
model_id_2 = "juliensimon/distilbert-amazon-shoe-reviews"

def load_agent(model_id):
    """
    This function load the agent's results
    """
    # Load the metrics
    metadata = get_metadata(model_id)

    # get predictions
    predictions = predict(model_id)


    return model_id, predictions


def get_metadata(model_id):
    """
    Get the metadata of the model repo
    :param model_id:
    :return: metadata
    """
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        metadata = metadata_load(readme_path)
        print(metadata)
        return metadata
    except requests.exceptions.HTTPError:
        return None

classifier = pipeline("text-classification", model="juliensimon/distilbert-amazon-shoe-reviews")
                
def predict(review):
        prediction = classifier(review)
        print(prediction)
        stars = prediction[0]['label']
        stars = (int)(stars.split('_')[1])+1
        score = 100*prediction[0]['score']
        return "{} {:.0f}%".format("\U00002B50"*stars, score)

with app:
    gr.Markdown(
    """
    # Compare Sentiment Analysis Models 
    
    Type text to predict sentiment.
    """)   
    with gr.Row():
        inp_1= gr.Textbox(label="Type text here.",placeholder="The customer service was satisfactory.")

    gr.Markdown(
    """
    **Model Predictions**
    """)

    gr.Markdown(
    """
    Model 1 = juliensimon/distilbert-amazon-shoe-reviews
    """)

    with gr.Row():
        btn1 = gr.Button("Predict for Model 1")
    with gr.Row():
        out_1 = gr.Textbox(label="Prediction for Model 1")

    classifier = pipeline("text-classification", model=model_id_1)   
    btn1.click(fn=predict, inputs=inp_1, outputs=out_1)

    gr.Markdown(
    """
    Model 2 = juliensimon/distilbert-amazon-shoe-reviews
    """)

    with gr.Row():
        btn2 = gr.Button("Predict for Model 2")
    with gr.Row():
        out_2 = gr.Textbox(label="Prediction for Model 2")   
    classifier = pipeline("text-classification", model=model_id_2)    
    btn2.click(fn=predict, inputs=inp_1, outputs=out_2)
    
app.launch()