File size: 1,419 Bytes
ed60c2e
 
5b95e34
 
 
ed60c2e
 
90ed2ce
ed60c2e
 
 
5b95e34
ed60c2e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90ed2ce
 
316b0d8
 
ed60c2e
 
 
 
316b0d8
ed60c2e
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
"""Gradio app that showcases Danish offensive text models."""

# Silence numba's deprecation warning before importing gradio, which pulls in
# numba transitively — the filter must be installed first to take effect.
import warnings
from numba.core.errors import NumbaDeprecationWarning
warnings.filterwarnings("ignore", category=NumbaDeprecationWarning)
import gradio as gr
from transformers import pipeline
from typing import Tuple, Dict, List


def main():
    """Build and launch the Gradio demo for Danish offensive-text detection.

    Loads the `alexandrainst/da-offensive-detection-small` text-classification
    pipeline, wires it to a Gradio `Interface`, and starts the web server
    (blocking call).
    """
    pipe = pipeline(
        task="text-classification",
        model="alexandrainst/da-offensive-detection-small",
    )

    examples = [
        "Din store idiot.",
        "Jeg er glad for at være her.",
        "Hvem tror du, du er?",
        "Har du hæklefejl i kysen?",
        "Hej med dig, jeg hedder Peter.",
        "Fuck hvor er det dejligt, det her :)",
        "🍆",
        "😊",
    ]

    def classification(text: str) -> Dict[str, float]:
        """Classify a single sentence and return a label→score mapping.

        The mapping has exactly one entry (the predicted label and its
        confidence), which is the format `gr.Label` expects.
        """
        # The pipeline returns one result dict per input; we pass a single
        # string, so take the first (and only) entry.
        output: dict = pipe(text)[0]
        # Lightweight request logging for the hosted demo.
        print(text, output)
        return {output["label"]: output["score"]}

    demo = gr.Interface(
        fn=classification,
        inputs=gr.Textbox(placeholder="Enter sentence here...", value=examples[0]),
        outputs=gr.Label(),
        examples=examples,
        title="Danish Offensive Text Detection",
        description="""
Detect offensive text in Danish. Write any text in the box below, and the model will predict whether the text is offensive or not:

_Also, be patient, as this demo is running on a CPU!_""",
    )

    demo.launch()

# Launch the demo only when run as a script, not when imported as a module.
if __name__ == "__main__":
    main()