import streamlit as st
import torch
from transformers import BertConfig, BertForSequenceClassification, BertTokenizer
import numpy as np


# Load the fine-tuned KcBERT model and tokenizer.
# st.cache_resource keeps them in memory so they are loaded only once per session
# (available in Streamlit >= 1.18; drop the decorator on older versions).
@st.cache_resource
def load_model():
    tokenizer = BertTokenizer.from_pretrained('beomi/kcbert-base')
    config = BertConfig.from_pretrained('beomi/kcbert-base', num_labels=7)
    model = BertForSequenceClassification.from_pretrained('beomi/kcbert-base', config=config)
    # Load the fine-tuned weights on CPU
    model_state_dict = torch.load('YEINJEONG/sentiment7', map_location=torch.device('cpu'))
    model.load_state_dict(model_state_dict)
    model.eval()
    return model, tokenizer

model, tokenizer = load_model()

# Run inference on the input text and return the per-class probabilities
# together with the dominant emotion.
def inference(input_doc):
    inputs = tokenizer(input_doc, return_tensors='pt')
    with torch.no_grad():  # no gradients needed at inference time
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=1).squeeze().tolist()
    # Emotion classes (in label order): fear, surprise, anger, sadness, neutral, happiness, disgust
    class_idx = {'곡포': 0, 'λ†€λžŒ': 1, 'λΆ„λ…Έ': 2, 'μŠ¬ν””': 3, '쀑립': 4, '행볡': 5, '혐였': 6}
    results = {class_name: prob for class_name, prob in zip(class_idx, probs)}
    # Find the class with the highest probability
    max_prob_class = max(results, key=results.get)
    # "κ°€μž₯ κ°•ν•˜κ²Œ λ‚˜νƒ€λ‚œ 감정" = "Strongest emotion detected"
    return [results, f"κ°€μž₯ κ°•ν•˜κ²Œ λ‚˜νƒ€λ‚œ 감정: {max_prob_class}"]
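
# Illustrative return value of inference() (structure only; the numbers below are
# made up for illustration, not actual model output):
#   [{'곡포': 0.01, 'λ†€λžŒ': 0.03, 'λΆ„λ…Έ': 0.01, 'μŠ¬ν””': 0.02, '쀑립': 0.05, '행볡': 0.86, '혐였': 0.02},
#    'κ°€μž₯ κ°•ν•˜κ²Œ λ‚˜νƒ€λ‚œ 감정: 행볡']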

# Set up the Streamlit interface (UI text is in Korean: the app reports the
# proportions of fear, surprise, anger, sadness, neutral, happiness, and disgust).
st.title('감정뢄석(Sentiment Analysis)')
st.markdown('<small style="color:grey;">글에 λ‚˜νƒ€λ‚œ 곡포, λ†€λžŒ, λΆ„λ…Έ, μŠ¬ν””, 쀑립, 행볡, 혐였의 정도λ₯Ό λΉ„μœ¨λ‘œ μ•Œλ €λ“œλ¦½λ‹ˆλ‹€.</small>', unsafe_allow_html=True)
user_input = st.text_area("이 곳에 κΈ€ μž…λ ₯(100자 μ΄ν•˜ ꢌμž₯):")  # "Enter text here (100 characters or fewer recommended)"
if st.button('μ‹œμž‘'):  # "Start" button runs the analysis
    results, summary = inference(user_input)
    st.write(results)  # per-class probabilities
    st.write(summary)  # strongest emotion
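
# To try this app locally (assumptions: this file is saved as app.py and the
# fine-tuned weights referenced in load_model() are available at the
# 'YEINJEONG/sentiment7' path):
#
#   pip install streamlit torch transformers
#   streamlit run app.py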