Usage
# import library
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline
# load model
# Load the checkpoint once by id. The id was previously duplicated verbatim on
# both from_pretrained calls; a single constant guarantees the tokenizer and
# model always come from the same checkpoint.
MODEL_ID = "Copycats/koelectra-base-v3-generalized-sentiment-analysis"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
# Pipeline wires the tokenizer and model together for end-to-end text -> label.
sentiment_classifier = TextClassificationPipeline(tokenizer=tokenizer, model=model)
# target reviews
# NOTE(review): the strings below are mojibake — Korean review text that was
# mis-decoded during extraction — and several literals are broken across
# physical lines, which is a SyntaxError in plain single-quoted strings.
# Restore the original UTF-8 Korean text from the upstream model card; the
# data itself is left byte-identical here because the original content cannot
# be reconstructed from this rendering.
review_list = [
'μ΄μκ³ μ’μμ~~~μ»κΈ°λ νΈνκ³ μμ΄κ³ μ΄μλ€κ³ μκΈ°λ°©μ κ°λ€λκ³ μμ¨μ~^^',
'μμ§ μ
μ΄λ³΄μ§ μμμ§λ§ κ΅μ₯ν κ°λ²Όμμ~~ λ€λ₯Έ 리뷰μ²λΌ μ΄κΉ‘μ΄ μ’ λλ€μγ
λ§μ‘±ν©λλ€. μμ² λΉ λ₯Έλ°μ‘ κ°μ¬λλ €μ :)',
'μ¬κ΅¬λ§€ νκ±΄λ° λ무λ무 κ°μ±λΉμΈκ±° κ°μμ!! λ€μμ λ μκ°λλ©΄ 3κ°μ§Έ λ μ΄λ―..γ
γ
',
'κ°μ΅λμ΄ λ무 μ μ΄μ. λ°©μ΄ μμ§ μλ€λ©΄ 무쑰건 ν°κ±Έλ‘ꡬ맀νμΈμ. λ¬Όλλ μ‘°κΈλ°μ μλ€μ΄κ°μ μ°κΈ°λ λΆνΈν¨',
'νλ²μ
μλλ° μμ λ΄μ μ λ€ νλ¦¬κ³ μ€λ°₯λ κ³μ λμ΅λλ€. λ§κ° μ²λ¦¬ λ무 μλ§ μλκ°μ?',
'λ°λ»νκ³ μ’κΈ΄νλ° λ°°μ‘μ΄ λλ €μ',
'λ§μ μλλ° κ°κ²©μ΄ μλ νΈμ΄μμ'
]
# predict
# Classify each review and print it alongside the pipeline's top prediction.
# The pipeline returns a list with one {'label': ..., 'score': ...} dict per
# input string, hence pred[0].
# Fixes: the original used enumerate() but never read the index, and the loop
# body's indentation was lost in extraction.
for review in review_list:
    pred = sentiment_classifier(review)
    print(f'{review}\n>> {pred[0]}')
μ΄μκ³ μ’μμ~~~μ»κΈ°λ νΈνκ³ μμ΄κ³ μ΄μλ€κ³ μκΈ°λ°©μ κ°λ€λκ³ μμ¨μ~^^
>> {'label': '1', 'score': 0.9945501685142517}
μμ§ μ
μ΄λ³΄μ§ μμμ§λ§ κ΅μ₯ν κ°λ²Όμμ~~ λ€λ₯Έ 리뷰μ²λΌ μ΄κΉ‘μ΄ μ’ λλ€μγ
λ§μ‘±ν©λλ€. μμ² λΉ λ₯Έλ°μ‘ κ°μ¬λλ €μ :)
>> {'label': '1', 'score': 0.995430588722229}
μ¬κ΅¬λ§€ νκ±΄λ° λ무λ무 κ°μ±λΉμΈκ±° κ°μμ!! λ€μμ λ μκ°λλ©΄ 3κ°μ§Έ λ μ΄λ―..γ
γ
>> {'label': '1', 'score': 0.9959582686424255}
κ°μ΅λμ΄ λ무 μ μ΄μ. λ°©μ΄ μμ§ μλ€λ©΄ 무쑰건 ν°κ±Έλ‘ꡬ맀νμΈμ. λ¬Όλλ μ‘°κΈλ°μ μλ€μ΄κ°μ μ°κΈ°λ λΆνΈν¨
>> {'label': '0', 'score': 0.9984619617462158}
νλ²μ
μλλ° μμ λ΄μ μ λ€ νλ¦¬κ³ μ€λ°₯λ κ³μ λμ΅λλ€. λ§κ° μ²λ¦¬ λ무 μλ§ μλκ°μ?
>> {'label': '0', 'score': 0.9991756677627563}
λ°λ»νκ³ μ’κΈ΄νλ° λ°°μ‘μ΄ λλ €μ
>> {'label': '1', 'score': 0.6473883390426636}
λ§μ μλλ° κ°κ²©μ΄ μλ νΈμ΄μμ
>> {'label': '1', 'score': 0.5128092169761658}
- label 0 : negative review
- label 1 : positive review
- Downloads last month: 25,306
This model does not have enough activity to be deployed to Inference API (serverless) yet. Increase its social
visibility and check back later, or deploy to Inference Endpoints (dedicated)
instead.