lumalik committed on
Commit f2f3017 · 1 Parent(s): 1c8882e

simplified example code

Files changed (1)
  1. README.md +9 -12
README.md CHANGED
@@ -17,20 +17,17 @@ tokenizer = AutoTokenizer.from_pretrained("lumalik/vent-roberta-emotion")
 model = AutoModelForSequenceClassification.from_pretrained("lumalik/vent-roberta-emotion")
 model.eval()
 
-texts = ["I love her sooo much", "I hate you!"]
+texts = ["You wont believe what happened to me today",
+         "You wont believe what happened to me today!",
+         "You wont believe what happened to me today...",
+         "You wont believe what happened to me today <3",
+         "You wont believe what happened to me today :)",
+         "You wont believe what happened to me today :("]
 
 for text in texts:
-    encoded_text = tokenizer.encode_plus(text,
-                                         add_special_tokens=True,
-                                         max_length=128,
-                                         return_token_type_ids=True,
-                                         padding="max_length",
-                                         truncation=True,
-                                         return_attention_mask=True)
-
-    output = model(input_ids=torch.tensor(encoded_text['input_ids'], dtype=torch.long).unsqueeze(0),
-                   token_type_ids=torch.tensor(encoded_text['token_type_ids'], dtype=torch.long).unsqueeze(0),
-                   attention_mask=torch.tensor(encoded_text['attention_mask'], dtype=torch.long).unsqueeze(0))
+    encoded_text = tokenizer(text, return_tensors="pt")
+
+    output = model(**encoded_text)
 
     output = softmax(output[0].detach().numpy(), axis=1)
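For reference, here is the simplified example from this commit assembled into a single runnable sketch. The imports and the final print are assumptions (they sit outside the changed hunk and are not shown in this diff); softmax is assumed to be scipy.special.softmax, matching the axis=1 call above.

```python
# Minimal sketch of the simplified README example, under the assumptions above.
from scipy.special import softmax  # assumption: source of softmax(..., axis=1)
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("lumalik/vent-roberta-emotion")
model = AutoModelForSequenceClassification.from_pretrained("lumalik/vent-roberta-emotion")
model.eval()

texts = ["You wont believe what happened to me today",
         "You wont believe what happened to me today!",
         "You wont believe what happened to me today...",
         "You wont believe what happened to me today <3",
         "You wont believe what happened to me today :)",
         "You wont believe what happened to me today :("]

for text in texts:
    # tokenizer(...) already returns batched PyTorch tensors, so the manual
    # torch.tensor(...).unsqueeze(0) wrapping from the old example is not needed.
    encoded_text = tokenizer(text, return_tensors="pt")

    output = model(**encoded_text)

    # output[0] holds the raw logits; softmax turns them into class probabilities.
    probabilities = softmax(output[0].detach().numpy(), axis=1)
    print(text, probabilities)  # assumption: how the README reports the scores
```

The net effect of the +9/-12 change is that tokenizer(text, return_tensors="pt") replaces tokenizer.encode_plus(...) plus the hand-built tensors, and model(**encoded_text) forwards input_ids, attention_mask, and (if present) token_type_ids automatically.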