tykea committed
Commit fdb5626 • 1 Parent(s): 31cc873

adding resource files

XLM Roberta Sentiment Analysis 512 8.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
data/test.xlsx ADDED
Binary file (102 kB).
 
eda.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
run.ipynb ADDED
@@ -0,0 +1,111 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "%pip install transformers\n",
+     "%pip install torch\n",
+     "%pip install pandas\n",
+     "%pip install scikit-learn\n",
+     "%pip install datasets\n",
+     "%pip install evaluate\n",
+     "%pip install tqdm\n",
+     "%pip install openpyxl\n",
+     "%pip install numpy"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForMaskedLM\n",
+     "import torch"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Some weights of XLMRobertaForMaskedLM were not initialized from the model checkpoint at fine-tuned-512-8 and are newly initialized: ['lm_head.bias', 'lm_head.decoder.bias', 'lm_head.dense.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.bias', 'lm_head.layer_norm.weight']\n",
+       "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
+      ]
+     }
+    ],
+    "source": [
+     "model = AutoModelForSequenceClassification.from_pretrained(\"fine-tuned-512-8\")\n",
+     "masked_model = AutoModelForMaskedLM.from_pretrained(\"fine-tuned-512-8\")\n",
+     "tokenizer = AutoTokenizer.from_pretrained('tokenizer', padding=True)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def predict(text):\n",
+     "    inputs = tokenizer(text, return_tensors=\"pt\", truncation=True, padding=True)\n",
+     "    labels_mapping = {0: 'negative', 1: 'positive'}\n",
+     "    outputs = model(**inputs)\n",
+     "    logits = outputs.logits\n",
+     "    predicted_class = torch.argmax(logits, dim=1).item()\n",
+     "    print(f\"Predicted Class: {labels_mapping[predicted_class]}\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 12,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Predicted Class: negative\n"
+      ]
+     }
+    ],
+    "source": [
+     "predict('αž“αŸαŸ‡αž‚αžΈαž‡αžΆαž›αŸ’αž”αŸ‡αž˜αž·αž“αžœαž·αž‡αŸ’αž‡αž˜αžΆαž“αžŸαž˜αŸ’αžšαžΆαž”αŸ‹αž”αŸ’αžšαž‡αžΆαž‡αž“αžαŸ’αž˜αŸ‚αžš')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "cadtml",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.10.14"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
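
A note on the stderr output in the third cell of run.ipynb: it appears because the same "fine-tuned-512-8" checkpoint is also loaded into AutoModelForMaskedLM, and a sequence-classification checkpoint contains no lm_head weights, so those are randomly initialized. masked_model is never used afterwards, so the sentiment predictions are unaffected.

The notebook's predict() classifies one string at a time. Since this commit also adds data/test.xlsx, a batched variant over that file is a natural companion. The following is a minimal sketch, not part of the commit: the column name "text" and the helper name predict_batch are assumptions, since the diff does not show the spreadsheet's schema.

import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Reload the committed checkpoint and tokenizer (same local paths as in run.ipynb).
model = AutoModelForSequenceClassification.from_pretrained("fine-tuned-512-8")
tokenizer = AutoTokenizer.from_pretrained("tokenizer")

def predict_batch(path="data/test.xlsx", text_column="text", batch_size=16):
    # "text" is a hypothetical column name; the diff does not reveal test.xlsx's schema.
    df = pd.read_excel(path)  # needs openpyxl, installed in the notebook's first cell
    labels_mapping = {0: "negative", 1: "positive"}
    predictions = []
    model.eval()  # disable dropout for deterministic inference
    with torch.no_grad():  # gradients are not needed at inference time
        for start in range(0, len(df), batch_size):
            texts = df[text_column].iloc[start:start + batch_size].astype(str).tolist()
            inputs = tokenizer(texts, return_tensors="pt", truncation=True, padding=True)
            logits = model(**inputs).logits
            predictions.extend(torch.argmax(logits, dim=1).tolist())
    df["prediction"] = [labels_mapping[p] for p in predictions]
    return df

With padding=True the tokenizer pads each batch to its longest member and supplies the matching attention mask, so each sentence gets the same prediction it would from the single-example predict() above.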