{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install transformers\n",
    "%pip install torch\n",
    "%pip install pandas\n",
    "%pip install scikit-learn\n",
    "%pip install datasets\n",
    "%pip install evaluate\n",
    "%pip install tqdm\n",
    "%pip install openpyxl\n",
    "%pip install numpy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForMaskedLM\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of XLMRobertaForMaskedLM were not initialized from the model checkpoint at fine-tuned-512-8 and are newly initialized: ['lm_head.bias', 'lm_head.decoder.bias', 'lm_head.dense.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.bias', 'lm_head.layer_norm.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "model = AutoModelForSequenceClassification.from_pretrained(\"fine-tuned-512-8\")\n",
    "masked_model = AutoModelForMaskedLM.from_pretrained(\"fine-tuned-512-8\")  \n",
    "tokenizer = AutoTokenizer.from_pretrained('tokenizer', padding=True)"
   ]
  },
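  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The warning above is expected: the checkpoint was fine-tuned for sequence classification, so the masked-LM head has no saved weights and is freshly initialized. As a small precaution before inference, both models can be switched to eval mode (a sketch; it simply disables dropout and other train-time behavior):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deterministic inference: disable dropout and other train-time behavior.\n",
    "model.eval()\n",
    "masked_model.eval();  # trailing semicolon suppresses the module repr"
   ]
  },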
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(text):\n",
    "    inputs = tokenizer(text, return_tensors=\"pt\", truncation=True, padding=True)\n",
    "    labels_mapping = {0: 'negative', 1: 'positive'}\n",
    "    outputs = model(**inputs)\n",
    "    logits = outputs.logits\n",
    "    predicted_class = torch.argmax(logits, dim=1).item()\n",
    "    print(f\"Predicted Class: {labels_mapping[predicted_class]}\")"
   ]
  },
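  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For inspection it can help to see class probabilities rather than only the argmax label. Below is a minimal sketch of a hypothetical `predict_proba` helper (the name is introduced here, not part of the original notebook) that runs the same forward pass as `predict` and applies a softmax to the logits."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical helper, for illustration: same forward pass as predict(),\n",
    "# but returns a {label: probability} dict via softmax over the logits.\n",
    "# Assumes the two-class mapping used above.\n",
    "def predict_proba(text):\n",
    "    labels_mapping = {0: 'negative', 1: 'positive'}\n",
    "    inputs = tokenizer(text, return_tensors=\"pt\", truncation=True, padding=True)\n",
    "    with torch.no_grad():\n",
    "        logits = model(**inputs).logits\n",
    "    probs = torch.softmax(logits, dim=1).squeeze(0)\n",
    "    return {labels_mapping[i]: probs[i].item() for i in range(probs.shape[0])}"
   ]
  },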
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicted Class: negative\n"
     ]
    }
   ],
   "source": [
    "predict('αž“αŸαŸ‡αž‚αžΈαž‡αžΆαž›αŸ’αž”αŸ‡αž˜αž·αž“αžœαž·αž‡αŸ’αž‡αž˜αžΆαž“αžŸαž˜αŸ’αžšαžΆαž”αŸ‹αž”αŸ’αžšαž‡αžΆαž‡αž“αžαŸ’αž˜αŸ‚αžš')"
   ]
  },
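  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Looping `predict` over many texts runs one forward pass per sentence. Below is a sketch of a batched variant, assuming the same `model` and `tokenizer` from above (`predict_batch` is a name introduced here for illustration):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative batched inference: tokenize a list of texts together\n",
    "# (padding makes the batch rectangular) and classify them in one pass.\n",
    "def predict_batch(texts):\n",
    "    labels_mapping = {0: 'negative', 1: 'positive'}\n",
    "    inputs = tokenizer(texts, return_tensors=\"pt\", truncation=True, padding=True)\n",
    "    with torch.no_grad():\n",
    "        logits = model(**inputs).logits\n",
    "    return [labels_mapping[i] for i in torch.argmax(logits, dim=1).tolist()]"
   ]
  }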
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cadtml",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}