Datasets
Tasks: Sentence Similarity
Modalities: Text
Formats: parquet
Sub-tasks: semantic-similarity-classification
Languages: English
Size: 1K - 10K
License:
devopsmarc committed · Commit 448c5dd · 1 Parent(s): c3306ef
Delete Github_Issues_Dataset.ipynb
Browse files
- Github_Issues_Dataset.ipynb +0 -750

Github_Issues_Dataset.ipynb  DELETED
@@ -1,750 +0,0 @@
The 750 removed lines are the notebook's JSON source. Reconstructed cell by cell, the deleted notebook reads as follows.

In [1]:
!pip install requests

Output: pip reports "Requirement already satisfied" for requests (2.29.0) and its dependencies charset-normalizer (2.0.4), idna (3.4), urllib3 (1.26.16) and certifi (2023.5.7) in the local Anaconda environment.
In [2]:
import requests

url = "https://api.github.com/repos/huggingface/datasets/issues?page=1&per_page=1"
response = requests.get(url)
In [3]:
response.status_code

Out[3]: 200
In [4]:
response.json()

Out[4]: a list containing a single issue record (abbreviated):
[{'url': 'https://api.github.com/repos/huggingface/datasets/issues/6151',
  'repository_url': 'https://api.github.com/repos/huggingface/datasets',
  'html_url': 'https://github.com/huggingface/datasets/issues/6151',
  'id': 1851497818,
  'node_id': 'I_kwDODunzps5uW51a',
  'number': 6151,
  'title': 'Faster sorting for single key items',
  'user': {'login': 'jackapbutler', 'id': 47942453, ...},
  'labels': [{'name': 'enhancement', 'color': 'a2eeef', 'description': 'New feature or request', ...}],
  'state': 'open',
  'locked': False,
  'assignee': None,
  'assignees': [],
  'milestone': None,
  'comments': 0,
  'created_at': '2023-08-15T14:02:31Z',
  'updated_at': '2023-08-15T14:17:09Z',
  'closed_at': None,
  'author_association': 'NONE',
  'active_lock_reason': None,
  'body': '### Feature request\r\n\r\nA faster way to sort a dataset which contains a large number of rows. ...',
  'reactions': {'total_count': 0, ...},
  'timeline_url': 'https://api.github.com/repos/huggingface/datasets/issues/6151/timeline',
  'performed_via_github_app': None,
  'state_reason': None}]
In [5]:
GITHUB_TOKEN = "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"
headers = {"Authorization": f"token {GITHUB_TOKEN}"}
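Hard-coding a personal access token in a notebook is easy to leak once the file is committed. A minimal alternative sketch (not part of the original notebook, and assuming the token has been exported as the environment variable GITHUB_TOKEN beforehand) reads it from the environment instead:

    import os

    # Hypothetical alternative: read the token from the environment rather than
    # embedding it in the notebook source.
    GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]  # assumes the variable is set in the shell
    headers = {"Authorization": f"token {GITHUB_TOKEN}"}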
In [6]:
import time
import math
from pathlib import Path
import pandas as pd
from tqdm.notebook import tqdm


def fetch_issues(
    owner="huggingface",
    repo="datasets",
    num_issues=10_000,
    rate_limit=5_000,
    issues_path=Path("."),
):
    if not issues_path.is_dir():
        issues_path.mkdir(exist_ok=True)

    batch = []
    all_issues = []
    per_page = 100  # Number of issues to return per page
    num_pages = math.ceil(num_issues / per_page)
    base_url = "https://api.github.com/repos"

    for page in tqdm(range(num_pages)):
        # Query with state=all to get both open and closed issues
        query = f"issues?page={page}&per_page={per_page}&state=all"
        issues = requests.get(f"{base_url}/{owner}/{repo}/{query}", headers=headers)
        batch.extend(issues.json())

        if len(batch) > rate_limit and len(all_issues) < num_issues:
            all_issues.extend(batch)
            batch = []  # Flush batch for next time period
            print(f"Reached GitHub rate limit. Sleeping for one hour ...")
            time.sleep(60 * 60 + 1)

    all_issues.extend(batch)
    df = pd.DataFrame.from_records(all_issues)
    df.to_json(f"{issues_path}/{repo}-issues.jsonl", orient="records", lines=True)
    print(
        f"Downloaded all the issues for {repo}! Dataset stored at {issues_path}/{repo}-issues.jsonl"
    )
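Sleeping a fixed hour whenever more than 5,000 records have accumulated is a coarse proxy for GitHub's actual rate limit. A more direct approach (a sketch, not the notebook's own code) is to read the X-RateLimit-Remaining and X-RateLimit-Reset headers that the API returns with every response, and pause only until the limit actually resets:

    import time

    def wait_if_rate_limited(response, min_remaining=1):
        """Sleep until the rate limit resets when the response says we are out of calls."""
        remaining = int(response.headers.get("X-RateLimit-Remaining", min_remaining))
        if remaining < min_remaining:
            reset_at = int(response.headers.get("X-RateLimit-Reset", time.time() + 60))
            sleep_for = max(reset_at - time.time(), 0) + 1
            print(f"Rate limit exhausted; sleeping {sleep_for:.0f}s")
            time.sleep(sleep_for)

fetch_issues could call wait_if_rate_limited(issues) after each request instead of counting batched records; the function name here is illustrative.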
In [7]:
fetch_issues()

Output: a tqdm progress bar over the 100 pages (0%| | 0/100), followed by:
Reached GitHub rate limit. Sleeping for one hour ...
Downloaded all the issues for datasets! Dataset stored at ./datasets-issues.jsonl
In [8]:
!pip install pandas

Output: pip reports "Requirement already satisfied" for pandas (1.5.3) and its dependencies python-dateutil (2.8.2), pytz (2022.7), numpy (1.24.3) and six (1.16.0).

In [9]:
import os

# Get the current working directory
current_dir = os.getcwd()

# Construct the absolute path to the file
file_path = os.path.join(current_dir, "datasets-issues.jsonl")

print(f"The file is located at: {file_path}")

Output:
The file is located at: C:\Users\060971CA8\a_HF_Spaces\datasets-issues.jsonl

In [10]:
pip install datasets

Output: pip reports "Requirement already satisfied" for datasets (2.13.1) and its dependencies (numpy 1.24.3, pyarrow 11.0.0, dill 0.3.6, pandas 1.5.3, requests 2.29.0, tqdm 4.65.0, xxhash 3.2.0, multiprocess 0.70.14, fsspec 2023.3.0, aiohttp 3.8.3, huggingface-hub 0.16.4, packaging 23.0, pyyaml 6.0 and their transitive dependencies), ending with "Note: you may need to restart the kernel to use updated packages."

In [11]:
pip install datasets --upgrade

Output: pip downloads datasets-2.14.4-py3-none-any.whl (519 kB), finds the same dependencies already satisfied, uninstalls datasets 2.13.1 and reports "Successfully installed datasets-2.14.4", followed by "Note: you may need to restart the kernel to use updated packages."
In [12]:
from datasets import load_dataset
from datasets import Dataset
In [15]:
from datasets import load_dataset

# Correct file path
file_path = "C:/Users/060971CA8/a_HF_Spaces/datasets-issues.jsonl"
issues_dataset = load_dataset("json", data_files=file_path, split="train")

print(issues_dataset)

Output: the "Downloading data files", "Extracting data files" and "Generating train split" progress bars appear, then the load fails. The traceback runs through datasets' Arrow writer (builder.py -> arrow_writer.py -> table.py) and ends with:

TypeError: Couldn't cast array of type timestamp[s] to null

The above exception was the direct cause of the following exception:

DatasetGenerationError: An error occurred while generating the dataset
In [16]:
import pandas as pd
from datasets import Dataset

# Correct file path
file_path = "C:/Users/060971CA8/a_HF_Spaces/datasets-issues.jsonl"
df = pd.read_json(file_path, lines=True)

for col in df.columns:
    if df[col].dtype == 'datetime64[ns]':
        df[col] = df[col].astype(str)

issues_dataset = Dataset.from_pandas(df)

print(issues_dataset)

Output:
Dataset({
    features: ['url', 'repository_url', 'labels_url', 'comments_url', 'events_url', 'html_url', 'id', 'node_id', 'number', 'title', 'user', 'labels', 'state', 'locked', 'assignee', 'assignees', 'milestone', 'comments', 'created_at', 'updated_at', 'closed_at', 'author_association', 'active_lock_reason', 'body', 'reactions', 'timeline_url', 'performed_via_github_app', 'state_reason', 'draft', 'pull_request'],
    num_rows: 6139
})
In [17]:
sample = issues_dataset.shuffle(seed=666).select(range(3))
In [18]:
for url, pr in zip(sample["html_url"], sample["pull_request"]):
    print(f">> URL: {url}")
    print(f">> Pull request: {pr}\n")

Output:
>> URL: https://github.com/huggingface/datasets/pull/4516
>> Pull request: {'diff_url': 'https://github.com/huggingface/datasets/pull/4516.diff', 'html_url': 'https://github.com/huggingface/datasets/pull/4516', 'merged_at': '2022-06-28T13:23:05Z', 'patch_url': 'https://github.com/huggingface/datasets/pull/4516.patch', 'url': 'https://api.github.com/repos/huggingface/datasets/pulls/4516'}

>> URL: https://github.com/huggingface/datasets/issues/5346
>> Pull request: None

>> URL: https://github.com/huggingface/datasets/pull/783
>> Pull request: {'diff_url': 'https://github.com/huggingface/datasets/pull/783.diff', 'html_url': 'https://github.com/huggingface/datasets/pull/783', 'merged_at': None, 'patch_url': 'https://github.com/huggingface/datasets/pull/783.patch', 'url': 'https://api.github.com/repos/huggingface/datasets/pulls/783'}
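The pull_request field distinguishes pull requests from plain issues, so the rows can be flagged (or filtered out) before building a text corpus. A brief follow-up sketch, not part of the original notebook, using the Dataset.map and Dataset.filter methods:

    # Hypothetical follow-up: mark each row as an issue or a pull request,
    # then keep only the genuine issues.
    issues_dataset = issues_dataset.map(
        lambda x: {"is_pull_request": x["pull_request"] is not None}
    )
    only_issues = issues_dataset.filter(lambda x: not x["is_pull_request"])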
In [19]:
issue_number = 2792
url = f"https://api.github.com/repos/huggingface/datasets/issues/{issue_number}/comments"
response = requests.get(url, headers=headers)
response.json()

Out[19]: a list of two comment records on pull request #2792, both by user 'bhavitvyamalik' (CONTRIBUTOR). Abbreviated:
[{'url': 'https://api.github.com/repos/huggingface/datasets/issues/comments/897594128',
  'html_url': 'https://github.com/huggingface/datasets/pull/2792#issuecomment-897594128',
  'id': 897594128,
  'node_id': 'IC_kwDODunzps41gDMQ',
  'user': {'login': 'bhavitvyamalik', 'id': 19718818, ...},
  'created_at': '2021-08-12T12:21:52Z',
  'updated_at': '2021-08-12T12:31:17Z',
  'author_association': 'CONTRIBUTOR',
  'body': "@albertvillanova my tests are failing here: ... E AssertionError: False is not true ... When I try loading dataset on local machine it works fine. Any suggestions on how can I avoid this error?",
  'reactions': {'total_count': 0, ...},
  'performed_via_github_app': None},
 {'url': 'https://api.github.com/repos/huggingface/datasets/issues/comments/898644889',
  'id': 898644889,
  'node_id': 'IC_kwDODunzps41kDuZ',
  'user': {'login': 'bhavitvyamalik', ...},
  'created_at': '2021-08-13T18:28:27Z',
  'author_association': 'CONTRIBUTOR',
  'body': 'Thanks for the help, @albertvillanova! All tests are passing now.',
  'reactions': {'total_count': 0, ...},
  'performed_via_github_app': None}]
In [20]:
def get_comments(issue_number):
    url = f"https://api.github.com/repos/huggingface/datasets/issues/{issue_number}/comments"
    response = requests.get(url, headers=headers)
    return [r["body"] for r in response.json()]


# Test our function works as expected
get_comments(2792)

Out[20]:
["@albertvillanova my tests are failing here: ... (the full first comment shown above)",
 'Thanks for the help, @albertvillanova! All tests are passing now.']
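get_comments assumes every request succeeds and that an issue never has more than one page of comments. A more defensive variant, sketched here under those assumptions rather than taken from the notebook, checks the status code and follows GitHub's per_page/page pagination parameters:

    def get_comments_safe(issue_number, per_page=100):
        """Collect all comment bodies for an issue, following pagination."""
        # Reuses the `requests` module and `headers` dict from the earlier cells.
        bodies, page = [], 1
        while True:
            url = (
                f"https://api.github.com/repos/huggingface/datasets/issues/"
                f"{issue_number}/comments?per_page={per_page}&page={page}"
            )
            response = requests.get(url, headers=headers)
            if response.status_code != 200:
                # Skip issues we cannot read instead of crashing the whole map()
                return bodies
            batch = response.json()
            bodies.extend(comment["body"] for comment in batch)
            if len(batch) < per_page:  # last page reached
                return bodies
            page += 1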
In [21]:
# Depending on your internet connection, this can take a few minutes...
issues_with_comments_dataset = issues_dataset.map(
    lambda x: {"comments": get_comments(x["number"])}
)

Output: a "Map: 0%| | 0/6139" progress bar as the comments are fetched for all 6,139 issues.
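Once the comments column has been added, the augmented dataset can be written back to disk so the scrape does not have to be repeated. A short sketch (the output file name is illustrative, not from the original notebook):

    # Persist the augmented dataset as JSON Lines for later reuse or upload.
    issues_with_comments_dataset.to_json("datasets-issues-with-comments.jsonl")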
In [ ]:
(empty cell)

The notebook metadata records a Python 3 (ipykernel) kernel, Python 3.11.3, nbformat 4 (minor 5).