File size: 1,760 Bytes
152bd83 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
import json
import itertools
from datasets import load_dataset
def segment_cells(content):
    """Split a parsed notebook into parallel lists of cell texts and types.

    Parameters
    ----------
    content : dict
        Parsed notebook JSON; expected to contain a 'cells' list where each
        cell has a 'source' (list of strings) and a 'cell_type'.

    Returns
    -------
    tuple[list[str], list[str]]
        (cells, cell_types): the joined source text and the type of every
        non-empty cell, in notebook order.
    """
    cells = []
    cell_types = []
    for cell in content['cells']:
        # keep only cells that actually have source content
        if cell['source']:
            cells.append(' '.join(cell['source']))
            cell_types.append(cell['cell_type'])
    return cells, cell_types
def parse_notebook(batch):
    """Parse one raw notebook record into runs of consecutive same-type cells.

    Adds 'cells' (list of lists of cell texts), 'cell_types' (one type per
    run) and 'cell_type_groups' (the run of types itself) to `batch`, and
    removes the raw 'content' field. On malformed input, placeholder
    `['empty']` values are written instead so the record can be filtered
    out downstream.

    Parameters
    ----------
    batch : dict
        A dataset record with a 'content' field holding raw notebook JSON.

    Returns
    -------
    dict
        The same `batch`, mutated as described above.
    """
    try:
        cells, types = segment_cells(json.loads(batch['content']))
        # Single pass over `types`: groupby yields one (key, run) pair per
        # run of equal consecutive cell types.  (The original iterated
        # groupby twice to build the two lists separately.)
        cell_type_groups = []
        cell_types = []
        for cell_type, run in itertools.groupby(types):
            run = list(run)
            cell_type_groups.append(run)
            cell_types.append(cell_type)
        # Slice the cell texts into the same consecutive runs.
        cell_groups = []
        group_start = 0
        for run in cell_type_groups:
            cell_groups.append(cells[group_start:group_start + len(run)])
            group_start += len(run)
        batch['cells'] = cell_groups
        batch['cell_types'] = cell_types
        batch['cell_type_groups'] = cell_type_groups
    # Catch only the expected failure modes (invalid JSON, unexpected
    # schema) instead of a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit and hide real bugs.
    except (json.JSONDecodeError, KeyError, TypeError):
        batch['cells'] = [['empty']]
        batch['cell_types'] = ['empty']
        batch['cell_type_groups'] = [['empty']]
    del batch['content']
    return batch
if __name__ == "__main__":
    # Load the raw Jupyter-notebook subset of The Stack.
    dataset = load_dataset(
        "bigcode/the-stack",
        data_dir="data/jupyter-notebook",
        split="train",
        use_auth_token=True,
    )
    # Parse each notebook into grouped cells.  BUG FIX: the original called
    # `dataset.map(segment)`, but `segment` is undefined in this file — the
    # mapping function defined above is `parse_notebook`.
    dataset = dataset.map(parse_notebook)
    # Filter out erroneous records via the placeholders added on parse failure.
    dataset = dataset.filter(lambda entry: entry['cell_types'] != ['empty'])
    # Push the parsed dataset to the Hugging Face Hub.
    dataset.push_to_hub("bigcode/jupyter-parsed")