# Scrape artifacts from the original listing (file size, commit hash, and a
# dump of display line numbers) converted to comments so the module parses:
# File size: 8,131 Bytes
# commit 03f95d1
import json
import random
import os
import datasets
# GitHub logins excluded via the opt-out list (per the variable name, from
# The Stack v1.2); the opt_out() filter below drops rows whose single repo
# is owned by one of these accounts.
opt_out_github_login_the_stack_12 = [
    'tomas',
    'bermannoah',
    'rtzoeller',
    'sdatkinson',
    'jtth',
    'benui-dev',
    'DrTimothyAldenDavis',
    'pranftw',
    'marcsances',
    'sitmo',
    'jucor',
    'digantamisra98',
    'przemb',
    'jisakiel',
    'kellycochran',
    'AkashM398',
    'MeSayan',
    'SWinxy',
    'zitterbewegung',
    'fredster33',
    'demangejeremy',
    'PPPI',
    'henilp105',
    'tobymcclean',
    'cynddl',
    'spate141',
    'plaaosert',
    'woylie',
    'alexkoster',
    'satyrnidae',
    'rclark',
    'zakkolar',
    'jimscard',
    'marjoleinF',
    'ghchinoy',
    'jmfadeley',
    'fuhrmanator',
    'stephen-three',
    'retro-ai',
    'abolouri',
    'matthewheath',
    'jollypolly123',
    'RJRauch',
    'ivansche',
]
# Shard files of diff records to process, one JSONL file per id range.
# BUG FIX: the original literal pasted the run diffs_49807361... through
# diffs_58195969... twice, so 20 shards would have been processed (and their
# outputs rewritten) twice.  dict.fromkeys() removes the duplicates while
# preserving first-seen order, keeping the literal itself intact.
paths = list(dict.fromkeys('''diffs_10485761_11010048.jsonl
diffs_1048577_1572864.jsonl
diffs_11010049_11534336.jsonl
diffs_11534337_12058624.jsonl
diffs_12058625_12582912.jsonl
diffs_12582913_13107200.jsonl
diffs_13107201_13631488.jsonl
diffs_13631489_14155776.jsonl
diffs_14155777_14680064.jsonl
diffs_14680065_15204352.jsonl
diffs_15204353_15728640.jsonl
diffs_15728641_16252928.jsonl
diffs_1572865_2097152.jsonl
diffs_16252929_16777216.jsonl
diffs_1_524288.jsonl
diffs_2097153_2621440.jsonl
diffs_25165825_25690112.jsonl
diffs_25690113_26214400.jsonl
diffs_26214401_26738688.jsonl
diffs_2621441_3145728.jsonl
diffs_26738689_27262976.jsonl
diffs_27262977_27787264.jsonl
diffs_27787265_28311552.jsonl
diffs_28311553_28835840.jsonl
diffs_28835841_29360128.jsonl
diffs_29360129_29884416.jsonl
diffs_29884417_30408704.jsonl
diffs_30408705_30932992.jsonl
diffs_30932993_31457280.jsonl
diffs_31457281_31981568.jsonl
diffs_3145729_3670016.jsonl
diffs_31981569_32505856.jsonl
diffs_32505857_33030144.jsonl
diffs_33030145_33554432.jsonl
diffs_33554433_34078720.jsonl
diffs_34078721_34603008.jsonl
diffs_34603009_35127296.jsonl
diffs_35127297_35651584.jsonl
diffs_35651585_36175872.jsonl
diffs_36175873_36700160.jsonl
diffs_36700161_37224448.jsonl
diffs_3670017_4194304.jsonl
diffs_37224449_37748736.jsonl
diffs_37748737_38273024.jsonl
diffs_38273025_38797312.jsonl
diffs_38797313_39321600.jsonl
diffs_39321601_39845888.jsonl
diffs_39845889_40370176.jsonl
diffs_40370177_40894464.jsonl
diffs_40894465_41418752.jsonl
diffs_41418753_41943040.jsonl
diffs_41943041_42467328.jsonl
diffs_4194305_4718592.jsonl
diffs_42467329_42991616.jsonl
diffs_42991617_43515904.jsonl
diffs_43515905_44040192.jsonl
diffs_44040193_44564480.jsonl
diffs_44564481_45088768.jsonl
diffs_45088769_45613056.jsonl
diffs_45613057_46137344.jsonl
diffs_46137345_46661632.jsonl
diffs_46661633_47185920.jsonl
diffs_47185921_47710208.jsonl
diffs_4718593_5242880.jsonl
diffs_47710209_48234496.jsonl
diffs_48234497_48758784.jsonl
diffs_48758785_49283072.jsonl
diffs_49283073_49807360.jsonl
diffs_49807361_50331648.jsonl
diffs_50331649_50855936.jsonl
diffs_50855937_51380224.jsonl
diffs_51380225_51904512.jsonl
diffs_51904513_52428800.jsonl
diffs_52428801_52953088.jsonl
diffs_5242881_5767168.jsonl
diffs_524289_1048576.jsonl
diffs_52953089_53477376.jsonl
diffs_53477377_54001664.jsonl
diffs_54001665_54525952.jsonl
diffs_54525953_55050240.jsonl
diffs_55050241_55574528.jsonl
diffs_55574529_56098816.jsonl
diffs_56098817_56623104.jsonl
diffs_56623105_57147392.jsonl
diffs_57147393_57671680.jsonl
diffs_57671681_58195968.jsonl
diffs_5767169_6291456.jsonl
diffs_58195969_58720256.jsonl
diffs_49807361_50331648.jsonl
diffs_50331649_50855936.jsonl
diffs_50855937_51380224.jsonl
diffs_51380225_51904512.jsonl
diffs_51904513_52428800.jsonl
diffs_52428801_52953088.jsonl
diffs_5242881_5767168.jsonl
diffs_524289_1048576.jsonl
diffs_52953089_53477376.jsonl
diffs_53477377_54001664.jsonl
diffs_54001665_54525952.jsonl
diffs_54525953_55050240.jsonl
diffs_55050241_55574528.jsonl
diffs_55574529_56098816.jsonl
diffs_56098817_56623104.jsonl
diffs_56623105_57147392.jsonl
diffs_57147393_57671680.jsonl
diffs_57671681_58195968.jsonl
diffs_5767169_6291456.jsonl
diffs_58195969_58720256.jsonl
diffs_58720257_59244544.jsonl
diffs_59244545_59768832.jsonl
diffs_59768833_60293120.jsonl
diffs_60293121_60817408.jsonl
diffs_60817409_61341696.jsonl
diffs_61341697_61865984.jsonl
diffs_61865985_62390272.jsonl
diffs_62390273_62914560.jsonl
diffs_62914561_63438848.jsonl
diffs_6291457_6815744.jsonl
diffs_63438849_63963136.jsonl
diffs_63963137_64487424.jsonl
diffs_64487425_65011712.jsonl
diffs_65011713_65536000.jsonl
diffs_65536001_66060288.jsonl
diffs_66060289_66584576.jsonl
diffs_66584577_67108864.jsonl
diffs_6815745_7340032.jsonl
diffs_7340033_7864320.jsonl
diffs_7864321_8388608.jsonl
diffs_8388609_8912896.jsonl
diffs_8912897_9437184.jsonl
diffs_9437185_9961472.jsonl'''.splitlines()))
# Disabled first-pass stage: joins every diff shard with per-repo license
# information and applies The Stack opt-out filter, writing *_new.jsonl files.
# Kept guarded by `if False:` so it does not re-run; restore the original
# `__name__ == "__main__"` condition to re-enable.
if False:  # __name__ == "__main__":
    # repo -> license name.  Input rows look like:
    # {"repos":"samoht/ocaml-conduit,avsm/ocaml-conduit,...","license":"isc"}
    data = {}
    with open('licenses_merged.jsonl', 'r') as f:
        for line in f:
            line = line.strip()
            if not line:  # tolerate blank lines in the JSONL
                continue
            d = json.loads(line)
            for repo in d['repos'].split(","):
                data[repo] = d['license']
    NUM_PROC = 8  # worker processes for datasets' to_json
    for path in paths:
        ds = datasets.load_dataset("json", data_files=path)
        print(ds)
        # BUG FIX: random.sample() requires a sequence; dict.items() is a
        # set-like view (deprecated for sample() in 3.9, TypeError since
        # 3.11), so materialize it into a list first.
        print(random.sample(list(data.items()), 10))

        def map_col(example):
            """Attach the license of the first repo found in the lookup,
            falling back to 'unknown' when no repo matches."""
            for repo in example["repos"].split(","):
                if repo in data:
                    example["license"] = data[repo]
                    return example
            example["license"] = 'unknown'
            return example

        def opt_out(example):
            """Keep the row unless its single repo is owned by an
            opted-out GitHub login."""
            repos = example["repos"].split(",")
            if (len(repos) == 1) and (repos[0].split("/")[0] in opt_out_github_login_the_stack_12):
                return False
            return True

        ds['train'].map(map_col).filter(opt_out).to_json(path.replace(".jsonl", "_new.jsonl"), num_proc=NUM_PROC)
"""
AND ( (lang_table.language_name LIKE 'Python')
OR (lang_table.language_name LIKE 'Java')
OR (lang_table.language_name LIKE 'JavaScript')
OR (lang_table.language_name LIKE 'HTML')
OR (lang_table.language_name LIKE 'Common Lisp')
OR (lang_table.language_name LIKE 'Shell')
OR (lang_table.language_name LIKE 'R')
OR (lang_table.language_name LIKE 'Perl%')
OR (lang_table.language_name LIKE 'SQL')
OR (lang_table.language_name LIKE 'C')
OR (lang_table.language_name LIKE 'C#')
OR (lang_table.language_name LIKE 'C++')
OR (lang_table.language_name LIKE 'TypeScript')
OR (lang_table.language_name LIKE 'Go')
OR (lang_table.language_name LIKE 'Rust')
OR (lang_table.language_name LIKE 'Swift')
OR (lang_table.language_name LIKE 'PHP')
OR (lang_table.language_name LIKE 'Dart')
OR (lang_table.language_name LIKE 'Kotlin')
OR (lang_table.language_name LIKE 'Matlab')
OR (lang_table.language_name LIKE 'MATLAB')
OR (lang_table.language_name LIKE 'Ruby') )
"""
if __name__ == "__main__":
with open("programming_languages.json", "r") as f:
prog_langs = json.load(f)
ext_to_lang = {}
for lang, exts in prog_langs.items():
for ext in exts:
ext_to_lang[ext] = lang
LANGS = [
'Python',
'Java',
'JavaScript',
'HTML',
'Common Lisp',
'Shell',
'R',
'Perl',
'Perl6', # Differentiate Perl6 from Perl
'SQL',
'C',
'C#',
'C++',
'TypeScript',
'Go',
'Rust',
'Swift',
'PHP',
'Dart',
'Kotlin',
'Matlab',
'MATLAB',
'Ruby'
]
for lang in LANGS:
if lang not in prog_langs:
print("Missing", lang)
def add_lang(example):
example["language"] = ext_to_lang.get(example["new_file"].split(".")[-1], "unknown")
return example
for path in paths:
ds = datasets.load_dataset("json", data_files=path)
ds = ds['train'].map(add_lang)
vals = ds.unique('language')
for val in vals:
ds.filter(lambda x: x['language'] == val).to_json(path.replace(".jsonl", f"_{val}.jsonl"), num_proc=NUM_PROC)
# (end of file — trailing scrape artifact removed)