Pierre Lepagnol committed on
Commit
b109984
1 Parent(s): 5a6e9a6

Adding scripts + data

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. classification/Bioresponse/label.json +4 -0
  2. classification/Bioresponse/rules.json +472 -0
  3. classification/Bioresponse/test.json +0 -0
  4. classification/Bioresponse/train.json +3 -0
  5. classification/Bioresponse/valid.json +0 -0
  6. classification/PhishingWebsites/label.json +4 -0
  7. classification/PhishingWebsites/rules.json +357 -0
  8. classification/PhishingWebsites/test.json +0 -0
  9. classification/PhishingWebsites/train.json +0 -0
  10. classification/PhishingWebsites/valid.json +0 -0
  11. classification/agnews/label.json +6 -0
  12. classification/agnews/readme.txt +87 -0
  13. classification/agnews/test.json +0 -0
  14. classification/agnews/train.json +3 -0
  15. classification/agnews/valid.json +0 -0
  16. classification/bank-marketing/label.json +4 -0
  17. classification/bank-marketing/rules.json +472 -0
  18. classification/bank-marketing/test.json +0 -0
  19. classification/bank-marketing/train.json +0 -0
  20. classification/bank-marketing/valid.json +0 -0
  21. classification/basketball/label.json +1 -0
  22. classification/basketball/readme.txt +18 -0
  23. classification/basketball/test.json +3 -0
  24. classification/basketball/train.json +3 -0
  25. classification/basketball/valid.json +3 -0
  26. classification/cdr/label.json +1 -0
  27. classification/cdr/readme.txt +286 -0
  28. classification/cdr/test.json +0 -0
  29. classification/cdr/train.json +0 -0
  30. classification/cdr/valid.json +0 -0
  31. classification/census/label.json +1 -0
  32. classification/census/labeled_ids.json +4 -0
  33. classification/census/readme.txt +20 -0
  34. classification/census/test.json +3 -0
  35. classification/census/train.json +0 -0
  36. classification/census/valid.json +0 -0
  37. classification/chemprot/label.json +12 -0
  38. classification/chemprot/readme.txt +213 -0
  39. classification/chemprot/test.json +0 -0
  40. classification/chemprot/train.json +0 -0
  41. classification/chemprot/valid.json +0 -0
  42. classification/commercial/label.json +1 -0
  43. classification/commercial/readme.txt +22 -0
  44. classification/commercial/test.json +3 -0
  45. classification/commercial/train.json +3 -0
  46. classification/commercial/valid.json +3 -0
  47. classification/imdb/label.json +4 -0
  48. classification/imdb/readme.txt +60 -0
  49. classification/imdb/test.json +0 -0
  50. classification/imdb/train.json +3 -0
classification/Bioresponse/label.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "0": "0",
+ "1": "1"
+ }
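Note: each dataset directory ships a label.json like the one above, mapping class indices (JSON keys, as strings) to the original label names. A minimal loading sketch using only the Python standard library; the variable names are ours, not part of the repo:

import json

# label.json maps class index (as a string) -> original label name.
with open("classification/Bioresponse/label.json") as f:
    id2label = json.load(f)                       # {"0": "0", "1": "1"}
label2id = {name: int(idx) for idx, name in id2label.items()}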
classification/Bioresponse/rules.json ADDED
@@ -0,0 +1,472 @@
+ {
+   "rules": [
+     {"rule": [[740, 0.054, false], [218, 0.663, false], [1271, 0.5, false]], "label": 1, "accuracy": 100.0, "support": 2},
+     {"rule": [[85, 0.166, false], [1722, 0.5, true], [102, 0.255, true]], "label": 0, "accuracy": 75.0, "support": 5},
+     {"rule": [[4, 0.162, false], [194, 0.737, false], [14, 0.558, true]], "label": 0, "accuracy": 66.67, "support": 8},
+     {"rule": [[597, 0.125, true], [700, 0.033, true], [1340, 0.5, false]], "label": 1, "accuracy": 75.0, "support": 29},
+     {"rule": [[450, 0.083, true], [1108, 0.5, true], [82, 0.389, false]], "label": 1, "accuracy": 77.27, "support": 27},
+     {"rule": [[98, 0.707, true], [1366, 0.5, true], [228, 0.125, false]], "label": 0, "accuracy": 82.98, "support": 30},
+     {"rule": [[55, 0.143, true], [47, 0.068, false], [1463, 0.5, true]], "label": 0, "accuracy": 70.05, "support": 130},
+     {"rule": [[1312, 0.5, false], [1127, 0.5, false], [1404, 0.5, false]], "label": 0, "accuracy": 75.0, "support": 6},
+     {"rule": [[659, 0.071, true], [73, 0.202, false], [425, 0.188, true]], "label": 0, "accuracy": 86.6, "support": 65},
+     {"rule": [[201, 0.593, false], [612, 0.062, false], [91, 0.026, false]], "label": 1, "accuracy": 100.0, "support": 6},
+     {"rule": [[83, 0.086, true], [102, 0.377, false], [181, 0.315, true]], "label": 0, "accuracy": 87.3, "support": 32},
+     {"rule": [[20, 0.016, false], [38, 0.155, true], [9, 0.14, false]], "label": 0, "accuracy": 50.0, "support": 31},
+     {"rule": [[197, 0.09, false], [559, 0.139, true], [503, 0.012, true]], "label": 0, "accuracy": 86.67, "support": 32},
+     {"rule": [[176, 0.251, true], [754, 0.038, true], [201, 0.81, false]], "label": 0, "accuracy": 93.62, "support": 27},
+     {"rule": [[978, 0.5, false], [1026, 0.5, true], [181, 0.217, true]], "label": 0, "accuracy": 57.42, "support": 163},
+     {"rule": [[594, 0.15, true], [1222, 0.5, true], [4, 0.165, false]], "label": 0, "accuracy": 65.21, "support": 279},
+     {"rule": [[958, 0.5, true], [144, 0.163, true], [1215, 0.5, true]], "label": 0, "accuracy": 64.11, "support": 247},
+     {"rule": [[99, 0.728, false], [1449, 0.5, true], [117, 0.784, false]], "label": 1, "accuracy": 100.0, "support": 1},
+     {"rule": [[1068, 0.5, true], [426, 0.125, true], [73, 0.337, true]], "label": 0, "accuracy": 55.78, "support": 287},
+     {"rule": [[658, 0.219, false], [410, 0.125, true], [841, 0.083, true]], "label": 1, "accuracy": 71.58, "support": 185}
+   ],
+   "rules_description": [
+     "if (D741 > 0.054) and (D219 > 0.663) and (D1272 > 0.5) then class: 1 (proba: 100.0%) | based on 2 samples",
+     "if (D86 > 0.166) and (D1723 <= 0.5) and (D103 <= 0.255) then class: 0 (proba: 75.0%) | based on 5 samples",
+     "if (D5 > 0.162) and (D195 > 0.737) and (D15 <= 0.558) then class: 0 (proba: 66.67%) | based on 8 samples",
+     "if (D598 <= 0.125) and (D701 <= 0.033) and (D1341 > 0.5) then class: 1 (proba: 75.0%) | based on 29 samples",
+     "if (D451 <= 0.083) and (D1109 <= 0.5) and (D83 > 0.389) then class: 1 (proba: 77.27%) | based on 27 samples",
+     "if (D99 <= 0.707) and (D1367 <= 0.5) and (D229 > 0.125) then class: 0 (proba: 82.98%) | based on 30 samples",
+     "if (D56 <= 0.143) and (D48 > 0.068) and (D1464 <= 0.5) then class: 0 (proba: 70.05%) | based on 130 samples",
+     "if (D1313 > 0.5) and (D1128 > 0.5) and (D1405 > 0.5) then class: 0 (proba: 75.0%) | based on 6 samples",
+     "if (D660 <= 0.071) and (D74 > 0.202) and (D426 <= 0.188) then class: 0 (proba: 86.6%) | based on 65 samples",
+     "if (D202 > 0.593) and (D613 > 0.062) and (D92 > 0.026) then class: 1 (proba: 100.0%) | based on 6 samples",
+     "if (D84 <= 0.086) and (D103 > 0.377) and (D182 <= 0.315) then class: 0 (proba: 87.3%) | based on 32 samples",
+     "if (D21 > 0.016) and (D39 <= 0.155) and (D10 > 0.14) then class: 0 (proba: 50.0%) | based on 31 samples",
+     "if (D198 > 0.09) and (D560 <= 0.139) and (D504 <= 0.012) then class: 0 (proba: 86.67%) | based on 32 samples",
+     "if (D177 <= 0.251) and (D755 <= 0.038) and (D202 > 0.81) then class: 0 (proba: 93.62%) | based on 27 samples",
+     "if (D979 > 0.5) and (D1027 <= 0.5) and (D182 <= 0.217) then class: 0 (proba: 57.42%) | based on 163 samples",
+     "if (D595 <= 0.15) and (D1223 <= 0.5) and (D5 > 0.165) then class: 0 (proba: 65.21%) | based on 279 samples",
+     "if (D959 <= 0.5) and (D145 <= 0.163) and (D1216 <= 0.5) then class: 0 (proba: 64.11%) | based on 247 samples",
+     "if (D100 > 0.728) and (D1450 <= 0.5) and (D118 > 0.784) then class: 1 (proba: 100.0%) | based on 1 samples",
+     "if (D1069 <= 0.5) and (D427 <= 0.125) and (D74 <= 0.337) then class: 0 (proba: 55.78%) | based on 287 samples",
+     "if (D659 > 0.219) and (D411 <= 0.125) and (D842 <= 0.083) then class: 1 (proba: 71.58%) | based on 185 samples"
+   ],
+   "config": {"n_trees": 20, "max_depth": 3, "max_features": 20, "used_data": 0.2}
+ }
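Note on the rules.json format: each rule is a conjunction of three [feature_index, threshold, is_leq] conditions. Comparing the arrays with rules_description shows that is_leq = true means feature <= threshold, false means feature > threshold, and that indices are 0-based while the D-names are 1-based (D741 in the first description is index 740 above). A sketch of how such rules could be evaluated; rule_fires and weak_label are illustrative helpers, not code shipped with this dataset:

import json

def rule_fires(rule, x):
    # rule: list of [feature_index, threshold, is_leq] conditions, AND-ed together.
    return all((x[feat] <= thr) if is_leq else (x[feat] > thr)
               for feat, thr, is_leq in rule)

def weak_label(spec, x, abstain=-1):
    # Return the label of the first rule that fires on feature vector x, else abstain.
    for r in spec["rules"]:
        if rule_fires(r["rule"], x):
            return r["label"]
    return abstain

with open("classification/Bioresponse/rules.json") as f:
    spec = json.load(f)
# x would be an indexable feature vector (e.g. a list of floats).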
classification/Bioresponse/test.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/Bioresponse/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c597e58013825c4cf6f83cd9c0126decd5c99d791561dddcb9498d428092ae6
+ size 29881496
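Note: train.json above is a Git LFS pointer (version, oid, size), not the data itself; the ~30 MB file lives in LFS storage. One way to fetch the real file, assuming this repo is hosted as a Hugging Face dataset (the repo_id below is a placeholder; it is not stated on this page):

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="<namespace>/<dataset>",   # placeholder; fill in the actual repo id
    filename="classification/Bioresponse/train.json",
    repo_type="dataset",
)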
classification/Bioresponse/valid.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/PhishingWebsites/label.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "0": "-1",
+ "1": "1"
+ }
classification/PhishingWebsites/rules.json ADDED
@@ -0,0 +1,357 @@
+ {
+   "rules": [
+     {"rule": [[7, 0.5, false], [13, 0.5, true], [24, 0.5, false]], "label": 0, "accuracy": 98.61, "support": 48},
+     {"rule": [[7, 0.5, true], [19, 0.5, false], [12, 0.5, true]], "label": 0, "accuracy": 96.67, "support": 19},
+     {"rule": [[18, 0.5, false], [7, 0.5, false], [25, 1.5, false]], "label": 1, "accuracy": 89.19, "support": 25},
+     {"rule": [[15, 0.5, true], [5, 0.5, true], [3, 0.5, false]], "label": 0, "accuracy": 74.14, "support": 74},
+     {"rule": [[25, 1.5, true], [23, 0.5, false], [12, 0.5, true]], "label": 0, "accuracy": 50.0, "support": 95},
+     {"rule": [[8, 0.5, false], [25, 1.5, true], [18, 0.5, true]], "label": 0, "accuracy": 74.86, "support": 108},
+     {"rule": [[13, 0.5, false], [17, 0.5, false], [6, 1.5, false]], "label": 1, "accuracy": 88.97, "support": 188},
+     {"rule": [[14, 0.5, false], [17, 0.5, false], [7, 0.5, true]], "label": 0, "accuracy": 91.69, "support": 199},
+     {"rule": [[6, 1.5, true], [14, 0.5, false], [25, 1.5, false]], "label": 1, "accuracy": 57.03, "support": 163},
+     {"rule": [[5, 0.5, true], [7, 0.5, false], [6, 1.5, true]], "label": 1, "accuracy": 56.23, "support": 226},
+     {"rule": [[13, 0.5, false], [17, 0.5, false], [23, 0.5, true]], "label": 1, "accuracy": 65.52, "support": 182},
+     {"rule": [[14, 0.5, false], [8, 0.5, true], [13, 0.5, false]], "label": 1, "accuracy": 72.7, "support": 239},
+     {"rule": [[13, 0.5, true], [28, 0.5, true], [7, 0.5, true]], "label": 0, "accuracy": 100.0, "support": 65},
+     {"rule": [[5, 0.5, true], [14, 0.5, true], [4, 0.5, true]], "label": 0, "accuracy": 77.19, "support": 34},
+     {"rule": [[5, 0.5, true], [28, 1.5, true], [25, 1.5, true]], "label": 0, "accuracy": 75.35, "support": 327}
+   ],
+   "rules_description": [
+     "if (SSLfinal_State > 0.5) and (URL_of_Anchor <= 0.5) and (DNSRecord > 0.5) then class: -1 (proba: 98.61%) | based on 48 samples",
+     "if (SSLfinal_State <= 0.5) and (on_mouseover > 0.5) and (Request_URL <= 0.5) then class: -1 (proba: 96.67%) | based on 19 samples",
+     "if (Redirect > 0.5) and (SSLfinal_State > 0.5) and (web_traffic > 1.5) then class: 1 (proba: 89.19%) | based on 25 samples",
+     "if (SFH <= 0.5) and (Prefix_Suffix <= 0.5) and (having_At_Symbol > 0.5) then class: -1 (proba: 74.14%) | based on 74 samples",
+     "if (web_traffic <= 1.5) and (age_of_domain > 0.5) and (Request_URL <= 0.5) then class: -1 (proba: 50.0%) | based on 95 samples",
+     "if (Domain_registeration_length > 0.5) and (web_traffic <= 1.5) and (Redirect <= 0.5) then class: -1 (proba: 74.86%) | based on 108 samples",
+     "if (URL_of_Anchor > 0.5) and (Abnormal_URL > 0.5) and (having_Sub_Domain > 1.5) then class: 1 (proba: 88.97%) | based on 188 samples",
+     "if (Links_in_tags > 0.5) and (Abnormal_URL > 0.5) and (SSLfinal_State <= 0.5) then class: -1 (proba: 91.69%) | based on 199 samples",
+     "if (having_Sub_Domain <= 1.5) and (Links_in_tags > 0.5) and (web_traffic > 1.5) then class: 1 (proba: 57.03%) | based on 163 samples",
+     "if (Prefix_Suffix <= 0.5) and (SSLfinal_State > 0.5) and (having_Sub_Domain <= 1.5) then class: 1 (proba: 56.23%) | based on 226 samples",
+     "if (URL_of_Anchor > 0.5) and (Abnormal_URL > 0.5) and (age_of_domain <= 0.5) then class: 1 (proba: 65.52%) | based on 182 samples",
+     "if (Links_in_tags > 0.5) and (Domain_registeration_length <= 0.5) and (URL_of_Anchor > 0.5) then class: 1 (proba: 72.7%) | based on 239 samples",
+     "if (URL_of_Anchor <= 0.5) and (Links_pointing_to_page <= 0.5) and (SSLfinal_State <= 0.5) then class: -1 (proba: 100.0%) | based on 65 samples",
+     "if (Prefix_Suffix <= 0.5) and (Links_in_tags <= 0.5) and (double_slash_redirecting <= 0.5) then class: -1 (proba: 77.19%) | based on 34 samples",
+     "if (Prefix_Suffix <= 0.5) and (Links_pointing_to_page <= 1.5) and (web_traffic <= 1.5) then class: -1 (proba: 75.35%) | based on 327 samples"
+   ],
+   "config": {"n_trees": 15, "max_depth": 3, "max_features": 4, "used_data": 0.1}
+ }
classification/PhishingWebsites/test.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/PhishingWebsites/train.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/PhishingWebsites/valid.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/agnews/label.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "0": "World",
+ "1": "Sports",
+ "2": "Business",
+ "3": "Sci/Tech"
+ }
classification/agnews/readme.txt ADDED
@@ -0,0 +1,87 @@
+ AGNews topic classification dataset
+ 
+ https://github.com/weakrules/Denoise-multi-weak-sources/blob/master/rules-noisy-labels/Agnews/angews_rule.py
+ 
+ # Labels
+ "0": "World",
+ "1": "Sports",
+ "2": "Business",
+ "3": "Sci/Tech"
+ 
+ # Labeling functions (all 9 LFs are keyword LFs)
+ 
+ ## LF1 - 0: world
+ r1 = ["atomic", "captives", "baghdad", "israeli", "iraqis", "iranian", "afghanistan", "wounding", "terrorism", "soldiers",
+       "palestinians", "palestinian", "policemen", "iraqi", "terrorist", 'north korea', 'korea',
+       'israel', 'u.n.', 'egypt', 'iran', 'iraq', 'nato', 'armed', 'peace']
+ 
+ ## LF2 - 0: world
+ r2 = [' war ', 'prime minister', 'president', 'commander', 'minister', 'annan', "military", "militant", "kill", 'operator']
+ 
+ ## LF3 - 1: sports
+ r3 = ["goals", "bledsoe", "coaches", "touchdowns", "kansas", "rankings", "no.",
+       "champ", "cricketers", "hockey", "champions", "quarterback", 'club', 'team', 'baseball', 'basketball', 'soccer', 'football', 'boxing', 'swimming',
+       'world cup', 'nba', "olympics", "final", "finals", 'fifa', 'racist', 'racism']
+ 
+ ## LF4 - 1: sports
+ r4 = ['athlete', 'striker', 'defender', 'goalkeeper', 'midfielder', 'shooting guard', 'power forward', 'point guard', 'pitcher', 'catcher', 'first base', 'second base', 'third base', 'shortstop', 'fielder']
+ 
+ ## LF5 - 1: sports
+ r5 = ['lakers', 'chelsea', 'piston', 'cavaliers', 'rockets', 'clippers', 'ronaldo',
+       'celtics', 'hawks', '76ers', 'raptors', 'pacers', 'suns', 'warriors', 'blazers', 'knicks', 'timberwolves', 'hornets', 'wizards', 'nuggets', 'mavericks', 'grizzlies', 'spurs',
+       'cowboys', 'redskins', 'falcons', 'panthers', 'eagles', 'saints', 'buccaneers', '49ers', 'cardinals', 'texans', 'seahawks', 'vikings', 'patriots', 'colts', 'jaguars', 'raiders', 'chargers', 'bengals', 'steelers', 'browns',
+       'braves', 'marlins', 'mets', 'phillies', 'cubs', 'brewers', 'cardinals', 'diamondbacks', 'rockies', 'dodgers', 'padres', 'orioles', 'sox', 'yankees', 'jays', 'sox', 'indians', 'tigers', 'royals', 'twins', 'astros', 'angels', 'athletics', 'mariners', 'rangers',
+       'arsenal', 'burnley', 'newcastle', 'leicester', 'manchester united', 'everton', 'southampton', 'hotspur', 'tottenham', 'fulham', 'watford', 'sheffield', 'crystal palace', 'derby', 'charlton', 'aston villa', 'blackburn', 'west ham', 'birmingham city', 'middlesbrough',
+       'real madrid', 'barcelona', 'villarreal', 'valencia', 'betis', 'espanyol', 'levante', 'sevilla', 'juventus', 'inter milan', 'ac milan', 'as roma', 'benfica', 'porto', 'getafe', 'bayern', 'schalke', 'bremen', 'lyon', 'paris saint', 'monaco', 'dynamo']
+ 
+ ## LF6 - 3: tech
+ r6 = ["technology", "engineering", "science", "research", "cpu", "windows", "unix", "system", 'computing', 'compute']  # , "wireless", "chip", "pc"
+ 
+ ## LF7 - 3: tech
+ r7 = ["google", "apple", "microsoft", "nasa", "yahoo", "intel", "dell",
+       'huawei', "ibm", "siemens", "nokia", "samsung", 'panasonic',
+       't-mobile', 'nvidia', 'adobe', 'salesforce', 'linkedin', 'silicon', 'wiki']
+ 
+ ## LF8 - 2: business
+ r8 = ["stock", "account", "financ", "goods", "retail", 'economy', 'chairman', 'bank', 'deposit', 'economic', 'dow jones', 'index', '$', 'percent', 'interest rate', 'growth', 'profit', 'tax', 'loan', 'credit', 'invest']
+ 
+ ## LF9 - 2: business
+ r9 = ["delta", "cola", "toyota", "costco", "gucci", 'citibank', 'airlines']
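All nine LFs share one keyword pattern: vote a fixed class if any keyword occurs in the text, otherwise abstain. A minimal sketch of that pattern for r1 (the ABSTAIN = -1 convention and the plain-function form are assumptions; the linked angews_rule.py may differ):

ABSTAIN = -1
WORLD = 0
r1 = ["atomic", "baghdad", "israeli", "iraq"]   # truncated; full list above

def lf1_world(text):
    # Vote class 0 (world) if any r1 keyword occurs in the lowercased text.
    t = text.lower()
    return WORLD if any(k in t for k in r1) else ABSTAIN

print(lf1_world("Explosion reported in Baghdad"))   # -> 0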
classification/agnews/test.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/agnews/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6aa806387c07ad0ee53d3ecebcc47dc0c22c2961cd34e248656d32441ba079a3
+ size 28043077
classification/agnews/valid.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/bank-marketing/label.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "0": "1",
+ "1": "2"
+ }
classification/bank-marketing/rules.json ADDED
@@ -0,0 +1,472 @@
+ {
+   "rules": [
+     {"rule": [[15, 2.5, false], [5, 21726.0, false], [6, 0.5, true]], "label": 0, "accuracy": 100.0, "support": 1},
+     {"rule": [[15, 2.5, false], [13, 313.0, false], [11, 50.5, true]], "label": 0, "accuracy": 100.0, "support": 2},
+     {"rule": [[11, 215.5, true], [14, 0.5, false], [15, 2.5, false]], "label": 1, "accuracy": 84.98, "support": 171},
+     {"rule": [[8, 0.5, true], [10, 2.5, false], [10, 4.5, false]], "label": 0, "accuracy": 58.14, "support": 26},
+     {"rule": [[14, 0.5, false], [11, 158.5, false], [15, 2.5, false]], "label": 1, "accuracy": 95.21, "support": 504},
+     {"rule": [[0, 60.5, true], [15, 2.5, false], [11, 119.5, false]], "label": 1, "accuracy": 95.07, "support": 463},
+     {"rule": [[5, 103.5, true], [14, 0.5, false], [11, 199.5, true]], "label": 0, "accuracy": 71.9, "support": 83},
+     {"rule": [[13, 9.0, false], [11, 159.5, true], [6, 0.5, true]], "label": 0, "accuracy": 83.72, "support": 166},
+     {"rule": [[11, 208.5, false], [14, 0.5, false], [15, 2.5, true]], "label": 1, "accuracy": 74.56, "support": 539},
+     {"rule": [[8, 0.5, true], [3, 1.5, false], [10, 3.0, false]], "label": 1, "accuracy": 77.78, "support": 7},
+     {"rule": [[0, 60.5, false], [8, 0.5, false], [8, 1.5, false]], "label": 1, "accuracy": 80.99, "support": 96},
+     {"rule": [[15, 2.5, true], [11, 362.5, false], [0, 60.5, false]], "label": 1, "accuracy": 91.23, "support": 99},
+     {"rule": [[5, 930.5, false], [15, 2.5, true], [8, 0.5, false]], "label": 1, "accuracy": 59.69, "support": 1552},
+     {"rule": [[7, 0.5, false], [11, 526.5, false], [2, 0.5, false]], "label": 1, "accuracy": 88.89, "support": 86},
+     {"rule": [[14, 0.5, true], [11, 360.5, true], [0, 60.5, false]], "label": 1, "accuracy": 76.88, "support": 111},
+     {"rule": [[13, 4.0, true], [5, 236.5, false], [11, 393.5, false]], "label": 1, "accuracy": 80.3, "support": 1018},
+     {"rule": [[14, 0.5, true], [5, 77.5, false], [11, 322.5, false]], "label": 1, "accuracy": 76.77, "support": 1400},
+     {"rule": [[15, 2.5, true], [8, 0.5, true], [5, 414.5, true]], "label": 0, "accuracy": 80.94, "support": 629},
+     {"rule": [[15, 2.5, true], [11, 365.5, true], [10, 7.5, true]], "label": 0, "accuracy": 81.56, "support": 2616},
+     {"rule": [[8, 0.5, true], [11, 472.5, true], [15, 0.5, true]], "label": 0, "accuracy": 95.6, "support": 853}
+   ],
+   "rules_description": [
+     "if (V16 > 2.5) and (V6 > 21726.0) and (V7 <= 0.5) then class: 1 (proba: 100.0%) | based on 1 samples",
+     "if (V16 > 2.5) and (V14 > 313.0) and (V12 <= 50.5) then class: 1 (proba: 100.0%) | based on 2 samples",
+     "if (V12 <= 215.5) and (V15 > 0.5) and (V16 > 2.5) then class: 2 (proba: 84.98%) | based on 171 samples",
+     "if (V9 <= 0.5) and (V11 > 2.5) and (V11 > 4.5) then class: 1 (proba: 58.14%) | based on 26 samples",
+     "if (V15 > 0.5) and (V12 > 158.5) and (V16 > 2.5) then class: 2 (proba: 95.21%) | based on 504 samples",
+     "if (V1 <= 60.5) and (V16 > 2.5) and (V12 > 119.5) then class: 2 (proba: 95.07%) | based on 463 samples",
+     "if (V6 <= 103.5) and (V15 > 0.5) and (V12 <= 199.5) then class: 1 (proba: 71.9%) | based on 83 samples",
+     "if (V14 > 9.0) and (V12 <= 159.5) and (V7 <= 0.5) then class: 1 (proba: 83.72%) | based on 166 samples",
+     "if (V12 > 208.5) and (V15 > 0.5) and (V16 <= 2.5) then class: 2 (proba: 74.56%) | based on 539 samples",
+     "if (V9 <= 0.5) and (V4 > 1.5) and (V11 > 3.0) then class: 2 (proba: 77.78%) | based on 7 samples",
+     "if (V1 > 60.5) and (V9 > 0.5) and (V9 > 1.5) then class: 2 (proba: 80.99%) | based on 96 samples",
+     "if (V16 <= 2.5) and (V12 > 362.5) and (V1 > 60.5) then class: 2 (proba: 91.23%) | based on 99 samples",
+     "if (V6 > 930.5) and (V16 <= 2.5) and (V9 > 0.5) then class: 2 (proba: 59.69%) | based on 1,552 samples",
+     "if (V8 > 0.5) and (V12 > 526.5) and (V3 > 0.5) then class: 2 (proba: 88.89%) | based on 86 samples",
+     "if (V15 <= 0.5) and (V12 <= 360.5) and (V1 > 60.5) then class: 2 (proba: 76.88%) | based on 111 samples",
+     "if (V14 <= 4.0) and (V6 > 236.5) and (V12 > 393.5) then class: 2 (proba: 80.3%) | based on 1,018 samples",
+     "if (V15 <= 0.5) and (V6 > 77.5) and (V12 > 322.5) then class: 2 (proba: 76.77%) | based on 1,400 samples",
+     "if (V16 <= 2.5) and (V9 <= 0.5) and (V6 <= 414.5) then class: 1 (proba: 80.94%) | based on 629 samples",
+     "if (V16 <= 2.5) and (V12 <= 365.5) and (V11 <= 7.5) then class: 1 (proba: 81.56%) | based on 2,616 samples",
+     "if (V9 <= 0.5) and (V12 <= 472.5) and (V16 <= 0.5) then class: 1 (proba: 95.6%) | based on 853 samples"
+   ],
+   "config": {"n_trees": 20, "max_depth": 3, "max_features": 4, "used_data": 0.2}
+ }
classification/bank-marketing/test.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/bank-marketing/train.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/bank-marketing/valid.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/basketball/label.json ADDED
@@ -0,0 +1 @@
+ {"0": 0, "1": 1}
classification/basketball/readme.txt ADDED
@@ -0,0 +1,18 @@
+ Basketball - A Video Dataset for Activity Recognition in the Basketball Game
+ 
+ # Source:
+ 
+ D. Y. Fu, M. F. Chen, F. Sala, S. M. Hooper, K. Fatahalian, and C. Ré. Fast and three-rious: Speeding up weak supervision with triplet methods. In ICML, pages 3280–3291, 2020.
+ 
+ # Labels:
+ 
+ 0: negative (the game is not basketball)
+ 1: positive (the game is basketball)
+ 
+ # Labeling functions: 4
+ 
+ LFs: these sources rely on an off-the-shelf object detector to detect balls or people, and use heuristics based on the average pixel of the detected ball or the distance between the ball and person to determine whether the sport being played is basketball or not.
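A rough sketch of the distance heuristic described above, assuming an upstream detector has already produced ball and person positions; the position format and threshold are illustrative, nothing here comes from the source code:

ABSTAIN, NEG, POS = -1, 0, 1

def lf_ball_near_person(ball_center, person_center, max_dist=50.0):
    # Abstain when the detector found no ball or no person in the frame.
    if ball_center is None or person_center is None:
        return ABSTAIN
    dx = ball_center[0] - person_center[0]
    dy = ball_center[1] - person_center[1]
    # In basketball footage the ball tends to stay close to a player.
    return POS if (dx * dx + dy * dy) ** 0.5 < max_dist else NEG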
classification/basketball/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef43d2a39e7d08de16b0c95cbeeef9f10e59600c3e7e1d2a2f6c36be974bc685
+ size 52411265
classification/basketball/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af2bf8f5d7fc78a50edbf8ad16fc18aa86ba5f8d57467cffe5c43b90f4920683
+ size 771265380
classification/basketball/valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84cd4595908e00030bd6132269c6f10e5d1ee9f118c48b4e7150f91bfb18bc36
+ size 45661535
classification/cdr/label.json ADDED
@@ -0,0 +1 @@
+ {"0": "Negative", "1": "Positive"}
classification/cdr/readme.txt ADDED
@@ -0,0 +1,286 @@
+ CDR - Extracting Chemical-Disease Relations from Academic Literature
+ 
+ # Source:
+ https://github.com/snorkel-team/snorkel-extraction/tree/master/tutorials/cdr
+ 
+ # Labels:
+ 
+ 0: Negative, the drug does NOT induce the disease
+ 1: Positive, the drug induces the disease
+ 
+ # 33 labeling functions (use Ctrl+F to find each implementation below)
+ LFs = [
+     LF_c_cause_d,
+     LF_c_d,
+     LF_c_induced_d,
+     LF_c_treat_d,
+     LF_c_treat_d_wide,
+     LF_closer_chem,
+     LF_closer_dis,
+     LF_ctd_marker_c_d,
+     LF_ctd_marker_induce,
+     LF_ctd_therapy_treat,
+     LF_ctd_unspecified_treat,
+     LF_ctd_unspecified_induce,
+     LF_d_following_c,
+     LF_d_induced_by_c,
+     LF_d_induced_by_c_tight,
+     LF_d_treat_c,
+     LF_develop_d_following_c,
+     LF_far_c_d,
+     LF_far_d_c,
+     LF_improve_before_disease,
+     LF_in_ctd_therapy,
+     LF_in_ctd_marker,
+     LF_in_patient_with,
+     LF_induce,
+     LF_induce_name,
+     LF_induced_other,
+     LF_level,
+     LF_measure,
+     LF_neg_d,
+     LF_risk_d,
+     LF_treat_d,
+     LF_uncertain,
+     LF_weak_assertions,
+ ]
+ 
+ ##### Distant supervision approaches
+ # We'll use the [Comparative Toxicogenomics Database](http://ctdbase.org/) (CTD) for distant supervision.
+ # The CTD lists chemical-condition entity pairs under three categories: therapy, marker, and unspecified.
+ # Therapy means the chemical treats the condition, marker means the chemical is typically present with the condition,
+ # and unspecified is...unspecified. We can write LFs based on these categories.
+ 
+ ### LF_in_ctd_unspecified
+ def LF_in_ctd_unspecified(c):
+     return -1 * cand_in_ctd_unspecified(c)
+ 
+ ### LF_in_ctd_therapy
+ def LF_in_ctd_therapy(c):
+     return -1 * cand_in_ctd_therapy(c)
+ 
+ ### LF_in_ctd_marker
+ def LF_in_ctd_marker(c):
+     return cand_in_ctd_marker(c)
+ 
+ ##### Text pattern approaches
+ # Now we'll use some LF helpers to create LFs based on indicative text patterns.
+ # We came up with these rules by using the viewer to examine training candidates and noting frequent patterns.
+ 
+ import re
+ from snorkel.lf_helpers import (
+     get_tagged_text,
+     rule_regex_search_tagged_text,
+     rule_regex_search_btw_AB,
+     rule_regex_search_btw_BA,
+     rule_regex_search_before_A,
+     rule_regex_search_before_B,
+ )
+ 
+ # List to parenthetical
+ def ltp(x):
+     return '(' + '|'.join(x) + ')'
+ 
+ ### LF_induce
+ def LF_induce(c):
+     return 1 if re.search(r'{{A}}.{0,20}induc.{0,20}{{B}}', get_tagged_text(c), flags=re.I) else 0
+ 
+ ### LF_d_induced_by_c
+ causal_past = ['induced', 'caused', 'due']
+ def LF_d_induced_by_c(c):
+     return rule_regex_search_btw_BA(c, '.{0,50}' + ltp(causal_past) + '.{0,9}(by|to).{0,50}', 1)
+ 
+ ### LF_d_induced_by_c_tight
+ def LF_d_induced_by_c_tight(c):
+     return rule_regex_search_btw_BA(c, '.{0,50}' + ltp(causal_past) + ' (by|to) ', 1)
+ 
+ ### LF_induce_name
+ def LF_induce_name(c):
+     return 1 if 'induc' in c.chemical.get_span().lower() else 0
+ 
+ ### LF_c_cause_d
+ causal = ['cause[sd]?', 'induce[sd]?', 'associated with']
+ def LF_c_cause_d(c):
+     return 1 if (
+         re.search(r'{{A}}.{0,50} ' + ltp(causal) + '.{0,50}{{B}}', get_tagged_text(c), re.I)
+         and not re.search('{{A}}.{0,50}(not|no).{0,20}' + ltp(causal) + '.{0,50}{{B}}', get_tagged_text(c), re.I)
+     ) else 0
+ 
+ ### LF_d_treat_c
+ treat = ['treat', 'effective', 'prevent', 'resistant', 'slow', 'promise', 'therap']
+ def LF_d_treat_c(c):
+     return rule_regex_search_btw_BA(c, '.{0,50}' + ltp(treat) + '.{0,50}', -1)
+ 
+ ### LF_c_treat_d
+ def LF_c_treat_d(c):
+     return rule_regex_search_btw_AB(c, '.{0,50}' + ltp(treat) + '.{0,50}', -1)
+ 
+ ### LF_treat_d
+ def LF_treat_d(c):
+     return rule_regex_search_before_B(c, ltp(treat) + '.{0,50}', -1)
+ 
+ ### LF_c_treat_d_wide
+ def LF_c_treat_d_wide(c):
+     return rule_regex_search_btw_AB(c, '.{0,200}' + ltp(treat) + '.{0,200}', -1)
+ 
+ ### LF_c_d
+ def LF_c_d(c):
+     return 1 if ('{{A}} {{B}}' in get_tagged_text(c)) else 0
+ 
+ ### LF_c_induced_d
+ def LF_c_induced_d(c):
+     return 1 if (
+         ('{{A}} {{B}}' in get_tagged_text(c)) and
+         (('-induc' in c[0].get_span().lower()) or ('-assoc' in c[0].get_span().lower()))
+     ) else 0
+ 
+ ### LF_improve_before_disease
+ def LF_improve_before_disease(c):
+     return rule_regex_search_before_B(c, 'improv.*', -1)
+ 
+ ### LF_in_patient_with
+ pat_terms = ['in a patient with ', 'in patients with']
+ def LF_in_patient_with(c):
+     return -1 if re.search(ltp(pat_terms) + '{{B}}', get_tagged_text(c), flags=re.I) else 0
+ 
+ ### LF_uncertain
+ uncertain = ['combin', 'possible', 'unlikely']
+ def LF_uncertain(c):
+     return rule_regex_search_before_A(c, ltp(uncertain) + '.*', -1)
+ 
+ ### LF_induced_other
+ def LF_induced_other(c):
+     return rule_regex_search_tagged_text(c, '{{A}}.{20,1000}-induced {{B}}', -1)
+ 
+ ### LF_far_c_d
+ def LF_far_c_d(c):
+     return rule_regex_search_btw_AB(c, '.{100,5000}', -1)
+ 
+ ### LF_far_d_c
+ def LF_far_d_c(c):
+     return rule_regex_search_btw_BA(c, '.{100,5000}', -1)
+ 
+ ### LF_risk_d
+ def LF_risk_d(c):
+     return rule_regex_search_before_B(c, 'risk of ', 1)
+ 
+ ### LF_develop_d_following_c
+ def LF_develop_d_following_c(c):
+     return 1 if re.search(r'develop.{0,25}{{B}}.{0,25}following.{0,25}{{A}}', get_tagged_text(c), flags=re.I) else 0
+ 
+ ### LF_d_following_c
+ procedure, following = ['inject', 'administrat'], ['following']
+ def LF_d_following_c(c):
+     return 1 if re.search('{{B}}.{0,50}' + ltp(following) + '.{0,20}{{A}}.{0,50}' + ltp(procedure), get_tagged_text(c), flags=re.I) else 0
+ 
+ ### LF_measure
+ def LF_measure(c):
+     return -1 if re.search('measur.{0,75}{{A}}', get_tagged_text(c), flags=re.I) else 0
+ 
+ ### LF_level
+ def LF_level(c):
+     return -1 if re.search('{{A}}.{0,25} level', get_tagged_text(c), flags=re.I) else 0
+ 
+ ### LF_neg_d
+ def LF_neg_d(c):
+     return -1 if re.search('(none|not|no) .{0,25}{{B}}', get_tagged_text(c), flags=re.I) else 0
+ 
+ ### LF_weak_assertions
+ WEAK_PHRASES = ['none', 'although', 'was carried out', 'was conducted',
+                 'seems', 'suggests', 'risk', 'implicated',
+                 'the aim', 'to (investigate|assess|study)']
+ WEAK_RGX = r'|'.join(WEAK_PHRASES)
+ def LF_weak_assertions(c):
+     return -1 if re.search(WEAK_RGX, get_tagged_text(c), flags=re.I) else 0
+ 
+ ##### Composite LFs
+ # The following LFs take some of the strongest distant supervision and text pattern LFs,
+ # and combine them to form more specific LFs. These LFs introduce some obvious
+ # dependencies within the LF set, which we will model later.
+ 
+ ### LF_ctd_marker_c_d
+ def LF_ctd_marker_c_d(c):
+     return LF_c_d(c) * cand_in_ctd_marker(c)
+ 
+ ### LF_ctd_marker_induce
+ def LF_ctd_marker_induce(c):
+     return (LF_c_induced_d(c) or LF_d_induced_by_c_tight(c)) * cand_in_ctd_marker(c)
+ 
+ ### LF_ctd_therapy_treat
+ def LF_ctd_therapy_treat(c):
+     return LF_c_treat_d_wide(c) * cand_in_ctd_therapy(c)
+ 
+ ### LF_ctd_unspecified_treat
+ def LF_ctd_unspecified_treat(c):
+     return LF_c_treat_d_wide(c) * cand_in_ctd_unspecified(c)
+ 
+ ### LF_ctd_unspecified_induce
+ def LF_ctd_unspecified_induce(c):
+     return (LF_c_induced_d(c) or LF_d_induced_by_c_tight(c)) * cand_in_ctd_unspecified(c)
+ 
+ ##### Rules based on context hierarchy
+ # These last two rules will make use of the context hierarchy.
+ # The first checks if there is a chemical mention much closer to the candidate's disease mention
+ # than the candidate's chemical mention. The second does the analog for diseases.
+ 
+ ### LF_closer_chem
+ def LF_closer_chem(c):
+     # Get distance between chemical and disease
+     chem_start, chem_end = c.chemical.get_word_start(), c.chemical.get_word_end()
+     dis_start, dis_end = c.disease.get_word_start(), c.disease.get_word_end()
+     if dis_start < chem_start:
+         dist = chem_start - dis_end
+     else:
+         dist = dis_start - chem_end
+     # Try to find a closer chemical than @dist/2 away in either direction
+     sent = c.get_parent()
+     for i in range(dis_end, min(len(sent.words), dis_end + dist // 2)):
+         et, cid = sent.entity_types[i], sent.entity_cids[i]
+         if et == 'Chemical' and cid != sent.entity_cids[chem_start]:
+             return -1
+     for i in range(max(0, dis_start - dist // 2), dis_start):
+         et, cid = sent.entity_types[i], sent.entity_cids[i]
+         if et == 'Chemical' and cid != sent.entity_cids[chem_start]:
+             return -1
+     return 0
+ 
+ ### LF_closer_dis
+ def LF_closer_dis(c):
+     # Get distance between chemical and disease
+     chem_start, chem_end = c.chemical.get_word_start(), c.chemical.get_word_end()
+     dis_start, dis_end = c.disease.get_word_start(), c.disease.get_word_end()
+     if dis_start < chem_start:
+         dist = chem_start - dis_end
+     else:
+         dist = dis_start - chem_end
+     # Try to find a closer disease than @dist/8 away in either direction
+     sent = c.get_parent()
+     for i in range(chem_end, min(len(sent.words), chem_end + dist // 8)):
+         et, cid = sent.entity_types[i], sent.entity_cids[i]
+         if et == 'Disease' and cid != sent.entity_cids[dis_start]:
+             return -1
+     for i in range(max(0, chem_start - dist // 8), chem_start):
+         et, cid = sent.entity_types[i], sent.entity_cids[i]
+         if et == 'Disease' and cid != sent.entity_cids[dis_start]:
+             return -1
+     return 0
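For readers unfamiliar with the snorkel-extraction helpers used above: get_tagged_text(c) returns the candidate's sentence with the chemical span replaced by {{A}} and the disease span by {{B}}, and the regexes match against that tagged string; ltp just joins alternatives into a regex group. A small self-contained check of the LF_c_cause_d pattern (the tagged sentence is invented):

import re

def ltp(x):
    # List to parenthetical, as defined in the readme above.
    return '(' + '|'.join(x) + ')'

causal = ['cause[sd]?', 'induce[sd]?', 'associated with']
tagged = "{{A}} treatment induced severe {{B}} in two patients"   # invented example
pattern = r'{{A}}.{0,50} ' + ltp(causal) + '.{0,50}{{B}}'
print(bool(re.search(pattern, tagged, re.I)))   # True -> LF_c_cause_d votes 1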
classification/cdr/test.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/cdr/train.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/cdr/valid.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/census/label.json ADDED
@@ -0,0 +1 @@
+ {"0": "negative", "1": "positive"}
classification/census/labeled_ids.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "labeled_ids": ["10000", "10001", "10002", "10003", "10004", "10005", "10006", "10007", "10008", "10009", "10010", "10011", "10012", "10013", "10014", "10015", "10016", "10017", "10018", "10019", "10020", "10021", "10022", "10023", "10024", "10025", "10026", "10027", "10028", "10029", "10030", "10031", "10032", "10033", "10034", "10035", "10036", "10037", "10038", "10039", "10040", "10041", "10042", "10043", "10044", "10045", "10046", "10047", "10048", "10049", "10050", "10051", "10052", "10053", "10054", "10055", "10056", "10057", "10058", "10059", "10060", "10061", "10062", "10063", "10064", "10065", "10066", "10067", "10068", "10069", "10070", "10071", "10072", "10073", "10074", "10075", "10076", "10077", "10078", "10079", "10080", "10081", "10082"],
+ "lf_exemplar_ids": ["10000", "10001", "10002", "10003", "10004", "10005", "10006", "10007", "10008", "10009", "10010", "10011", "10012", "10013", "10014", "10015", "10016", "10017", "10018", "10019", "10020", "10021", "10022", "10023", "10024", "10025", "10026", "10027", "10028", "10029", "10030", "10031", "10032", "10033", "10034", "10035", "10036", "10037", "10038", "10039", "10040", "10041", "10042", "10043", "10044", "10045", "10046", "10047", "10048", "10049", "10050", "10051", "10052", "10053", "10054", "10055", "10056", "10057", "10058", "10059", "10060", "10061", "10062", "10063", "10064", "10065", "10066", "10067", "10068", "10069", "10070", "10071", "10072", "10073", "10074", "10075", "10076", "10077", "10078", "10079", "10080", "10081", "10082"]
+ }
classification/census/readme.txt ADDED
@@ -0,0 +1,20 @@
+ Census income classification
+ 
+ http://archive.ics.uci.edu/ml/datasets/Census+Income
+ 
+ Label:
+ Negative: person earns less than or equal to 50K
+ Positive: person earns more than 50K
+ 
+ Rules:
+ 
+ For this dataset the rules were created synthetically, as follows:
+ we hold out a disjoint set of 16k random points from the training dataset as a proxy for human knowledge
+ and extract a PART decision list from it as our set of rules. We retain only those rules which fire on L.
+ 
+ https://mathematicaforprediction.wordpress.com/2014/03/30/classification-and-association-rules-for-census-income-data/
classification/census/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad12ab08dd411302b2bb62e260882fba1a3fba9950755f4de39176ed25b6fa17
+ size 15846146
classification/census/train.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/census/valid.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/chemprot/label.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "0": "Part of",
+ "1": "Regulator",
+ "2": "Upregulator",
+ "3": "Downregulator",
+ "4": "Agonist",
+ "5": "Antagonist",
+ "6": "Modulator",
+ "7": "Cofactor",
+ "8": "Substrate/Product",
+ "9": "NOT"
+ }
classification/chemprot/readme.txt ADDED
@@ -0,0 +1,213 @@
+ Chemprot Relation Classification Dataset
+ https://github.com/yueyu1030/COSINE/tree/main/data/chemprot
+ 
+ # Labels
+ 
+ "0": "Part of",
+ "1": "Regulator",
+ "2": "Upregulator",
+ "3": "Downregulator",
+ "4": "Agonist",
+ "5": "Antagonist",
+ "6": "Modulator",
+ "7": "Cofactor",
+ "8": "Substrate/Product",
+ "9": "NOT"
+ 
+ # Labeling Functions
+ 
+ ## Part of
+ @labeling_function()
+ def lf_amino_acid(x):
+     return 1 if 'amino acid' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_replace(x):
+     return 1 if 'replace' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_mutant(x):
+     return 1 if 'mutant' in x.sentence.lower() or 'mutat' in x.sentence.lower() else ABSTAIN
+ 
+ ## Regulator
+ @labeling_function()
+ def lf_bind(x):
+     return 2 if 'bind' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_interact(x):
+     return 2 if 'interact' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_affinity(x):
+     return 2 if 'affinit' in x.sentence.lower() else ABSTAIN
+ 
+ ## Upregulator / Activator
+ @labeling_function()
+ def lf_activate(x):
+     return 3 if 'activat' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_increase(x):
+     return 3 if 'increas' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_induce(x):
+     return 3 if 'induc' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_stimulate(x):
+     return 3 if 'stimulat' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_upregulate(x):
+     return 3 if 'upregulat' in x.sentence.lower() else ABSTAIN
+ 
+ ## Downregulator
+ @labeling_function()
+ def lf_downregulate(x):
+     return 4 if 'downregulat' in x.sentence.lower() or 'down-regulat' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_reduce(x):
+     return 4 if 'reduc' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_inhibit(x):
+     return 4 if 'inhibit' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_decrease(x):
+     return 4 if 'decreas' in x.sentence.lower() else ABSTAIN
+ 
+ ## Agonist
+ @labeling_function()
+ def lf_agonist(x):
+     return 5 if ' agoni' in x.sentence.lower() or "\tagoni" in x.sentence.lower() else ABSTAIN
+ 
+ ## Antagonist
+ @labeling_function()
+ def lf_antagonist(x):
+     return 6 if 'antagon' in x.sentence.lower() else ABSTAIN
+ 
+ ## Modulator
+ @labeling_function()
+ def lf_modulate(x):
+     return 7 if 'modulat' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_allosteric(x):
+     return 7 if 'allosteric' in x.sentence.lower() else ABSTAIN
+ 
+ ## Cofactor
+ @labeling_function()
+ def lf_cofactor(x):
+     return 8 if 'cofactor' in x.sentence.lower() else ABSTAIN
+ 
+ ## Substrate/Product
+ @labeling_function()
+ def lf_substrate(x):
+     return 9 if 'substrate' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_transport(x):
+     return 9 if 'transport' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_catalyze(x):
+     return 9 if 'catalyz' in x.sentence.lower() or 'catalys' in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_product(x):
+     return 9 if "produc" in x.sentence.lower() else ABSTAIN
+ 
+ @labeling_function()
+ def lf_convert(x):
+     return 9 if "conver" in x.sentence.lower() else ABSTAIN
+ 
+ ## NOT
+ @labeling_function()
+ def lf_not(x):
+     return 10 if 'not' in x.sentence.lower() else ABSTAIN
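Note that the LF return values are offset by one from the ids in label.json (lf_amino_acid returns 1, while "Part of" is id 0), presumably so one value stays free for abstaining. A sketch of applying such LFs with snorkel's PandasLFApplier; ABSTAIN = -1 and the DataFrame layout are assumptions matching the x.sentence accesses above:

import pandas as pd
from snorkel.labeling import labeling_function, PandasLFApplier

ABSTAIN = -1

@labeling_function()
def lf_amino_acid(x):
    return 1 if 'amino acid' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_antagonist(x):
    return 6 if 'antagon' in x.sentence.lower() else ABSTAIN

df = pd.DataFrame({"sentence": ["This amino acid is part of the receptor."]})
L = PandasLFApplier(lfs=[lf_amino_acid, lf_antagonist]).apply(df)
print(L)   # [[ 1 -1]]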
classification/chemprot/test.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/chemprot/train.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/chemprot/valid.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/commercial/label.json ADDED
@@ -0,0 +1 @@
+ {"0": 0, "1": 1}
classification/commercial/readme.txt ADDED
@@ -0,0 +1,22 @@
+ Commercial
+ 
+ # Source:
+ 
+ D. Y. Fu, M. F. Chen, F. Sala, S. M. Hooper, K. Fatahalian, and C. Ré. Fast and three-rious: Speeding up weak supervision with triplet methods. In ICML, pages 3280–3291, 2020.
+ 
+ # Labels:
+ 
+ 0: negative (the segment is not a commercial)
+ 1: positive (the segment is a commercial)
+ 
+ # Labeling functions: 4
+ 
+ LFs: in this dataset there is a strong signal for the presence or absence of commercials in the pixel histograms and the transcript text; in particular, commercials are book-ended on either side by sequences of black frames, and commercial segments tend to have mixed-case or missing transcripts (whereas news segments are in all caps). We use these signals to build the weak supervision sources.
classification/commercial/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c096f125a0d80437085a39795cde5e8b1a2147a2b22cad1b670b4f0bc54a675b
+ size 322533732
classification/commercial/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27d25bb8c139e2ea14e619ca72c647d62dadb3645ab0e40325db4e1653827a25
+ size 2761720065
classification/commercial/valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a67a253cdc2dddb8f838b1909e396fd78730a36e7244147608960017c885097
+ size 407904603
classification/imdb/label.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "0": "Negative",
+ "1": "Positive"
+ }
classification/imdb/readme.txt ADDED
@@ -0,0 +1,60 @@
+ IMDB Sentiment Classification
+ 
+ https://github.com/weakrules/Denoise-multi-weak-sources/tree/master/rules-noisy-labels/IMDB
+ 
+ # Labels
+ 
+ "0": "Negative",
+ "1": "Positive"
+ 
+ # Labeling functions
+ 
+ lfs = [
+     expression_nexttime,
+     keyword_compare,
+     keyword_general,
+     keyword_finish,
+     keyword_plot
+ ]
+ 
+ # lf - expression_nexttime
+ expression_nexttime = make_expression_lf(name="expression_nexttime",
+     pre_pos=["will ", " ll ", "would ", " d ", "can t wait to "],
+     expression=[" next time", " again", " rewatch", " anymore", " rewind"])
+ 
+ # lf - keyword_compare
+ keyword_compare = make_keyword_lf(name="keyword_compare",
+     keywords_pos=[],
+     keywords_neg=[" than this", " than the film", " than the movie"])
+ 
+ # lf - keyword_general
+ keyword_general = make_keyword_lf(name="keyword_general",
+     keywords_pos=["masterpiece", "outstanding", "perfect", "great", "good", "nice", "best", "excellent", "worthy", "awesome", "enjoy", "positive", "pleasant", "wonderful", "amazing", "superb", "fantastic", "marvellous", "fabulous"],
+     keywords_neg=["bad", "worst", "horrible", "awful", "terrible", "crap", "shit", "garbage", "rubbish", "waste"])
+ 
+ # lf - keyword_finish
+ keyword_finish = make_keyword_lf(name="keyword_finish",
+     keywords_pos=[],
+     keywords_neg=["fast forward", "n t finish"])
+ 
+ # lf - keyword_plot
+ keyword_plot = make_keyword_lf(name="keyword_plot",
+     keywords_pos=["well written", "absorbing", "attractive", "innovative", "instructive", "interesting", "touching", "moving"],
+     keywords_neg=["to sleep", "fell asleep", "boring", "dull", "plain"])
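make_keyword_lf and make_expression_lf are factory helpers from the linked repository and are not reproduced in this readme. A minimal re-implementation sketch of the keyword factory under assumed conventions (the POS/NEG/ABSTAIN values and the x.text field are guesses):

ABSTAIN, NEG, POS = -1, 0, 1

def make_keyword_lf(name, keywords_pos, keywords_neg):
    # Vote positive/negative when a corresponding keyword appears, else abstain.
    def lf(x):
        text = x.text.lower()
        if any(k in text for k in keywords_pos):
            return POS
        if any(k in text for k in keywords_neg):
            return NEG
        return ABSTAIN
    lf.__name__ = name
    return lf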
classification/imdb/test.json ADDED
The diff for this file is too large to render. See raw diff
 
classification/imdb/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c415ba520bb77b92f2dd3ba458c6a04ddffac03f506094cd24f25d1b9a28f21
+ size 26860548