pietrolesci committed
Commit cd4aacd
Parent(s): 05353c6
Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- .gitattributes +221 -0
- outputs/README.md +9 -0
- outputs/ablations/README.md +7 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.early_stopping.jsonl +2 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.hydra/config.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.hydra/hydra.yaml +210 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.hydra/overrides.yaml +9 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/active_train.log +132 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/hparams.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/logs/labelled_dataset.parquet +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/logs/subpool_ids.jsonl +0 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/tb_logs/version_0/events.out.tfevents.1712190719.gpu-q-19.3169922.0 +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/tensorboard_logs.parquet +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/.early_stopping.jsonl +4 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/.hydra/config.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/.hydra/hydra.yaml +210 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/.hydra/overrides.yaml +9 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/active_train.log +132 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/hparams.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/logs/labelled_dataset.parquet +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/logs/subpool_ids.jsonl +0 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/tb_logs/version_0/events.out.tfevents.1712312687.gpu-q-14.3012723.0 +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/tensorboard_logs.parquet +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/.early_stopping.jsonl +4 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/.hydra/config.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/.hydra/hydra.yaml +210 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/.hydra/overrides.yaml +9 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/active_train.log +132 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/hparams.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/logs/labelled_dataset.parquet +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/logs/subpool_ids.jsonl +0 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/tb_logs/version_0/events.out.tfevents.1712312687.gpu-q-14.3012722.0 +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/tensorboard_logs.parquet +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/.early_stopping.jsonl +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/.hydra/config.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/.hydra/hydra.yaml +210 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/.hydra/overrides.yaml +9 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/active_train.log +132 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/hparams.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/logs/labelled_dataset.parquet +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/logs/subpool_ids.jsonl +0 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/tb_logs/version_0/events.out.tfevents.1712349958.gpu-q-11.665611.0 +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/tensorboard_logs.parquet +3 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/.early_stopping.jsonl +2 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/.hydra/config.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/.hydra/hydra.yaml +210 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/.hydra/overrides.yaml +9 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/active_train.log +132 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/hparams.yaml +99 -0
- outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/logs/labelled_dataset.parquet +3 -0
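For convenience, here is a minimal sketch (not part of the commit) of fetching the uploaded `outputs/` folder locally with `huggingface_hub`, the library named in the commit message. The `repo_id` and `repo_type` values below are placeholders, not taken from this page.

```python
# Hypothetical download sketch: pull only the outputs/ tree at the commit shown above.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="<user>/<repo>",        # placeholder: substitute the actual repository id
    repo_type="dataset",            # assumption: use "model" if this is a model repo
    revision="cd4aacd",             # the commit shown above (the full hash may be required)
    allow_patterns=["outputs/**"],  # fetch only the uploaded outputs/ folder
)
print(local_dir)
```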
.gitattributes
CHANGED
@@ -53,3 +53,224 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+outputs/ablations/superextended_budget/bert-base_seals_badge_2023-10-09T18-56-11_active_train_0/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_badge_2024-03-25T14-54-50_48771791_32/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_badge_2024-03-25T14-54-50_48771791_33/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_badge_2024-03-25T14-54-50_48771791_34/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_badge_2024-03-25T14-54-50_48771791_35/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_badge_2024-03-25T14-54-50_48771791_36/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_badge_2024-03-25T14-54-50_48771791_37/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_badge_2024-03-25T14-54-50_48771791_38/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_badge_2024-03-25T14-54-50_48771791_39/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_entropy_2023-12-08T08-48-39_36902592_33/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_entropy_2023-12-08T08-48-39_36902592_35/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_entropy_2023-12-08T08-48-39_36902592_37/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_entropy_2023-12-08T08-48-39_36902592_39/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_entropy_2023-12-08T08-48-39_36902592_41/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_entropy_2023-12-08T08-48-39_36902592_43/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_entropy_2023-12-08T08-48-39_36902592_45/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_entropy_2023-12-08T08-48-39_36902592_47/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_ftbertkm_2024-03-25T14-54-50_48771791_40/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_ftbertkm_2024-03-25T14-54-50_48771791_41/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_ftbertkm_2024-03-25T14-54-50_48771791_42/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_ftbertkm_2024-03-25T14-54-50_48771791_43/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_ftbertkm_2024-03-25T14-54-50_48771791_44/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_ftbertkm_2024-03-25T14-54-50_48771791_45/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_ftbertkm_2024-03-25T14-54-50_48771791_46/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_randomsubset_ftbertkm_2024-03-25T14-54-50_48771791_47/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_badge_2024-03-25T14-54-50_48771791_16/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_badge_2024-03-25T14-54-50_48771791_17/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_badge_2024-03-25T14-54-50_48771791_18/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_badge_2024-03-25T14-54-50_48771791_19/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_badge_2024-03-25T14-54-50_48771791_20/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_badge_2024-03-25T14-54-50_48771791_21/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_badge_2024-03-25T14-54-50_48771791_22/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_badge_2024-03-25T14-54-50_48771791_23/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_entropy_2023-12-08T08-48-39_36902592_17/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_entropy_2023-12-08T08-48-39_36902592_19/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_entropy_2023-12-08T08-48-39_36902592_21/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_entropy_2023-12-08T08-48-39_36902592_23/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_entropy_2023-12-08T08-48-39_36902592_25/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_entropy_2023-12-08T08-48-39_36902592_27/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_entropy_2023-12-08T08-48-39_36902592_29/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_entropy_2023-12-08T08-48-39_36902592_31/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_ftbertkm_2024-03-25T14-54-50_48771791_24/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_ftbertkm_2024-03-25T14-54-50_48771791_25/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_ftbertkm_2024-03-25T14-54-50_48771791_26/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_ftbertkm_2024-03-25T14-54-50_48771791_27/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_ftbertkm_2024-03-25T14-54-50_48771791_28/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_ftbertkm_2024-03-25T14-54-50_48771791_29/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_ftbertkm_2024-03-25T14-54-50_48771791_30/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/agnews-business-.01/bert-base-uncased_seals_ftbertkm_2024-03-25T14-54-50_48771791_31/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_badge_2023-10-11T11-26-25_29414693_1/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_entropy_2023-10-02T17-40-02_28786438_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_entropy_2023-10-02T17-40-02_28786438_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_entropy_2023-10-02T17-40-02_28786438_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_entropy_2023-10-02T17-40-02_28786438_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_entropy_2023-10-02T17-40-02_28786438_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_entropy_2023-10-02T17-40-02_28786438_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_entropy_2023-10-02T17-40-02_28786438_8/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_entropy_2023-10-02T17-40-02_28786438_9/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_8/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-agri/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_9/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_40/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_41/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_42/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_43/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_44/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_45/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_46/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_47/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_8/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_9/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_40/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_41/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_42/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_43/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_44/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_46/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/amazon-multi/bert-base-uncased_seals_ftbertkm_2023-10-11T11-53-06_29416993_4/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_74/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_75/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_77/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-09T15-09-23_29222618_79/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-11T11-23-59_29414517_4/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-11T11-28-11_29414827_3/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-11T11-29-50_29414952_3/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-11T11-44-27_29416566_0/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_badge_2023-10-11T11-55-21_29417051_3/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_41/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_42/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_43/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_44/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_46/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_entropy_2023-10-09T15-15-18_29223088_47/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_entropy_2023-10-11T11-23-59_29414517_1/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_72/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_73/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_74/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_75/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_76/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_77/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_78/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/main/wikitoxic-.01/bert-base-uncased_seals_ftbertkm_2023-10-09T15-13-57_29223081_79/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_randomsubset_entropy_2023-12-04T18-17-02_35861566_24/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_randomsubset_entropy_2023-12-04T18-17-02_35861566_25/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_randomsubset_entropy_2023-12-04T18-17-02_35861566_26/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_randomsubset_entropy_2023-12-04T18-17-02_35861566_27/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_randomsubset_entropy_2023-12-04T18-17-02_35861566_28/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_randomsubset_entropy_2023-12-04T18-17-02_35861566_29/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_randomsubset_entropy_2023-12-04T18-17-02_35861566_30/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_randomsubset_entropy_2023-12-04T18-17-02_35861566_31/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_32/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_33/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_34/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_35/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_36/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_37/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_38/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/agnews-business-.01/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_39/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/amazon-agri/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/amazon-agri/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/amazon-agri/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/amazon-agri/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/amazon-agri/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/amazon-agri/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/amazon-agri/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_8/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/albert-base-v2/amazon-agri/albert-base-v2_seals_entropy_2023-12-04T18-17-02_35861566_9/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_randomsubset_entropy_2023-12-08T08-48-39_36902592_32/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_randomsubset_entropy_2023-12-08T08-48-39_36902592_34/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_randomsubset_entropy_2023-12-08T08-48-39_36902592_36/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_randomsubset_entropy_2023-12-08T08-48-39_36902592_38/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_randomsubset_entropy_2023-12-08T08-48-39_36902592_40/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_randomsubset_entropy_2023-12-08T08-48-39_36902592_42/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_randomsubset_entropy_2023-12-08T08-48-39_36902592_44/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_randomsubset_entropy_2023-12-08T08-48-39_36902592_46/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_seals_entropy_2023-12-08T08-48-39_36902592_16/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_seals_entropy_2023-12-08T08-48-39_36902592_18/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_seals_entropy_2023-12-08T08-48-39_36902592_20/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_seals_entropy_2023-12-08T08-48-39_36902592_22/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_seals_entropy_2023-12-08T08-48-39_36902592_24/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_seals_entropy_2023-12-08T08-48-39_36902592_26/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_seals_entropy_2023-12-08T08-48-39_36902592_28/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/agnews-business-.01/bert-tiny_seals_entropy_2023-12-08T08-48-39_36902592_30/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_randomsubset_entropy_2023-10-04T10-38-41_28870852_16/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_randomsubset_entropy_2023-10-04T10-38-41_28870852_17/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_randomsubset_entropy_2023-10-04T10-38-41_28870852_18/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_randomsubset_entropy_2023-10-04T10-38-41_28870852_19/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_randomsubset_entropy_2023-10-04T10-38-41_28870852_20/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_randomsubset_entropy_2023-10-04T10-38-41_28870852_22/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_randomsubset_entropy_2023-10-04T10-38-41_28870852_23/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_seals_entropy_2023-10-04T10-38-41_28870852_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_seals_entropy_2023-10-04T10-38-41_28870852_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_seals_entropy_2023-10-04T10-38-41_28870852_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_seals_entropy_2023-10-04T10-38-41_28870852_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_seals_entropy_2023-10-04T10-38-41_28870852_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_seals_entropy_2023-10-04T10-38-41_28870852_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_seals_entropy_2023-10-04T10-38-41_28870852_8/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/bert-tiny/amazon-agri/bert-tiny_seals_entropy_2023-10-04T10-38-41_28870852_9/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/deberta-v3-base/agnews-business-.01/deberta-v3-base_seals_entropy_2023-12-06T10-52-24_36657787_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/deberta-v3-base/agnews-business-.01/deberta-v3-base_seals_entropy_2023-12-06T10-52-24_36657787_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/deberta-v3-base/agnews-business-.01/deberta-v3-base_seals_entropy_2023-12-06T10-52-24_36657787_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/deberta-v3-base/agnews-business-.01/deberta-v3-base_seals_entropy_2023-12-06T10-52-24_36657787_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/deberta-v3-base/agnews-business-.01/deberta-v3-base_seals_entropy_2023-12-06T10-52-24_36657787_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/deberta-v3-base/agnews-business-.01/deberta-v3-base_seals_entropy_2023-12-06T10-52-24_36657787_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/deberta-v3-base/agnews-business-.01/deberta-v3-base_seals_entropy_2023-12-06T10-52-24_36657787_9/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_randomsubset_entropy_2023-12-08T18-52-14_36955715_40/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_randomsubset_entropy_2023-12-08T18-52-14_36955715_41/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_randomsubset_entropy_2023-12-08T18-52-14_36955715_42/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_randomsubset_entropy_2023-12-08T18-52-14_36955715_43/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_randomsubset_entropy_2023-12-08T18-52-14_36955715_44/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_randomsubset_entropy_2023-12-08T18-52-14_36955715_45/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_randomsubset_entropy_2023-12-08T18-52-14_36955715_46/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_randomsubset_entropy_2023-12-08T18-52-14_36955715_47/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_32/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_33/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_34/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_35/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_36/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_37/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_38/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/agnews-business-.01/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_39/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/amazon-agri/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/amazon-agri/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/amazon-agri/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/amazon-agri/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/amazon-agri/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/amazon-agri/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/amazon-agri/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_8/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/gpt2/amazon-agri/gpt2_seals_entropy_2023-12-08T18-52-14_36955715_9/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/agnews-business-.01/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_32/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/agnews-business-.01/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_33/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/agnews-business-.01/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_34/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/agnews-business-.01/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_35/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/agnews-business-.01/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_36/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/agnews-business-.01/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_37/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/agnews-business-.01/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_38/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/agnews-business-.01/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_39/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/amazon-agri/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_10/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/amazon-agri/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_11/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/amazon-agri/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_12/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/amazon-agri/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_13/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/amazon-agri/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_14/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/amazon-agri/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_15/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/amazon-agri/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_8/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
+outputs/other_models/t5-base/amazon-agri/t5-base_seals_entropy_2023-12-08T18-54-17_36955942_9/logs/subpool_ids.jsonl filter=lfs diff=lfs merge=lfs -text
outputs/README.md
ADDED
@@ -0,0 +1,9 @@
+This repo contains the artifacts produced from the experiments reported in the paper "Computationally Efficient Active Learning for Large Imbalanced Datasets" (Lesci and Vlachos, 2024).
+
+Each subfolder contains a README.md file describing its contents. In summary:
+
+- `main/` contains the main results for all datasets considered, created using `bert-base-uncased`
+
+- `other_models/` contains the results for two datasets using five additional models
+
+- `ablations/` contains the outputs of the ablations for the proposed AnchorAL method
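A minimal sketch of inspecting one run directory from the listing above with pandas; the directory and file names come from the file list, while the column names are not documented on this page and are simply printed.

```python
# Assumes the repo has been downloaded locally and pyarrow/pandas are installed.
import pandas as pd

run_dir = (
    "outputs/ablations/additional_randomsubset_1000/"
    "bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0"
)

# Per-round metrics exported from TensorBoard and the labelled instances of this run.
metrics = pd.read_parquet(f"{run_dir}/tensorboard_logs.parquet")
labelled = pd.read_parquet(f"{run_dir}/logs/labelled_dataset.parquet")

print(metrics.columns.tolist())  # inspect which metrics were logged
print(labelled.shape)            # number of labelled instances and columns
```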
outputs/ablations/README.md
ADDED
@@ -0,0 +1,7 @@
+These are the ablations for the `anchoral` pool filtering method
+
+- datasets: `amazon_agri`
+
+- active learning strategies: `entropy`
+
+- models: `bert-base-uncased`
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.early_stopping.jsonl
ADDED
@@ -0,0 +1,2 @@
+{"best_score":1.0,"best_step":11,"stage":"train","interval":"epoch","stopping_step":21,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
+{"best_score":1.0,"best_step":34,"stage":"train","interval":"epoch","stopping_step":44,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
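A minimal sketch of reading these early-stopping records (one JSON object per line); the keys used below are the ones that appear in the two records shown above.

```python
import json
from pathlib import Path

path = Path(
    "outputs/ablations/additional_randomsubset_1000/"
    "bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.early_stopping.jsonl"
)

# Each line is one early-stopping event for one active-learning round.
records = [json.loads(line) for line in path.read_text().splitlines() if line.strip()]
for rec in records:
    print(rec["best_step"], rec["best_score"], rec["reason"])
```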
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.hydra/config.yaml
ADDED
@@ -0,0 +1,99 @@
+estimator:
+accelerator: gpu
+precision: 32
+deterministic: true
+tf32_mode: high
+callbacks:
+timer:
+_target_: energizer.active_learning.callbacks.Timer
+save_outputs:
+_target_: src.callbacks.SaveOutputs
+dirpath: ./logs/
+instance_level: false
+batch_level: false
+epoch_level: false
+early_stopping:
+_target_: energizer.callbacks.early_stopping.EarlyStopping
+monitor: train/avg_f1_minclass
+stage: train
+interval: epoch
+mode: max
+min_delta: 1.0e-05
+patience: 10
+stopping_threshold: null
+divergence_threshold: null
+verbose: true
+model_checkpoint:
+_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
+dirpath: .checkpoints
+monitor: train/avg_f1_minclass
+stage: train
+mode: max
+save_last: false
+save_top_k: 1
+verbose: true
+loggers:
+tensorboard:
+_target_: energizer.loggers.TensorBoardLogger
+root_dir: ./
+name: tb_logs
+version: null
+data:
+batch_size: 32
+eval_batch_size: 256
+num_workers: 32
+pin_memory: true
+drop_last: false
+persistent_workers: true
+shuffle: true
+seed: 654321
+replacement: false
+max_length: 512
+active_data:
+budget: 100
+positive_budget: 5
+seed: 654321
+fit:
+min_steps: 100
+max_epochs: 10
+learning_rate: 4.0e-05
+optimizer: adamw
+log_interval: ${log_interval}
+enable_progress_bar: ${enable_progress_bar}
+limit_train_batches: ${limit_batches}
+limit_validation_batches: ${limit_batches}
+active_fit:
+max_budget: 5000
+query_size: 25
+reinit_model: true
+limit_pool_batches: ${limit_batches}
+limit_test_batches: ${limit_batches}
+test:
+log_interval: ${log_interval}
+enable_progress_bar: ${enable_progress_bar}
+limit_batches: ${limit_batches}
+strategy:
+name: randomsubset_entropy
+args:
+seed: 42
+subpool_size: 1000
+model:
+name: bert-base-uncased
+seed: 654321
+dataset:
+name: amazon-agri
+text_column: text
+label_column: labels
+uid_column: uid
+prepared_path: ${data_path}/prepared/amazoncat-agri
+processed_path: ${data_path}/processed/amazoncat-13k
+minority_classes:
+- 1
+index_metric: all-mpnet-base-v2_cosine
+log_interval: 1
+enable_progress_bar: false
+limit_batches: null
+seed: 42
+experiment_group: additional_randomsubset_1000
+run_name: ${dataset.name}/${model.name}_${strategy.name}_${now:%Y-%m-%d}T${now:%H-%M-%S}
+data_path: /rds/user/pl487/hpc-work/anchoral/data
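A minimal sketch of loading this Hydra run config with OmegaConf; interpolations such as `${now:...}` require Hydra's custom resolvers, so the config is read and printed without resolving them.

```python
from omegaconf import OmegaConf

cfg = OmegaConf.load(
    "outputs/ablations/additional_randomsubset_1000/"
    "bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.hydra/config.yaml"
)

# Values taken from the config shown above.
print(cfg.strategy.name, cfg.strategy.args.subpool_size)  # randomsubset_entropy 1000
print(cfg.model.name, cfg.dataset.name)                   # bert-base-uncased amazon-agri
print(OmegaConf.to_yaml(cfg, resolve=False)[:200])        # peek without resolving ${...}
```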
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,210 @@
+hydra:
+run:
+dir: ./outputs/${experiment_group}/${run_name}
+sweep:
+dir: ./outputs/multirun/${experiment_group}
+subdir: ${run_name}_${hydra.job.id}
+launcher:
+submitit_folder: ${hydra.sweep.dir}/.submitit/%j
+timeout_min: 360
+cpus_per_task: null
+gpus_per_node: null
+tasks_per_node: 1
+mem_gb: null
+nodes: 1
+name: ${experiment_group}
+stderr_to_stdout: false
+_target_: hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher
+partition: ampere
+qos: null
+comment: null
+constraint: null
+exclude: null
+gres: gpu:1
+cpus_per_gpu: null
+gpus_per_task: null
+mem_per_gpu: null
+mem_per_cpu: null
+account: VLACHOS-SL3-GPU
+signal_delay_s: 120
+max_num_timeout: 0
+additional_parameters: {}
+array_parallelism: 256
+setup:
+- . /etc/profile.d/modules.sh
+- module list
+- echo -e 'loading other modules'
+- module load rhel8/default-amp
+- module load cuda/12.1
+- module load cudnn/8.9_cuda-12.1
+- module list
+sweeper:
+_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+max_batch_size: null
+params: null
+help:
+app_name: ${hydra.job.name}
+header: '${hydra.help.app_name} is powered by Hydra.
+
+'
+footer: 'Powered by Hydra (https://hydra.cc)
+
+Use --hydra-help to view Hydra specific help
+
+'
+template: '${hydra.help.header}
+
+== Configuration groups ==
+
+Compose your configuration from those groups (group=option)
+
+
+$APP_CONFIG_GROUPS
+
+
+== Config ==
+
+Override anything in the config (foo.bar=value)
+
+
+$CONFIG
+
+
+${hydra.help.footer}
+
+'
+hydra_help:
+template: 'Hydra (${hydra.runtime.version})
+
+See https://hydra.cc for more info.
+
+
+== Flags ==
+
+$FLAGS_HELP
+
+
+== Configuration groups ==
+
+Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+to command line)
+
+
+$HYDRA_CONFIG_GROUPS
+
+
+Use ''--cfg hydra'' to Show the Hydra config.
+
+'
+hydra_help: ???
+hydra_logging:
+version: 1
+formatters:
+colorlog:
+(): colorlog.ColoredFormatter
+format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+handlers:
+console:
+class: logging.StreamHandler
+formatter: colorlog
+stream: ext://sys.stdout
+root:
+level: INFO
+handlers:
+- console
+disable_existing_loggers: false
+job_logging:
+version: 1
+formatters:
+simple:
+format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+colorlog:
+(): colorlog.ColoredFormatter
+format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+- %(message)s'
+log_colors:
+DEBUG: purple
+INFO: green
+WARNING: yellow
+ERROR: red
+CRITICAL: bold_red
+handlers:
+console:
+class: logging.StreamHandler
+formatter: colorlog
+stream: ext://sys.stdout
+file:
+class: logging.FileHandler
+formatter: simple
+filename: ${hydra.job.name}.log
+root:
+level: INFO
+handlers:
+- console
+- file
+disable_existing_loggers: false
+env: {}
+mode: MULTIRUN
+searchpath: []
+callbacks: {}
+output_subdir: .hydra
+overrides:
+hydra:
+- hydra.launcher.timeout_min=360
+- hydra.mode=MULTIRUN
+task:
+- experiment_group=additional_randomsubset_1000
+- dataset=amazon_agri
+- strategy=randomsubset_entropy
+- data.seed=654321
+- model.seed=654321
+- active_data.seed=654321
+- model.name=bert-base-uncased
+- +launcher=slurm
+- strategy.args.subpool_size=1000
+job:
+name: active_train
+chdir: true
+override_dirname: +launcher=slurm,active_data.seed=654321,data.seed=654321,dataset=amazon_agri,experiment_group=additional_randomsubset_1000,model.name=bert-base-uncased,model.seed=654321,strategy.args.subpool_size=1000,strategy=randomsubset_entropy
+id: '49537867_0'
+num: 0
+config_name: conf
+env_set: {}
+env_copy: []
+config:
+override_dirname:
+kv_sep: '='
+item_sep: ','
+exclude_keys: []
+runtime:
+version: 1.3.2
+version_base: '1.3'
+cwd: /rds/user/pl487/hpc-work/anchoral
+config_sources:
+- path: hydra.conf
+schema: pkg
+provider: hydra
+- path: /rds/user/pl487/hpc-work/anchoral/conf
+schema: file
+provider: main
+- path: hydra_plugins.hydra_colorlog.conf
+schema: pkg
+provider: hydra-colorlog
+- path: ''
+schema: structured
+provider: schema
+output_dir: /rds/user/pl487/hpc-work/anchoral/outputs/multirun/additional_randomsubset_1000/amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0
+choices:
+launcher: slurm
+dataset: amazon_agri
+strategy: randomsubset_entropy
+hydra/env: default
+hydra/callbacks: null
+hydra/job_logging: colorlog
+hydra/hydra_logging: colorlog
+hydra/hydra_help: default
+hydra/help: default
+hydra/sweeper: basic
+hydra/launcher: submitit_slurm
+hydra/output: default
+verbose: false
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/.hydra/overrides.yaml
ADDED
@@ -0,0 +1,9 @@
- experiment_group=additional_randomsubset_1000
- dataset=amazon_agri
- strategy=randomsubset_entropy
- data.seed=654321
- model.seed=654321
- active_data.seed=654321
- model.name=bert-base-uncased
- +launcher=slurm
- strategy.args.subpool_size=1000
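The overrides above are enough to re-materialise this run's configuration outside the cluster. A minimal sketch using Hydra's compose API is given below; it assumes a local checkout of the anchoral repository whose primary config is conf/conf.yaml (as recorded in .hydra/hydra.yaml), and it drops the +launcher=slurm override because that only selects the submitit launcher for cluster execution.

```python
# Minimal sketch: rebuild the resolved config for this run from its overrides.
# Assumption: executed from a local checkout of the anchoral repo, so that the
# relative config_path="conf" and config_name="conf" below actually exist.
from hydra import compose, initialize
from omegaconf import OmegaConf

overrides = [
    "experiment_group=additional_randomsubset_1000",
    "dataset=amazon_agri",
    "strategy=randomsubset_entropy",
    "data.seed=654321",
    "model.seed=654321",
    "active_data.seed=654321",
    "model.name=bert-base-uncased",
    "strategy.args.subpool_size=1000",
    # "+launcher=slurm" is omitted: it only configures the SLURM launcher.
]

with initialize(config_path="conf", version_base="1.3"):
    cfg = compose(config_name="conf", overrides=overrides)

print(OmegaConf.to_yaml(cfg))  # should mirror this run's hparams.yaml
```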
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/active_train.log
ADDED
@@ -0,0 +1,132 @@
1 |
+
[2024-04-04 01:31:45,918][hydra][INFO] -
|
2 |
+
estimator:
|
3 |
+
accelerator: gpu
|
4 |
+
precision: 32
|
5 |
+
deterministic: true
|
6 |
+
tf32_mode: high
|
7 |
+
callbacks:
|
8 |
+
timer:
|
9 |
+
_target_: energizer.active_learning.callbacks.Timer
|
10 |
+
save_outputs:
|
11 |
+
_target_: src.callbacks.SaveOutputs
|
12 |
+
dirpath: ./logs/
|
13 |
+
instance_level: false
|
14 |
+
batch_level: false
|
15 |
+
epoch_level: false
|
16 |
+
early_stopping:
|
17 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
18 |
+
monitor: train/avg_f1_minclass
|
19 |
+
stage: train
|
20 |
+
interval: epoch
|
21 |
+
mode: max
|
22 |
+
min_delta: 1.0e-05
|
23 |
+
patience: 10
|
24 |
+
stopping_threshold: null
|
25 |
+
divergence_threshold: null
|
26 |
+
verbose: true
|
27 |
+
model_checkpoint:
|
28 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
29 |
+
dirpath: .checkpoints
|
30 |
+
monitor: train/avg_f1_minclass
|
31 |
+
stage: train
|
32 |
+
mode: max
|
33 |
+
save_last: false
|
34 |
+
save_top_k: 1
|
35 |
+
verbose: true
|
36 |
+
loggers:
|
37 |
+
tensorboard:
|
38 |
+
_target_: energizer.loggers.TensorBoardLogger
|
39 |
+
root_dir: ./
|
40 |
+
name: tb_logs
|
41 |
+
version: null
|
42 |
+
data:
|
43 |
+
batch_size: 32
|
44 |
+
eval_batch_size: 256
|
45 |
+
num_workers: 32
|
46 |
+
pin_memory: true
|
47 |
+
drop_last: false
|
48 |
+
persistent_workers: true
|
49 |
+
shuffle: true
|
50 |
+
seed: 654321
|
51 |
+
replacement: false
|
52 |
+
max_length: 512
|
53 |
+
active_data:
|
54 |
+
budget: 100
|
55 |
+
positive_budget: 5
|
56 |
+
seed: 654321
|
57 |
+
fit:
|
58 |
+
min_steps: 100
|
59 |
+
max_epochs: 10
|
60 |
+
learning_rate: 4.0e-05
|
61 |
+
optimizer: adamw
|
62 |
+
log_interval: 1
|
63 |
+
enable_progress_bar: false
|
64 |
+
limit_train_batches: null
|
65 |
+
limit_validation_batches: null
|
66 |
+
active_fit:
|
67 |
+
max_budget: 5000
|
68 |
+
query_size: 25
|
69 |
+
reinit_model: true
|
70 |
+
limit_pool_batches: null
|
71 |
+
limit_test_batches: null
|
72 |
+
test:
|
73 |
+
log_interval: 1
|
74 |
+
enable_progress_bar: false
|
75 |
+
limit_batches: null
|
76 |
+
strategy:
|
77 |
+
name: randomsubset_entropy
|
78 |
+
args:
|
79 |
+
seed: 42
|
80 |
+
subpool_size: 1000
|
81 |
+
model:
|
82 |
+
name: bert-base-uncased
|
83 |
+
seed: 654321
|
84 |
+
dataset:
|
85 |
+
name: amazon-agri
|
86 |
+
text_column: text
|
87 |
+
label_column: labels
|
88 |
+
uid_column: uid
|
89 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
90 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
91 |
+
minority_classes:
|
92 |
+
- 1
|
93 |
+
index_metric: all-mpnet-base-v2_cosine
|
94 |
+
log_interval: 1
|
95 |
+
enable_progress_bar: false
|
96 |
+
limit_batches: null
|
97 |
+
seed: 42
|
98 |
+
experiment_group: additional_randomsubset_1000
|
99 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
100 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
101 |
+
|
102 |
+
======================================================================
|
103 |
+
[2024-04-04 01:31:45,934][hydra][INFO] - Running active learning with strategy {'name': 'randomsubset_entropy', 'args': {'seed': 42, 'subpool_size': 1000}}
|
104 |
+
[2024-04-04 01:31:45,995][hydra][INFO] - Seed enabled: 42
|
105 |
+
[2024-04-04 01:31:54,137][hydra][INFO] - Labelled size: 100 Pool size: 1186139 Test size: 5285
|
106 |
+
Label distribution:
|
107 |
+
| | labels | count | perc |
|
108 |
+
|---:|:---------|--------:|-------:|
|
109 |
+
| 0 | Negative | 95 | 0.95 |
|
110 |
+
| 1 | Positive | 5 | 0.05 |
|
111 |
+
[2024-04-04 01:31:54,238][hydra][INFO] - Batch:
|
112 |
+
{<InputKeys.INPUT_IDS: 'input_ids'>: tensor([[ 101, 3521, 13903, 102]]), <InputKeys.ATT_MASK: 'attention_mask'>: tensor([[1, 1, 1, 1]]), <InputKeys.LABELS: 'labels'>: tensor([0]), <InputKeys.ON_CPU: 'on_cpu'>: {<SpecialKeys.ID: 'uid'>: [1462254]}}
|
113 |
+
[2024-04-04 01:31:59,115][hydra][INFO] - Loggers: {'tensorboard': <energizer.loggers.tensorboard.TensorBoardLogger object at 0x1465666bc310>}
|
114 |
+
[2024-04-04 01:31:59,115][hydra][INFO] - Callbacks: {'timer': <energizer.active_learning.callbacks.Timer object at 0x1462bd3c2370>, 'save_outputs': <src.callbacks.SaveOutputs object at 0x1462bd3c2340>, 'early_stopping': <energizer.callbacks.early_stopping.EarlyStopping object at 0x1462bd3c2e80>, 'model_checkpoint': <energizer.callbacks.model_checkpoint.ModelCheckpoint object at 0x146139ebd7f0>}
|
115 |
+
[2024-04-04 01:31:59,138][hydra][INFO] -
|
116 |
+
| Name | Type | Params
|
117 |
+
-----------------------------------------
|
118 |
+
0 | bert | BertModel | 109 M
|
119 |
+
1 | dropout | Dropout | 0
|
120 |
+
2 | classifier | Linear | 1.5 K
|
121 |
+
-----------------------------------------
|
122 |
+
109 M Trainable params
|
123 |
+
0 Non-trainable params
|
124 |
+
109 M Total params
|
125 |
+
437.935 Total estimated model params size (MB)
|
126 |
+
0.00 GB CUDA Memory used
|
127 |
+
[2024-04-04 07:28:59,149][submitit][INFO] - Job has timed out. Ran 357 minutes out of requested 360 minutes.
|
128 |
+
[2024-04-04 07:28:59,196][submitit][WARNING] - Caught signal SIGUSR2 on gpu-q-19: this job is timed-out.
|
129 |
+
[2024-04-04 07:28:59,216][submitit][INFO] - Calling checkpoint method.
|
130 |
+
[2024-04-04 07:28:59,286][submitit][INFO] - Job not requeued because: timed-out too many times.
|
131 |
+
[2024-04-04 07:28:59,286][submitit][WARNING] - Bypassing signal SIGCONT
|
132 |
+
[2024-04-04 07:28:59,303][submitit][INFO] - Job completed successfully
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/hparams.yaml
ADDED
@@ -0,0 +1,99 @@
estimator:
accelerator: gpu
precision: 32
deterministic: true
tf32_mode: high
callbacks:
timer:
_target_: energizer.active_learning.callbacks.Timer
save_outputs:
_target_: src.callbacks.SaveOutputs
dirpath: ./logs/
instance_level: false
batch_level: false
epoch_level: false
early_stopping:
_target_: energizer.callbacks.early_stopping.EarlyStopping
monitor: train/avg_f1_minclass
stage: train
interval: epoch
mode: max
min_delta: 1.0e-05
patience: 10
stopping_threshold: null
divergence_threshold: null
verbose: true
model_checkpoint:
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
dirpath: .checkpoints
monitor: train/avg_f1_minclass
stage: train
mode: max
save_last: false
save_top_k: 1
verbose: true
loggers:
tensorboard:
_target_: energizer.loggers.TensorBoardLogger
root_dir: ./
name: tb_logs
version: null
data:
batch_size: 32
eval_batch_size: 256
num_workers: 32
pin_memory: true
drop_last: false
persistent_workers: true
shuffle: true
seed: 654321
replacement: false
max_length: 512
active_data:
budget: 100
positive_budget: 5
seed: 654321
fit:
min_steps: 100
max_epochs: 10
learning_rate: 4.0e-05
optimizer: adamw
log_interval: 1
enable_progress_bar: false
limit_train_batches: null
limit_validation_batches: null
active_fit:
max_budget: 5000
query_size: 25
reinit_model: true
limit_pool_batches: null
limit_test_batches: null
test:
log_interval: 1
enable_progress_bar: false
limit_batches: null
strategy:
name: randomsubset_entropy
args:
seed: 42
subpool_size: 1000
model:
name: bert-base-uncased
seed: 654321
dataset:
name: amazon-agri
text_column: text
label_column: labels
uid_column: uid
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
minority_classes:
- 1
index_metric: all-mpnet-base-v2_cosine
log_interval: 1
enable_progress_bar: false
limit_batches: null
seed: 42
experiment_group: additional_randomsubset_1000
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
data_path: /rds/user/pl487/hpc-work/anchoral/data
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/logs/labelled_dataset.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1722c0291bb5a4876a365982b5e7cdb13d0ccb67775b4b06041803403a46474c
size 6285837
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/logs/subpool_ids.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/tb_logs/version_0/events.out.tfevents.1712190719.gpu-q-19.3169922.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:60547cc7a054714358acf8b5514df314746814bcf264769cb28344e8f2fd15cf
size 7818912
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0/tensorboard_logs.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3540628c61a741b2b78235a0fe74ff1b96944f835a6bd83b64cd23f0da4d5d53
size 1369944
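The parquet and TensorBoard event files in each run directory are stored as git-lfs pointers, so only their sha256/size stubs appear in this diff. A minimal sketch of how these artifacts might be loaded for analysis is shown below; it assumes the LFS payloads have been fetched (for example with `git lfs pull` or huggingface_hub's snapshot_download), that pandas with a parquet backend is installed, and that the comments describing each file's role are inferred from the repo layout rather than documented.

```python
# Minimal sketch: load the logged artifacts of run 49537867_0 for analysis.
# Assumption: the git-lfs payloads have been downloaded, so these paths point
# at real parquet/jsonl files rather than pointer stubs.
import pandas as pd

run_dir = (
    "outputs/ablations/additional_randomsubset_1000/"
    "bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_0"
)

tb_logs = pd.read_parquet(f"{run_dir}/tensorboard_logs.parquet")        # scalars exported from tb_logs/
labelled = pd.read_parquet(f"{run_dir}/logs/labelled_dataset.parquet")  # labelled-pool snapshot
stops = pd.read_json(f"{run_dir}/.early_stopping.jsonl", lines=True)    # one record per early stop

print(tb_logs.head())
print(stops[["best_score", "best_step", "stopping_step"]])
```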
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/.early_stopping.jsonl
ADDED
@@ -0,0 +1,4 @@
{"best_score":0.1875,"best_step":0,"stage":"train","interval":"epoch","stopping_step":10,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
{"best_score":1.0,"best_step":23,"stage":"train","interval":"epoch","stopping_step":33,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
{"best_score":0.0,"best_step":86,"stage":"train","interval":"epoch","stopping_step":96,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
{"best_score":0.08,"best_step":97,"stage":"train","interval":"epoch","stopping_step":107,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/.hydra/config.yaml
ADDED
@@ -0,0 +1,99 @@
1 |
+
estimator:
|
2 |
+
accelerator: gpu
|
3 |
+
precision: 32
|
4 |
+
deterministic: true
|
5 |
+
tf32_mode: high
|
6 |
+
callbacks:
|
7 |
+
timer:
|
8 |
+
_target_: energizer.active_learning.callbacks.Timer
|
9 |
+
save_outputs:
|
10 |
+
_target_: src.callbacks.SaveOutputs
|
11 |
+
dirpath: ./logs/
|
12 |
+
instance_level: false
|
13 |
+
batch_level: false
|
14 |
+
epoch_level: false
|
15 |
+
early_stopping:
|
16 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
17 |
+
monitor: train/avg_f1_minclass
|
18 |
+
stage: train
|
19 |
+
interval: epoch
|
20 |
+
mode: max
|
21 |
+
min_delta: 1.0e-05
|
22 |
+
patience: 10
|
23 |
+
stopping_threshold: null
|
24 |
+
divergence_threshold: null
|
25 |
+
verbose: true
|
26 |
+
model_checkpoint:
|
27 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
28 |
+
dirpath: .checkpoints
|
29 |
+
monitor: train/avg_f1_minclass
|
30 |
+
stage: train
|
31 |
+
mode: max
|
32 |
+
save_last: false
|
33 |
+
save_top_k: 1
|
34 |
+
verbose: true
|
35 |
+
loggers:
|
36 |
+
tensorboard:
|
37 |
+
_target_: energizer.loggers.TensorBoardLogger
|
38 |
+
root_dir: ./
|
39 |
+
name: tb_logs
|
40 |
+
version: null
|
41 |
+
data:
|
42 |
+
batch_size: 32
|
43 |
+
eval_batch_size: 256
|
44 |
+
num_workers: 32
|
45 |
+
pin_memory: true
|
46 |
+
drop_last: false
|
47 |
+
persistent_workers: true
|
48 |
+
shuffle: true
|
49 |
+
seed: 654321
|
50 |
+
replacement: false
|
51 |
+
max_length: 512
|
52 |
+
active_data:
|
53 |
+
budget: 100
|
54 |
+
positive_budget: 5
|
55 |
+
seed: 123456
|
56 |
+
fit:
|
57 |
+
min_steps: 100
|
58 |
+
max_epochs: 10
|
59 |
+
learning_rate: 4.0e-05
|
60 |
+
optimizer: adamw
|
61 |
+
log_interval: ${log_interval}
|
62 |
+
enable_progress_bar: ${enable_progress_bar}
|
63 |
+
limit_train_batches: ${limit_batches}
|
64 |
+
limit_validation_batches: ${limit_batches}
|
65 |
+
active_fit:
|
66 |
+
max_budget: 5000
|
67 |
+
query_size: 25
|
68 |
+
reinit_model: true
|
69 |
+
limit_pool_batches: ${limit_batches}
|
70 |
+
limit_test_batches: ${limit_batches}
|
71 |
+
test:
|
72 |
+
log_interval: ${log_interval}
|
73 |
+
enable_progress_bar: ${enable_progress_bar}
|
74 |
+
limit_batches: ${limit_batches}
|
75 |
+
strategy:
|
76 |
+
name: randomsubset_entropy
|
77 |
+
args:
|
78 |
+
seed: 42
|
79 |
+
subpool_size: 1000
|
80 |
+
model:
|
81 |
+
name: bert-base-uncased
|
82 |
+
seed: 654321
|
83 |
+
dataset:
|
84 |
+
name: amazon-agri
|
85 |
+
text_column: text
|
86 |
+
label_column: labels
|
87 |
+
uid_column: uid
|
88 |
+
prepared_path: ${data_path}/prepared/amazoncat-agri
|
89 |
+
processed_path: ${data_path}/processed/amazoncat-13k
|
90 |
+
minority_classes:
|
91 |
+
- 1
|
92 |
+
index_metric: all-mpnet-base-v2_cosine
|
93 |
+
log_interval: 1
|
94 |
+
enable_progress_bar: false
|
95 |
+
limit_batches: null
|
96 |
+
seed: 42
|
97 |
+
experiment_group: additional_randomsubset_1000
|
98 |
+
run_name: ${dataset.name}/${model.name}_${strategy.name}_${now:%Y-%m-%d}T${now:%H-%M-%S}
|
99 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,210 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: ./outputs/${experiment_group}/${run_name}
|
4 |
+
sweep:
|
5 |
+
dir: ./outputs/multirun/${experiment_group}
|
6 |
+
subdir: ${run_name}_${hydra.job.id}
|
7 |
+
launcher:
|
8 |
+
submitit_folder: ${hydra.sweep.dir}/.submitit/%j
|
9 |
+
timeout_min: 360
|
10 |
+
cpus_per_task: null
|
11 |
+
gpus_per_node: null
|
12 |
+
tasks_per_node: 1
|
13 |
+
mem_gb: null
|
14 |
+
nodes: 1
|
15 |
+
name: ${experiment_group}
|
16 |
+
stderr_to_stdout: false
|
17 |
+
_target_: hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher
|
18 |
+
partition: ampere
|
19 |
+
qos: null
|
20 |
+
comment: null
|
21 |
+
constraint: null
|
22 |
+
exclude: null
|
23 |
+
gres: gpu:1
|
24 |
+
cpus_per_gpu: null
|
25 |
+
gpus_per_task: null
|
26 |
+
mem_per_gpu: null
|
27 |
+
mem_per_cpu: null
|
28 |
+
account: VLACHOS-SL3-GPU
|
29 |
+
signal_delay_s: 120
|
30 |
+
max_num_timeout: 0
|
31 |
+
additional_parameters: {}
|
32 |
+
array_parallelism: 256
|
33 |
+
setup:
|
34 |
+
- . /etc/profile.d/modules.sh
|
35 |
+
- module list
|
36 |
+
- echo -e 'loading other modules'
|
37 |
+
- module load rhel8/default-amp
|
38 |
+
- module load cuda/12.1
|
39 |
+
- module load cudnn/8.9_cuda-12.1
|
40 |
+
- module list
|
41 |
+
sweeper:
|
42 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
43 |
+
max_batch_size: null
|
44 |
+
params: null
|
45 |
+
help:
|
46 |
+
app_name: ${hydra.job.name}
|
47 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
48 |
+
|
49 |
+
'
|
50 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
51 |
+
|
52 |
+
Use --hydra-help to view Hydra specific help
|
53 |
+
|
54 |
+
'
|
55 |
+
template: '${hydra.help.header}
|
56 |
+
|
57 |
+
== Configuration groups ==
|
58 |
+
|
59 |
+
Compose your configuration from those groups (group=option)
|
60 |
+
|
61 |
+
|
62 |
+
$APP_CONFIG_GROUPS
|
63 |
+
|
64 |
+
|
65 |
+
== Config ==
|
66 |
+
|
67 |
+
Override anything in the config (foo.bar=value)
|
68 |
+
|
69 |
+
|
70 |
+
$CONFIG
|
71 |
+
|
72 |
+
|
73 |
+
${hydra.help.footer}
|
74 |
+
|
75 |
+
'
|
76 |
+
hydra_help:
|
77 |
+
template: 'Hydra (${hydra.runtime.version})
|
78 |
+
|
79 |
+
See https://hydra.cc for more info.
|
80 |
+
|
81 |
+
|
82 |
+
== Flags ==
|
83 |
+
|
84 |
+
$FLAGS_HELP
|
85 |
+
|
86 |
+
|
87 |
+
== Configuration groups ==
|
88 |
+
|
89 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
90 |
+
to command line)
|
91 |
+
|
92 |
+
|
93 |
+
$HYDRA_CONFIG_GROUPS
|
94 |
+
|
95 |
+
|
96 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
97 |
+
|
98 |
+
'
|
99 |
+
hydra_help: ???
|
100 |
+
hydra_logging:
|
101 |
+
version: 1
|
102 |
+
formatters:
|
103 |
+
colorlog:
|
104 |
+
(): colorlog.ColoredFormatter
|
105 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
|
106 |
+
handlers:
|
107 |
+
console:
|
108 |
+
class: logging.StreamHandler
|
109 |
+
formatter: colorlog
|
110 |
+
stream: ext://sys.stdout
|
111 |
+
root:
|
112 |
+
level: INFO
|
113 |
+
handlers:
|
114 |
+
- console
|
115 |
+
disable_existing_loggers: false
|
116 |
+
job_logging:
|
117 |
+
version: 1
|
118 |
+
formatters:
|
119 |
+
simple:
|
120 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
121 |
+
colorlog:
|
122 |
+
(): colorlog.ColoredFormatter
|
123 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
|
124 |
+
- %(message)s'
|
125 |
+
log_colors:
|
126 |
+
DEBUG: purple
|
127 |
+
INFO: green
|
128 |
+
WARNING: yellow
|
129 |
+
ERROR: red
|
130 |
+
CRITICAL: bold_red
|
131 |
+
handlers:
|
132 |
+
console:
|
133 |
+
class: logging.StreamHandler
|
134 |
+
formatter: colorlog
|
135 |
+
stream: ext://sys.stdout
|
136 |
+
file:
|
137 |
+
class: logging.FileHandler
|
138 |
+
formatter: simple
|
139 |
+
filename: ${hydra.job.name}.log
|
140 |
+
root:
|
141 |
+
level: INFO
|
142 |
+
handlers:
|
143 |
+
- console
|
144 |
+
- file
|
145 |
+
disable_existing_loggers: false
|
146 |
+
env: {}
|
147 |
+
mode: MULTIRUN
|
148 |
+
searchpath: []
|
149 |
+
callbacks: {}
|
150 |
+
output_subdir: .hydra
|
151 |
+
overrides:
|
152 |
+
hydra:
|
153 |
+
- hydra.launcher.timeout_min=360
|
154 |
+
- hydra.mode=MULTIRUN
|
155 |
+
task:
|
156 |
+
- experiment_group=additional_randomsubset_1000
|
157 |
+
- dataset=amazon_agri
|
158 |
+
- strategy=randomsubset_entropy
|
159 |
+
- data.seed=654321
|
160 |
+
- model.seed=654321
|
161 |
+
- active_data.seed=123456
|
162 |
+
- model.name=bert-base-uncased
|
163 |
+
- +launcher=slurm
|
164 |
+
- strategy.args.subpool_size=1000
|
165 |
+
job:
|
166 |
+
name: active_train
|
167 |
+
chdir: true
|
168 |
+
override_dirname: +launcher=slurm,active_data.seed=123456,data.seed=654321,dataset=amazon_agri,experiment_group=additional_randomsubset_1000,model.name=bert-base-uncased,model.seed=654321,strategy.args.subpool_size=1000,strategy=randomsubset_entropy
|
169 |
+
id: '49537867_1'
|
170 |
+
num: 1
|
171 |
+
config_name: conf
|
172 |
+
env_set: {}
|
173 |
+
env_copy: []
|
174 |
+
config:
|
175 |
+
override_dirname:
|
176 |
+
kv_sep: '='
|
177 |
+
item_sep: ','
|
178 |
+
exclude_keys: []
|
179 |
+
runtime:
|
180 |
+
version: 1.3.2
|
181 |
+
version_base: '1.3'
|
182 |
+
cwd: /rds/user/pl487/hpc-work/anchoral
|
183 |
+
config_sources:
|
184 |
+
- path: hydra.conf
|
185 |
+
schema: pkg
|
186 |
+
provider: hydra
|
187 |
+
- path: /rds/user/pl487/hpc-work/anchoral/conf
|
188 |
+
schema: file
|
189 |
+
provider: main
|
190 |
+
- path: hydra_plugins.hydra_colorlog.conf
|
191 |
+
schema: pkg
|
192 |
+
provider: hydra-colorlog
|
193 |
+
- path: ''
|
194 |
+
schema: structured
|
195 |
+
provider: schema
|
196 |
+
output_dir: /rds/user/pl487/hpc-work/anchoral/outputs/multirun/additional_randomsubset_1000/amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1
|
197 |
+
choices:
|
198 |
+
launcher: slurm
|
199 |
+
dataset: amazon_agri
|
200 |
+
strategy: randomsubset_entropy
|
201 |
+
hydra/env: default
|
202 |
+
hydra/callbacks: null
|
203 |
+
hydra/job_logging: colorlog
|
204 |
+
hydra/hydra_logging: colorlog
|
205 |
+
hydra/hydra_help: default
|
206 |
+
hydra/help: default
|
207 |
+
hydra/sweeper: basic
|
208 |
+
hydra/launcher: submitit_slurm
|
209 |
+
hydra/output: default
|
210 |
+
verbose: false
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/.hydra/overrides.yaml
ADDED
@@ -0,0 +1,9 @@
- experiment_group=additional_randomsubset_1000
- dataset=amazon_agri
- strategy=randomsubset_entropy
- data.seed=654321
- model.seed=654321
- active_data.seed=123456
- model.name=bert-base-uncased
- +launcher=slurm
- strategy.args.subpool_size=1000
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/active_train.log
ADDED
@@ -0,0 +1,132 @@
1 |
+
[2024-04-05 11:24:12,955][hydra][INFO] -
|
2 |
+
estimator:
|
3 |
+
accelerator: gpu
|
4 |
+
precision: 32
|
5 |
+
deterministic: true
|
6 |
+
tf32_mode: high
|
7 |
+
callbacks:
|
8 |
+
timer:
|
9 |
+
_target_: energizer.active_learning.callbacks.Timer
|
10 |
+
save_outputs:
|
11 |
+
_target_: src.callbacks.SaveOutputs
|
12 |
+
dirpath: ./logs/
|
13 |
+
instance_level: false
|
14 |
+
batch_level: false
|
15 |
+
epoch_level: false
|
16 |
+
early_stopping:
|
17 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
18 |
+
monitor: train/avg_f1_minclass
|
19 |
+
stage: train
|
20 |
+
interval: epoch
|
21 |
+
mode: max
|
22 |
+
min_delta: 1.0e-05
|
23 |
+
patience: 10
|
24 |
+
stopping_threshold: null
|
25 |
+
divergence_threshold: null
|
26 |
+
verbose: true
|
27 |
+
model_checkpoint:
|
28 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
29 |
+
dirpath: .checkpoints
|
30 |
+
monitor: train/avg_f1_minclass
|
31 |
+
stage: train
|
32 |
+
mode: max
|
33 |
+
save_last: false
|
34 |
+
save_top_k: 1
|
35 |
+
verbose: true
|
36 |
+
loggers:
|
37 |
+
tensorboard:
|
38 |
+
_target_: energizer.loggers.TensorBoardLogger
|
39 |
+
root_dir: ./
|
40 |
+
name: tb_logs
|
41 |
+
version: null
|
42 |
+
data:
|
43 |
+
batch_size: 32
|
44 |
+
eval_batch_size: 256
|
45 |
+
num_workers: 32
|
46 |
+
pin_memory: true
|
47 |
+
drop_last: false
|
48 |
+
persistent_workers: true
|
49 |
+
shuffle: true
|
50 |
+
seed: 654321
|
51 |
+
replacement: false
|
52 |
+
max_length: 512
|
53 |
+
active_data:
|
54 |
+
budget: 100
|
55 |
+
positive_budget: 5
|
56 |
+
seed: 123456
|
57 |
+
fit:
|
58 |
+
min_steps: 100
|
59 |
+
max_epochs: 10
|
60 |
+
learning_rate: 4.0e-05
|
61 |
+
optimizer: adamw
|
62 |
+
log_interval: 1
|
63 |
+
enable_progress_bar: false
|
64 |
+
limit_train_batches: null
|
65 |
+
limit_validation_batches: null
|
66 |
+
active_fit:
|
67 |
+
max_budget: 5000
|
68 |
+
query_size: 25
|
69 |
+
reinit_model: true
|
70 |
+
limit_pool_batches: null
|
71 |
+
limit_test_batches: null
|
72 |
+
test:
|
73 |
+
log_interval: 1
|
74 |
+
enable_progress_bar: false
|
75 |
+
limit_batches: null
|
76 |
+
strategy:
|
77 |
+
name: randomsubset_entropy
|
78 |
+
args:
|
79 |
+
seed: 42
|
80 |
+
subpool_size: 1000
|
81 |
+
model:
|
82 |
+
name: bert-base-uncased
|
83 |
+
seed: 654321
|
84 |
+
dataset:
|
85 |
+
name: amazon-agri
|
86 |
+
text_column: text
|
87 |
+
label_column: labels
|
88 |
+
uid_column: uid
|
89 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
90 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
91 |
+
minority_classes:
|
92 |
+
- 1
|
93 |
+
index_metric: all-mpnet-base-v2_cosine
|
94 |
+
log_interval: 1
|
95 |
+
enable_progress_bar: false
|
96 |
+
limit_batches: null
|
97 |
+
seed: 42
|
98 |
+
experiment_group: additional_randomsubset_1000
|
99 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
100 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
101 |
+
|
102 |
+
======================================================================
|
103 |
+
[2024-04-05 11:24:12,962][hydra][INFO] - Running active learning with strategy {'name': 'randomsubset_entropy', 'args': {'seed': 42, 'subpool_size': 1000}}
|
104 |
+
[2024-04-05 11:24:12,989][hydra][INFO] - Seed enabled: 42
|
105 |
+
[2024-04-05 11:24:40,874][hydra][INFO] - Labelled size: 100 Pool size: 1186139 Test size: 5285
|
106 |
+
Label distribution:
|
107 |
+
| | labels | count | perc |
|
108 |
+
|---:|:---------|--------:|-------:|
|
109 |
+
| 0 | Negative | 95 | 0.95 |
|
110 |
+
| 1 | Positive | 5 | 0.05 |
|
111 |
+
[2024-04-05 11:24:40,990][hydra][INFO] - Batch:
|
112 |
+
{<InputKeys.INPUT_IDS: 'input_ids'>: tensor([[ 101, 3521, 13903, 102]]), <InputKeys.ATT_MASK: 'attention_mask'>: tensor([[1, 1, 1, 1]]), <InputKeys.LABELS: 'labels'>: tensor([0]), <InputKeys.ON_CPU: 'on_cpu'>: {<SpecialKeys.ID: 'uid'>: [1462254]}}
|
113 |
+
[2024-04-05 11:24:47,413][hydra][INFO] - Loggers: {'tensorboard': <energizer.loggers.tensorboard.TensorBoardLogger object at 0x148f9578f310>}
|
114 |
+
[2024-04-05 11:24:47,413][hydra][INFO] - Callbacks: {'timer': <energizer.active_learning.callbacks.Timer object at 0x148b60e612e0>, 'save_outputs': <src.callbacks.SaveOutputs object at 0x148b68365340>, 'early_stopping': <energizer.callbacks.early_stopping.EarlyStopping object at 0x148b68365310>, 'model_checkpoint': <energizer.callbacks.model_checkpoint.ModelCheckpoint object at 0x148b683652e0>}
|
115 |
+
[2024-04-05 11:24:47,481][hydra][INFO] -
|
116 |
+
| Name | Type | Params
|
117 |
+
-----------------------------------------
|
118 |
+
0 | bert | BertModel | 109 M
|
119 |
+
1 | dropout | Dropout | 0
|
120 |
+
2 | classifier | Linear | 1.5 K
|
121 |
+
-----------------------------------------
|
122 |
+
109 M Trainable params
|
123 |
+
0 Non-trainable params
|
124 |
+
109 M Total params
|
125 |
+
437.935 Total estimated model params size (MB)
|
126 |
+
0.00 GB CUDA Memory used
|
127 |
+
[2024-04-05 17:21:24,875][submitit][INFO] - Job has timed out. Ran 357 minutes out of requested 360 minutes.
|
128 |
+
[2024-04-05 17:21:24,906][submitit][WARNING] - Caught signal SIGUSR2 on gpu-q-14: this job is timed-out.
|
129 |
+
[2024-04-05 17:21:24,920][submitit][INFO] - Calling checkpoint method.
|
130 |
+
[2024-04-05 17:21:24,943][submitit][INFO] - Job not requeued because: timed-out too many times.
|
131 |
+
[2024-04-05 17:21:24,943][submitit][WARNING] - Bypassing signal SIGCONT
|
132 |
+
[2024-04-05 17:21:24,947][submitit][INFO] - Job completed successfully
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/hparams.yaml
ADDED
@@ -0,0 +1,99 @@
1 |
+
estimator:
|
2 |
+
accelerator: gpu
|
3 |
+
precision: 32
|
4 |
+
deterministic: true
|
5 |
+
tf32_mode: high
|
6 |
+
callbacks:
|
7 |
+
timer:
|
8 |
+
_target_: energizer.active_learning.callbacks.Timer
|
9 |
+
save_outputs:
|
10 |
+
_target_: src.callbacks.SaveOutputs
|
11 |
+
dirpath: ./logs/
|
12 |
+
instance_level: false
|
13 |
+
batch_level: false
|
14 |
+
epoch_level: false
|
15 |
+
early_stopping:
|
16 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
17 |
+
monitor: train/avg_f1_minclass
|
18 |
+
stage: train
|
19 |
+
interval: epoch
|
20 |
+
mode: max
|
21 |
+
min_delta: 1.0e-05
|
22 |
+
patience: 10
|
23 |
+
stopping_threshold: null
|
24 |
+
divergence_threshold: null
|
25 |
+
verbose: true
|
26 |
+
model_checkpoint:
|
27 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
28 |
+
dirpath: .checkpoints
|
29 |
+
monitor: train/avg_f1_minclass
|
30 |
+
stage: train
|
31 |
+
mode: max
|
32 |
+
save_last: false
|
33 |
+
save_top_k: 1
|
34 |
+
verbose: true
|
35 |
+
loggers:
|
36 |
+
tensorboard:
|
37 |
+
_target_: energizer.loggers.TensorBoardLogger
|
38 |
+
root_dir: ./
|
39 |
+
name: tb_logs
|
40 |
+
version: null
|
41 |
+
data:
|
42 |
+
batch_size: 32
|
43 |
+
eval_batch_size: 256
|
44 |
+
num_workers: 32
|
45 |
+
pin_memory: true
|
46 |
+
drop_last: false
|
47 |
+
persistent_workers: true
|
48 |
+
shuffle: true
|
49 |
+
seed: 654321
|
50 |
+
replacement: false
|
51 |
+
max_length: 512
|
52 |
+
active_data:
|
53 |
+
budget: 100
|
54 |
+
positive_budget: 5
|
55 |
+
seed: 123456
|
56 |
+
fit:
|
57 |
+
min_steps: 100
|
58 |
+
max_epochs: 10
|
59 |
+
learning_rate: 4.0e-05
|
60 |
+
optimizer: adamw
|
61 |
+
log_interval: 1
|
62 |
+
enable_progress_bar: false
|
63 |
+
limit_train_batches: null
|
64 |
+
limit_validation_batches: null
|
65 |
+
active_fit:
|
66 |
+
max_budget: 5000
|
67 |
+
query_size: 25
|
68 |
+
reinit_model: true
|
69 |
+
limit_pool_batches: null
|
70 |
+
limit_test_batches: null
|
71 |
+
test:
|
72 |
+
log_interval: 1
|
73 |
+
enable_progress_bar: false
|
74 |
+
limit_batches: null
|
75 |
+
strategy:
|
76 |
+
name: randomsubset_entropy
|
77 |
+
args:
|
78 |
+
seed: 42
|
79 |
+
subpool_size: 1000
|
80 |
+
model:
|
81 |
+
name: bert-base-uncased
|
82 |
+
seed: 654321
|
83 |
+
dataset:
|
84 |
+
name: amazon-agri
|
85 |
+
text_column: text
|
86 |
+
label_column: labels
|
87 |
+
uid_column: uid
|
88 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
89 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
90 |
+
minority_classes:
|
91 |
+
- 1
|
92 |
+
index_metric: all-mpnet-base-v2_cosine
|
93 |
+
log_interval: 1
|
94 |
+
enable_progress_bar: false
|
95 |
+
limit_batches: null
|
96 |
+
seed: 42
|
97 |
+
experiment_group: additional_randomsubset_1000
|
98 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
99 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/logs/labelled_dataset.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:def4df8ca65dde069bb7a06988a3ff5a4e945a4d2cce8d1ae7e1b52db8ff4d19
size 8998912
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/logs/subpool_ids.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/tb_logs/version_0/events.out.tfevents.1712312687.gpu-q-14.3012723.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bef835c44464799231b9afe159f70720bee016dd9c974babd54d7b3e23d5bfc7
size 7709249
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_1/tensorboard_logs.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4330fbcf02b8ba34a3ee6a330d347dab379165856a2f22a7b9c0fdf382d2e914
size 1332545
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/.early_stopping.jsonl
ADDED
@@ -0,0 +1,4 @@
{"best_score":1.0,"best_step":10,"stage":"train","interval":"epoch","stopping_step":20,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
{"best_score":1.0,"best_step":33,"stage":"train","interval":"epoch","stopping_step":43,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
{"best_score":0.0,"best_step":44,"stage":"train","interval":"epoch","stopping_step":54,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
{"best_score":0.0,"best_step":87,"stage":"train","interval":"epoch","stopping_step":97,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/.hydra/config.yaml
ADDED
@@ -0,0 +1,99 @@
1 |
+
estimator:
|
2 |
+
accelerator: gpu
|
3 |
+
precision: 32
|
4 |
+
deterministic: true
|
5 |
+
tf32_mode: high
|
6 |
+
callbacks:
|
7 |
+
timer:
|
8 |
+
_target_: energizer.active_learning.callbacks.Timer
|
9 |
+
save_outputs:
|
10 |
+
_target_: src.callbacks.SaveOutputs
|
11 |
+
dirpath: ./logs/
|
12 |
+
instance_level: false
|
13 |
+
batch_level: false
|
14 |
+
epoch_level: false
|
15 |
+
early_stopping:
|
16 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
17 |
+
monitor: train/avg_f1_minclass
|
18 |
+
stage: train
|
19 |
+
interval: epoch
|
20 |
+
mode: max
|
21 |
+
min_delta: 1.0e-05
|
22 |
+
patience: 10
|
23 |
+
stopping_threshold: null
|
24 |
+
divergence_threshold: null
|
25 |
+
verbose: true
|
26 |
+
model_checkpoint:
|
27 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
28 |
+
dirpath: .checkpoints
|
29 |
+
monitor: train/avg_f1_minclass
|
30 |
+
stage: train
|
31 |
+
mode: max
|
32 |
+
save_last: false
|
33 |
+
save_top_k: 1
|
34 |
+
verbose: true
|
35 |
+
loggers:
|
36 |
+
tensorboard:
|
37 |
+
_target_: energizer.loggers.TensorBoardLogger
|
38 |
+
root_dir: ./
|
39 |
+
name: tb_logs
|
40 |
+
version: null
|
41 |
+
data:
|
42 |
+
batch_size: 32
|
43 |
+
eval_batch_size: 256
|
44 |
+
num_workers: 32
|
45 |
+
pin_memory: true
|
46 |
+
drop_last: false
|
47 |
+
persistent_workers: true
|
48 |
+
shuffle: true
|
49 |
+
seed: 654321
|
50 |
+
replacement: false
|
51 |
+
max_length: 512
|
52 |
+
active_data:
|
53 |
+
budget: 100
|
54 |
+
positive_budget: 5
|
55 |
+
seed: 654321
|
56 |
+
fit:
|
57 |
+
min_steps: 100
|
58 |
+
max_epochs: 10
|
59 |
+
learning_rate: 4.0e-05
|
60 |
+
optimizer: adamw
|
61 |
+
log_interval: ${log_interval}
|
62 |
+
enable_progress_bar: ${enable_progress_bar}
|
63 |
+
limit_train_batches: ${limit_batches}
|
64 |
+
limit_validation_batches: ${limit_batches}
|
65 |
+
active_fit:
|
66 |
+
max_budget: 5000
|
67 |
+
query_size: 25
|
68 |
+
reinit_model: true
|
69 |
+
limit_pool_batches: ${limit_batches}
|
70 |
+
limit_test_batches: ${limit_batches}
|
71 |
+
test:
|
72 |
+
log_interval: ${log_interval}
|
73 |
+
enable_progress_bar: ${enable_progress_bar}
|
74 |
+
limit_batches: ${limit_batches}
|
75 |
+
strategy:
|
76 |
+
name: randomsubset_entropy
|
77 |
+
args:
|
78 |
+
seed: 42
|
79 |
+
subpool_size: 1000
|
80 |
+
model:
|
81 |
+
name: bert-base-uncased
|
82 |
+
seed: 123456
|
83 |
+
dataset:
|
84 |
+
name: amazon-agri
|
85 |
+
text_column: text
|
86 |
+
label_column: labels
|
87 |
+
uid_column: uid
|
88 |
+
prepared_path: ${data_path}/prepared/amazoncat-agri
|
89 |
+
processed_path: ${data_path}/processed/amazoncat-13k
|
90 |
+
minority_classes:
|
91 |
+
- 1
|
92 |
+
index_metric: all-mpnet-base-v2_cosine
|
93 |
+
log_interval: 1
|
94 |
+
enable_progress_bar: false
|
95 |
+
limit_batches: null
|
96 |
+
seed: 42
|
97 |
+
experiment_group: additional_randomsubset_1000
|
98 |
+
run_name: ${dataset.name}/${model.name}_${strategy.name}_${now:%Y-%m-%d}T${now:%H-%M-%S}
|
99 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,210 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: ./outputs/${experiment_group}/${run_name}
|
4 |
+
sweep:
|
5 |
+
dir: ./outputs/multirun/${experiment_group}
|
6 |
+
subdir: ${run_name}_${hydra.job.id}
|
7 |
+
launcher:
|
8 |
+
submitit_folder: ${hydra.sweep.dir}/.submitit/%j
|
9 |
+
timeout_min: 360
|
10 |
+
cpus_per_task: null
|
11 |
+
gpus_per_node: null
|
12 |
+
tasks_per_node: 1
|
13 |
+
mem_gb: null
|
14 |
+
nodes: 1
|
15 |
+
name: ${experiment_group}
|
16 |
+
stderr_to_stdout: false
|
17 |
+
_target_: hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher
|
18 |
+
partition: ampere
|
19 |
+
qos: null
|
20 |
+
comment: null
|
21 |
+
constraint: null
|
22 |
+
exclude: null
|
23 |
+
gres: gpu:1
|
24 |
+
cpus_per_gpu: null
|
25 |
+
gpus_per_task: null
|
26 |
+
mem_per_gpu: null
|
27 |
+
mem_per_cpu: null
|
28 |
+
account: VLACHOS-SL3-GPU
|
29 |
+
signal_delay_s: 120
|
30 |
+
max_num_timeout: 0
|
31 |
+
additional_parameters: {}
|
32 |
+
array_parallelism: 256
|
33 |
+
setup:
|
34 |
+
- . /etc/profile.d/modules.sh
|
35 |
+
- module list
|
36 |
+
- echo -e 'loading other modules'
|
37 |
+
- module load rhel8/default-amp
|
38 |
+
- module load cuda/12.1
|
39 |
+
- module load cudnn/8.9_cuda-12.1
|
40 |
+
- module list
|
41 |
+
sweeper:
|
42 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
43 |
+
max_batch_size: null
|
44 |
+
params: null
|
45 |
+
help:
|
46 |
+
app_name: ${hydra.job.name}
|
47 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
48 |
+
|
49 |
+
'
|
50 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
51 |
+
|
52 |
+
Use --hydra-help to view Hydra specific help
|
53 |
+
|
54 |
+
'
|
55 |
+
template: '${hydra.help.header}
|
56 |
+
|
57 |
+
== Configuration groups ==
|
58 |
+
|
59 |
+
Compose your configuration from those groups (group=option)
|
60 |
+
|
61 |
+
|
62 |
+
$APP_CONFIG_GROUPS
|
63 |
+
|
64 |
+
|
65 |
+
== Config ==
|
66 |
+
|
67 |
+
Override anything in the config (foo.bar=value)
|
68 |
+
|
69 |
+
|
70 |
+
$CONFIG
|
71 |
+
|
72 |
+
|
73 |
+
${hydra.help.footer}
|
74 |
+
|
75 |
+
'
|
76 |
+
hydra_help:
|
77 |
+
template: 'Hydra (${hydra.runtime.version})
|
78 |
+
|
79 |
+
See https://hydra.cc for more info.
|
80 |
+
|
81 |
+
|
82 |
+
== Flags ==
|
83 |
+
|
84 |
+
$FLAGS_HELP
|
85 |
+
|
86 |
+
|
87 |
+
== Configuration groups ==
|
88 |
+
|
89 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
90 |
+
to command line)
|
91 |
+
|
92 |
+
|
93 |
+
$HYDRA_CONFIG_GROUPS
|
94 |
+
|
95 |
+
|
96 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
97 |
+
|
98 |
+
'
|
99 |
+
hydra_help: ???
|
100 |
+
hydra_logging:
|
101 |
+
version: 1
|
102 |
+
formatters:
|
103 |
+
colorlog:
|
104 |
+
(): colorlog.ColoredFormatter
|
105 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
|
106 |
+
handlers:
|
107 |
+
console:
|
108 |
+
class: logging.StreamHandler
|
109 |
+
formatter: colorlog
|
110 |
+
stream: ext://sys.stdout
|
111 |
+
root:
|
112 |
+
level: INFO
|
113 |
+
handlers:
|
114 |
+
- console
|
115 |
+
disable_existing_loggers: false
|
116 |
+
job_logging:
|
117 |
+
version: 1
|
118 |
+
formatters:
|
119 |
+
simple:
|
120 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
121 |
+
colorlog:
|
122 |
+
(): colorlog.ColoredFormatter
|
123 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
|
124 |
+
- %(message)s'
|
125 |
+
log_colors:
|
126 |
+
DEBUG: purple
|
127 |
+
INFO: green
|
128 |
+
WARNING: yellow
|
129 |
+
ERROR: red
|
130 |
+
CRITICAL: bold_red
|
131 |
+
handlers:
|
132 |
+
console:
|
133 |
+
class: logging.StreamHandler
|
134 |
+
formatter: colorlog
|
135 |
+
stream: ext://sys.stdout
|
136 |
+
file:
|
137 |
+
class: logging.FileHandler
|
138 |
+
formatter: simple
|
139 |
+
filename: ${hydra.job.name}.log
|
140 |
+
root:
|
141 |
+
level: INFO
|
142 |
+
handlers:
|
143 |
+
- console
|
144 |
+
- file
|
145 |
+
disable_existing_loggers: false
|
146 |
+
env: {}
|
147 |
+
mode: MULTIRUN
|
148 |
+
searchpath: []
|
149 |
+
callbacks: {}
|
150 |
+
output_subdir: .hydra
|
151 |
+
overrides:
|
152 |
+
hydra:
|
153 |
+
- hydra.launcher.timeout_min=360
|
154 |
+
- hydra.mode=MULTIRUN
|
155 |
+
task:
|
156 |
+
- experiment_group=additional_randomsubset_1000
|
157 |
+
- dataset=amazon_agri
|
158 |
+
- strategy=randomsubset_entropy
|
159 |
+
- data.seed=654321
|
160 |
+
- model.seed=123456
|
161 |
+
- active_data.seed=654321
|
162 |
+
- model.name=bert-base-uncased
|
163 |
+
- +launcher=slurm
|
164 |
+
- strategy.args.subpool_size=1000
|
165 |
+
job:
|
166 |
+
name: active_train
|
167 |
+
chdir: true
|
168 |
+
override_dirname: +launcher=slurm,active_data.seed=654321,data.seed=654321,dataset=amazon_agri,experiment_group=additional_randomsubset_1000,model.name=bert-base-uncased,model.seed=123456,strategy.args.subpool_size=1000,strategy=randomsubset_entropy
|
169 |
+
id: '49537867_2'
|
170 |
+
num: 2
|
171 |
+
config_name: conf
|
172 |
+
env_set: {}
|
173 |
+
env_copy: []
|
174 |
+
config:
|
175 |
+
override_dirname:
|
176 |
+
kv_sep: '='
|
177 |
+
item_sep: ','
|
178 |
+
exclude_keys: []
|
179 |
+
runtime:
|
180 |
+
version: 1.3.2
|
181 |
+
version_base: '1.3'
|
182 |
+
cwd: /rds/user/pl487/hpc-work/anchoral
|
183 |
+
config_sources:
|
184 |
+
- path: hydra.conf
|
185 |
+
schema: pkg
|
186 |
+
provider: hydra
|
187 |
+
- path: /rds/user/pl487/hpc-work/anchoral/conf
|
188 |
+
schema: file
|
189 |
+
provider: main
|
190 |
+
- path: hydra_plugins.hydra_colorlog.conf
|
191 |
+
schema: pkg
|
192 |
+
provider: hydra-colorlog
|
193 |
+
- path: ''
|
194 |
+
schema: structured
|
195 |
+
provider: schema
|
196 |
+
output_dir: /rds/user/pl487/hpc-work/anchoral/outputs/multirun/additional_randomsubset_1000/amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2
|
197 |
+
choices:
|
198 |
+
launcher: slurm
|
199 |
+
dataset: amazon_agri
|
200 |
+
strategy: randomsubset_entropy
|
201 |
+
hydra/env: default
|
202 |
+
hydra/callbacks: null
|
203 |
+
hydra/job_logging: colorlog
|
204 |
+
hydra/hydra_logging: colorlog
|
205 |
+
hydra/hydra_help: default
|
206 |
+
hydra/help: default
|
207 |
+
hydra/sweeper: basic
|
208 |
+
hydra/launcher: submitit_slurm
|
209 |
+
hydra/output: default
|
210 |
+
verbose: false
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/.hydra/overrides.yaml
ADDED
@@ -0,0 +1,9 @@
- experiment_group=additional_randomsubset_1000
- dataset=amazon_agri
- strategy=randomsubset_entropy
- data.seed=654321
- model.seed=123456
- active_data.seed=654321
- model.name=bert-base-uncased
- +launcher=slurm
- strategy.args.subpool_size=1000
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/active_train.log
ADDED
@@ -0,0 +1,132 @@
1 |
+
[2024-04-05 11:24:12,956][hydra][INFO] -
|
2 |
+
estimator:
|
3 |
+
accelerator: gpu
|
4 |
+
precision: 32
|
5 |
+
deterministic: true
|
6 |
+
tf32_mode: high
|
7 |
+
callbacks:
|
8 |
+
timer:
|
9 |
+
_target_: energizer.active_learning.callbacks.Timer
|
10 |
+
save_outputs:
|
11 |
+
_target_: src.callbacks.SaveOutputs
|
12 |
+
dirpath: ./logs/
|
13 |
+
instance_level: false
|
14 |
+
batch_level: false
|
15 |
+
epoch_level: false
|
16 |
+
early_stopping:
|
17 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
18 |
+
monitor: train/avg_f1_minclass
|
19 |
+
stage: train
|
20 |
+
interval: epoch
|
21 |
+
mode: max
|
22 |
+
min_delta: 1.0e-05
|
23 |
+
patience: 10
|
24 |
+
stopping_threshold: null
|
25 |
+
divergence_threshold: null
|
26 |
+
verbose: true
|
27 |
+
model_checkpoint:
|
28 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
29 |
+
dirpath: .checkpoints
|
30 |
+
monitor: train/avg_f1_minclass
|
31 |
+
stage: train
|
32 |
+
mode: max
|
33 |
+
save_last: false
|
34 |
+
save_top_k: 1
|
35 |
+
verbose: true
|
36 |
+
loggers:
|
37 |
+
tensorboard:
|
38 |
+
_target_: energizer.loggers.TensorBoardLogger
|
39 |
+
root_dir: ./
|
40 |
+
name: tb_logs
|
41 |
+
version: null
|
42 |
+
data:
|
43 |
+
batch_size: 32
|
44 |
+
eval_batch_size: 256
|
45 |
+
num_workers: 32
|
46 |
+
pin_memory: true
|
47 |
+
drop_last: false
|
48 |
+
persistent_workers: true
|
49 |
+
shuffle: true
|
50 |
+
seed: 654321
|
51 |
+
replacement: false
|
52 |
+
max_length: 512
|
53 |
+
active_data:
|
54 |
+
budget: 100
|
55 |
+
positive_budget: 5
|
56 |
+
seed: 654321
|
57 |
+
fit:
|
58 |
+
min_steps: 100
|
59 |
+
max_epochs: 10
|
60 |
+
learning_rate: 4.0e-05
|
61 |
+
optimizer: adamw
|
62 |
+
log_interval: 1
|
63 |
+
enable_progress_bar: false
|
64 |
+
limit_train_batches: null
|
65 |
+
limit_validation_batches: null
|
66 |
+
active_fit:
|
67 |
+
max_budget: 5000
|
68 |
+
query_size: 25
|
69 |
+
reinit_model: true
|
70 |
+
limit_pool_batches: null
|
71 |
+
limit_test_batches: null
|
72 |
+
test:
|
73 |
+
log_interval: 1
|
74 |
+
enable_progress_bar: false
|
75 |
+
limit_batches: null
|
76 |
+
strategy:
|
77 |
+
name: randomsubset_entropy
|
78 |
+
args:
|
79 |
+
seed: 42
|
80 |
+
subpool_size: 1000
|
81 |
+
model:
|
82 |
+
name: bert-base-uncased
|
83 |
+
seed: 123456
|
84 |
+
dataset:
|
85 |
+
name: amazon-agri
|
86 |
+
text_column: text
|
87 |
+
label_column: labels
|
88 |
+
uid_column: uid
|
89 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
90 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
91 |
+
minority_classes:
|
92 |
+
- 1
|
93 |
+
index_metric: all-mpnet-base-v2_cosine
|
94 |
+
log_interval: 1
|
95 |
+
enable_progress_bar: false
|
96 |
+
limit_batches: null
|
97 |
+
seed: 42
|
98 |
+
experiment_group: additional_randomsubset_1000
|
99 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
100 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
101 |
+
|
102 |
+
======================================================================
|
103 |
+
[2024-04-05 11:24:12,963][hydra][INFO] - Running active learning with strategy {'name': 'randomsubset_entropy', 'args': {'seed': 42, 'subpool_size': 1000}}
|
104 |
+
[2024-04-05 11:24:12,989][hydra][INFO] - Seed enabled: 42
|
105 |
+
[2024-04-05 11:24:40,874][hydra][INFO] - Labelled size: 100 Pool size: 1186139 Test size: 5285
|
106 |
+
Label distribution:
|
107 |
+
| | labels | count | perc |
|
108 |
+
|---:|:---------|--------:|-------:|
|
109 |
+
| 0 | Negative | 95 | 0.95 |
|
110 |
+
| 1 | Positive | 5 | 0.05 |
|
111 |
+
[2024-04-05 11:24:40,990][hydra][INFO] - Batch:
|
112 |
+
{<InputKeys.INPUT_IDS: 'input_ids'>: tensor([[ 101, 3521, 13903, 102]]), <InputKeys.ATT_MASK: 'attention_mask'>: tensor([[1, 1, 1, 1]]), <InputKeys.LABELS: 'labels'>: tensor([0]), <InputKeys.ON_CPU: 'on_cpu'>: {<SpecialKeys.ID: 'uid'>: [1462254]}}
|
113 |
+
[2024-04-05 11:24:47,413][hydra][INFO] - Loggers: {'tensorboard': <energizer.loggers.tensorboard.TensorBoardLogger object at 0x152349aec340>}
|
114 |
+
[2024-04-05 11:24:47,413][hydra][INFO] - Callbacks: {'timer': <energizer.active_learning.callbacks.Timer object at 0x151f1513af70>, 'save_outputs': <src.callbacks.SaveOutputs object at 0x151f1513a940>, 'early_stopping': <energizer.callbacks.early_stopping.EarlyStopping object at 0x151f15156580>, 'model_checkpoint': <energizer.callbacks.model_checkpoint.ModelCheckpoint object at 0x151f15156040>}
|
115 |
+
[2024-04-05 11:24:47,481][hydra][INFO] -
|
116 |
+
| Name | Type | Params
|
117 |
+
-----------------------------------------
|
118 |
+
0 | bert | BertModel | 109 M
|
119 |
+
1 | dropout | Dropout | 0
|
120 |
+
2 | classifier | Linear | 1.5 K
|
121 |
+
-----------------------------------------
|
122 |
+
109 M Trainable params
|
123 |
+
0 Non-trainable params
|
124 |
+
109 M Total params
|
125 |
+
437.935 Total estimated model params size (MB)
|
126 |
+
0.00 GB CUDA Memory used
|
127 |
+
[2024-04-05 17:21:24,817][submitit][INFO] - Job has timed out. Ran 357 minutes out of requested 360 minutes.
|
128 |
+
[2024-04-05 17:21:24,845][submitit][WARNING] - Caught signal SIGUSR2 on gpu-q-14: this job is timed-out.
|
129 |
+
[2024-04-05 17:21:24,866][submitit][INFO] - Calling checkpoint method.
|
130 |
+
[2024-04-05 17:21:24,890][submitit][INFO] - Job not requeued because: timed-out too many times.
|
131 |
+
[2024-04-05 17:21:24,890][submitit][WARNING] - Bypassing signal SIGCONT
|
132 |
+
[2024-04-05 17:21:24,894][submitit][INFO] - Job completed successfully
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/hparams.yaml
ADDED
@@ -0,0 +1,99 @@
1 |
+
estimator:
|
2 |
+
accelerator: gpu
|
3 |
+
precision: 32
|
4 |
+
deterministic: true
|
5 |
+
tf32_mode: high
|
6 |
+
callbacks:
|
7 |
+
timer:
|
8 |
+
_target_: energizer.active_learning.callbacks.Timer
|
9 |
+
save_outputs:
|
10 |
+
_target_: src.callbacks.SaveOutputs
|
11 |
+
dirpath: ./logs/
|
12 |
+
instance_level: false
|
13 |
+
batch_level: false
|
14 |
+
epoch_level: false
|
15 |
+
early_stopping:
|
16 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
17 |
+
monitor: train/avg_f1_minclass
|
18 |
+
stage: train
|
19 |
+
interval: epoch
|
20 |
+
mode: max
|
21 |
+
min_delta: 1.0e-05
|
22 |
+
patience: 10
|
23 |
+
stopping_threshold: null
|
24 |
+
divergence_threshold: null
|
25 |
+
verbose: true
|
26 |
+
model_checkpoint:
|
27 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
28 |
+
dirpath: .checkpoints
|
29 |
+
monitor: train/avg_f1_minclass
|
30 |
+
stage: train
|
31 |
+
mode: max
|
32 |
+
save_last: false
|
33 |
+
save_top_k: 1
|
34 |
+
verbose: true
|
35 |
+
loggers:
|
36 |
+
tensorboard:
|
37 |
+
_target_: energizer.loggers.TensorBoardLogger
|
38 |
+
root_dir: ./
|
39 |
+
name: tb_logs
|
40 |
+
version: null
|
41 |
+
data:
|
42 |
+
batch_size: 32
|
43 |
+
eval_batch_size: 256
|
44 |
+
num_workers: 32
|
45 |
+
pin_memory: true
|
46 |
+
drop_last: false
|
47 |
+
persistent_workers: true
|
48 |
+
shuffle: true
|
49 |
+
seed: 654321
|
50 |
+
replacement: false
|
51 |
+
max_length: 512
|
52 |
+
active_data:
|
53 |
+
budget: 100
|
54 |
+
positive_budget: 5
|
55 |
+
seed: 654321
|
56 |
+
fit:
|
57 |
+
min_steps: 100
|
58 |
+
max_epochs: 10
|
59 |
+
learning_rate: 4.0e-05
|
60 |
+
optimizer: adamw
|
61 |
+
log_interval: 1
|
62 |
+
enable_progress_bar: false
|
63 |
+
limit_train_batches: null
|
64 |
+
limit_validation_batches: null
|
65 |
+
active_fit:
|
66 |
+
max_budget: 5000
|
67 |
+
query_size: 25
|
68 |
+
reinit_model: true
|
69 |
+
limit_pool_batches: null
|
70 |
+
limit_test_batches: null
|
71 |
+
test:
|
72 |
+
log_interval: 1
|
73 |
+
enable_progress_bar: false
|
74 |
+
limit_batches: null
|
75 |
+
strategy:
|
76 |
+
name: randomsubset_entropy
|
77 |
+
args:
|
78 |
+
seed: 42
|
79 |
+
subpool_size: 1000
|
80 |
+
model:
|
81 |
+
name: bert-base-uncased
|
82 |
+
seed: 123456
|
83 |
+
dataset:
|
84 |
+
name: amazon-agri
|
85 |
+
text_column: text
|
86 |
+
label_column: labels
|
87 |
+
uid_column: uid
|
88 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
89 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
90 |
+
minority_classes:
|
91 |
+
- 1
|
92 |
+
index_metric: all-mpnet-base-v2_cosine
|
93 |
+
log_interval: 1
|
94 |
+
enable_progress_bar: false
|
95 |
+
limit_batches: null
|
96 |
+
seed: 42
|
97 |
+
experiment_group: additional_randomsubset_1000
|
98 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
99 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/logs/labelled_dataset.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e2b994b3dcaca8205dbd158ffaee83491928e5015a94102a062933ab4749ccb
+size 11253046
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/logs/subpool_ids.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/tb_logs/version_0/events.out.tfevents.1712312687.gpu-q-14.3012722.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2bdad78638224bdadf483c3251865ef9af3c5a74d7a182782b1d4d372bdbae1
+size 7725057
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/tensorboard_logs.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a6bf3791e0b2bd9231f1596852765c3d2958b597df27a6a45aa06ba4626ab7c
+size 1308044
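Note: the parquet and tfevents artifacts above are stored as Git LFS pointers, so only the oid and size appear in this diff. Below is a minimal sketch of fetching and inspecting one run's aggregated TensorBoard logs, assuming the files live in a Hugging Face dataset repo and can be read with pandas; the repo id is a hypothetical placeholder, not taken from this diff.

# Minimal sketch (assumptions: hypothetical repo id; pandas and pyarrow available).
from huggingface_hub import hf_hub_download
import pandas as pd

path = hf_hub_download(
    repo_id="pietrolesci/anchoral-outputs",  # hypothetical placeholder, not from this diff
    repo_type="dataset",
    filename="outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_2/tensorboard_logs.parquet",
)
df = pd.read_parquet(path)  # inspect whatever columns the export contains
print(df.shape)
print(df.head())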
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/.early_stopping.jsonl
ADDED
@@ -0,0 +1,3 @@
+{"best_score":0.5,"best_step":8,"stage":"train","interval":"epoch","stopping_step":18,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
+{"best_score":1.0,"best_step":27,"stage":"train","interval":"epoch","stopping_step":37,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
+{"best_score":0.0,"best_step":38,"stage":"train","interval":"epoch","stopping_step":48,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
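Each .early_stopping.jsonl holds one newline-delimited JSON record per fit in which early stopping fired, with the best score, the step it was reached at, and the stopping reason. Below is a minimal sketch of loading these records for analysis, assuming only the schema visible in the lines above.

# Minimal sketch: read a run's early-stopping records into a DataFrame.
# Assumes only the JSONL fields shown above (best_score, best_step, stopping_step, reason, ...).
import pandas as pd

records = pd.read_json(".early_stopping.jsonl", lines=True)
print(records[["best_score", "best_step", "stopping_step", "reason"]])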
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/.hydra/config.yaml
ADDED
@@ -0,0 +1,99 @@
1 |
+
estimator:
|
2 |
+
accelerator: gpu
|
3 |
+
precision: 32
|
4 |
+
deterministic: true
|
5 |
+
tf32_mode: high
|
6 |
+
callbacks:
|
7 |
+
timer:
|
8 |
+
_target_: energizer.active_learning.callbacks.Timer
|
9 |
+
save_outputs:
|
10 |
+
_target_: src.callbacks.SaveOutputs
|
11 |
+
dirpath: ./logs/
|
12 |
+
instance_level: false
|
13 |
+
batch_level: false
|
14 |
+
epoch_level: false
|
15 |
+
early_stopping:
|
16 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
17 |
+
monitor: train/avg_f1_minclass
|
18 |
+
stage: train
|
19 |
+
interval: epoch
|
20 |
+
mode: max
|
21 |
+
min_delta: 1.0e-05
|
22 |
+
patience: 10
|
23 |
+
stopping_threshold: null
|
24 |
+
divergence_threshold: null
|
25 |
+
verbose: true
|
26 |
+
model_checkpoint:
|
27 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
28 |
+
dirpath: .checkpoints
|
29 |
+
monitor: train/avg_f1_minclass
|
30 |
+
stage: train
|
31 |
+
mode: max
|
32 |
+
save_last: false
|
33 |
+
save_top_k: 1
|
34 |
+
verbose: true
|
35 |
+
loggers:
|
36 |
+
tensorboard:
|
37 |
+
_target_: energizer.loggers.TensorBoardLogger
|
38 |
+
root_dir: ./
|
39 |
+
name: tb_logs
|
40 |
+
version: null
|
41 |
+
data:
|
42 |
+
batch_size: 32
|
43 |
+
eval_batch_size: 256
|
44 |
+
num_workers: 32
|
45 |
+
pin_memory: true
|
46 |
+
drop_last: false
|
47 |
+
persistent_workers: true
|
48 |
+
shuffle: true
|
49 |
+
seed: 654321
|
50 |
+
replacement: false
|
51 |
+
max_length: 512
|
52 |
+
active_data:
|
53 |
+
budget: 100
|
54 |
+
positive_budget: 5
|
55 |
+
seed: 123456
|
56 |
+
fit:
|
57 |
+
min_steps: 100
|
58 |
+
max_epochs: 10
|
59 |
+
learning_rate: 4.0e-05
|
60 |
+
optimizer: adamw
|
61 |
+
log_interval: ${log_interval}
|
62 |
+
enable_progress_bar: ${enable_progress_bar}
|
63 |
+
limit_train_batches: ${limit_batches}
|
64 |
+
limit_validation_batches: ${limit_batches}
|
65 |
+
active_fit:
|
66 |
+
max_budget: 5000
|
67 |
+
query_size: 25
|
68 |
+
reinit_model: true
|
69 |
+
limit_pool_batches: ${limit_batches}
|
70 |
+
limit_test_batches: ${limit_batches}
|
71 |
+
test:
|
72 |
+
log_interval: ${log_interval}
|
73 |
+
enable_progress_bar: ${enable_progress_bar}
|
74 |
+
limit_batches: ${limit_batches}
|
75 |
+
strategy:
|
76 |
+
name: randomsubset_entropy
|
77 |
+
args:
|
78 |
+
seed: 42
|
79 |
+
subpool_size: 1000
|
80 |
+
model:
|
81 |
+
name: bert-base-uncased
|
82 |
+
seed: 123456
|
83 |
+
dataset:
|
84 |
+
name: amazon-agri
|
85 |
+
text_column: text
|
86 |
+
label_column: labels
|
87 |
+
uid_column: uid
|
88 |
+
prepared_path: ${data_path}/prepared/amazoncat-agri
|
89 |
+
processed_path: ${data_path}/processed/amazoncat-13k
|
90 |
+
minority_classes:
|
91 |
+
- 1
|
92 |
+
index_metric: all-mpnet-base-v2_cosine
|
93 |
+
log_interval: 1
|
94 |
+
enable_progress_bar: false
|
95 |
+
limit_batches: null
|
96 |
+
seed: 42
|
97 |
+
experiment_group: additional_randomsubset_1000
|
98 |
+
run_name: ${dataset.name}/${model.name}_${strategy.name}_${now:%Y-%m-%d}T${now:%H-%M-%S}
|
99 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
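Unlike the fully resolved hparams.yaml files, the .hydra/config.yaml above still contains OmegaConf interpolations such as ${limit_batches}, ${log_interval} and ${data_path}. Below is a minimal sketch of resolving them outside of Hydra, assuming only that the file is a plain OmegaConf-compatible YAML config.

# Minimal sketch: load the saved Hydra config and resolve its ${...} interpolations.
# Assumes omegaconf is installed and the working directory is the run output directory.
from omegaconf import OmegaConf

cfg = OmegaConf.load(".hydra/config.yaml")
print(cfg.active_fit.limit_pool_batches)         # interpolations resolve lazily on access (here: None)
print(OmegaConf.to_yaml(cfg.fit, resolve=True))  # resolved sub-config, matching hparams.yaml
# Caveat: run_name uses the Hydra-registered ${now:...} resolver, so resolving the
# full config with resolve=True would fail outside of a Hydra app.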
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,210 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: ./outputs/${experiment_group}/${run_name}
|
4 |
+
sweep:
|
5 |
+
dir: ./outputs/multirun/${experiment_group}
|
6 |
+
subdir: ${run_name}_${hydra.job.id}
|
7 |
+
launcher:
|
8 |
+
submitit_folder: ${hydra.sweep.dir}/.submitit/%j
|
9 |
+
timeout_min: 360
|
10 |
+
cpus_per_task: null
|
11 |
+
gpus_per_node: null
|
12 |
+
tasks_per_node: 1
|
13 |
+
mem_gb: null
|
14 |
+
nodes: 1
|
15 |
+
name: ${experiment_group}
|
16 |
+
stderr_to_stdout: false
|
17 |
+
_target_: hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher
|
18 |
+
partition: ampere
|
19 |
+
qos: null
|
20 |
+
comment: null
|
21 |
+
constraint: null
|
22 |
+
exclude: null
|
23 |
+
gres: gpu:1
|
24 |
+
cpus_per_gpu: null
|
25 |
+
gpus_per_task: null
|
26 |
+
mem_per_gpu: null
|
27 |
+
mem_per_cpu: null
|
28 |
+
account: VLACHOS-SL3-GPU
|
29 |
+
signal_delay_s: 120
|
30 |
+
max_num_timeout: 0
|
31 |
+
additional_parameters: {}
|
32 |
+
array_parallelism: 256
|
33 |
+
setup:
|
34 |
+
- . /etc/profile.d/modules.sh
|
35 |
+
- module list
|
36 |
+
- echo -e 'loading other modules'
|
37 |
+
- module load rhel8/default-amp
|
38 |
+
- module load cuda/12.1
|
39 |
+
- module load cudnn/8.9_cuda-12.1
|
40 |
+
- module list
|
41 |
+
sweeper:
|
42 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
43 |
+
max_batch_size: null
|
44 |
+
params: null
|
45 |
+
help:
|
46 |
+
app_name: ${hydra.job.name}
|
47 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
48 |
+
|
49 |
+
'
|
50 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
51 |
+
|
52 |
+
Use --hydra-help to view Hydra specific help
|
53 |
+
|
54 |
+
'
|
55 |
+
template: '${hydra.help.header}
|
56 |
+
|
57 |
+
== Configuration groups ==
|
58 |
+
|
59 |
+
Compose your configuration from those groups (group=option)
|
60 |
+
|
61 |
+
|
62 |
+
$APP_CONFIG_GROUPS
|
63 |
+
|
64 |
+
|
65 |
+
== Config ==
|
66 |
+
|
67 |
+
Override anything in the config (foo.bar=value)
|
68 |
+
|
69 |
+
|
70 |
+
$CONFIG
|
71 |
+
|
72 |
+
|
73 |
+
${hydra.help.footer}
|
74 |
+
|
75 |
+
'
|
76 |
+
hydra_help:
|
77 |
+
template: 'Hydra (${hydra.runtime.version})
|
78 |
+
|
79 |
+
See https://hydra.cc for more info.
|
80 |
+
|
81 |
+
|
82 |
+
== Flags ==
|
83 |
+
|
84 |
+
$FLAGS_HELP
|
85 |
+
|
86 |
+
|
87 |
+
== Configuration groups ==
|
88 |
+
|
89 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
90 |
+
to command line)
|
91 |
+
|
92 |
+
|
93 |
+
$HYDRA_CONFIG_GROUPS
|
94 |
+
|
95 |
+
|
96 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
97 |
+
|
98 |
+
'
|
99 |
+
hydra_help: ???
|
100 |
+
hydra_logging:
|
101 |
+
version: 1
|
102 |
+
formatters:
|
103 |
+
colorlog:
|
104 |
+
(): colorlog.ColoredFormatter
|
105 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
|
106 |
+
handlers:
|
107 |
+
console:
|
108 |
+
class: logging.StreamHandler
|
109 |
+
formatter: colorlog
|
110 |
+
stream: ext://sys.stdout
|
111 |
+
root:
|
112 |
+
level: INFO
|
113 |
+
handlers:
|
114 |
+
- console
|
115 |
+
disable_existing_loggers: false
|
116 |
+
job_logging:
|
117 |
+
version: 1
|
118 |
+
formatters:
|
119 |
+
simple:
|
120 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
121 |
+
colorlog:
|
122 |
+
(): colorlog.ColoredFormatter
|
123 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
|
124 |
+
- %(message)s'
|
125 |
+
log_colors:
|
126 |
+
DEBUG: purple
|
127 |
+
INFO: green
|
128 |
+
WARNING: yellow
|
129 |
+
ERROR: red
|
130 |
+
CRITICAL: bold_red
|
131 |
+
handlers:
|
132 |
+
console:
|
133 |
+
class: logging.StreamHandler
|
134 |
+
formatter: colorlog
|
135 |
+
stream: ext://sys.stdout
|
136 |
+
file:
|
137 |
+
class: logging.FileHandler
|
138 |
+
formatter: simple
|
139 |
+
filename: ${hydra.job.name}.log
|
140 |
+
root:
|
141 |
+
level: INFO
|
142 |
+
handlers:
|
143 |
+
- console
|
144 |
+
- file
|
145 |
+
disable_existing_loggers: false
|
146 |
+
env: {}
|
147 |
+
mode: MULTIRUN
|
148 |
+
searchpath: []
|
149 |
+
callbacks: {}
|
150 |
+
output_subdir: .hydra
|
151 |
+
overrides:
|
152 |
+
hydra:
|
153 |
+
- hydra.launcher.timeout_min=360
|
154 |
+
- hydra.mode=MULTIRUN
|
155 |
+
task:
|
156 |
+
- experiment_group=additional_randomsubset_1000
|
157 |
+
- dataset=amazon_agri
|
158 |
+
- strategy=randomsubset_entropy
|
159 |
+
- data.seed=654321
|
160 |
+
- model.seed=123456
|
161 |
+
- active_data.seed=123456
|
162 |
+
- model.name=bert-base-uncased
|
163 |
+
- +launcher=slurm
|
164 |
+
- strategy.args.subpool_size=1000
|
165 |
+
job:
|
166 |
+
name: active_train
|
167 |
+
chdir: true
|
168 |
+
override_dirname: +launcher=slurm,active_data.seed=123456,data.seed=654321,dataset=amazon_agri,experiment_group=additional_randomsubset_1000,model.name=bert-base-uncased,model.seed=123456,strategy.args.subpool_size=1000,strategy=randomsubset_entropy
|
169 |
+
id: '49537867_3'
|
170 |
+
num: 3
|
171 |
+
config_name: conf
|
172 |
+
env_set: {}
|
173 |
+
env_copy: []
|
174 |
+
config:
|
175 |
+
override_dirname:
|
176 |
+
kv_sep: '='
|
177 |
+
item_sep: ','
|
178 |
+
exclude_keys: []
|
179 |
+
runtime:
|
180 |
+
version: 1.3.2
|
181 |
+
version_base: '1.3'
|
182 |
+
cwd: /rds/user/pl487/hpc-work/anchoral
|
183 |
+
config_sources:
|
184 |
+
- path: hydra.conf
|
185 |
+
schema: pkg
|
186 |
+
provider: hydra
|
187 |
+
- path: /rds/user/pl487/hpc-work/anchoral/conf
|
188 |
+
schema: file
|
189 |
+
provider: main
|
190 |
+
- path: hydra_plugins.hydra_colorlog.conf
|
191 |
+
schema: pkg
|
192 |
+
provider: hydra-colorlog
|
193 |
+
- path: ''
|
194 |
+
schema: structured
|
195 |
+
provider: schema
|
196 |
+
output_dir: /rds/user/pl487/hpc-work/anchoral/outputs/multirun/additional_randomsubset_1000/amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3
|
197 |
+
choices:
|
198 |
+
launcher: slurm
|
199 |
+
dataset: amazon_agri
|
200 |
+
strategy: randomsubset_entropy
|
201 |
+
hydra/env: default
|
202 |
+
hydra/callbacks: null
|
203 |
+
hydra/job_logging: colorlog
|
204 |
+
hydra/hydra_logging: colorlog
|
205 |
+
hydra/hydra_help: default
|
206 |
+
hydra/help: default
|
207 |
+
hydra/sweeper: basic
|
208 |
+
hydra/launcher: submitit_slurm
|
209 |
+
hydra/output: default
|
210 |
+
verbose: false
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/.hydra/overrides.yaml
ADDED
@@ -0,0 +1,9 @@
+- experiment_group=additional_randomsubset_1000
+- dataset=amazon_agri
+- strategy=randomsubset_entropy
+- data.seed=654321
+- model.seed=123456
+- active_data.seed=123456
+- model.name=bert-base-uncased
+- +launcher=slurm
+- strategy.args.subpool_size=1000
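The overrides.yaml above records exactly the task overrides passed on the command line for this multirun job. Below is a minimal sketch of turning such a file back into a reproducible Hydra multirun invocation; the active_train.py entry-point name is only inferred from hydra.job.name and is an assumption.

# Minimal sketch: rebuild the multirun command line from the saved overrides.
# Assumption: the entry point is active_train.py (hydra.job.name above is 'active_train').
import yaml

with open(".hydra/overrides.yaml") as f:
    overrides = yaml.safe_load(f)  # a plain YAML list of "key=value" strings

print("python active_train.py -m " + " ".join(overrides))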
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/active_train.log
ADDED
@@ -0,0 +1,132 @@
1 |
+
[2024-04-05 21:45:44,959][hydra][INFO] -
|
2 |
+
estimator:
|
3 |
+
accelerator: gpu
|
4 |
+
precision: 32
|
5 |
+
deterministic: true
|
6 |
+
tf32_mode: high
|
7 |
+
callbacks:
|
8 |
+
timer:
|
9 |
+
_target_: energizer.active_learning.callbacks.Timer
|
10 |
+
save_outputs:
|
11 |
+
_target_: src.callbacks.SaveOutputs
|
12 |
+
dirpath: ./logs/
|
13 |
+
instance_level: false
|
14 |
+
batch_level: false
|
15 |
+
epoch_level: false
|
16 |
+
early_stopping:
|
17 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
18 |
+
monitor: train/avg_f1_minclass
|
19 |
+
stage: train
|
20 |
+
interval: epoch
|
21 |
+
mode: max
|
22 |
+
min_delta: 1.0e-05
|
23 |
+
patience: 10
|
24 |
+
stopping_threshold: null
|
25 |
+
divergence_threshold: null
|
26 |
+
verbose: true
|
27 |
+
model_checkpoint:
|
28 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
29 |
+
dirpath: .checkpoints
|
30 |
+
monitor: train/avg_f1_minclass
|
31 |
+
stage: train
|
32 |
+
mode: max
|
33 |
+
save_last: false
|
34 |
+
save_top_k: 1
|
35 |
+
verbose: true
|
36 |
+
loggers:
|
37 |
+
tensorboard:
|
38 |
+
_target_: energizer.loggers.TensorBoardLogger
|
39 |
+
root_dir: ./
|
40 |
+
name: tb_logs
|
41 |
+
version: null
|
42 |
+
data:
|
43 |
+
batch_size: 32
|
44 |
+
eval_batch_size: 256
|
45 |
+
num_workers: 32
|
46 |
+
pin_memory: true
|
47 |
+
drop_last: false
|
48 |
+
persistent_workers: true
|
49 |
+
shuffle: true
|
50 |
+
seed: 654321
|
51 |
+
replacement: false
|
52 |
+
max_length: 512
|
53 |
+
active_data:
|
54 |
+
budget: 100
|
55 |
+
positive_budget: 5
|
56 |
+
seed: 123456
|
57 |
+
fit:
|
58 |
+
min_steps: 100
|
59 |
+
max_epochs: 10
|
60 |
+
learning_rate: 4.0e-05
|
61 |
+
optimizer: adamw
|
62 |
+
log_interval: 1
|
63 |
+
enable_progress_bar: false
|
64 |
+
limit_train_batches: null
|
65 |
+
limit_validation_batches: null
|
66 |
+
active_fit:
|
67 |
+
max_budget: 5000
|
68 |
+
query_size: 25
|
69 |
+
reinit_model: true
|
70 |
+
limit_pool_batches: null
|
71 |
+
limit_test_batches: null
|
72 |
+
test:
|
73 |
+
log_interval: 1
|
74 |
+
enable_progress_bar: false
|
75 |
+
limit_batches: null
|
76 |
+
strategy:
|
77 |
+
name: randomsubset_entropy
|
78 |
+
args:
|
79 |
+
seed: 42
|
80 |
+
subpool_size: 1000
|
81 |
+
model:
|
82 |
+
name: bert-base-uncased
|
83 |
+
seed: 123456
|
84 |
+
dataset:
|
85 |
+
name: amazon-agri
|
86 |
+
text_column: text
|
87 |
+
label_column: labels
|
88 |
+
uid_column: uid
|
89 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
90 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
91 |
+
minority_classes:
|
92 |
+
- 1
|
93 |
+
index_metric: all-mpnet-base-v2_cosine
|
94 |
+
log_interval: 1
|
95 |
+
enable_progress_bar: false
|
96 |
+
limit_batches: null
|
97 |
+
seed: 42
|
98 |
+
experiment_group: additional_randomsubset_1000
|
99 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
100 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
101 |
+
|
102 |
+
======================================================================
|
103 |
+
[2024-04-05 21:45:44,966][hydra][INFO] - Running active learning with strategy {'name': 'randomsubset_entropy', 'args': {'seed': 42, 'subpool_size': 1000}}
|
104 |
+
[2024-04-05 21:45:44,992][hydra][INFO] - Seed enabled: 42
|
105 |
+
[2024-04-05 21:45:52,370][hydra][INFO] - Labelled size: 100 Pool size: 1186139 Test size: 5285
|
106 |
+
Label distribution:
|
107 |
+
| | labels | count | perc |
|
108 |
+
|---:|:---------|--------:|-------:|
|
109 |
+
| 0 | Negative | 95 | 0.95 |
|
110 |
+
| 1 | Positive | 5 | 0.05 |
|
111 |
+
[2024-04-05 21:45:52,524][hydra][INFO] - Batch:
|
112 |
+
{<InputKeys.INPUT_IDS: 'input_ids'>: tensor([[ 101, 3521, 13903, 102]]), <InputKeys.ATT_MASK: 'attention_mask'>: tensor([[1, 1, 1, 1]]), <InputKeys.LABELS: 'labels'>: tensor([0]), <InputKeys.ON_CPU: 'on_cpu'>: {<SpecialKeys.ID: 'uid'>: [1462254]}}
|
113 |
+
[2024-04-05 21:45:57,989][hydra][INFO] - Loggers: {'tensorboard': <energizer.loggers.tensorboard.TensorBoardLogger object at 0x14ed16274b20>}
|
114 |
+
[2024-04-05 21:45:57,989][hydra][INFO] - Callbacks: {'timer': <energizer.active_learning.callbacks.Timer object at 0x14ed0eb50130>, 'save_outputs': <src.callbacks.SaveOutputs object at 0x14ed0eb758e0>, 'early_stopping': <energizer.callbacks.early_stopping.EarlyStopping object at 0x14ed0eb75be0>, 'model_checkpoint': <energizer.callbacks.model_checkpoint.ModelCheckpoint object at 0x14ed161b05b0>}
|
115 |
+
[2024-04-05 21:45:58,013][hydra][INFO] -
|
116 |
+
| Name | Type | Params
|
117 |
+
-----------------------------------------
|
118 |
+
0 | bert | BertModel | 109 M
|
119 |
+
1 | dropout | Dropout | 0
|
120 |
+
2 | classifier | Linear | 1.5 K
|
121 |
+
-----------------------------------------
|
122 |
+
109 M Trainable params
|
123 |
+
0 Non-trainable params
|
124 |
+
109 M Total params
|
125 |
+
437.935 Total estimated model params size (MB)
|
126 |
+
0.00 GB CUDA Memory used
|
127 |
+
[2024-04-06 03:42:58,881][submitit][INFO] - Job has timed out. Ran 357 minutes out of requested 360 minutes.
|
128 |
+
[2024-04-06 03:42:58,950][submitit][WARNING] - Caught signal SIGUSR2 on gpu-q-11: this job is timed-out.
|
129 |
+
[2024-04-06 03:42:58,975][submitit][INFO] - Calling checkpoint method.
|
130 |
+
[2024-04-06 03:42:59,023][submitit][INFO] - Job not requeued because: timed-out too many times.
|
131 |
+
[2024-04-06 03:42:59,023][submitit][WARNING] - Bypassing signal SIGCONT
|
132 |
+
[2024-04-06 03:42:59,028][submitit][INFO] - Job completed successfully
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/hparams.yaml
ADDED
@@ -0,0 +1,99 @@
1 |
+
estimator:
|
2 |
+
accelerator: gpu
|
3 |
+
precision: 32
|
4 |
+
deterministic: true
|
5 |
+
tf32_mode: high
|
6 |
+
callbacks:
|
7 |
+
timer:
|
8 |
+
_target_: energizer.active_learning.callbacks.Timer
|
9 |
+
save_outputs:
|
10 |
+
_target_: src.callbacks.SaveOutputs
|
11 |
+
dirpath: ./logs/
|
12 |
+
instance_level: false
|
13 |
+
batch_level: false
|
14 |
+
epoch_level: false
|
15 |
+
early_stopping:
|
16 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
17 |
+
monitor: train/avg_f1_minclass
|
18 |
+
stage: train
|
19 |
+
interval: epoch
|
20 |
+
mode: max
|
21 |
+
min_delta: 1.0e-05
|
22 |
+
patience: 10
|
23 |
+
stopping_threshold: null
|
24 |
+
divergence_threshold: null
|
25 |
+
verbose: true
|
26 |
+
model_checkpoint:
|
27 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
28 |
+
dirpath: .checkpoints
|
29 |
+
monitor: train/avg_f1_minclass
|
30 |
+
stage: train
|
31 |
+
mode: max
|
32 |
+
save_last: false
|
33 |
+
save_top_k: 1
|
34 |
+
verbose: true
|
35 |
+
loggers:
|
36 |
+
tensorboard:
|
37 |
+
_target_: energizer.loggers.TensorBoardLogger
|
38 |
+
root_dir: ./
|
39 |
+
name: tb_logs
|
40 |
+
version: null
|
41 |
+
data:
|
42 |
+
batch_size: 32
|
43 |
+
eval_batch_size: 256
|
44 |
+
num_workers: 32
|
45 |
+
pin_memory: true
|
46 |
+
drop_last: false
|
47 |
+
persistent_workers: true
|
48 |
+
shuffle: true
|
49 |
+
seed: 654321
|
50 |
+
replacement: false
|
51 |
+
max_length: 512
|
52 |
+
active_data:
|
53 |
+
budget: 100
|
54 |
+
positive_budget: 5
|
55 |
+
seed: 123456
|
56 |
+
fit:
|
57 |
+
min_steps: 100
|
58 |
+
max_epochs: 10
|
59 |
+
learning_rate: 4.0e-05
|
60 |
+
optimizer: adamw
|
61 |
+
log_interval: 1
|
62 |
+
enable_progress_bar: false
|
63 |
+
limit_train_batches: null
|
64 |
+
limit_validation_batches: null
|
65 |
+
active_fit:
|
66 |
+
max_budget: 5000
|
67 |
+
query_size: 25
|
68 |
+
reinit_model: true
|
69 |
+
limit_pool_batches: null
|
70 |
+
limit_test_batches: null
|
71 |
+
test:
|
72 |
+
log_interval: 1
|
73 |
+
enable_progress_bar: false
|
74 |
+
limit_batches: null
|
75 |
+
strategy:
|
76 |
+
name: randomsubset_entropy
|
77 |
+
args:
|
78 |
+
seed: 42
|
79 |
+
subpool_size: 1000
|
80 |
+
model:
|
81 |
+
name: bert-base-uncased
|
82 |
+
seed: 123456
|
83 |
+
dataset:
|
84 |
+
name: amazon-agri
|
85 |
+
text_column: text
|
86 |
+
label_column: labels
|
87 |
+
uid_column: uid
|
88 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
89 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
90 |
+
minority_classes:
|
91 |
+
- 1
|
92 |
+
index_metric: all-mpnet-base-v2_cosine
|
93 |
+
log_interval: 1
|
94 |
+
enable_progress_bar: false
|
95 |
+
limit_batches: null
|
96 |
+
seed: 42
|
97 |
+
experiment_group: additional_randomsubset_1000
|
98 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
99 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/logs/labelled_dataset.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce754d2dca678ea42cf3e001792a4da9d72d80a37c90016e17da4832440ff6cf
+size 9931425
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/logs/subpool_ids.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/tb_logs/version_0/events.out.tfevents.1712349958.gpu-q-11.665611.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9378cad52d178e5900a8a7a0058bae572304a7cc9363fd6ddc009e219d7b1ae6
+size 7674684
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_3/tensorboard_logs.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44e805c104f815bc852528ec18b754f1ea1988a203419f6ee476bb46574d374d
+size 1318230
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/.early_stopping.jsonl
ADDED
@@ -0,0 +1,2 @@
+{"best_score":1.0,"best_step":12,"stage":"train","interval":"epoch","stopping_step":22,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
+{"best_score":1.0,"best_step":35,"stage":"train","interval":"epoch","stopping_step":45,"reason":"Monitored metric `train/avg_f1_minclass` did not improve in the last 10 epochs."}
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/.hydra/config.yaml
ADDED
@@ -0,0 +1,99 @@
1 |
+
estimator:
|
2 |
+
accelerator: gpu
|
3 |
+
precision: 32
|
4 |
+
deterministic: true
|
5 |
+
tf32_mode: high
|
6 |
+
callbacks:
|
7 |
+
timer:
|
8 |
+
_target_: energizer.active_learning.callbacks.Timer
|
9 |
+
save_outputs:
|
10 |
+
_target_: src.callbacks.SaveOutputs
|
11 |
+
dirpath: ./logs/
|
12 |
+
instance_level: false
|
13 |
+
batch_level: false
|
14 |
+
epoch_level: false
|
15 |
+
early_stopping:
|
16 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
17 |
+
monitor: train/avg_f1_minclass
|
18 |
+
stage: train
|
19 |
+
interval: epoch
|
20 |
+
mode: max
|
21 |
+
min_delta: 1.0e-05
|
22 |
+
patience: 10
|
23 |
+
stopping_threshold: null
|
24 |
+
divergence_threshold: null
|
25 |
+
verbose: true
|
26 |
+
model_checkpoint:
|
27 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
28 |
+
dirpath: .checkpoints
|
29 |
+
monitor: train/avg_f1_minclass
|
30 |
+
stage: train
|
31 |
+
mode: max
|
32 |
+
save_last: false
|
33 |
+
save_top_k: 1
|
34 |
+
verbose: true
|
35 |
+
loggers:
|
36 |
+
tensorboard:
|
37 |
+
_target_: energizer.loggers.TensorBoardLogger
|
38 |
+
root_dir: ./
|
39 |
+
name: tb_logs
|
40 |
+
version: null
|
41 |
+
data:
|
42 |
+
batch_size: 32
|
43 |
+
eval_batch_size: 256
|
44 |
+
num_workers: 32
|
45 |
+
pin_memory: true
|
46 |
+
drop_last: false
|
47 |
+
persistent_workers: true
|
48 |
+
shuffle: true
|
49 |
+
seed: 123456
|
50 |
+
replacement: false
|
51 |
+
max_length: 512
|
52 |
+
active_data:
|
53 |
+
budget: 100
|
54 |
+
positive_budget: 5
|
55 |
+
seed: 654321
|
56 |
+
fit:
|
57 |
+
min_steps: 100
|
58 |
+
max_epochs: 10
|
59 |
+
learning_rate: 4.0e-05
|
60 |
+
optimizer: adamw
|
61 |
+
log_interval: ${log_interval}
|
62 |
+
enable_progress_bar: ${enable_progress_bar}
|
63 |
+
limit_train_batches: ${limit_batches}
|
64 |
+
limit_validation_batches: ${limit_batches}
|
65 |
+
active_fit:
|
66 |
+
max_budget: 5000
|
67 |
+
query_size: 25
|
68 |
+
reinit_model: true
|
69 |
+
limit_pool_batches: ${limit_batches}
|
70 |
+
limit_test_batches: ${limit_batches}
|
71 |
+
test:
|
72 |
+
log_interval: ${log_interval}
|
73 |
+
enable_progress_bar: ${enable_progress_bar}
|
74 |
+
limit_batches: ${limit_batches}
|
75 |
+
strategy:
|
76 |
+
name: randomsubset_entropy
|
77 |
+
args:
|
78 |
+
seed: 42
|
79 |
+
subpool_size: 1000
|
80 |
+
model:
|
81 |
+
name: bert-base-uncased
|
82 |
+
seed: 654321
|
83 |
+
dataset:
|
84 |
+
name: amazon-agri
|
85 |
+
text_column: text
|
86 |
+
label_column: labels
|
87 |
+
uid_column: uid
|
88 |
+
prepared_path: ${data_path}/prepared/amazoncat-agri
|
89 |
+
processed_path: ${data_path}/processed/amazoncat-13k
|
90 |
+
minority_classes:
|
91 |
+
- 1
|
92 |
+
index_metric: all-mpnet-base-v2_cosine
|
93 |
+
log_interval: 1
|
94 |
+
enable_progress_bar: false
|
95 |
+
limit_batches: null
|
96 |
+
seed: 42
|
97 |
+
experiment_group: additional_randomsubset_1000
|
98 |
+
run_name: ${dataset.name}/${model.name}_${strategy.name}_${now:%Y-%m-%d}T${now:%H-%M-%S}
|
99 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,210 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: ./outputs/${experiment_group}/${run_name}
|
4 |
+
sweep:
|
5 |
+
dir: ./outputs/multirun/${experiment_group}
|
6 |
+
subdir: ${run_name}_${hydra.job.id}
|
7 |
+
launcher:
|
8 |
+
submitit_folder: ${hydra.sweep.dir}/.submitit/%j
|
9 |
+
timeout_min: 360
|
10 |
+
cpus_per_task: null
|
11 |
+
gpus_per_node: null
|
12 |
+
tasks_per_node: 1
|
13 |
+
mem_gb: null
|
14 |
+
nodes: 1
|
15 |
+
name: ${experiment_group}
|
16 |
+
stderr_to_stdout: false
|
17 |
+
_target_: hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher
|
18 |
+
partition: ampere
|
19 |
+
qos: null
|
20 |
+
comment: null
|
21 |
+
constraint: null
|
22 |
+
exclude: null
|
23 |
+
gres: gpu:1
|
24 |
+
cpus_per_gpu: null
|
25 |
+
gpus_per_task: null
|
26 |
+
mem_per_gpu: null
|
27 |
+
mem_per_cpu: null
|
28 |
+
account: VLACHOS-SL3-GPU
|
29 |
+
signal_delay_s: 120
|
30 |
+
max_num_timeout: 0
|
31 |
+
additional_parameters: {}
|
32 |
+
array_parallelism: 256
|
33 |
+
setup:
|
34 |
+
- . /etc/profile.d/modules.sh
|
35 |
+
- module list
|
36 |
+
- echo -e 'loading other modules'
|
37 |
+
- module load rhel8/default-amp
|
38 |
+
- module load cuda/12.1
|
39 |
+
- module load cudnn/8.9_cuda-12.1
|
40 |
+
- module list
|
41 |
+
sweeper:
|
42 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
43 |
+
max_batch_size: null
|
44 |
+
params: null
|
45 |
+
help:
|
46 |
+
app_name: ${hydra.job.name}
|
47 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
48 |
+
|
49 |
+
'
|
50 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
51 |
+
|
52 |
+
Use --hydra-help to view Hydra specific help
|
53 |
+
|
54 |
+
'
|
55 |
+
template: '${hydra.help.header}
|
56 |
+
|
57 |
+
== Configuration groups ==
|
58 |
+
|
59 |
+
Compose your configuration from those groups (group=option)
|
60 |
+
|
61 |
+
|
62 |
+
$APP_CONFIG_GROUPS
|
63 |
+
|
64 |
+
|
65 |
+
== Config ==
|
66 |
+
|
67 |
+
Override anything in the config (foo.bar=value)
|
68 |
+
|
69 |
+
|
70 |
+
$CONFIG
|
71 |
+
|
72 |
+
|
73 |
+
${hydra.help.footer}
|
74 |
+
|
75 |
+
'
|
76 |
+
hydra_help:
|
77 |
+
template: 'Hydra (${hydra.runtime.version})
|
78 |
+
|
79 |
+
See https://hydra.cc for more info.
|
80 |
+
|
81 |
+
|
82 |
+
== Flags ==
|
83 |
+
|
84 |
+
$FLAGS_HELP
|
85 |
+
|
86 |
+
|
87 |
+
== Configuration groups ==
|
88 |
+
|
89 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
90 |
+
to command line)
|
91 |
+
|
92 |
+
|
93 |
+
$HYDRA_CONFIG_GROUPS
|
94 |
+
|
95 |
+
|
96 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
97 |
+
|
98 |
+
'
|
99 |
+
hydra_help: ???
|
100 |
+
hydra_logging:
|
101 |
+
version: 1
|
102 |
+
formatters:
|
103 |
+
colorlog:
|
104 |
+
(): colorlog.ColoredFormatter
|
105 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
|
106 |
+
handlers:
|
107 |
+
console:
|
108 |
+
class: logging.StreamHandler
|
109 |
+
formatter: colorlog
|
110 |
+
stream: ext://sys.stdout
|
111 |
+
root:
|
112 |
+
level: INFO
|
113 |
+
handlers:
|
114 |
+
- console
|
115 |
+
disable_existing_loggers: false
|
116 |
+
job_logging:
|
117 |
+
version: 1
|
118 |
+
formatters:
|
119 |
+
simple:
|
120 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
121 |
+
colorlog:
|
122 |
+
(): colorlog.ColoredFormatter
|
123 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
|
124 |
+
- %(message)s'
|
125 |
+
log_colors:
|
126 |
+
DEBUG: purple
|
127 |
+
INFO: green
|
128 |
+
WARNING: yellow
|
129 |
+
ERROR: red
|
130 |
+
CRITICAL: bold_red
|
131 |
+
handlers:
|
132 |
+
console:
|
133 |
+
class: logging.StreamHandler
|
134 |
+
formatter: colorlog
|
135 |
+
stream: ext://sys.stdout
|
136 |
+
file:
|
137 |
+
class: logging.FileHandler
|
138 |
+
formatter: simple
|
139 |
+
filename: ${hydra.job.name}.log
|
140 |
+
root:
|
141 |
+
level: INFO
|
142 |
+
handlers:
|
143 |
+
- console
|
144 |
+
- file
|
145 |
+
disable_existing_loggers: false
|
146 |
+
env: {}
|
147 |
+
mode: MULTIRUN
|
148 |
+
searchpath: []
|
149 |
+
callbacks: {}
|
150 |
+
output_subdir: .hydra
|
151 |
+
overrides:
|
152 |
+
hydra:
|
153 |
+
- hydra.launcher.timeout_min=360
|
154 |
+
- hydra.mode=MULTIRUN
|
155 |
+
task:
|
156 |
+
- experiment_group=additional_randomsubset_1000
|
157 |
+
- dataset=amazon_agri
|
158 |
+
- strategy=randomsubset_entropy
|
159 |
+
- data.seed=123456
|
160 |
+
- model.seed=654321
|
161 |
+
- active_data.seed=654321
|
162 |
+
- model.name=bert-base-uncased
|
163 |
+
- +launcher=slurm
|
164 |
+
- strategy.args.subpool_size=1000
|
165 |
+
job:
|
166 |
+
name: active_train
|
167 |
+
chdir: true
|
168 |
+
override_dirname: +launcher=slurm,active_data.seed=654321,data.seed=123456,dataset=amazon_agri,experiment_group=additional_randomsubset_1000,model.name=bert-base-uncased,model.seed=654321,strategy.args.subpool_size=1000,strategy=randomsubset_entropy
|
169 |
+
id: '49537867_4'
|
170 |
+
num: 4
|
171 |
+
config_name: conf
|
172 |
+
env_set: {}
|
173 |
+
env_copy: []
|
174 |
+
config:
|
175 |
+
override_dirname:
|
176 |
+
kv_sep: '='
|
177 |
+
item_sep: ','
|
178 |
+
exclude_keys: []
|
179 |
+
runtime:
|
180 |
+
version: 1.3.2
|
181 |
+
version_base: '1.3'
|
182 |
+
cwd: /rds/user/pl487/hpc-work/anchoral
|
183 |
+
config_sources:
|
184 |
+
- path: hydra.conf
|
185 |
+
schema: pkg
|
186 |
+
provider: hydra
|
187 |
+
- path: /rds/user/pl487/hpc-work/anchoral/conf
|
188 |
+
schema: file
|
189 |
+
provider: main
|
190 |
+
- path: hydra_plugins.hydra_colorlog.conf
|
191 |
+
schema: pkg
|
192 |
+
provider: hydra-colorlog
|
193 |
+
- path: ''
|
194 |
+
schema: structured
|
195 |
+
provider: schema
|
196 |
+
output_dir: /rds/user/pl487/hpc-work/anchoral/outputs/multirun/additional_randomsubset_1000/amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4
|
197 |
+
choices:
|
198 |
+
launcher: slurm
|
199 |
+
dataset: amazon_agri
|
200 |
+
strategy: randomsubset_entropy
|
201 |
+
hydra/env: default
|
202 |
+
hydra/callbacks: null
|
203 |
+
hydra/job_logging: colorlog
|
204 |
+
hydra/hydra_logging: colorlog
|
205 |
+
hydra/hydra_help: default
|
206 |
+
hydra/help: default
|
207 |
+
hydra/sweeper: basic
|
208 |
+
hydra/launcher: submitit_slurm
|
209 |
+
hydra/output: default
|
210 |
+
verbose: false
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/.hydra/overrides.yaml
ADDED
@@ -0,0 +1,9 @@
+- experiment_group=additional_randomsubset_1000
+- dataset=amazon_agri
+- strategy=randomsubset_entropy
+- data.seed=123456
+- model.seed=654321
+- active_data.seed=654321
+- model.name=bert-base-uncased
+- +launcher=slurm
+- strategy.args.subpool_size=1000
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/active_train.log
ADDED
@@ -0,0 +1,132 @@
1 |
+
[2024-04-05 21:45:44,959][hydra][INFO] -
|
2 |
+
estimator:
|
3 |
+
accelerator: gpu
|
4 |
+
precision: 32
|
5 |
+
deterministic: true
|
6 |
+
tf32_mode: high
|
7 |
+
callbacks:
|
8 |
+
timer:
|
9 |
+
_target_: energizer.active_learning.callbacks.Timer
|
10 |
+
save_outputs:
|
11 |
+
_target_: src.callbacks.SaveOutputs
|
12 |
+
dirpath: ./logs/
|
13 |
+
instance_level: false
|
14 |
+
batch_level: false
|
15 |
+
epoch_level: false
|
16 |
+
early_stopping:
|
17 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
18 |
+
monitor: train/avg_f1_minclass
|
19 |
+
stage: train
|
20 |
+
interval: epoch
|
21 |
+
mode: max
|
22 |
+
min_delta: 1.0e-05
|
23 |
+
patience: 10
|
24 |
+
stopping_threshold: null
|
25 |
+
divergence_threshold: null
|
26 |
+
verbose: true
|
27 |
+
model_checkpoint:
|
28 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
29 |
+
dirpath: .checkpoints
|
30 |
+
monitor: train/avg_f1_minclass
|
31 |
+
stage: train
|
32 |
+
mode: max
|
33 |
+
save_last: false
|
34 |
+
save_top_k: 1
|
35 |
+
verbose: true
|
36 |
+
loggers:
|
37 |
+
tensorboard:
|
38 |
+
_target_: energizer.loggers.TensorBoardLogger
|
39 |
+
root_dir: ./
|
40 |
+
name: tb_logs
|
41 |
+
version: null
|
42 |
+
data:
|
43 |
+
batch_size: 32
|
44 |
+
eval_batch_size: 256
|
45 |
+
num_workers: 32
|
46 |
+
pin_memory: true
|
47 |
+
drop_last: false
|
48 |
+
persistent_workers: true
|
49 |
+
shuffle: true
|
50 |
+
seed: 123456
|
51 |
+
replacement: false
|
52 |
+
max_length: 512
|
53 |
+
active_data:
|
54 |
+
budget: 100
|
55 |
+
positive_budget: 5
|
56 |
+
seed: 654321
|
57 |
+
fit:
|
58 |
+
min_steps: 100
|
59 |
+
max_epochs: 10
|
60 |
+
learning_rate: 4.0e-05
|
61 |
+
optimizer: adamw
|
62 |
+
log_interval: 1
|
63 |
+
enable_progress_bar: false
|
64 |
+
limit_train_batches: null
|
65 |
+
limit_validation_batches: null
|
66 |
+
active_fit:
|
67 |
+
max_budget: 5000
|
68 |
+
query_size: 25
|
69 |
+
reinit_model: true
|
70 |
+
limit_pool_batches: null
|
71 |
+
limit_test_batches: null
|
72 |
+
test:
|
73 |
+
log_interval: 1
|
74 |
+
enable_progress_bar: false
|
75 |
+
limit_batches: null
|
76 |
+
strategy:
|
77 |
+
name: randomsubset_entropy
|
78 |
+
args:
|
79 |
+
seed: 42
|
80 |
+
subpool_size: 1000
|
81 |
+
model:
|
82 |
+
name: bert-base-uncased
|
83 |
+
seed: 654321
|
84 |
+
dataset:
|
85 |
+
name: amazon-agri
|
86 |
+
text_column: text
|
87 |
+
label_column: labels
|
88 |
+
uid_column: uid
|
89 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
90 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
91 |
+
minority_classes:
|
92 |
+
- 1
|
93 |
+
index_metric: all-mpnet-base-v2_cosine
|
94 |
+
log_interval: 1
|
95 |
+
enable_progress_bar: false
|
96 |
+
limit_batches: null
|
97 |
+
seed: 42
|
98 |
+
experiment_group: additional_randomsubset_1000
|
99 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
100 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
101 |
+
|
102 |
+
======================================================================
|
103 |
+
[2024-04-05 21:45:44,966][hydra][INFO] - Running active learning with strategy {'name': 'randomsubset_entropy', 'args': {'seed': 42, 'subpool_size': 1000}}
|
104 |
+
[2024-04-05 21:45:44,992][hydra][INFO] - Seed enabled: 42
|
105 |
+
[2024-04-05 21:45:53,167][hydra][INFO] - Labelled size: 100 Pool size: 1186139 Test size: 5285
|
106 |
+
Label distribution:
|
107 |
+
| | labels | count | perc |
|
108 |
+
|---:|:---------|--------:|-------:|
|
109 |
+
| 0 | Negative | 95 | 0.95 |
|
110 |
+
| 1 | Positive | 5 | 0.05 |
|
111 |
+
[2024-04-05 21:45:53,241][hydra][INFO] - Batch:
|
112 |
+
{<InputKeys.INPUT_IDS: 'input_ids'>: tensor([[ 101, 3521, 13903, 102]]), <InputKeys.ATT_MASK: 'attention_mask'>: tensor([[1, 1, 1, 1]]), <InputKeys.LABELS: 'labels'>: tensor([0]), <InputKeys.ON_CPU: 'on_cpu'>: {<SpecialKeys.ID: 'uid'>: [1462254]}}
|
113 |
+
[2024-04-05 21:45:59,589][hydra][INFO] - Loggers: {'tensorboard': <energizer.loggers.tensorboard.TensorBoardLogger object at 0x1486b90c9df0>}
|
114 |
+
[2024-04-05 21:45:59,589][hydra][INFO] - Callbacks: {'timer': <energizer.active_learning.callbacks.Timer object at 0x1486b916c580>, 'save_outputs': <src.callbacks.SaveOutputs object at 0x1486b91514f0>, 'early_stopping': <energizer.callbacks.early_stopping.EarlyStopping object at 0x1486b9151a30>, 'model_checkpoint': <energizer.callbacks.model_checkpoint.ModelCheckpoint object at 0x1486b91517c0>}
|
115 |
+
[2024-04-05 21:45:59,612][hydra][INFO] -
|
116 |
+
| Name | Type | Params
|
117 |
+
-----------------------------------------
|
118 |
+
0 | bert | BertModel | 109 M
|
119 |
+
1 | dropout | Dropout | 0
|
120 |
+
2 | classifier | Linear | 1.5 K
|
121 |
+
-----------------------------------------
|
122 |
+
109 M Trainable params
|
123 |
+
0 Non-trainable params
|
124 |
+
109 M Total params
|
125 |
+
437.935 Total estimated model params size (MB)
|
126 |
+
0.00 GB CUDA Memory used
|
127 |
+
[2024-04-06 03:42:58,916][submitit][INFO] - Job has timed out. Ran 357 minutes out of requested 360 minutes.
|
128 |
+
[2024-04-06 03:42:58,959][submitit][WARNING] - Caught signal SIGUSR2 on gpu-q-45: this job is timed-out.
|
129 |
+
[2024-04-06 03:42:58,996][submitit][INFO] - Calling checkpoint method.
|
130 |
+
[2024-04-06 03:42:59,035][submitit][INFO] - Job not requeued because: timed-out too many times.
|
131 |
+
[2024-04-06 03:42:59,035][submitit][WARNING] - Bypassing signal SIGCONT
|
132 |
+
[2024-04-06 03:42:59,040][submitit][INFO] - Job completed successfully
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/hparams.yaml
ADDED
@@ -0,0 +1,99 @@
1 |
+
estimator:
|
2 |
+
accelerator: gpu
|
3 |
+
precision: 32
|
4 |
+
deterministic: true
|
5 |
+
tf32_mode: high
|
6 |
+
callbacks:
|
7 |
+
timer:
|
8 |
+
_target_: energizer.active_learning.callbacks.Timer
|
9 |
+
save_outputs:
|
10 |
+
_target_: src.callbacks.SaveOutputs
|
11 |
+
dirpath: ./logs/
|
12 |
+
instance_level: false
|
13 |
+
batch_level: false
|
14 |
+
epoch_level: false
|
15 |
+
early_stopping:
|
16 |
+
_target_: energizer.callbacks.early_stopping.EarlyStopping
|
17 |
+
monitor: train/avg_f1_minclass
|
18 |
+
stage: train
|
19 |
+
interval: epoch
|
20 |
+
mode: max
|
21 |
+
min_delta: 1.0e-05
|
22 |
+
patience: 10
|
23 |
+
stopping_threshold: null
|
24 |
+
divergence_threshold: null
|
25 |
+
verbose: true
|
26 |
+
model_checkpoint:
|
27 |
+
_target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
|
28 |
+
dirpath: .checkpoints
|
29 |
+
monitor: train/avg_f1_minclass
|
30 |
+
stage: train
|
31 |
+
mode: max
|
32 |
+
save_last: false
|
33 |
+
save_top_k: 1
|
34 |
+
verbose: true
|
35 |
+
loggers:
|
36 |
+
tensorboard:
|
37 |
+
_target_: energizer.loggers.TensorBoardLogger
|
38 |
+
root_dir: ./
|
39 |
+
name: tb_logs
|
40 |
+
version: null
|
41 |
+
data:
|
42 |
+
batch_size: 32
|
43 |
+
eval_batch_size: 256
|
44 |
+
num_workers: 32
|
45 |
+
pin_memory: true
|
46 |
+
drop_last: false
|
47 |
+
persistent_workers: true
|
48 |
+
shuffle: true
|
49 |
+
seed: 123456
|
50 |
+
replacement: false
|
51 |
+
max_length: 512
|
52 |
+
active_data:
|
53 |
+
budget: 100
|
54 |
+
positive_budget: 5
|
55 |
+
seed: 654321
|
56 |
+
fit:
|
57 |
+
min_steps: 100
|
58 |
+
max_epochs: 10
|
59 |
+
learning_rate: 4.0e-05
|
60 |
+
optimizer: adamw
|
61 |
+
log_interval: 1
|
62 |
+
enable_progress_bar: false
|
63 |
+
limit_train_batches: null
|
64 |
+
limit_validation_batches: null
|
65 |
+
active_fit:
|
66 |
+
max_budget: 5000
|
67 |
+
query_size: 25
|
68 |
+
reinit_model: true
|
69 |
+
limit_pool_batches: null
|
70 |
+
limit_test_batches: null
|
71 |
+
test:
|
72 |
+
log_interval: 1
|
73 |
+
enable_progress_bar: false
|
74 |
+
limit_batches: null
|
75 |
+
strategy:
|
76 |
+
name: randomsubset_entropy
|
77 |
+
args:
|
78 |
+
seed: 42
|
79 |
+
subpool_size: 1000
|
80 |
+
model:
|
81 |
+
name: bert-base-uncased
|
82 |
+
seed: 654321
|
83 |
+
dataset:
|
84 |
+
name: amazon-agri
|
85 |
+
text_column: text
|
86 |
+
label_column: labels
|
87 |
+
uid_column: uid
|
88 |
+
prepared_path: /rds/user/pl487/hpc-work/anchoral/data/prepared/amazoncat-agri
|
89 |
+
processed_path: /rds/user/pl487/hpc-work/anchoral/data/processed/amazoncat-13k
|
90 |
+
minority_classes:
|
91 |
+
- 1
|
92 |
+
index_metric: all-mpnet-base-v2_cosine
|
93 |
+
log_interval: 1
|
94 |
+
enable_progress_bar: false
|
95 |
+
limit_batches: null
|
96 |
+
seed: 42
|
97 |
+
experiment_group: additional_randomsubset_1000
|
98 |
+
run_name: amazon-agri/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18
|
99 |
+
data_path: /rds/user/pl487/hpc-work/anchoral/data
|
outputs/ablations/additional_randomsubset_1000/bert-base-uncased_randomsubset_entropy_2024-04-02T18-37-18_49537867_4/logs/labelled_dataset.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ffcdc2c9172c196926c2f6683bb40435c3cd257e23da4126814bccb35c6aa1a
+size 6286136