2022-04-25 00:28:46,333 ----------------------------------------------------------------------------------------------------
2022-04-25 00:28:46,337 Model: "SequenceTagger(
  (embeddings): TransformerWordEmbeddings(
    (model): XLMRobertaModel(
      (embeddings): RobertaEmbeddings(
        (word_embeddings): Embedding(250002, 1024, padding_idx=1)
        (position_embeddings): Embedding(514, 1024, padding_idx=1)
        (token_type_embeddings): Embedding(1, 1024)
        (LayerNorm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
        (dropout): Dropout(p=0.1, inplace=False)
      )
      (encoder): RobertaEncoder(
        (layer): ModuleList(
          (0-23): 24 x RobertaLayer(
            (attention): RobertaAttention(
              (self): RobertaSelfAttention(
                (query): Linear(in_features=1024, out_features=1024, bias=True)
                (key): Linear(in_features=1024, out_features=1024, bias=True)
                (value): Linear(in_features=1024, out_features=1024, bias=True)
                (dropout): Dropout(p=0.1, inplace=False)
              )
              (output): RobertaSelfOutput(
                (dense): Linear(in_features=1024, out_features=1024, bias=True)
                (LayerNorm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                (dropout): Dropout(p=0.1, inplace=False)
              )
            )
            (intermediate): RobertaIntermediate(
              (dense): Linear(in_features=1024, out_features=4096, bias=True)
              (intermediate_act_fn): GELUActivation()
            )
            (output): RobertaOutput(
              (dense): Linear(in_features=4096, out_features=1024, bias=True)
              (LayerNorm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (dropout): Dropout(p=0.1, inplace=False)
            )
          )
        )
      )
      (pooler): RobertaPooler(
        (dense): Linear(in_features=1024, out_features=1024, bias=True)
        (activation): Tanh()
      )
    )
  )
  (word_dropout): WordDropout(p=0.05)
  (locked_dropout): LockedDropout(p=0.5)
  (linear): Linear(in_features=1024, out_features=20, bias=True)
  (loss_function): CrossEntropyLoss()
)"
2022-04-25 00:28:46,337 ----------------------------------------------------------------------------------------------------
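For reference, a SequenceTagger with exactly this structure (XLM-R large embeddings feeding a single Linear(1024, 20) head, CrossEntropyLoss, no RNN or CRF) can be assembled in Flair roughly as follows. This is a minimal sketch, not the original training script: the corpus path, column format, and label type are assumptions; only the split sizes logged below are known.

from flair.datasets import ColumnCorpus
from flair.embeddings import TransformerWordEmbeddings
from flair.models import SequenceTagger

# Hypothetical corpus location and column layout; the log below only
# reveals the split sizes (352 train + 50 dev + 67 test sentences).
corpus = ColumnCorpus("data/", {0: "text", 1: "ner"})
tag_dictionary = corpus.make_label_dictionary(label_type="ner")

# xlm-roberta-large produces the 1024-dim embeddings seen in the dump.
embeddings = TransformerWordEmbeddings("xlm-roberta-large", fine_tune=True)

# use_rnn/use_crf are off, matching the printout (plain linear head and
# cross-entropy loss rather than an LSTM-CRF).
tagger = SequenceTagger(
    hidden_size=256,  # ignored when use_rnn=False
    embeddings=embeddings,
    tag_dictionary=tag_dictionary,
    tag_type="ner",
    use_rnn=False,
    use_crf=False,
    reproject_embeddings=False,
)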
2022-04-25 00:28:46,338 Corpus: "Corpus: 352 train + 50 dev + 67 test sentences"
2022-04-25 00:28:46,338 ----------------------------------------------------------------------------------------------------
2022-04-25 00:28:46,339 Parameters:
2022-04-25 00:28:46,339  - learning_rate: "0.000005"
2022-04-25 00:28:46,340  - mini_batch_size: "4"
2022-04-25 00:28:46,340  - patience: "3"
2022-04-25 00:28:46,340  - anneal_factor: "0.5"
2022-04-25 00:28:46,341  - max_epochs: "10"
2022-04-25 00:28:46,341  - shuffle: "True"
2022-04-25 00:28:46,342  - train_with_dev: "False"
2022-04-25 00:28:46,342  - batch_growth_annealing: "False"
2022-04-25 00:28:46,343 ----------------------------------------------------------------------------------------------------
2022-04-25 00:28:46,343 Model training base path: "resources/taggers/ner_xlm_finedtuned_ck1"
2022-04-25 00:28:46,344 ----------------------------------------------------------------------------------------------------
2022-04-25 00:28:46,345 Device: cuda:0
2022-04-25 00:28:46,345 ----------------------------------------------------------------------------------------------------
2022-04-25 00:28:46,346 Embeddings storage mode: none
2022-04-25 00:28:46,346 ----------------------------------------------------------------------------------------------------
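The parameter block above matches Flair's fine-tuning entry point. A sketch of the call that would produce a log like this, with the logged values filled in (base path taken from the "Model training base path" line; patience and anneal_factor are the trainer defaults echoed above):

from flair.trainers import ModelTrainer

trainer = ModelTrainer(tagger, corpus)

# fine_tune() pairs AdamW with a linear warmup/decay schedule by
# default, which is why the lr column below climbs from 0.000000 to
# 0.000005 during epoch 1 and then anneals back toward zero.
trainer.fine_tune(
    "resources/taggers/ner_xlm_finedtuned_ck1",
    learning_rate=5e-6,   # logged as 0.000005
    mini_batch_size=4,
    max_epochs=10,
)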
2022-04-25 00:28:55,605 epoch 1 - iter 8/88 - loss 1.25822871 - samples/sec: 3.46 - lr: 0.000000
2022-04-25 00:29:03,857 epoch 1 - iter 16/88 - loss 1.22365524 - samples/sec: 3.88 - lr: 0.000001
2022-04-25 00:29:13,839 epoch 1 - iter 24/88 - loss 1.18822646 - samples/sec: 3.21 - lr: 0.000001
2022-04-25 00:29:23,244 epoch 1 - iter 32/88 - loss 1.12798044 - samples/sec: 3.40 - lr: 0.000002
2022-04-25 00:29:31,472 epoch 1 - iter 40/88 - loss 1.05740151 - samples/sec: 3.89 - lr: 0.000002
2022-04-25 00:29:38,751 epoch 1 - iter 48/88 - loss 0.99049744 - samples/sec: 4.40 - lr: 0.000003
2022-04-25 00:29:46,982 epoch 1 - iter 56/88 - loss 0.92466364 - samples/sec: 3.89 - lr: 0.000003
2022-04-25 00:29:54,849 epoch 1 - iter 64/88 - loss 0.87012404 - samples/sec: 4.07 - lr: 0.000004
2022-04-25 00:30:04,123 epoch 1 - iter 72/88 - loss 0.80738819 - samples/sec: 3.45 - lr: 0.000004
2022-04-25 00:30:13,985 epoch 1 - iter 80/88 - loss 0.76049921 - samples/sec: 3.25 - lr: 0.000005
2022-04-25 00:30:23,710 epoch 1 - iter 88/88 - loss 0.72027292 - samples/sec: 3.29 - lr: 0.000005
2022-04-25 00:30:23,712 ----------------------------------------------------------------------------------------------------
2022-04-25 00:30:23,713 EPOCH 1 done: loss 0.7203 - lr 0.000005
2022-04-25 00:30:30,732 Evaluating as a multi-label problem: False
2022-04-25 00:30:30,742 DEV : loss 0.20562097430229187 - f1-score (micro avg) 0.0027
2022-04-25 00:30:30,751 BAD EPOCHS (no improvement): 4
2022-04-25 00:30:30,753 ----------------------------------------------------------------------------------------------------
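Note the lr column in epoch 1: it ramps linearly from 0.000000 up to the target 0.000005 over the first epoch and then decays for the remainder of the run, as visible in the later epochs. The same warmup/decay behaviour can be reproduced outside Flair with the Hugging Face helper; a generic sketch, assuming 88 batches per epoch and 10 epochs as in this run:

import torch
from transformers import get_linear_schedule_with_warmup

optimizer = torch.optim.AdamW(tagger.parameters(), lr=5e-6)
total_steps = 88 * 10  # batches per epoch x epochs, as logged
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=int(0.1 * total_steps),  # ~10% warmup (assumption)
    num_training_steps=total_steps,
)
# In the training loop, call scheduler.step() after each optimizer.step().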
2022-04-25 00:30:39,284 epoch 2 - iter 8/88 - loss 0.32586993 - samples/sec: 3.75 - lr: 0.000005
2022-04-25 00:30:47,933 epoch 2 - iter 16/88 - loss 0.33892041 - samples/sec: 3.70 - lr: 0.000005
2022-04-25 00:30:56,990 epoch 2 - iter 24/88 - loss 0.33672071 - samples/sec: 3.53 - lr: 0.000005
2022-04-25 00:31:05,736 epoch 2 - iter 32/88 - loss 0.33060665 - samples/sec: 3.66 - lr: 0.000005
2022-04-25 00:31:13,937 epoch 2 - iter 40/88 - loss 0.33045049 - samples/sec: 3.90 - lr: 0.000005
2022-04-25 00:31:23,091 epoch 2 - iter 48/88 - loss 0.32851558 - samples/sec: 3.50 - lr: 0.000005
2022-04-25 00:31:31,313 epoch 2 - iter 56/88 - loss 0.32679558 - samples/sec: 3.89 - lr: 0.000005
2022-04-25 00:31:41,184 epoch 2 - iter 64/88 - loss 0.32379177 - samples/sec: 3.24 - lr: 0.000005
2022-04-25 00:31:49,757 epoch 2 - iter 72/88 - loss 0.32124627 - samples/sec: 3.73 - lr: 0.000005
2022-04-25 00:31:57,768 epoch 2 - iter 80/88 - loss 0.32825760 - samples/sec: 4.00 - lr: 0.000004
2022-04-25 00:32:08,014 epoch 2 - iter 88/88 - loss 0.32124062 - samples/sec: 3.12 - lr: 0.000004
2022-04-25 00:32:08,017 ----------------------------------------------------------------------------------------------------
2022-04-25 00:32:08,018 EPOCH 2 done: loss 0.3212 - lr 0.000004
2022-04-25 00:32:15,400 Evaluating as a multi-label problem: False
2022-04-25 00:32:15,415 DEV : loss 0.15934991836547852 - f1-score (micro avg) 0.0082
2022-04-25 00:32:15,428 BAD EPOCHS (no improvement): 4
2022-04-25 00:32:15,431 ----------------------------------------------------------------------------------------------------
2022-04-25 00:32:25,133 epoch 3 - iter 8/88 - loss 0.26548392 - samples/sec: 3.30 - lr: 0.000004
2022-04-25 00:32:33,272 epoch 3 - iter 16/88 - loss 0.28651787 - samples/sec: 3.93 - lr: 0.000004
2022-04-25 00:32:41,433 epoch 3 - iter 24/88 - loss 0.29010948 - samples/sec: 3.92 - lr: 0.000004
2022-04-25 00:32:50,243 epoch 3 - iter 32/88 - loss 0.29681501 - samples/sec: 3.63 - lr: 0.000004
2022-04-25 00:32:59,007 epoch 3 - iter 40/88 - loss 0.29554105 - samples/sec: 3.65 - lr: 0.000004
2022-04-25 00:33:07,692 epoch 3 - iter 48/88 - loss 0.29343573 - samples/sec: 3.69 - lr: 0.000004
2022-04-25 00:33:16,189 epoch 3 - iter 56/88 - loss 0.29547981 - samples/sec: 3.77 - lr: 0.000004
2022-04-25 00:33:25,763 epoch 3 - iter 64/88 - loss 0.28997972 - samples/sec: 3.34 - lr: 0.000004
2022-04-25 00:33:36,471 epoch 3 - iter 72/88 - loss 0.29000464 - samples/sec: 2.99 - lr: 0.000004
2022-04-25 00:33:45,481 epoch 3 - iter 80/88 - loss 0.29344732 - samples/sec: 3.55 - lr: 0.000004
2022-04-25 00:33:53,793 epoch 3 - iter 88/88 - loss 0.29232563 - samples/sec: 3.85 - lr: 0.000004
2022-04-25 00:33:53,797 ----------------------------------------------------------------------------------------------------
2022-04-25 00:33:53,798 EPOCH 3 done: loss 0.2923 - lr 0.000004
2022-04-25 00:34:00,978 Evaluating as a multi-label problem: False
2022-04-25 00:34:00,991 DEV : loss 0.14386053383350372 - f1-score (micro avg) 0.0664
2022-04-25 00:34:00,999 BAD EPOCHS (no improvement): 4
2022-04-25 00:34:01,000 ----------------------------------------------------------------------------------------------------
2022-04-25 00:34:09,617 epoch 4 - iter 8/88 - loss 0.32142401 - samples/sec: 3.72 - lr: 0.000004
2022-04-25 00:34:17,886 epoch 4 - iter 16/88 - loss 0.30301646 - samples/sec: 3.87 - lr: 0.000004
2022-04-25 00:34:27,850 epoch 4 - iter 24/88 - loss 0.28913590 - samples/sec: 3.21 - lr: 0.000004
2022-04-25 00:34:35,703 epoch 4 - iter 32/88 - loss 0.29200045 - samples/sec: 4.08 - lr: 0.000004
2022-04-25 00:34:44,383 epoch 4 - iter 40/88 - loss 0.28601870 - samples/sec: 3.69 - lr: 0.000004
2022-04-25 00:34:53,597 epoch 4 - iter 48/88 - loss 0.28333016 - samples/sec: 3.47 - lr: 0.000004
2022-04-25 00:35:02,237 epoch 4 - iter 56/88 - loss 0.28101070 - samples/sec: 3.70 - lr: 0.000004
2022-04-25 00:35:11,887 epoch 4 - iter 64/88 - loss 0.27725419 - samples/sec: 3.32 - lr: 0.000003
2022-04-25 00:35:20,971 epoch 4 - iter 72/88 - loss 0.27522330 - samples/sec: 3.52 - lr: 0.000003
2022-04-25 00:35:29,993 epoch 4 - iter 80/88 - loss 0.27767522 - samples/sec: 3.55 - lr: 0.000003
2022-04-25 00:35:38,121 epoch 4 - iter 88/88 - loss 0.27780342 - samples/sec: 3.94 - lr: 0.000003
2022-04-25 00:35:38,125 ----------------------------------------------------------------------------------------------------
2022-04-25 00:35:38,126 EPOCH 4 done: loss 0.2778 - lr 0.000003
2022-04-25 00:35:45,523 Evaluating as a multi-label problem: False
2022-04-25 00:35:45,536 DEV : loss 0.13249367475509644 - f1-score (micro avg) 0.1099
2022-04-25 00:35:45,545 BAD EPOCHS (no improvement): 4
2022-04-25 00:35:45,547 ----------------------------------------------------------------------------------------------------
2022-04-25 00:35:55,215 epoch 5 - iter 8/88 - loss 0.26147172 - samples/sec: 3.31 - lr: 0.000003
2022-04-25 00:36:05,160 epoch 5 - iter 16/88 - loss 0.26559845 - samples/sec: 3.22 - lr: 0.000003
2022-04-25 00:36:13,857 epoch 5 - iter 24/88 - loss 0.26674131 - samples/sec: 3.68 - lr: 0.000003
2022-04-25 00:36:22,022 epoch 5 - iter 32/88 - loss 0.26445641 - samples/sec: 3.92 - lr: 0.000003
2022-04-25 00:36:29,834 epoch 5 - iter 40/88 - loss 0.26849622 - samples/sec: 4.10 - lr: 0.000003
2022-04-25 00:36:38,499 epoch 5 - iter 48/88 - loss 0.26495720 - samples/sec: 3.69 - lr: 0.000003
2022-04-25 00:36:46,651 epoch 5 - iter 56/88 - loss 0.26747065 - samples/sec: 3.93 - lr: 0.000003
2022-04-25 00:36:56,479 epoch 5 - iter 64/88 - loss 0.26716735 - samples/sec: 3.26 - lr: 0.000003
2022-04-25 00:37:05,247 epoch 5 - iter 72/88 - loss 0.26323866 - samples/sec: 3.65 - lr: 0.000003
2022-04-25 00:37:14,099 epoch 5 - iter 80/88 - loss 0.26763434 - samples/sec: 3.62 - lr: 0.000003
2022-04-25 00:37:23,612 epoch 5 - iter 88/88 - loss 0.26510194 - samples/sec: 3.36 - lr: 0.000003
2022-04-25 00:37:23,615 ----------------------------------------------------------------------------------------------------
2022-04-25 00:37:23,615 EPOCH 5 done: loss 0.2651 - lr 0.000003
2022-04-25 00:37:30,711 Evaluating as a multi-label problem: False
2022-04-25 00:37:30,723 DEV : loss 0.1335981786251068 - f1-score (micro avg) 0.1516
2022-04-25 00:37:30,734 BAD EPOCHS (no improvement): 4
2022-04-25 00:37:30,735 ----------------------------------------------------------------------------------------------------
2022-04-25 00:37:39,100 epoch 6 - iter 8/88 - loss 0.25254979 - samples/sec: 3.83 - lr: 0.000003
2022-04-25 00:37:48,489 epoch 6 - iter 16/88 - loss 0.24629379 - samples/sec: 3.41 - lr: 0.000003
2022-04-25 00:37:56,856 epoch 6 - iter 24/88 - loss 0.25016090 - samples/sec: 3.83 - lr: 0.000003
2022-04-25 00:38:06,647 epoch 6 - iter 32/88 - loss 0.25646469 - samples/sec: 3.27 - lr: 0.000003
2022-04-25 00:38:14,700 epoch 6 - iter 40/88 - loss 0.25909943 - samples/sec: 3.97 - lr: 0.000003
2022-04-25 00:38:23,772 epoch 6 - iter 48/88 - loss 0.25850607 - samples/sec: 3.53 - lr: 0.000002
2022-04-25 00:38:32,983 epoch 6 - iter 56/88 - loss 0.25417190 - samples/sec: 3.48 - lr: 0.000002
2022-04-25 00:38:42,014 epoch 6 - iter 64/88 - loss 0.25534730 - samples/sec: 3.54 - lr: 0.000002
2022-04-25 00:38:49,968 epoch 6 - iter 72/88 - loss 0.25617877 - samples/sec: 4.02 - lr: 0.000002
2022-04-25 00:38:58,183 epoch 6 - iter 80/88 - loss 0.25537613 - samples/sec: 3.90 - lr: 0.000002
2022-04-25 00:39:07,930 epoch 6 - iter 88/88 - loss 0.25729809 - samples/sec: 3.28 - lr: 0.000002
2022-04-25 00:39:07,933 ----------------------------------------------------------------------------------------------------
2022-04-25 00:39:07,934 EPOCH 6 done: loss 0.2573 - lr 0.000002
2022-04-25 00:39:15,220 Evaluating as a multi-label problem: False
2022-04-25 00:39:15,238 DEV : loss 0.12874221801757812 - f1-score (micro avg) 0.215
2022-04-25 00:39:15,250 BAD EPOCHS (no improvement): 4
2022-04-25 00:39:15,252 ----------------------------------------------------------------------------------------------------
2022-04-25 00:39:23,920 epoch 7 - iter 8/88 - loss 0.25032306 - samples/sec: 3.69 - lr: 0.000002
2022-04-25 00:39:32,341 epoch 7 - iter 16/88 - loss 0.24173648 - samples/sec: 3.80 - lr: 0.000002
2022-04-25 00:39:42,283 epoch 7 - iter 24/88 - loss 0.25674155 - samples/sec: 3.22 - lr: 0.000002
2022-04-25 00:39:50,287 epoch 7 - iter 32/88 - loss 0.25221355 - samples/sec: 4.00 - lr: 0.000002
2022-04-25 00:39:58,742 epoch 7 - iter 40/88 - loss 0.25534056 - samples/sec: 3.79 - lr: 0.000002
2022-04-25 00:40:07,531 epoch 7 - iter 48/88 - loss 0.25396630 - samples/sec: 3.64 - lr: 0.000002
2022-04-25 00:40:16,857 epoch 7 - iter 56/88 - loss 0.25506091 - samples/sec: 3.43 - lr: 0.000002
2022-04-25 00:40:26,056 epoch 7 - iter 64/88 - loss 0.25606985 - samples/sec: 3.48 - lr: 0.000002
2022-04-25 00:40:34,742 epoch 7 - iter 72/88 - loss 0.25690660 - samples/sec: 3.68 - lr: 0.000002
2022-04-25 00:40:43,201 epoch 7 - iter 80/88 - loss 0.25644415 - samples/sec: 3.78 - lr: 0.000002
2022-04-25 00:40:53,512 epoch 7 - iter 88/88 - loss 0.25640539 - samples/sec: 3.10 - lr: 0.000002
2022-04-25 00:40:53,515 ----------------------------------------------------------------------------------------------------
2022-04-25 00:40:53,516 EPOCH 7 done: loss 0.2564 - lr 0.000002
2022-04-25 00:40:59,919 Evaluating as a multi-label problem: False
2022-04-25 00:40:59,934 DEV : loss 0.12849482893943787 - f1-score (micro avg) 0.2546
2022-04-25 00:40:59,943 BAD EPOCHS (no improvement): 4
2022-04-25 00:40:59,944 ----------------------------------------------------------------------------------------------------
2022-04-25 00:41:09,917 epoch 8 - iter 8/88 - loss 0.26072190 - samples/sec: 3.21 - lr: 0.000002
2022-04-25 00:41:18,102 epoch 8 - iter 16/88 - loss 0.27005318 - samples/sec: 3.91 - lr: 0.000002
2022-04-25 00:41:26,730 epoch 8 - iter 24/88 - loss 0.26735720 - samples/sec: 3.71 - lr: 0.000002
2022-04-25 00:41:35,802 epoch 8 - iter 32/88 - loss 0.25981810 - samples/sec: 3.53 - lr: 0.000001
2022-04-25 00:41:45,065 epoch 8 - iter 40/88 - loss 0.25497924 - samples/sec: 3.46 - lr: 0.000001
2022-04-25 00:41:53,266 epoch 8 - iter 48/88 - loss 0.25297761 - samples/sec: 3.90 - lr: 0.000001
2022-04-25 00:42:01,654 epoch 8 - iter 56/88 - loss 0.25588829 - samples/sec: 3.82 - lr: 0.000001
2022-04-25 00:42:10,833 epoch 8 - iter 64/88 - loss 0.25234574 - samples/sec: 3.49 - lr: 0.000001
2022-04-25 00:42:20,767 epoch 8 - iter 72/88 - loss 0.25437752 - samples/sec: 3.22 - lr: 0.000001
2022-04-25 00:42:29,555 epoch 8 - iter 80/88 - loss 0.25358380 - samples/sec: 3.64 - lr: 0.000001
2022-04-25 00:42:38,444 epoch 8 - iter 88/88 - loss 0.25159043 - samples/sec: 3.60 - lr: 0.000001
2022-04-25 00:42:38,447 ----------------------------------------------------------------------------------------------------
2022-04-25 00:42:38,447 EPOCH 8 done: loss 0.2516 - lr 0.000001
2022-04-25 00:42:45,466 Evaluating as a multi-label problem: False
2022-04-25 00:42:45,478 DEV : loss 0.13098381459712982 - f1-score (micro avg) 0.2535
2022-04-25 00:42:45,486 BAD EPOCHS (no improvement): 4
2022-04-25 00:42:45,488 ----------------------------------------------------------------------------------------------------
2022-04-25 00:42:55,033 epoch 9 - iter 8/88 - loss 0.22931718 - samples/sec: 3.35 - lr: 0.000001
2022-04-25 00:43:03,513 epoch 9 - iter 16/88 - loss 0.25355650 - samples/sec: 3.77 - lr: 0.000001
2022-04-25 00:43:13,870 epoch 9 - iter 24/88 - loss 0.25289254 - samples/sec: 3.09 - lr: 0.000001
2022-04-25 00:43:22,935 epoch 9 - iter 32/88 - loss 0.24994442 - samples/sec: 3.53 - lr: 0.000001
2022-04-25 00:43:30,905 epoch 9 - iter 40/88 - loss 0.24795011 - samples/sec: 4.02 - lr: 0.000001
2022-04-25 00:43:39,312 epoch 9 - iter 48/88 - loss 0.24733180 - samples/sec: 3.81 - lr: 0.000001
2022-04-25 00:43:47,522 epoch 9 - iter 56/88 - loss 0.24885510 - samples/sec: 3.90 - lr: 0.000001
2022-04-25 00:43:55,856 epoch 9 - iter 64/88 - loss 0.25085127 - samples/sec: 3.84 - lr: 0.000001
2022-04-25 00:44:04,511 epoch 9 - iter 72/88 - loss 0.25141658 - samples/sec: 3.70 - lr: 0.000001
2022-04-25 00:44:13,473 epoch 9 - iter 80/88 - loss 0.25114253 - samples/sec: 3.57 - lr: 0.000001
2022-04-25 00:44:23,065 epoch 9 - iter 88/88 - loss 0.25032100 - samples/sec: 3.34 - lr: 0.000001
2022-04-25 00:44:23,068 ----------------------------------------------------------------------------------------------------
2022-04-25 00:44:23,069 EPOCH 9 done: loss 0.2503 - lr 0.000001
2022-04-25 00:44:30,828 Evaluating as a multi-label problem: False
2022-04-25 00:44:30,844 DEV : loss 0.1269032210111618 - f1-score (micro avg) 0.2445
2022-04-25 00:44:30,854 BAD EPOCHS (no improvement): 4
2022-04-25 00:44:30,855 ----------------------------------------------------------------------------------------------------
2022-04-25 00:44:38,190 epoch 10 - iter 8/88 - loss 0.25877504 - samples/sec: 4.36 - lr: 0.000001
2022-04-25 00:44:47,141 epoch 10 - iter 16/88 - loss 0.26538309 - samples/sec: 3.58 - lr: 0.000000
2022-04-25 00:44:56,357 epoch 10 - iter 24/88 - loss 0.25992814 - samples/sec: 3.47 - lr: 0.000000
2022-04-25 00:45:04,805 epoch 10 - iter 32/88 - loss 0.25024608 - samples/sec: 3.79 - lr: 0.000000
2022-04-25 00:45:12,966 epoch 10 - iter 40/88 - loss 0.25450198 - samples/sec: 3.92 - lr: 0.000000
2022-04-25 00:45:23,081 epoch 10 - iter 48/88 - loss 0.25508489 - samples/sec: 3.16 - lr: 0.000000
2022-04-25 00:45:32,191 epoch 10 - iter 56/88 - loss 0.25273411 - samples/sec: 3.51 - lr: 0.000000
2022-04-25 00:45:40,798 epoch 10 - iter 64/88 - loss 0.25090079 - samples/sec: 3.72 - lr: 0.000000
2022-04-25 00:45:49,572 epoch 10 - iter 72/88 - loss 0.24954558 - samples/sec: 3.65 - lr: 0.000000
2022-04-25 00:45:59,254 epoch 10 - iter 80/88 - loss 0.24933938 - samples/sec: 3.31 - lr: 0.000000
2022-04-25 00:46:08,852 epoch 10 - iter 88/88 - loss 0.24774755 - samples/sec: 3.33 - lr: 0.000000
2022-04-25 00:46:08,856 ----------------------------------------------------------------------------------------------------
2022-04-25 00:46:08,857 EPOCH 10 done: loss 0.2477 - lr 0.000000
2022-04-25 00:46:15,919 Evaluating as a multi-label problem: False
2022-04-25 00:46:15,935 DEV : loss 0.12706945836544037 - f1-score (micro avg) 0.2495
2022-04-25 00:46:15,947 BAD EPOCHS (no improvement): 4
2022-04-25 00:46:19,590 ----------------------------------------------------------------------------------------------------
2022-04-25 00:46:19,592 Testing using last state of model ...
2022-04-25 00:46:29,219 Evaluating as a multi-label problem: False
2022-04-25 00:46:29,232 0.4412 0.2257 0.2986 0.1758
2022-04-25 00:46:29,232
Results:
- F-score (micro) 0.2986
- F-score (macro) 0.147
- Accuracy 0.1758

By class:
              precision    recall  f1-score   support

         ORG     0.4718    0.2314    0.3105       687
         LOC     0.3837    0.2171    0.2773       304
        PENT     0.0000    0.0000    0.0000         6
        MISC     0.0000    0.0000    0.0000         0

   micro avg     0.4412    0.2257    0.2986       997
   macro avg     0.2139    0.1121    0.1470       997
weighted avg     0.4421    0.2257    0.2985       997

2022-04-25 00:46:29,233 ----------------------------------------------------------------------------------------------------
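After the final epoch, Flair writes the trained weights under the base path (by convention as final-model.pt, since this run tested "using last state of model"). A minimal sketch of loading that checkpoint and tagging new text; the example sentence is made up:

from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load(
    "resources/taggers/ner_xlm_finedtuned_ck1/final-model.pt"
)

sentence = Sentence("Siemens opened a new office in Munich.")
tagger.predict(sentence)
for span in sentence.get_spans("ner"):  # labels as in the table above (ORG, LOC, ...)
    print(span)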