TajaKuzman committed on
Commit 20dd6be
1 Parent(s): 98c90c1

Upload 8 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,67 @@
+{
+  "_name_or_path": "xlm-roberta-large",
+  "architectures": [
+    "XLMRobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "education",
+    "1": "human interest",
+    "2": "society",
+    "3": "sport",
+    "4": "crime, law and justice",
+    "5": "disaster, accident and emergency incident",
+    "6": "arts, culture, entertainment and media",
+    "7": "politics",
+    "8": "economy, business and finance",
+    "9": "lifestyle and leisure",
+    "10": "science and technology",
+    "11": "health",
+    "12": "labour",
+    "13": "religion",
+    "14": "weather",
+    "15": "environment",
+    "16": "conflict, war and peace"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "education": 0,
+    "human interest": 1,
+    "society": 2,
+    "sport": 3,
+    "crime, law and justice": 4,
+    "disaster, accident and emergency incident": 5,
+    "arts, culture, entertainment and media": 6,
+    "politics": 7,
+    "economy, business and finance": 8,
+    "lifestyle and leisure": 9,
+    "science and technology": 10,
+    "health": 11,
+    "labour": 12,
+    "religion": 13,
+    "weather": 14,
+    "environment": 15,
+    "conflict, war and peace": 16
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.42.4",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
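config.json above describes an XLM-RoBERTa-large sequence-classification head fine-tuned for 17 top-level IPTC news topics, with id2label/label2id mapping class indices to topic names. A minimal inference sketch with the Hugging Face transformers pipeline follows; the checkpoint directory "./15k-model-v2" is only a placeholder taken from output_dir in model_args.json, not a confirmed repository id.

# Minimal inference sketch (assumed usage; the directory path is a placeholder).
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="./15k-model-v2",        # placeholder path to the uploaded files
    tokenizer="./15k-model-v2",
    truncation=True,               # inputs longer than 512 tokens are cut off
    max_length=512,
)

# The pipeline returns the topic name via id2label in config.json.
print(classifier("The central bank raised interest rates for the third time this year."))
# e.g. [{'label': 'economy, business and finance', 'score': ...}]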
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:357862230f8b370be969e8fdeecc58d3cc7776b71c7d74c3619609669c0b54d7
+size 2239680172
model_args.json ADDED
@@ -0,0 +1 @@
+{"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": true, "adafactor_scale_parameter": true, "adafactor_warmup_init": true, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/best_model", "cache_dir": "cache_dir/", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "eval_batch_size": 100, "evaluate_during_training": false, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 2000, "evaluate_during_training_verbose": false, "evaluate_each_epoch": true, "fp16": true, "gradient_accumulation_steps": 1, "learning_rate": 8e-06, "local_rank": -1, "logging_steps": 50, "loss_type": null, "loss_args": {}, "manual_seed": null, "max_grad_norm": 1.0, "max_seq_length": 512, "model_name": "xlm-roberta-large", "model_type": "xlmroberta", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 5, "optimizer": "AdamW", "output_dir": "15k-model-v2", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 254, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": false, "save_optimizer_and_scheduler": true, "save_steps": -1, "scheduler": "linear_schedule_with_warmup", "silent": true, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": "xlm-roberta-large", "tokenizer_type": null, "train_batch_size": 32, "train_custom_parameters_only": false, "trust_remote_code": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {}, "wandb_project": "IPTC", "warmup_ratio": 0.06, "warmup_steps": 141, "weight_decay": 0.0, "model_class": "ClassificationModel", "labels_list": ["education", "human interest", "society", "sport", "crime, law and justice", "disaster, accident and emergency incident", "arts, culture, entertainment and media", "politics", "economy, business and finance", "lifestyle and leisure", "science and technology", "health", "labour", "religion", "weather", "environment", "conflict, war and peace"], "labels_map": {"education": 0, "human interest": 1, "society": 2, "sport": 3, "crime, law and justice": 4, "disaster, accident and emergency incident": 5, "arts, culture, entertainment and media": 6, "politics": 7, "economy, business and finance": 8, "lifestyle and leisure": 9, "science and technology": 10, "health": 11, "labour": 12, "religion": 13, "weather": 14, "environment": 15, "conflict, war and peace": 16}, "lazy_delimiter": "\t", "lazy_labels_column": 1, "lazy_loading": false, "lazy_loading_start_line": 1, "lazy_text_a_column": null, "lazy_text_b_column": null, "lazy_text_column": 0, "onnx": false, "regression": false, "sliding_window": false, "special_tokens_list": [], "stride": 0.8, "tie_value": 1}
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ffb37461c391f096759f4a9bbbc329da0f36952f88bab061fcf84940c022e98
+size 17082999
tokenizer_config.json ADDED
@@ -0,0 +1,54 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
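tokenizer_config.json registers an XLMRobertaTokenizer (SentencePiece-based, backed by sentencepiece.bpe.model and tokenizer.json above) with the standard <s>, </s>, <pad>, <unk> and <mask> special tokens and a model_max_length of 512, so longer inputs need truncation. A small loading sketch with transformers, again using a placeholder path:

# Tokenizer loading sketch (the directory path is a placeholder assumption).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./15k-model-v2")

encoded = tokenizer(
    "Heavy rain and strong winds are expected across the region tomorrow.",
    truncation=True,               # respect model_max_length = 512
    max_length=512,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)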
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd99a34b1d5fd589a01c16d0ce04bca02b5b11815025e32be03b75b088a15264
+size 3695