mahimairaja committed on
Commit 67e3b71
1 Parent(s): 7f22e7e

Update config.json

Files changed (1)
  1. config.json +23 -63
config.json CHANGED
@@ -1,65 +1,25 @@
  {
- "torchscript": False,
- "torch_dtype": "float16",
- "use_bfloat16": False,
- "tf_legacy_loss": False,
- "pruned_heads": {},
- "tie_word_embeddings": False,
- "is_encoder_decoder": False,
- "is_decoder": False,
- "cross_attention_hidden_size": None,
- "add_cross_attention": False,
- "tie_encoder_decoder": False,
- "max_length": 20,
- "min_length": 0,
- "do_sample": False,
- "early_stopping": False,
- "num_beams": 1,
- "num_beam_groups": 1,
- "diversity_penalty": 0.0,
- "temperature": 1.0,
- "top_k": 50,
- "top_p": 1.0,
- "typical_p": 1.0,
- "repetition_penalty": 1.0,
- "length_penalty": 1.0,
- "no_repeat_ngram_size": 0,
- "encoder_no_repeat_ngram_size": 0,
- "bad_words_ids": None,
- "num_return_sequences": 1,
- "chunk_size_feed_forward": 0,
- "output_scores": False,
- "return_dict_in_generate": False,
- "forced_bos_token_id": None,
- "forced_eos_token_id": None,
- "remove_invalid_values": False,
- "exponential_decay_length_penalty": None,
- "suppress_tokens": None,
- "begin_suppress_tokens": None,
- "architectures": ["LlamaForCausalLM"],
- "finetuning_task": None,
- "id2label": {0: "LABEL_0", 1: "LABEL_1"},
- "label2id": {"LABEL_0": 0, "LABEL_1": 1},
- "tokenizer_class": None,
- "prefix": None,
- "bos_token_id": 1,
- "pad_token_id": None,
- "eos_token_id": 2,
- "sep_token_id": None,
- "decoder_start_token_id": None,
- "task_specific_params": None,
- "problem_type": None,
- "_name_or_path": "mahimairaja/tweet-summarization-llama-2-finetuned",
- "transformers_version": "4.32.1",
- "model_type": "llama",
- "quantization_config": {"quant_method": <QuantizationMethod.BITS_AND_BYTES: "bitsandbytes">,
- "load_in_8bit": False,
- "load_in_4bit": True,
- "llm_int8_threshold": 6.0,
- "llm_int8_skip_modules": None,
- "llm_int8_enable_fp32_cpu_offload": False,
- "llm_int8_has_fp16_weight": False,
- "bnb_4bit_quant_type": "nf4",
- "bnb_4bit_use_double_quant": False,
- "bnb_4bit_compute_dtype": "float16"}}
+ "_name_or_path": "mahimairaja/tweet-summarization-llama-2-finetuned",
+ "architectures": [
+   "LlamaForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 11008,
+ "max_position_embeddings": 4096,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 32,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float16",
+ "transformers_version": "4.32.1",
+ "use_cache": true,
+ "vocab_size": 32000
  }
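
The replacement config.json is standard transformers output for a Llama architecture, whereas the removed file used Python literals (False, None) and so was not valid JSON. As a quick sanity check, the sketch below assumes transformers >= 4.32 is installed and that the repo id from "_name_or_path" is reachable; it loads the updated file through AutoConfig and prints a few values that appear in the diff.

    from transformers import AutoConfig

    # Load the updated config.json; "model_type": "llama" makes AutoConfig
    # resolve to LlamaConfig.
    config = AutoConfig.from_pretrained("mahimairaja/tweet-summarization-llama-2-finetuned")

    print(type(config).__name__)                          # expected: LlamaConfig
    print(config.hidden_size, config.num_hidden_layers)   # expected: 4096 32
    print(config.vocab_size)                              # expected: 32000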
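The old file also embedded a bitsandbytes "quantization_config" whose quant_method value was an enum repr rather than JSON, another reason the previous file could not be parsed. That block is gone from the new config. A common pattern, shown here only as an assumption and not as the author's documented workflow, is to pass the equivalent settings at load time via BitsAndBytesConfig; the values below simply mirror those removed from the diff.

    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    # Mirrors the quantization block removed from config.json
    # (requires the bitsandbytes and accelerate packages).
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,                     # was "load_in_4bit": True
        bnb_4bit_quant_type="nf4",             # was "bnb_4bit_quant_type": "nf4"
        bnb_4bit_use_double_quant=False,       # was "bnb_4bit_use_double_quant": False
        bnb_4bit_compute_dtype=torch.float16,  # was "bnb_4bit_compute_dtype": "float16"
    )

    model = AutoModelForCausalLM.from_pretrained(
        "mahimairaja/tweet-summarization-llama-2-finetuned",
        quantization_config=bnb_config,
        device_map="auto",  # assumption for convenience; not stated in this commit
    )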