Salmamoori committed on
Commit
b1eaf35
1 Parent(s): 4556eee

End of training

Browse files
Files changed (3) hide show
  1. README.md +4 -3
  2. tokenizer.json +8 -1
  3. tokenizer_config.json +2 -1
README.md CHANGED
@@ -1,8 +1,9 @@
1
  ---
2
  base_model: llava-hf/llava-1.5-7b-hf
3
  library_name: peft
4
- license: llama2
5
  tags:
 
 
6
  - trl
7
  - sft
8
  - generated_from_trainer
@@ -16,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  # llava-bddx-finetuned
18
 
19
- This model is a fine-tuned version of [llava-hf/llava-1.5-7b-hf](https://huggingface.co/llava-hf/llava-1.5-7b-hf) on the None dataset.
20
 
21
  ## Model description
22
 
@@ -37,7 +38,7 @@ More information needed
37
  The following hyperparameters were used during training:
38
  - learning_rate: 1.4e-05
39
  - train_batch_size: 4
40
- - eval_batch_size: 8
41
  - seed: 42
42
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
43
  - lr_scheduler_type: linear
 
1
  ---
2
  base_model: llava-hf/llava-1.5-7b-hf
3
  library_name: peft
 
4
  tags:
5
+ - BDD-X
6
+ - Autonomous-Driving
7
  - trl
8
  - sft
9
  - generated_from_trainer
 
17
 
18
  # llava-bddx-finetuned
19
 
20
+ This model is a fine-tuned version of [llava-hf/llava-1.5-7b-hf](https://huggingface.co/llava-hf/llava-1.5-7b-hf) on the BDD-X dataset.
21
 
22
  ## Model description
23
 
 
38
  The following hyperparameters were used during training:
39
  - learning_rate: 1.4e-05
40
  - train_batch_size: 4
41
+ - eval_batch_size: 4
42
  - seed: 42
43
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
44
  - lr_scheduler_type: linear
tokenizer.json CHANGED
@@ -1,7 +1,14 @@
1
  {
2
  "version": "1.0",
3
  "truncation": null,
4
- "padding": null,
 
 
 
 
 
 
 
5
  "added_tokens": [
6
  {
7
  "id": 0,
 
1
  {
2
  "version": "1.0",
3
  "truncation": null,
4
+ "padding": {
5
+ "strategy": "BatchLongest",
6
+ "direction": "Right",
7
+ "pad_to_multiple_of": null,
8
+ "pad_id": 32001,
9
+ "pad_type_id": 0,
10
+ "pad_token": "<pad>"
11
+ },
12
  "added_tokens": [
13
  {
14
  "id": 0,
tokenizer_config.json CHANGED
@@ -45,12 +45,13 @@
45
  }
46
  },
47
  "bos_token": "<s>",
 
48
  "clean_up_tokenization_spaces": false,
49
  "eos_token": "</s>",
50
  "legacy": false,
51
  "model_max_length": 1000000000000000019884624838656,
52
  "pad_token": "<pad>",
53
- "padding_side": "left",
54
  "processor_class": "LlavaProcessor",
55
  "sp_model_kwargs": {},
56
  "tokenizer_class": "LlamaTokenizer",
 
45
  }
46
  },
47
  "bos_token": "<s>",
48
+ "chat_template": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. {% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image>{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}",
49
  "clean_up_tokenization_spaces": false,
50
  "eos_token": "</s>",
51
  "legacy": false,
52
  "model_max_length": 1000000000000000019884624838656,
53
  "pad_token": "<pad>",
54
+ "padding_side": "right",
55
  "processor_class": "LlavaProcessor",
56
  "sp_model_kwargs": {},
57
  "tokenizer_class": "LlamaTokenizer",