run_name: multitask_train
seed: 6198
epoch: null
dry_run: false
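# Model: Qwen2-7B-based LLM (28 layers, d_model 3584, 28 heads with 4 KV heads) plus a CLIP-style vision backbone.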
model:
  d_model: 3584
  n_heads: 28
  n_kv_heads: 4
  qkv_bias: true
  clip_qkv: null
  n_layers: 28
  mlp_ratio: 4
  mlp_hidden_size: 37888
  activation_type: swiglu
  block_type: sequential
  block_group_size: 1
  alibi: false
  alibi_bias_max: 8.0
  rope: true
  rope_full_precision: true
  rope_theta: 1000000.0
  rope_impl: llama
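  # Vision backbone: OpenAI CLIP ViT-L/14 at 336x336 input (patch size 14, embedding dim 1024, 23 layers used).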
  vision_backbone:
    image_model_type: openai
    image_default_input_size:
    - 336
    - 336
    image_patch_size: 14
    image_pos_patch_size: 14
    image_emb_dim: 1024
    image_num_heads: 16
    image_num_key_value_heads: 16
    image_num_layers: 23
    image_head_dim: 64
    image_mlp_dim: 4096
    image_mlp_activations: quick_gelu
    image_dropout_rate: 0.0
    image_num_pos: 577
    image_norm_eps: 1.0e-05
    attention_dropout: 0.0
    residual_dropout: 0.0
    initializer_range: 0.02
    fsdp_wrap: false
    resize_mode: default
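  # Pretrained weights for the vision encoder and the LLM are loaded from separate checkpoints.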
  vit_load_path: /weka/oe-training-default/mm-olmo/pretrained_image_encoders/vit-l-14-336.pt
  llm_load_path: /weka/oe-training-default/mm-olmo/pretrained_llms/qwen2-7b.pt
  low_cpu_fsdp: true
  attention_type: sdpa
  float32_attention: true
  attention_dropout: 0.0
  response_attention_dropout: 0.0
  multi_query_attention: null
  attention_layer_norm: false
  residual_dropout: 0.0
  response_residual_dropout: 0.1
  embedding_dropout: 0.0
  layer_norm_type: rms
  layer_norm_with_affine: true
  layer_norm_eps: 1.0e-06
  attention_layer_norm_with_affine: true
  max_sequence_length: 4096
  max_position_embeddings: null
  include_bias: false
  bias_for_layer_norm: null
  scale_logits: false
  vocab_size: 152064
  embedding_size: 152064
  additional_vocab_size: 128
  new_embedding_init_range: 0.02
  weight_tying: false
  pad_token_id: -1
  init_device: null
  init_fn: normal
  init_std: 0.02
  init_cutoff_factor: null
  norm_after: false
  precision: amp_bf16
  max_crops: 12
  crop_mode: overlap-and-resize-c2
  do_random_scale: false
  use_col_tokens: true
  prompt_type: none
  system_prompt_kind: style_and_length
  message_formatting: none
  always_start_with_space: true
  prompt_override: null
  default_inference_len: 65
  overlap_margins:
  - 4
  - 4
  image_padding_embed: pad_and_partial_pad
  vit_layers:
  - -2
  - -9
  image_pooling_h: 2
  image_pooling_w: 2
  image_pooling_2d: attention_meanq
  image_projector: mlp
  image_feature_dropout: 0.0
  use_cls_feature: false
  fix_image_input_idx: 2
  unconditioned: false
  pad_to: null
  initializer_range: 0.02
  pad_tokenizer: true
  normalize_input_embeds: false
  use_position_ids: true
  query_pre_attn_scalar: 224
  attn_logit_softcapping: null
  final_logit_softcapping: null
  head_dim: null
  tokenizer:
    identifier: mm:hf-Qwen/Qwen2-7B
    truncate_direction: right
    tokenizer_adds_space: false
    tokenizer_dir: null
    olmo_bos_token_id: null
    olmo_eos_token_id: null
  loss_token_weighting: null
  gin_bindings: null
ft_llm: true
ft_vit: true
ft_connector: true
ft_embedding: lm_head
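# Optimizer: AdamW with separate learning rates, weight decay, betas, and eps for the connector, ViT, and LLM parameter groups.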
optimizer:
  name: adamw
  learning_rate: 0.0001
  weight_decay: 0.01
  betas:
  - 0.9
  - 0.95
  eps: 1.0e-05
  connector_learning_rate: 0.0002
  vit_learning_rate: 6.0e-06
  llm_learning_rate: 2.0e-05
  connector_weight_decay: 0.0
  vit_weight_decay: 0.0
  llm_weight_decay: 0.0
  connector_betas:
  - 0.9
  - 0.95
  vit_betas:
  - 0.9
  - 0.95
  llm_betas:
  - 0.9
  - 0.95
  connector_eps: 1.0e-06
  vit_eps: 1.0e-06
  llm_eps: 1.0e-06
  no_decay_norm_and_bias: null
  decay_norm_and_bias: false
  decay_embeddings: false
  metrics_log_interval: 20
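# LR schedule: shorter warmup for the connector (200 steps) than for the pretrained ViT and LLM (2000 steps each).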
scheduler:
  name: multimodal
  units: steps
  t_warmup: 100
  t_max: null
  alpha_f: 0.1
  connector_t_warmup: 200
  vit_t_warmup: 2000
  llm_t_warmup: 2000
  grad_clip_warmup_steps: null
  grad_clip_warmup_factor: null
  warmup_min_lr: 0.0
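# Training data: the cockatoo_and_transcript_712k_sept6 mixture, shuffled, at sequence length 2304.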
data:
  multi_modal: true
  mixture_or_task_name: cockatoo_and_transcript_712k_sept6
  paths: null
  datasets: null
  label_mask_paths: null
  pad_direction: right
  generate_attention_mask: false
  num_workers: 0
  drop_last: true
  pin_memory: false
  prefetch_factor: null
  persistent_workers: false
  timeout: 0
  seed: null
  instance_filter: null
  mixture: null
  sequence_length: 2304
  shuffle: true
  for_inference: false
  split: train
  use_memory_cache: false
  num_epochs: null
  shuffle_buffer_size: 1000
  per_node_data_loader: null
restore_dataloader: true
fast_forward_batches: null
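# Two loss evaluators (type multi_modal_lm): the validation split of the training mixture, plus cockatoo_476k_gpt_captions.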
evaluators:
- label: val
  type: multi_modal_lm
  data:
    multi_modal: true
    mixture_or_task_name: cockatoo_and_transcript_712k_sept6
    paths: null
    datasets: null
    label_mask_paths: null
    pad_direction: right
    generate_attention_mask: false
    num_workers: 0
    drop_last: true
    pin_memory: false
    prefetch_factor: null
    persistent_workers: false
    timeout: 0
    seed: null
    instance_filter: null
    mixture: null
    sequence_length: 2304
    shuffle: false
    for_inference: false
    split: validation
    use_memory_cache: false
    num_epochs: null
    shuffle_buffer_size: 1000
    per_node_data_loader: null
  device_eval_batch_size: null
  subset_num_batches: 8
  max_new_tokens: 448
  mm_evaluator: null
  save_dir: null
  save_to_checkpoint_dir: false
  eval_name: null
  skip_if_metrics_cached: true
- label: caption_val
  type: multi_modal_lm
  data:
    multi_modal: true
    mixture_or_task_name: cockatoo_476k_gpt_captions
    paths: null
    datasets: null
    label_mask_paths: null
    pad_direction: right
    generate_attention_mask: false
    num_workers: 0
    drop_last: true
    pin_memory: false
    prefetch_factor: null
    persistent_workers: false
    timeout: 0
    seed: null
    instance_filter: null
    mixture: null
    sequence_length: 2304
    shuffle: false
    for_inference: false
    split: validation
    use_memory_cache: false
    num_epochs: null
    shuffle_buffer_size: 1000
    per_node_data_loader: null
  device_eval_batch_size: null
  subset_num_batches: 8
  max_new_tokens: 448
  mm_evaluator: null
  save_dir: null
  save_to_checkpoint_dir: false
  eval_name: null
  skip_if_metrics_cached: true
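# Run length, checkpointing, and batch sizes: the run stops at step 22300; with save_interval_unsharded equal to max_duration, a single unsharded checkpoint lands on the final step.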
eval_interval: 1000
inf_eval_interval: -1
inf_evaluators: []
save_folder: /weka/oe-training-default/chrisc/cockatoo/models/dense-captioner-v22-qwen2/v2-lr2620
remote_save_folder: null
canceled_check_interval: 50
save_interval: 4000
save_interval_unsharded: 22300
save_interval_ephemeral: null
save_num_checkpoints_to_keep: 1
save_num_unsharded_checkpoints_to_keep: -1
save_overwrite: true
force_save_unsharded: false
no_pre_train_checkpoint: true
initial_model_checkpoint: null
load_model_config: null
load_path: null
load_path_sharded_checkpointer: null
reset_optimizer_state: false
reset_trainer_state: false
save_dataloader_state: false
reset_dataloader_state: false
sharded_checkpointer: torch_legacy
new_style_checkpoints: null
max_duration: 22300
global_train_batch_size: 128
device_train_batch_size: 2
device_train_microbatch_size: 4
device_eval_batch_size: 4
eval_subset_num_batches: -1
eval_on_load: false
device_inf_eval_batch_size: 16
inf_eval_subset_num_batches: -1
device_train_grad_accum: 0
max_grad_norm: 1.0
batch_divisor: global_batch
max_grad_norm_ratio: null
precision: amp_bf16
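# Logging: Weights & Biases, plus console and speed-monitor metrics every 20 steps.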
wandb:
  project: cockatoo
  entity: prior-ai2
  group: dense-captioner-v22-qwen2
  name: v2-lr2620
  tags:
  - watching
  log_artifacts: false
  rank_zero_only: true
  log_interval: 20
speed_monitor:
  window_size: 20
  gpu_flops_available: null
console_log_interval: 20
gen1_gc_interval: 1
compile: null
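# Distributed training: PyTorch FSDP with full parameter sharding; modules wrapped by block and size.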
fsdp:
  use_orig_params: true
  sharding_strategy: FULL_SHARD
  wrapping_strategy: by_block_and_size
  precision: float
  hybrid_sharding_num_model_replicas: null
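# Auxiliary softmax loss (z-loss) with a small scale, used to keep output logits numerically stable.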
softmax_auxiliary_loss: true
softmax_auxiliary_loss_scale: 0.0001
time_limit: null
extra_steps_after_cancel: 10
early_stopping_factor: null
save_data_indices: false
python_profiling: false
torch_profiling: false
stop_at: 22300
stop_after: null
activation_checkpointing: whole_layer
fused_loss: null
tfds_dir: /weka/oe-training-default/mm-olmo/tensorflow_datasets