# BLIP/configs/retrieval_coco.yaml
image_root: '/export/share/datasets/vision/coco/images/'
ann_root: 'annotation'
dataset: 'coco'
# set pretrained to a local checkpoint file path or a URL
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
# size of the ViT model: 'base' or 'large'
vit: 'base'
batch_size_train: 32
batch_size_test: 64
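# gradient checkpointing inside the ViT to trade compute for memory;
# vit_ckpt_layer controls which transformer layers it is applied to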
vit_grad_ckpt: True
vit_ckpt_layer: 4
init_lr: 1e-5
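# alternative hyperparameters for the large ViT model (uncomment to use):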
# vit: 'large'
# batch_size_train: 16
# batch_size_test: 32
# vit_grad_ckpt: True
# vit_ckpt_layer: 12
# init_lr: 5e-6
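# input images are resized to image_size x image_size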
image_size: 384
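# number of features kept in the momentum queues for the image-text contrastive loss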
queue_size: 57600
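# weight of the momentum-model soft labels mixed into the contrastive targets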
alpha: 0.4
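# at evaluation, the top-k_test candidates by contrastive similarity are
# re-ranked with the image-text matching head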
k_test: 256
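# sample hard negatives for image-text matching across all distributed ranks
# rather than within each GPU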
negative_all_rank: True
# optimizer and learning-rate schedule
weight_decay: 0.05
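# floor of the learning-rate decay schedule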
min_lr: 0
max_epoch: 6
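# ----------------------------------------------------------------------
# Usage sketch (not part of the original file). BLIP's training scripts
# load configs like this one with PyYAML and read keys from the resulting
# dict; the constructor name and keyword arguments below follow the BLIP
# repository's blip_retrieval entry point, but treat them as illustrative
# rather than an exact API reference:
#
#   import yaml
#   config = yaml.load(open('configs/retrieval_coco.yaml'), Loader=yaml.Loader)
#
#   from models.blip_retrieval import blip_retrieval  # module path as in the BLIP repo
#   model = blip_retrieval(pretrained=config['pretrained'],
#                          image_size=config['image_size'],
#                          vit=config['vit'],
#                          queue_size=config['queue_size'],
#                          negative_all_rank=config['negative_all_rank'])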