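# Full (dumped) MMDetection-style config: self-supervised pretraining of a
# Mask R-CNN detector on COCO, using selective-search proposals as pseudo
# ground-truth boxes and a contrastive RoI objective (mask branch disabled).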
model = dict(
    type='SelfSupDetector',
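    # The wrapped detector (SelfSupMaskRCNN) is passed through the `backbone`
    # field of SelfSupDetector. Its own image backbone below is a
    # torchvision-pretrained ResNet-50 kept fully frozen (frozen_stages=4
    # freezes the stem and all four stages, norm_eval=True keeps BatchNorm
    # statistics fixed), so only the neck and heads are updated here.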
    backbone=dict(
        type='SelfSupMaskRCNN',
        backbone=dict(
            type='ResNet',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=4,
            norm_cfg=dict(type='BN', requires_grad=False),
            norm_eval=True,
            style='pytorch',
            init_cfg=dict(
                type='Pretrained', checkpoint='torchvision://resnet50')),
        neck=dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            num_outs=5),
        rpn_head=dict(
            type='RPNHead',
            in_channels=256,
            feat_channels=256,
            anchor_generator=dict(
                type='AnchorGenerator',
                scales=[8],
                ratios=[0.5, 1.0, 2.0],
                strides=[4, 8, 16, 32, 64]),
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0.0, 0.0, 0.0, 0.0],
                target_stds=[1.0, 1.0, 1.0, 1.0]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
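        # The RoI box head acts as a contrastive projection head:
        # num_classes=256 is effectively the embedding dimension rather than a
        # category count, and loss_cls is a ContrastiveLoss with temperature
        # 0.5 instead of a classification loss.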
        roi_head=dict(
            type='SelfSupStandardRoIHead',
            bbox_roi_extractor=dict(
                type='SingleRoIExtractor',
                roi_layer=dict(
                    type='RoIAlign', output_size=7, sampling_ratio=0),
                out_channels=256,
                featmap_strides=[4, 8, 16, 32]),
            bbox_head=dict(
                type='SelfSupShared4Conv1FCBBoxHead',
                in_channels=256,
                num_classes=256,
                roi_feat_size=7,
                loss_cls=dict(
                    type='ContrastiveLoss', loss_weight=1.0, temperature=0.5)),
            mask_roi_extractor=None,
            mask_head=None),
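        # Proposal sampling is widened to 4096 RoIs per image (hence
        # `sampler-4096` in work_dir). In the RCNN stage, pos_fraction=1.0
        # together with neg_pos_ub=0 restricts sampling to IoU-positive
        # proposals for the contrastive objective.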
        train_cfg=dict(
            rpn=dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.3,
                    min_pos_iou=0.3,
                    match_low_quality=True,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=4096,
                    pos_fraction=1.0,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=-1,
                pos_weight=-1,
                debug=False),
            rpn_proposal=dict(
                nms_pre=2000,
                max_per_img=1000,
                nms=dict(type='nms', iou_threshold=0.7),
                min_bbox_size=0),
            rcnn=dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    match_low_quality=True,
                    ignore_iof_thr=-1,
                    gt_max_assign_all=False),
                sampler=dict(
                    type='RandomSampler',
                    num=4096,
                    pos_fraction=1,
                    neg_pos_ub=0,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)),
        test_cfg=dict(
            rpn=dict(
                nms_pre=1000,
                max_per_img=1000,
                nms=dict(type='nms', iou_threshold=0.7),
                min_bbox_size=0),
            rcnn=dict(
                score_thr=0.05,
                nms=dict(type='nms', iou_threshold=0.5),
                max_per_img=100,
                mask_thr_binary=0.5))))
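# Data: each COCO train image is loaded once with its selective-search
# pseudo-boxes (a single placeholder class, 'selective_search') and then turned
# into two independently augmented views by MultiViewCocoDataset.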
train_dataset_type = 'MultiViewCocoDataset'
test_dataset_type = 'CocoDataset'
data_root = 'data/coco/'
classes = ['selective_search']
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
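# Shared loading steps; each view then goes through its own copy of the
# multi-scale resize, random flip and randomly chosen photometric op below.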
load_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=False)
]
train_pipeline1 = [
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(0.01, 0.01)),
    dict(type='Pad', size_divisor=32),
    dict(type='RandFlip', flip_ratio=0.5),
    dict(
        type='OneOf',
        transforms=[
            dict(type='Identity'),
            dict(type='AutoContrast'),
            dict(type='RandEqualize'),
            dict(type='RandSolarize'),
            dict(type='RandColor'),
            dict(type='RandContrast'),
            dict(type='RandBrightness'),
            dict(type='RandSharpness'),
            dict(type='RandPosterize')
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
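# The second view uses an identical pipeline; the two views differ only through
# the independent random draws of the shared augmentations.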
train_pipeline2 = [
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(0.01, 0.01)),
    dict(type='Pad', size_divisor=32),
    dict(type='RandFlip', flip_ratio=0.5),
    dict(
        type='OneOf',
        transforms=[
            dict(type='Identity'),
            dict(type='AutoContrast'),
            dict(type='RandEqualize'),
            dict(type='RandSolarize'),
            dict(type='RandColor'),
            dict(type='RandContrast'),
            dict(type='RandBrightness'),
            dict(type='RandSharpness'),
            dict(type='RandPosterize')
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
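# Standard single-scale COCO test pipeline, used by the val/test splits below;
# periodic evaluation is effectively disabled during pretraining (see
# `evaluation`).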
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
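# 2 images per GPU x 8 GPUs (gpu_ids) gives the reference batch size of 16
# declared in auto_scale_lr. The train split wraps the filtered selective-search
# proposal file into two augmented views, one per pipeline defined above.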
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=train_dataset_type,
        dataset=dict(
            type='CocoDataset',
            classes=classes,
            ann_file=data_root +
            'filtered_proposals/train2017_ratio3size0008@0.5.json',
            img_prefix=data_root + 'train2017/',
            pipeline=load_pipeline),
        num_views=2,
        pipelines=[train_pipeline1, train_pipeline2]),
    val=dict(
        type=test_dataset_type,
        classes=classes,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=test_dataset_type,
        classes=classes,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
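# Periodic evaluation is effectively disabled during pretraining: the interval
# is far beyond the 12-epoch schedule.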
evaluation = dict(interval=65535)
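# Standard 1x schedule: SGD with lr=0.02 for a total batch size of 16,
# 12 epochs with lr drops after epochs 8 and 11, and a 500-iter linear warmup.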
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
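# MomentumUpdateHook presumably maintains the momentum (EMA) copy of the
# detector used as the target network by SelfSupDetector; MMDetWandbHook logs
# training to Weights & Biases.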
custom_hooks = [
    dict(type='MomentumUpdateHook'),
    dict(
        type='MMDetWandbHook',
        init_kwargs=dict(project='I2B', group='pretrain'),
        interval=50,
        num_eval_images=0,
        log_checkpoint=False)
]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
opencv_num_threads = 0
mp_start_method = 'fork'
auto_scale_lr = dict(enable=False, base_batch_size=16)
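# custom_imports registers the project-specific pipelines, hooks, losses,
# datasets, heads and detectors with the mmdet/mmselfsup registries so that the
# string type names used above can be resolved.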
custom_imports = dict(
    imports=[
        'mmselfsup.datasets.pipelines',
        'selfsup.core.hook.momentum_update_hook',
        'selfsup.datasets.pipelines.selfsup_pipelines',
        'selfsup.datasets.pipelines.rand_aug',
        'selfsup.datasets.single_view_coco',
        'selfsup.datasets.multi_view_coco',
        'selfsup.models.losses.contrastive_loss',
        'selfsup.models.dense_heads.fcos_head',
        'selfsup.models.dense_heads.retina_head',
        'selfsup.models.dense_heads.detr_head',
        'selfsup.models.dense_heads.deformable_detr_head',
        'selfsup.models.roi_heads.bbox_heads.convfc_bbox_head',
        'selfsup.models.roi_heads.standard_roi_head',
        'selfsup.models.detectors.selfsup_detector',
        'selfsup.models.detectors.selfsup_fcos',
        'selfsup.models.detectors.selfsup_detr',
        'selfsup.models.detectors.selfsup_deformable_detr',
        'selfsup.models.detectors.selfsup_retinanet',
        'selfsup.models.detectors.selfsup_mask_rcnn',
        'selfsup.core.bbox.assigners.hungarian_assigner',
        'selfsup.core.bbox.match_costs.match_cost'
    ],
    allow_failed_imports=False)
work_dir = 'work_dirs/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5'
auto_resume = False
gpu_ids = range(0, 8)
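# A typical launch with the standard MMDetection 2.x entry points might look
# like the following (the config path is only an assumption about where this
# file is saved):
#
#   bash tools/dist_train.sh \
#       configs/selfsup/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5.py 8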