_base_ = [
    '../_base_/models/upernet_vit-b16_ln_mln.py',
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
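# Inherit the UPerNet + ViT-B/16 model, the ADE20K dataset settings, the
# default runtime, and the 80k-iteration schedule from the base configs above.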
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
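# Override the base model: plug in the data preprocessor defined above, load
# ViT-B/16 weights from the local `pretrain/` checkpoint, and set both the
# decode and auxiliary heads to ADE20K's 150 classes.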
model = dict(
    data_preprocessor=data_preprocessor,
    pretrained='pretrain/vit_base_patch16_224.pth',
    decode_head=dict(num_classes=150),
    auxiliary_head=dict(num_classes=150))
# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone

optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(
        type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01),
    paramwise_cfg=dict(
        custom_keys={
            'pos_embed': dict(decay_mult=0.),
            'cls_token': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
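# Learning-rate schedule: linear warmup over the first 1500 iterations,
# then polynomial decay (power=1.0, i.e. linear) down to 0 by iteration 80k.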
param_scheduler = [
    dict(
        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
    dict(
        type='PolyLR',
        eta_min=0.0,
        power=1.0,
        begin=1500,
        end=80000,
        by_epoch=False,
    )
]
# By default, models are trained on 8 GPUs with 2 images per GPU
train_dataloader = dict(batch_size=2)
val_dataloader = dict(batch_size=1)
test_dataloader = val_dataloader