From 66dec528c6752409aaef03f9fb96f86fd677eef7 Mon Sep 17 00:00:00 2001
From: Rockey <41846794+RockeyCoss@users.noreply.github.com>
Date: Thu, 17 Feb 2022 16:25:17 +0800
Subject: [PATCH] [Fix] Fix the bug that SETR cannot load pretrained weights (#1293)

* [Fix] Fix the bug that SETR cannot load pretrained weights

* delete new pretrain
---
 configs/setr/README.md                          | 17 +++++++++++++++++
 configs/setr/setr_mla_512x512_160k_b8_ade20k.py |  3 ++-
 .../setr/setr_naive_512x512_160k_b16_ade20k.py  |  3 ++-
 .../setr/setr_pup_512x512_160k_b16_ade20k.py    |  3 ++-
 ..._vit-large_mla_8x1_768x768_80k_cityscapes.py |  3 ++-
 ...it-large_naive_8x1_768x768_80k_cityscapes.py |  3 ++-
 ..._vit-large_pup_8x1_768x768_80k_cityscapes.py |  3 ++-
 7 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/configs/setr/README.md b/configs/setr/README.md
index 5673d9b6..3a28635e 100644
--- a/configs/setr/README.md
+++ b/configs/setr/README.md
@@ -36,6 +36,23 @@ This head has two versions.
 }
 ```
 
+## Usage
+
+You can download the pretrained model from [here](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth). Then you can convert its keys with the script `vit2mmseg.py` in the tools directory.
+
+```shell
+python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
+```
+
+For example:
+
+```shell
+python tools/model_converters/vit2mmseg.py \
+jx_vit_large_p16_384-b3be5167.pth pretrain/vit_large_p16.pth
+```
+
+This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
+
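+If you want to sanity-check the converted checkpoint before training, the snippet below is a minimal sketch, assuming PyTorch is installed and the converted file was stored at `pretrain/vit_large_p16.pth`:
+
+```python
+import torch
+
+# Load the converted checkpoint on CPU and print a few parameter names/shapes.
+checkpoint = torch.load('pretrain/vit_large_p16.pth', map_location='cpu')
+# Depending on the converter, the weights may sit at the top level or under
+# a 'state_dict' key; handle both cases here.
+state_dict = checkpoint.get('state_dict', checkpoint)
+for name, tensor in list(state_dict.items())[:5]:
+    print(name, tuple(tensor.shape))
+```
+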
 ## Results and models
 
 ### ADE20K
diff --git a/configs/setr/setr_mla_512x512_160k_b8_ade20k.py b/configs/setr/setr_mla_512x512_160k_b8_ade20k.py
index 6977dbac..e1a07ce5 100644
--- a/configs/setr/setr_mla_512x512_160k_b8_ade20k.py
+++ b/configs/setr/setr_mla_512x512_160k_b8_ade20k.py
@@ -8,7 +8,8 @@ model = dict(
     backbone=dict(
         img_size=(512, 512),
         drop_rate=0.,
-        init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')),
+        init_cfg=dict(
+            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
     decode_head=dict(num_classes=150),
     auxiliary_head=[
         dict(
diff --git a/configs/setr/setr_naive_512x512_160k_b16_ade20k.py b/configs/setr/setr_naive_512x512_160k_b16_ade20k.py
index 3b1f9d7d..8ad8c9fe 100644
--- a/configs/setr/setr_naive_512x512_160k_b16_ade20k.py
+++ b/configs/setr/setr_naive_512x512_160k_b16_ade20k.py
@@ -8,7 +8,8 @@ model = dict(
     backbone=dict(
         img_size=(512, 512),
         drop_rate=0.,
-        init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')),
+        init_cfg=dict(
+            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
     decode_head=dict(num_classes=150),
     auxiliary_head=[
         dict(
diff --git a/configs/setr/setr_pup_512x512_160k_b16_ade20k.py b/configs/setr/setr_pup_512x512_160k_b16_ade20k.py
index 68c3a2a4..83997a2b 100644
--- a/configs/setr/setr_pup_512x512_160k_b16_ade20k.py
+++ b/configs/setr/setr_pup_512x512_160k_b16_ade20k.py
@@ -8,7 +8,8 @@ model = dict(
     backbone=dict(
         img_size=(512, 512),
         drop_rate=0.,
-        init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')),
+        init_cfg=dict(
+            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
     decode_head=dict(num_classes=150),
     auxiliary_head=[
         dict(
diff --git a/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py b/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py
index 3c2fc3af..4237cd5a 100644
--- a/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py
+++ b/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py
@@ -6,7 +6,8 @@ model = dict(
     pretrained=None,
     backbone=dict(
         drop_rate=0,
-        init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')),
+        init_cfg=dict(
+            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
     test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512)))
 
 optimizer = dict(
diff --git a/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py b/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py
index 181f444e..0c6621ef 100644
--- a/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py
+++ b/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py
@@ -7,7 +7,8 @@ model = dict(
     pretrained=None,
     backbone=dict(
         drop_rate=0.,
-        init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')),
+        init_cfg=dict(
+            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
     test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512)))
 
 optimizer = dict(
diff --git a/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py b/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py
index 817a0296..e108988a 100644
--- a/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py
+++ b/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py
@@ -9,7 +9,8 @@ model = dict(
     pretrained=None,
     backbone=dict(
         drop_rate=0.,
-        init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')),
+        init_cfg=dict(
+            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
     auxiliary_head=[
         dict(
             type='SETRUPHead',
-- 
GitLab