Unverified commit b21df463 authored by Miao Zheng, committed by GitHub

[Feature] LIP dataset (#2187)

* [WIP] LIP dataset

* wip

* keep473

* lip dataset prepare

* add ut and test data
parent a1f011dc
@@ -351,7 +351,8 @@ The dataset is a Large-scale Dataset for Instance Segmentation (also have segman
You may need to organize the dataset as follows after downloading the iSAID dataset.
```
```none
├── data
│ ├── iSAID
│ │ ├── train
│ │ │ ├── images
@@ -376,3 +377,40 @@ python tools/dataset_converters/isaid.py /path/to/iSAID
```
In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33978 images for training and 11644 images for validation.
## LIP (Look Into Person) dataset
This dataset can be downloaded from [this page](https://lip.sysuhcp.com/overview.php).
Please run the following commands to unzip the dataset.
```shell
unzip LIP.zip
cd LIP
unzip TrainVal_images.zip
unzip TrainVal_parsing_annotations.zip
cd TrainVal_parsing_annotations
unzip TrainVal_parsing_annotations.zip
mv train_segmentations ../
mv val_segmentations ../
cd ..
```
The contents of the LIP dataset include:
```none
├── data
│ ├── LIP
│ │ ├── train_images
│ │ │ ├── 1000_1234574.jpg
│ │ │ ├── ...
│ │ ├── train_segmentations
│ │ │ ├── 1000_1234574.png
│ │ │ ├── ...
│ │ ├── val_images
│ │ │ ├── 100034_483681.jpg
│ │ │ ├── ...
│ │ ├── val_segmentations
│ │ │ ├── 100034_483681.png
│ │ │ ├── ...
```
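With the directory layout above in place, a training dataloader entry for this dataset might look like the sketch below (mmseg 1.x config style). The pipeline, sampler, batch size, and worker count are illustrative placeholders, not part of this change.
```python
# A minimal, illustrative dataloader config for LIP; values are placeholders.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='PackSegInputs')
]
train_dataloader = dict(
    batch_size=4,
    num_workers=2,
    sampler=dict(type='InfiniteSampler', shuffle=True),
    dataset=dict(
        type='LIPDataset',
        data_root='data/LIP',
        data_prefix=dict(
            img_path='train_images',
            seg_map_path='train_segmentations'),
        pipeline=train_pipeline))
```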
@@ -11,6 +11,7 @@ from .drive import DRIVEDataset
from .hrf import HRFDataset
from .isaid import iSAIDDataset
from .isprs import ISPRSDataset
from .lip import LIPDataset
from .loveda import LoveDADataset
from .night_driving import NightDrivingDataset
from .pascal_context import PascalContextDataset, PascalContextDataset59
@@ -35,5 +36,5 @@ __all__ = [
'RandomCutOut', 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple',
'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile',
'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge',
'DecathlonDataset'
'DecathlonDataset', 'LIPDataset'
]
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class LIPDataset(BaseSegDataset):
    """LIP dataset.

    The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    METAINFO = dict(
        classes=('Background', 'Hat', 'Hair', 'Glove', 'Sunglasses',
                 'UpperClothes', 'Dress', 'Coat', 'Socks', 'Pants',
                 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm',
                 'Right-arm', 'Left-leg', 'Right-leg', 'Left-shoe',
                 'Right-shoe'),
        palette=(
            [0, 0, 0],
            [128, 0, 0],
            [255, 0, 0],
            [0, 85, 0],
            [170, 0, 51],
            [255, 85, 0],
            [0, 0, 85],
            [0, 119, 221],
            [85, 85, 0],
            [0, 85, 85],
            [85, 51, 0],
            [52, 86, 128],
            [0, 128, 0],
            [0, 0, 255],
            [51, 170, 221],
            [0, 255, 255],
            [85, 255, 170],
            [170, 255, 85],
            [255, 255, 0],
            [255, 170, 0],
        ))

    def __init__(self, **kwargs) -> None:
        super().__init__(img_suffix='.jpg', seg_map_suffix='.png', **kwargs)
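Once registered, the dataset can be instantiated directly; a minimal sketch, assuming the `data/LIP` layout described in the documentation above and an empty pipeline:
```python
from mmseg.datasets import LIPDataset

# Assumes data/LIP is laid out as in the documentation above.
dataset = LIPDataset(
    data_root='data/LIP',
    data_prefix=dict(
        img_path='train_images', seg_map_path='train_segmentations'),
    pipeline=[])  # empty pipeline: items come back as raw data-info dicts

print(len(dataset))
print(dataset.metainfo['classes'][:5])  # first few class names
```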
@@ -265,6 +265,26 @@ def stare_palette():
return [[120, 120, 120], [6, 230, 230]]


def lip_classes():
    """LIP class names for external use."""
    return [
        'background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
        'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
        'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe',
        'rightShoe'
    ]


def lip_palette():
    """LIP palette for external use."""
    return [[0, 0, 0], [128, 0, 0], [255, 0, 0], [0, 85, 0], [170, 0, 51],
            [255, 85, 0], [0, 0, 85], [0, 119, 221], [85, 85, 0], [0, 85, 85],
            [85, 51, 0], [52, 86, 128], [0, 128, 0], [0, 0, 255],
            [51, 170, 221], [0, 255, 255], [85, 255, 170], [170, 255, 85],
            [255, 255, 0], [255, 170, 0]]


dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
@@ -278,7 +298,8 @@ dataset_aliases = {
'coco_stuff164k'
],
'isaid': ['isaid', 'iSAID'],
'stare': ['stare', 'STARE']
'stare': ['stare', 'STARE'],
'lip': ['LIP', 'lip']
}
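With the alias registered, the `get_classes`/`get_palette` helpers in `mmseg.utils` should resolve the new entries; a short sketch of the expected usage:
```python
from mmseg.utils import get_classes, get_palette

classes = get_classes('lip')  # dispatches to lip_classes() via the alias
palette = get_palette('lip')  # dispatches to lip_palette()
assert len(classes) == len(palette) == 20
print(classes[:3], palette[:3])
```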
tests/data/pseudo_lip_dataset/train_images/684_2150041.jpg (10.7 KiB)
tests/data/pseudo_lip_dataset/train_segmentations/684_2150041.png (1.56 KiB)
tests/data/pseudo_lip_dataset/val_images/86_185913.jpg (13.1 KiB)
tests/data/pseudo_lip_dataset/val_segmentations/86_185913.png (1.43 KiB)
@@ -8,8 +8,8 @@ import pytest
from mmseg.datasets import (ADE20KDataset, BaseSegDataset, CityscapesDataset,
COCOStuffDataset, DecathlonDataset, ISPRSDataset,
LoveDADataset, PascalVOCDataset, PotsdamDataset,
iSAIDDataset)
LIPDataset, LoveDADataset, PascalVOCDataset,
PotsdamDataset, iSAIDDataset)
from mmseg.registry import DATASETS
from mmseg.utils import get_classes, get_palette
@@ -259,6 +259,25 @@ def test_decathlon():
assert len(test_dataset) == 3


def test_lip():
    data_root = osp.join(osp.dirname(__file__), '../data/pseudo_lip_dataset')
    # load the training split
    train_dataset = LIPDataset(
        pipeline=[],
        data_root=data_root,
        data_prefix=dict(
            img_path='train_images', seg_map_path='train_segmentations'))
    assert len(train_dataset) == 1

    # load the validation split
    test_dataset = LIPDataset(
        pipeline=[],
        data_root=data_root,
        data_prefix=dict(
            img_path='val_images', seg_map_path='val_segmentations'))
    assert len(test_dataset) == 1
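The test above only checks the sample counts; for a quick manual check of what the pseudo dataset yields, something like the following should work (paths taken from the test data listed earlier):
```python
from mmseg.datasets import LIPDataset

ds = LIPDataset(
    pipeline=[],
    data_root='tests/data/pseudo_lip_dataset',
    data_prefix=dict(
        img_path='train_images', seg_map_path='train_segmentations'))
info = ds.get_data_info(0)  # raw annotation info, before any pipeline runs
print(info['img_path'], info['seg_map_path'])
```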
@pytest.mark.parametrize('dataset, classes', [
('ADE20KDataset', ('wall', 'building')),
('CityscapesDataset', ('road', 'sidewalk')),