Commit 202a998

fix the bug related to celeba
1 parent 53fc531 commit 202a998

12 files changed (+465 -34 lines)

configs/celeba/H_48_D_4.json (+15 -24)

@@ -1,39 +1,37 @@
 {
-  "dataset": "celeba_mask",
+  "dataset": "celeba",
   "method": "fcn_segmentor",
   "data": {
     "image_tool": "cv2",
     "input_mode": "BGR",
     "num_classes": 19,
-    "reduce_zero_label": true,
-    "data_dir": "~/DataSet/celeba_mask",
+    "reduce_zero_label": false,
+    "data_dir": "~/DataSet/celeba",
     "workers": 8
   },
   "train": {
     "batch_size": 16,
     "data_transformer": {
       "size_mode": "fix_size",
-      "input_size": [520, 520],
+      "input_size": [512, 512],
       "align_method": "only_pad",
       "pad_mode": "random"
     }
   },
   "val": {
     "batch_size": 4,
-    "mode": "ms_test",
     "data_transformer": {
-      "size_mode": "diverse_size",
-      "align_method": "only_pad",
-      "pad_mode": "pad_right_down"
+      "size_mode": "fix_size",
+      "input_size": [512, 512],
+      "align_method": "only_pad"
     }
   },
   "test": {
     "batch_size": 4,
-    "mode": "ss_test",
     "data_transformer": {
-      "size_mode": "diverse_size",
-      "align_method": "only_pad",
-      "pad_mode": "pad_right_down"
+      "size_mode": "fix_size",
+      "input_size": [512, 512],
+      "align_method": "only_pad"
     }
   },
   "train_trans": {
@@ -43,7 +41,7 @@
       "shift_value": 10
     },
     "resize": {
-      "min_side_length": 520
+      "target_size": [512, 512]
     },
     "random_hflip": {
       "ratio": 0.5,
@@ -61,21 +59,15 @@
     },
     "random_crop":{
       "ratio": 1.0,
-      "crop_size": [520, 520],
+      "crop_size": [512, 512],
       "method": "random",
       "allow_outside_center": false
     }
   },
   "val_trans": {
     "trans_seq": ["resize"],
-    "resize": {
-      "min_side_length": 520
-    },
-    "random_crop": {
-      "ratio": 1.0,
-      "crop_size": [520, 520],
-      "method": "center",
-      "allow_outside_center": false
+    "resize":{
+      "target_size": [512, 512]
     }
   },
   "normalize": {
@@ -122,7 +114,7 @@
   },
   "solver": {
     "display_iter": 10,
-    "test_interval": 5000,
+    "test_interval": 2000,
     "max_iters": 150000
   },
   "optim": {
@@ -142,7 +134,6 @@
     "loss_type": "fs_auxce_loss",
     "params": {
       "ce_reduction": "elementwise_mean",
-      "ce_ignore_index": -1,
       "ohem_minkeep":100000,
       "ohem_thresh": 0.7
     }
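
Note on the `reduce_zero_label` and `ce_ignore_index` changes above: in segmentation configs of this style, `reduce_zero_label: true` treats label 0 as background to be ignored and shifts the remaining labels down by one, while `false` feeds the label map to the loss unchanged. A minimal sketch of that convention, using a hypothetical `remap_label` helper (the real logic lives in this repository's dataset loader):

import numpy as np

def remap_label(mask, reduce_zero_label, ignore_index=-1):
    # Hypothetical helper illustrating the usual reduce_zero_label convention;
    # not a copy of this repository's loader code.
    mask = mask.astype(np.int64)
    if reduce_zero_label:
        mask = mask - 1                  # labels 1..C-1 shift down to 0..C-2
        mask[mask == -1] = ignore_index  # original label 0 becomes "ignore"
    return mask

With `reduce_zero_label: false`, CelebA label maps with values 0-18 reach the loss as-is, so all 19 classes contribute to the cross-entropy term, which is consistent with dropping `ce_ignore_index: -1` from the loss params.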

configs/celeba/H_48_D_4_TEST.json (+1 -1)

@@ -1,5 +1,5 @@
 {
-  "dataset": "celeba_mask",
+  "dataset": "celeba",
   "method": "fcn_segmentor",
   "data": {
     "image_tool": "cv2",

lib/loss/loss_helper.py (-1)

@@ -77,7 +77,6 @@ def __init__(self, configer=None):
         self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)

     def forward(self, inputs, *targets, weights=None, **kwargs):
-        pdb.set_trace()
         loss = 0.0
         if isinstance(inputs, tuple) or isinstance(inputs, list):
             if weights is None:
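
The deleted `pdb.set_trace()` was a leftover debugger breakpoint that would halt every forward pass of the loss. For orientation, the surrounding `forward` follows the usual multi-output pattern sketched below; this is a simplified sketch based only on the lines visible here, and the equal-weight default and single shared target are assumptions rather than the repository's exact code:

import torch.nn as nn

class AuxCELossSketch(nn.Module):
    # Minimal sketch: sum a weighted cross-entropy over (aux, main) outputs.
    def __init__(self, ignore_index=-1):
        super().__init__()
        self.ce_loss = nn.CrossEntropyLoss(ignore_index=ignore_index)

    def forward(self, inputs, *targets, weights=None, **kwargs):
        loss = 0.0
        if isinstance(inputs, (tuple, list)):
            if weights is None:
                weights = [1.0] * len(inputs)  # assumed default: equal weights
            for pred, w in zip(inputs, weights):
                loss = loss + w * self.ce_loss(pred, targets[0])
        else:
            loss = self.ce_loss(inputs, targets[0])
        return loss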

lib/metrics/running_score.py (+1 -1)

@@ -115,7 +115,7 @@ def _fast_hist(self, label_true, label_pred, n_class):

         if self.ignore_index is not None:
             mask = mask & (label_true != self.ignore_index)
-
+
         hist = np.bincount(
             n_class * label_true[mask].astype(int) +
             label_pred[mask], minlength=n_class**2)
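
For context, `_fast_hist` builds a confusion matrix by encoding each (true, pred) pair as the single integer `n_class * true + pred` and counting occurrences with `np.bincount`; the `ignore_index` mask in the diff above simply drops ignored pixels from that count. A self-contained sketch of the same technique, written as a standalone function rather than the class method used in this repository:

import numpy as np

def fast_hist(label_true, label_pred, n_class, ignore_index=None):
    # Keep only pixels whose ground-truth label is a valid class.
    mask = (label_true >= 0) & (label_true < n_class)
    if ignore_index is not None:
        mask = mask & (label_true != ignore_index)
    # Encode (true, pred) pairs as n_class * true + pred and count them.
    hist = np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
        minlength=n_class ** 2,
    ).reshape(n_class, n_class)
    return hist

gt   = np.array([0, 1, 2, 255])   # 255 marks an ignored pixel
pred = np.array([0, 2, 2, 1])
print(fast_hist(gt, pred, n_class=3, ignore_index=255))
# diagonal entries count correct pixels; off-diagonal entries are confusions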

@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+# $1 code path
+# $2 dataset path
+# $3 train or test
+# $4 log_suffix
+
+PYTHON="/opt/conda/bin/python"
+${PYTHON} -c "import torch; print(torch.__version__)"
+
+${PYTHON} -m pip install yacs
+
+export PYTHONPATH=$1:$PYTHONPATH
+
+DATA_DIR="$2/face_parse/CelebAMask-HQ"
+SAVE_DIR="$2/seg_result/celeba/"
+BACKBONE="hrnet48"
+
+CONFIGS="configs/celeba/H_48_D_4.json"
+CONFIGS_TEST="configs/celeba/H_48_D_4_TEST.json"
+
+MODEL_NAME="hrnet_w48_ocr"
+LOSS_TYPE="fs_auxce_loss"
+CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$4
+LOG_FILE="./log/celeba/${CHECKPOINTS_NAME}.log"
+echo "Logging to $LOG_FILE"
+mkdir -p `dirname $LOG_FILE`
+PRETRAINED_MODEL="./pretrained_model/hrnetv2_w48_imagenet_pretrained.pth"
+MAX_ITERS=150000
+
+if [ "$3"x == "train"x ]; then
+  ${PYTHON} -u main.py --configs ${CONFIGS} \
+                       --drop_last y \
+                       --nbb_mult 10 \
+                       --phase train \
+                       --gathered n \
+                       --loss_balance y \
+                       --log_to_file n \
+                       --backbone ${BACKBONE} \
+                       --model_name ${MODEL_NAME} \
+                       --gpu 0 1 2 3 \
+                       --data_dir ${DATA_DIR} \
+                       --loss_type ${LOSS_TYPE} \
+                       --max_iters ${MAX_ITERS} \
+                       --checkpoints_name ${CHECKPOINTS_NAME} \
+                       --pretrained ${PRETRAINED_MODEL} \
+                       2>&1 | tee ${LOG_FILE}
+
+
+elif [ "$3"x == "resume"x ]; then
+  ${PYTHON} -u main.py --configs ${CONFIGS} \
+                       --drop_last y \
+                       --nbb_mult 10 \
+                       --phase train \
+                       --gathered n \
+                       --loss_balance y \
+                       --log_to_file n \
+                       --backbone ${BACKBONE} \
+                       --model_name ${MODEL_NAME} \
+                       --max_iters ${MAX_ITERS} \
+                       --data_dir ${DATA_DIR} \
+                       --loss_type ${LOSS_TYPE} \
+                       --gpu 0 1 2 3 \
+                       --resume_continue y \
+                       --resume ./checkpoints/coco_stuff/${CHECKPOINTS_NAME}_latest.pth \
+                       --checkpoints_name ${CHECKPOINTS_NAME} \
+                       2>&1 | tee -a ${LOG_FILE}
+
+
+elif [ "$3"x == "val"x ]; then
+  ${PYTHON} -u main.py --configs ${CONFIGS_TEST} \
+                       --data_dir ${DATA_DIR} \
+                       --backbone ${BACKBONE} \
+                       --model_name ${MODEL_NAME} \
+                       --checkpoints_name ${CHECKPOINTS_NAME} \
+                       --phase test \
+                       --gpu 0 1 2 3 \
+                       --resume ./checkpoints/coco_stuff/${CHECKPOINTS_NAME}_latest.pth \
+                       --test_dir ${DATA_DIR}/val/image \
+                       --log_to_file n \
+                       --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms
+
+  cd lib/metrics
+  ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS_TEST} \
+                                   --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms/label \
+                                   --gt_dir ${DATA_DIR}/val/label
+
+
+elif [ "$3"x == "test"x ]; then
+  if [ "$5"x == "ss"x ]; then
+    echo "[single scale] test"
+    ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
+                         --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
+                         --phase test --gpu 0 1 2 3 --resume ./checkpoints/coco_stuff/${CHECKPOINTS_NAME}_latest.pth \
+                         --test_dir ${DATA_DIR}/test --log_to_file n \
+                         --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_test_ss
+  else
+    echo "[multiple scale + flip] test"
+    ${PYTHON} -u main.py --configs ${CONFIGS_TEST} --drop_last y \
+                         --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
+                         --phase test --gpu 0 1 2 3 --resume ./checkpoints/coco_stuff/${CHECKPOINTS_NAME}_latest.pth \
+                         --test_dir ${DATA_DIR}/test --log_to_file n \
+                         --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_test_ms
+  fi
+
+
+else
+  echo "$3"x" is invalid..."
+fi

@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+# $1 code path
+# $2 dataset path
+# $3 train or test
+# $4 log_suffix
+
+PYTHON="/opt/conda/bin/python"
+${PYTHON} -c "import torch; print(torch.__version__)"
+
+${PYTHON} -m pip install yacs
+
+export PYTHONPATH=$1:$PYTHONPATH
+
+DATA_DIR="$2/face_parse/CelebAMask-HQ"
+SAVE_DIR="$2/seg_result/celeba/"
+BACKBONE="hrnet48"
+
+CONFIGS="configs/celeba/H_48_D_4.json"
+CONFIGS_TEST="configs/celeba/H_48_D_4_TEST.json"
+
+MODEL_NAME="hrnet_w48_ocr"
+LOSS_TYPE="fs_auxce_loss"
+CHECKPOINTS_NAME="${MODEL_NAME}_${BACKBONE}_"$4
+LOG_FILE="./log/celeba/${CHECKPOINTS_NAME}.log"
+echo "Logging to $LOG_FILE"
+mkdir -p `dirname $LOG_FILE`
+PRETRAINED_MODEL="./pretrained_model/hrnetv2_w48_imagenet_pretrained.pth"
+MAX_ITERS=150000
+
+if [ "$3"x == "train"x ]; then
+  ${PYTHON} -u main.py --configs ${CONFIGS} \
+                       --base_lr 0.01 \
+                       --drop_last y \
+                       --phase train \
+                       --gathered n \
+                       --loss_balance y \
+                       --log_to_file n \
+                       --backbone ${BACKBONE} \
+                       --model_name ${MODEL_NAME} \
+                       --gpu 0 1 2 3 \
+                       --data_dir ${DATA_DIR} \
+                       --loss_type ${LOSS_TYPE} \
+                       --max_iters ${MAX_ITERS} \
+                       --checkpoints_name ${CHECKPOINTS_NAME} \
+                       --pretrained ${PRETRAINED_MODEL} \
+                       2>&1 | tee ${LOG_FILE}
+
+
+elif [ "$3"x == "resume"x ]; then
+  ${PYTHON} -u main.py --configs ${CONFIGS} \
+                       --drop_last y \
+                       --phase train \
+                       --gathered n \
+                       --loss_balance y \
+                       --log_to_file n \
+                       --backbone ${BACKBONE} \
+                       --model_name ${MODEL_NAME} \
+                       --max_iters ${MAX_ITERS} \
+                       --data_dir ${DATA_DIR} \
+                       --loss_type ${LOSS_TYPE} \
+                       --gpu 0 1 2 3 \
+                       --resume_continue y \
+                       --resume ./checkpoints/coco_stuff/${CHECKPOINTS_NAME}_latest.pth \
+                       --checkpoints_name ${CHECKPOINTS_NAME} \
+                       2>&1 | tee -a ${LOG_FILE}
+
+
+elif [ "$3"x == "val"x ]; then
+  ${PYTHON} -u main.py --configs ${CONFIGS_TEST} \
+                       --data_dir ${DATA_DIR} \
+                       --backbone ${BACKBONE} \
+                       --model_name ${MODEL_NAME} \
+                       --checkpoints_name ${CHECKPOINTS_NAME} \
+                       --phase test \
+                       --gpu 0 1 2 3 \
+                       --resume ./checkpoints/coco_stuff/${CHECKPOINTS_NAME}_latest.pth \
+                       --test_dir ${DATA_DIR}/val/image \
+                       --log_to_file n \
+                       --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms
+
+  cd lib/metrics
+  ${PYTHON} -u ade20k_evaluator.py --configs ../../${CONFIGS_TEST} \
+                                   --pred_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_val_ms/label \
+                                   --gt_dir ${DATA_DIR}/val/label
+
+
+elif [ "$3"x == "test"x ]; then
+  if [ "$5"x == "ss"x ]; then
+    echo "[single scale] test"
+    ${PYTHON} -u main.py --configs ${CONFIGS} --drop_last y \
+                         --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
+                         --phase test --gpu 0 1 2 3 --resume ./checkpoints/coco_stuff/${CHECKPOINTS_NAME}_latest.pth \
+                         --test_dir ${DATA_DIR}/test --log_to_file n \
+                         --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_test_ss
+  else
+    echo "[multiple scale + flip] test"
+    ${PYTHON} -u main.py --configs ${CONFIGS_TEST} --drop_last y \
+                         --backbone ${BACKBONE} --model_name ${MODEL_NAME} --checkpoints_name ${CHECKPOINTS_NAME} \
+                         --phase test --gpu 0 1 2 3 --resume ./checkpoints/coco_stuff/${CHECKPOINTS_NAME}_latest.pth \
+                         --test_dir ${DATA_DIR}/test --log_to_file n \
+                         --out_dir ${SAVE_DIR}${CHECKPOINTS_NAME}_test_ms
+  fi
+
+
+else
+  echo "$3"x" is invalid..."
+fi
