# ViTPose_large_interhand2d_all_256x192.py
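# Top-down 2D hand keypoint config: a ViT-Large backbone with a simple
# heatmap head, trained on InterHand2.6M (the "all" annotation split) at a
# 256x192 input resolution.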
_base_ = [
    '../../../../_base_/default_runtime.py',
    '../../../../_base_/datasets/interhand2d.py'
]
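# The base configs above provide shared runtime defaults and the
# InterHand2.6M dataset metadata (dataset_info) referenced by the dataset
# dicts at the bottom of this file.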
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metric=['PCK', 'AUC', 'EPE'], save_best='AUC')
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[40, 50])
total_epochs = 60
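# Schedule: 60 epochs total, with a 500-iteration linear warmup and the
# learning rate stepped down at epochs 40 and 50.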
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
channel_cfg = dict(
    num_output_channels=21,
    dataset_joints=21,
    dataset_channel=[
        [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
            19, 20
        ],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20
    ])
# model settings
model = dict(
    type='TopDown',
    pretrained=None,
    backbone=dict(
        type='ViT',
        img_size=(256, 192),
        patch_size=16,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        ratio=1,
        use_checkpoint=False,
        mlp_ratio=4,
        qkv_bias=True,
        drop_path_rate=0.3,
    ),
    keypoint_head=dict(
        type='TopdownHeatmapSimpleHead',
        in_channels=1024,
        num_deconv_layers=2,
        num_deconv_filters=(256, 256),
        num_deconv_kernels=(4, 4),
        extra=dict(final_conv_kernel=1, ),
        out_channels=channel_cfg['num_output_channels'],
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))
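# With 16x16 patches, the 256x192 input yields a 16x12 token grid; the head's
# two deconv layers upsample it 4x to the 64x48 heatmaps declared in data_cfg
# below. flip_test additionally averages predictions over the horizontally
# flipped image at inference time.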
data_cfg = dict(
    image_size=[192, 256],
    heatmap_size=[48, 64],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'])
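# Note the [w, h] ordering here: image_size=[192, 256] matches the backbone's
# img_size=(256, 192) given in (h, w), and heatmap_size=[48, 64] is a 4x
# downsampling of the input.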
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=90, scale_factor=0.3),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=2),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'flip_pairs'
        ]),
]
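# The training pipeline above applies random horizontal flips, rotations of
# up to +/-90 degrees, scaling of up to +/-30%, ImageNet mean/std
# normalization, and Gaussian target heatmaps with sigma=2.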
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=['image_file', 'center', 'scale', 'rotation', 'flip_pairs']),
]
test_pipeline = val_pipeline
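# Validation and test share the same deterministic pipeline (no augmentation).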
data_root = 'data/interhand2.6m'
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=32),
    test_dataloader=dict(samples_per_gpu=32),
    train=dict(
        type='InterHand2DDataset',
        ann_file=f'{data_root}/annotations/all/'
        'InterHand2.6M_train_data.json',
        camera_file=f'{data_root}/annotations/all/'
        'InterHand2.6M_train_camera.json',
        joint_file=f'{data_root}/annotations/all/'
        'InterHand2.6M_train_joint_3d.json',
        img_prefix=f'{data_root}/images/train/',
        data_cfg=data_cfg,
        pipeline=train_pipeline,
        dataset_info={{_base_.dataset_info}}),
    val=dict(
        type='InterHand2DDataset',
        ann_file=f'{data_root}/annotations/machine_annot/'
        'InterHand2.6M_val_data.json',
        camera_file=f'{data_root}/annotations/machine_annot/'
        'InterHand2.6M_val_camera.json',
        joint_file=f'{data_root}/annotations/machine_annot/'
        'InterHand2.6M_val_joint_3d.json',
        img_prefix=f'{data_root}/images/val/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
    test=dict(
        type='InterHand2DDataset',
        ann_file=f'{data_root}/annotations/all/'
        'InterHand2.6M_test_data.json',
        camera_file=f'{data_root}/annotations/all/'
        'InterHand2.6M_test_camera.json',
        joint_file=f'{data_root}/annotations/all/'
        'InterHand2.6M_test_joint_3d.json',
        img_prefix=f'{data_root}/images/test/',
        data_cfg=data_cfg,
        pipeline=test_pipeline,
        dataset_info={{_base_.dataset_info}}),
)
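
# A minimal usage sketch, assuming the standard MMPose-style tools/ entry
# points this config is written for (paths below are illustrative, not part
# of this file):
#
#   python tools/train.py ViTPose_large_interhand2d_all_256x192.py
#   python tools/test.py ViTPose_large_interhand2d_all_256x192.py \
#       work_dirs/ViTPose_large_interhand2d_all_256x192/latest.pth
#
# During training, PCK, AUC and EPE are evaluated every 5 epochs and
# checkpoints are saved on the same interval, with the best kept by AUC.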