# train_vae.py
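"""Training script for the twist-aware pose VAE.

The model reconstructs HybrIK-style pose sequences (29 joint positions plus
23 twist (cos, sin) pairs per frame) and is trained with VAETwistLoss;
evaluation reports MPJPE and MVE through the SMPL layer. A hedged usage
sketch, assuming a single-GPU run with the shipped config:

    python train_vae.py --config configs/vae_smpl_concat_twist.yaml --use-wandb

Distributed runs are set up by utils.misc.init_distributed_mode, so standard
torch.distributed launchers (e.g. torchrun) should also work.
"""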
import argparse
import os

import numpy as np
import pkg_resources
import torch
import torch.backends.cudnn as cudnn
import wandb
from torch import optim
from tqdm import tqdm

import configs.constants as _C
import utils.misc as misc
from loss.vae_loss import VAETwistLoss
from model.smpl.SMPL import SMPL_layer
from utils.learning import load_data, load_model, save_model, AverageMeter
from utils.tools import set_random_seed, get_config, print_args, create_directory_if_not_exists

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-sd', '--seed', default=0,
                        type=int, help='random seed')
    parser.add_argument(
        "--config", type=str, default="configs/vae_smpl_concat_twist.yaml", help="Path to the config file.")
    parser.add_argument('--resume', action='store_true')
    parser.add_argument('-c', '--checkpoint', type=str, metavar='PATH', default='checkpoint',
                        help='checkpoint directory')
    parser.add_argument('--num-cpus', default=16, type=int,
                        help='Number of CPU cores')
    parser.add_argument('--use-wandb', action='store_true')
    parser.add_argument('--wandb-name', default=None, type=str)
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')
    opts = parser.parse_args()
    return opts
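
# Note: parse_args() only covers run-level options (opts). Model and training
# hyperparameters referenced below -- lr, epochs, batch_size, temporal_length,
# lambda_kl, evaluate_freq, save_freq, concat -- come from the YAML config
# loaded via get_config() in main() (args).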

def train_one_epoch(args, model, dataloader, optimizer, criterion, device):
    """Run one training epoch and return the averaged loss terms."""
    model.train()
    loss_meters = {
        'joint_loss': AverageMeter(),
        'joint_velocity_loss': AverageMeter(),
        'twist_loss': AverageMeter(),
        'kl_loss': AverageMeter(),
        'total_loss': AverageMeter()
    }
    if args.concat:
        dataloader.dataset.amass_only = False
    for pose, _, additional_info in tqdm(dataloader):
        pose = pose.float().to(device, non_blocking=True)
        batch_size = pose.shape[0]
        reconstructed_pose, dist = model(pose)  # (B, T, J*6)
        optimizer.zero_grad()
        loss_values = criterion(reconstructed_pose, pose, dist)
        # Sum the individual loss terms; only the KL term is reweighted.
        total_loss = 0
        for key in loss_values:
            loss_meters[key].update(loss_values[key].item(), batch_size)
            coeff = args.lambda_kl if key == 'kl_loss' else 1.0
            total_loss += coeff * loss_values[key]
        loss_meters['total_loss'].update(total_loss.item(), batch_size)
        total_loss.backward()
        optimizer.step()
    return {f'loss/{key}': val.avg for key, val in loss_meters.items()}
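
# For reference, a minimal AverageMeter consistent with how it is used here
# (update(value, n) accumulates a weighted running mean exposed as .avg); the
# real class lives in utils.learning and may differ:
#
#   class AverageMeter:
#       def __init__(self):
#           self.sum, self.count = 0.0, 0
#
#       def update(self, val, n=1):
#           self.sum += val * n
#           self.count += n
#
#       @property
#       def avg(self):
#           return self.sum / max(self.count, 1)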

def evaluate(args, model, dataloader, body_model, device):
    """Reconstruct poses and report MPJPE / MVE (in mm) against the SMPL ground truth."""
    model.eval()
    if args.concat:
        dataloader.dataset.amass_only = True
    else:
        dataloader.dataset.augment = False
    with torch.no_grad():
        joints_meter, vertices_meter = AverageMeter(), AverageMeter()
        for pose, beta, additional_info in tqdm(dataloader):
            pose = pose.float().to(device, non_blocking=True)
            beta = beta.float().to(device, non_blocking=True)
            reconstructed_pose, _ = model(pose)
            frame_batch = args.batch_size * args.temporal_length
            # Run HybrIK inverse kinematics on ground-truth and reconstructed poses.
            out_gt = body_model.hybrik(pose_skeleton=pose[:, :, :29*3].reshape(-1, 29, 3),
                                       phis=pose[:, :, 29*3:].reshape(-1, 23, 2),
                                       betas=beta.reshape(frame_batch, -1), global_orient=None)
            out_gts_vertices = out_gt.vertices.reshape(args.batch_size, args.temporal_length, -1, 3)
            out_gts_joints = out_gt.joints_from_verts.reshape(args.batch_size, args.temporal_length, -1, 3)
            out_pred = body_model.hybrik(pose_skeleton=reconstructed_pose[:, :, :29*3].reshape(-1, 29, 3),
                                         phis=reconstructed_pose[:, :, 29*3:].reshape(-1, 23, 2),
                                         betas=beta.reshape(frame_batch, -1), global_orient=None)
            out_pred_vertices = out_pred.vertices.reshape(args.batch_size, args.temporal_length, -1, 3)
            out_pred_joints = out_pred.joints_from_verts.reshape(args.batch_size, args.temporal_length, -1, 3)
            # Mean per-joint position error and mean vertex error, in millimetres.
            mpjpe = torch.mean(torch.norm(
                out_pred_joints - out_gts_joints, dim=-1)) * 1000
            mve = torch.mean(torch.norm(out_pred_vertices -
                                        out_gts_vertices, dim=-1)) * 1000
            joints_meter.update(mpjpe.item(), args.batch_size)
            vertices_meter.update(mve.item(), args.batch_size)
    return {
        'eval/MPJPE': joints_meter.avg,
        'eval/MVE': vertices_meter.avg
    }
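
# Layout assumed by the slices in evaluate(): each frame packs 29 joint
# positions (29 * 3 = 87 values) followed by 23 twist angles as (cos, sin)
# pairs (23 * 2 = 46 values), i.e. 133 features per frame, matching
# pose[:, :, :29*3] and pose[:, :, 29*3:] above.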

def train(args, opts):
    print_args(args)
    if misc.is_main_process():
        create_directory_if_not_exists(opts.checkpoint)
    device = torch.device(opts.device)
    dataloader = load_data(args, opts, device)
    model, model_without_ddp = load_model(args, opts, device, type='hybrik')
    optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model_without_ddp.parameters()),
                            lr=args.lr)
    criterion = VAETwistLoss().to(device)

    last_ckpt_path = os.path.join(opts.checkpoint, 'last_ckpt.pth.tr')
    resume = os.path.exists(last_ckpt_path)
    if resume:
        print('[INFO] Resuming from last checkpoint')
        checkpoint = torch.load(
            last_ckpt_path, map_location=lambda storage, loc: storage)
        model_without_ddp.load_state_dict(checkpoint['model'], strict=True)
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch'] + 1
        min_error = checkpoint['min_error']
        wandb_id = checkpoint['wandb_id']
    else:
        start_epoch = 0
        wandb_id = wandb.util.generate_id()  # was missing the call parentheses
        min_error = float('inf')

    if opts.use_wandb and misc.is_main_process():
        if resume:
            wandb.init(id=wandb_id,
                       project='VideoDiffHMR',
                       resume='must',
                       settings=wandb.Settings(start_method='fork'))
        else:
            wandb.init(name=opts.wandb_name,
                       project='VideoDiffHMR',
                       settings=wandb.Settings(start_method='fork'))
            wandb.config.update({"run_id": wandb_id})
            wandb.config.update(args)
            installed_packages = {
                d.project_name: d.version for d in pkg_resources.working_set}
            wandb.config.update({'installed_packages': installed_packages})
        wandb_id = wandb.run.id

    if args.evaluate_freq > 0:
        J_regressor_h36m = torch.from_numpy(
            np.load(_C.BMODEL.JOINTS_REGRESSOR_H36M)
        )[_C.KEYPOINTS.H36M_TO_J14, :].float()
        body_model = SMPL_layer(model_path=os.path.join(_C.BMODEL.SMPL, 'SMPL_NEUTRAL.pkl'),
                                h36m_jregressor=J_regressor_h36m).cuda()
    else:
        body_model = None

    print(f'[INFO] Starting from epoch {start_epoch}')
    for epoch in range(start_epoch, args.epochs):
        print(f"[INFO] Epoch {epoch}")
        if opts.distributed:
            dataloader.sampler.set_epoch(epoch)
        loss_values = train_one_epoch(
            args, model, dataloader, optimizer, criterion, device)
        if args.evaluate_freq > 0 and epoch % args.evaluate_freq == 0:
            print("[INFO] Evaluation")
            eval_result = evaluate(
                args, model, dataloader, body_model, device)
        else:
            eval_result = {}
        if loss_values['loss/joint_loss'] < min_error:
            min_error = loss_values['loss/joint_loss']
        if misc.is_main_process():
            log_dict = {
                'loss/min_reconstruction': min_error,
                **loss_values,
                **eval_result
            }
            if opts.use_wandb:
                wandb.log(log_dict, step=epoch + 1)
            for key, value in log_dict.items():
                print(f"[INFO] {key}: {value}")
            if epoch % args.save_freq == 0:
                save_model(model_without_ddp, optimizer, min_error,
                           epoch, wandb_id, last_ckpt_path)
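
# Checkpoints written by save_model() are expected to contain the keys read in
# the resume branch of train(): 'model', 'optimizer', 'epoch', 'min_error',
# and 'wandb_id'.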

def main():
    opts = parse_args()
    misc.init_distributed_mode(opts)
    set_random_seed(opts.seed + misc.get_rank())
    cudnn.benchmark = True
    args = get_config(opts.config)
    train(args, opts)


if __name__ == "__main__":
    main()