-
Notifications
You must be signed in to change notification settings - Fork 13
Expand file tree
/
Copy pathutils.py
More file actions
57 lines (48 loc) · 1.8 KB
/
utils.py
File metadata and controls
57 lines (48 loc) · 1.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import os
import torch
import numpy as np
from utils.options import Options
def load_checkpoint(path, model, optimizer=None, scheduler=None, device="cpu"):
    """Restore training state from a checkpoint produced by ``save_checkpoint``.

    Args:
        path: Checkpoint file path. Any falsy value ("" / None) means
            "start fresh": nothing is loaded and epoch 0 is returned.
        model: Module whose weights are restored in place.
        optimizer: Optional optimizer to restore in place.
        scheduler: Optional LR scheduler to restore in place.
        device: ``map_location`` passed to ``torch.load`` so checkpoints
            saved on GPU can be loaded on CPU (and vice versa).

    Returns:
        A variable-arity tuple, kept for backward compatibility with
        existing call sites:
          * ``(model, epoch)``                         if no optimizer given
          * ``(model, epoch, optimizer)``              if optimizer only
          * ``(model, epoch, optimizer, scheduler)``   if both given
        Note the historical quirk: ``scheduler`` is only returned when
        ``optimizer`` is also provided (its state IS still loaded either way).

    Raises:
        KeyError: if the checkpoint lacks an expected key for a component
            the caller asked to restore.
    """
    epoch = 0
    if path:
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        checkpoint = torch.load(path, map_location=device)
        model.load_state_dict(checkpoint["model"])
        # `is not None` rather than truthiness: we are testing presence of
        # the argument, not some notion of the object being "empty".
        if optimizer is not None:
            optimizer.load_state_dict(checkpoint["optimizer"])
        if scheduler is not None:
            scheduler.load_state_dict(checkpoint["scheduler"])
        epoch = checkpoint["epoch"]
    # Build the variable-arity result once instead of four nested returns.
    result = [model, epoch]
    if optimizer is not None:
        result.append(optimizer)
        if scheduler is not None:
            result.append(scheduler)
    return tuple(result)
def save_checkpoint(model, optimizer, scheduler, epoch, opt: Options):
    """Serialize full training state to ``<checkpoints_dir>/<experiment_name>_<epoch>.pth``.

    Saves the state dicts of model, optimizer and scheduler plus the epoch
    number, in the layout expected by ``load_checkpoint``.

    Args:
        model: Module whose ``state_dict`` is saved.
        optimizer: Optimizer whose ``state_dict`` is saved (must not be None).
        scheduler: LR scheduler whose ``state_dict`` is saved (must not be None).
        epoch: Epoch number stored in the checkpoint and in the filename.
        opt: Options providing ``checkpoints_dir`` and ``experiment_name``.
    """
    # Fix: create the checkpoint directory on first use instead of failing
    # with FileNotFoundError on a fresh run.
    os.makedirs(opt.checkpoints_dir, exist_ok=True)
    torch.save({
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': epoch,
    }, os.path.join(opt.checkpoints_dir, f"{opt.experiment_name}_{epoch}.pth"))
def profile(model, inputs, repeats=1000):
    """Measure parameter count, FLOPs and inference latency of ``model``.

    FLOPs are measured with PAPI hardware counters following
    http://www.bnikolic.co.uk/blog/python/flops/2019/10/01/pytorch-count-flops.html
    NOTE(review): PAPI_SP_OPS counts single-precision ops only — a
    double-precision model would report ~0 FLOPs; confirm against usage.

    Args:
        model: Module to profile; ``inputs`` is unpacked into its call.
        inputs: Iterable of positional arguments for the forward pass.
        repeats: Number of timed forward passes for the latency statistics.

    Returns:
        dict with ``params(M)``, ``flops(M)``, ``inf_time_mean(ms)`` and
        ``inf_time_std(ms)``.
    """
    from time import perf_counter
    # Lazy import: pypapi is an optional, Linux-only profiling dependency.
    from pypapi import papi_high
    from pypapi import events as papi_events

    # One instrumented pass to count floating-point operations.
    papi_high.start_counters([
        papi_events.PAPI_SP_OPS,
    ])
    # Fix: call the module itself rather than model.forward(...) so that
    # registered forward hooks are not silently bypassed.
    model(*inputs)
    flops = papi_high.stop_counters()[0] / 1000000.0

    # Timed passes for latency statistics.
    times = []
    for _ in range(repeats):
        t = perf_counter()
        model(*inputs)
        times.append(perf_counter() - t)

    params = sum(p.numel() for p in model.parameters()) / 1000000.0
    times = np.array(times) * 1000  # seconds -> milliseconds
    return {"params(M)": params, "flops(M)": flops,
            "inf_time_mean(ms)": np.mean(times), "inf_time_std(ms)": np.std(times)}