
Commit f6c32aa

[feat] Add timecheck
1 parent ce8f362 commit f6c32aa

7 files changed: +865 -4 lines changed

demo/demo.py

Lines changed: 2 additions & 1 deletion
@@ -230,7 +230,8 @@ def main():
     box_model.eval()
 
     pose_model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
-        cfg, is_train=False
+        cfg,
+        is_train=False
     )
 
     if cfg.TEST.MODEL_FILE:

demo/demo_without_detection.py

Lines changed: 227 additions & 0 deletions
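
In brief, the new script loads an HRNet pose model from the given config, treats each input image as a single full-frame person box (hence "without detection"), and draws the predicted COCO keypoints and skeleton on the image.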
@@ -0,0 +1,227 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import csv
import os
import shutil

from PIL import Image
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision
import cv2
import numpy as np
import time


import _init_paths
import models
from config import cfg
from config import update_config
from core.function import get_final_preds
from utils.transforms import get_affine_transform
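# Note: _init_paths presumably prepends the repo's lib/ directory to sys.path,
# which is what lets the models/config/core/utils imports above resolve.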

COCO_KEYPOINT_INDEXES = {
    0: 'nose',
    1: 'left_eye',
    2: 'right_eye',
    3: 'left_ear',
    4: 'right_ear',
    5: 'left_shoulder',
    6: 'right_shoulder',
    7: 'left_elbow',
    8: 'right_elbow',
    9: 'left_wrist',
    10: 'right_wrist',
    11: 'left_hip',
    12: 'right_hip',
    13: 'left_knee',
    14: 'right_knee',
    15: 'left_ankle',
    16: 'right_ankle'
}

COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]

# pairs of COCO_KEYPOINT_INDEXES ids that form the limbs drawn by draw_pose
SKELETON = [
    [1, 3], [1, 0], [2, 4], [2, 0], [0, 5], [0, 6], [5, 7], [7, 9], [6, 8],
    [8, 10], [5, 11], [6, 12], [11, 12], [11, 13], [13, 15], [12, 14], [14, 16]
]

CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
              [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],
              [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],
              [255, 0, 255], [255, 0, 170], [255, 0, 85]]

NUM_KPTS = 17

# run on GPU when available, otherwise fall back to CPU
CTX = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')


def box_to_center_scale(box, model_image_width, model_image_height):
    """convert a box to the center/scale information required for pose transformation
    Parameters
    ----------
    box : list of tuple
        list of length 2 with two tuples of floats representing
        the minimum (x, y) and maximum (x, y) corners of a box
        (the caller passes [[0, 0], [w, h]], i.e. the whole image)
    model_image_width : int
    model_image_height : int

    Returns
    -------
    (numpy array, numpy array)
        Two numpy arrays: coordinates for the center of the box and the scale of the box
    """
    center = np.zeros((2), dtype=np.float32)

    bottom_left_corner = box[0]
    top_right_corner = box[1]
    box_width = top_right_corner[0] - bottom_left_corner[0]
    box_height = top_right_corner[1] - bottom_left_corner[1]
    bottom_left_x = bottom_left_corner[0]
    bottom_left_y = bottom_left_corner[1]
    center[0] = bottom_left_x + box_width * 0.5
    center[1] = bottom_left_y + box_height * 0.5

    aspect_ratio = model_image_width * 1.0 / model_image_height
    pixel_std = 200

    # grow the shorter side so the box matches the model input's aspect ratio
    if box_width > aspect_ratio * box_height:
        box_height = box_width * 1.0 / aspect_ratio
    elif box_width < aspect_ratio * box_height:
        box_width = box_height * aspect_ratio
    # scale is expressed in units of pixel_std (the COCO keypoint convention),
    # then padded by 1.25 so the person fits inside the crop
    scale = np.array(
        [box_width * 1.0 / pixel_std, box_height * 1.0 / pixel_std],
        dtype=np.float32)
    if center[0] != -1:
        scale = scale * 1.25

    return center, scale

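# Worked example: with a hypothetical 640x480 frame and a 288x384 model input,
# aspect_ratio = 288 / 384 = 0.75 and 640 > 0.75 * 480, so box_height grows to
# 640 / 0.75 = 853.33; the result is center = (320, 240) and
# scale = (640 / 200, 853.33 / 200) * 1.25 = (4.0, 5.33).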

def get_pose_estimation_prediction(pose_model, image, center, scale):
    rotation = 0

    # pose estimation transformation
    trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE)
    model_input = cv2.warpAffine(
        image,
        trans,
        (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
        flags=cv2.INTER_LINEAR)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # pose estimation inference
    model_input = transform(model_input).unsqueeze(0)
    # switch to evaluate mode
    pose_model.eval()
    with torch.no_grad():
        # compute output heatmap
        output = pose_model(model_input)
        preds, _ = get_final_preds(
            cfg,
            output.clone().cpu().numpy(),
            np.asarray([center]),
            np.asarray([scale]))
    return preds


def get_model(args):
    update_config(cfg, args)
    pose_model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(cfg, is_train=False)

    if cfg.TEST.MODEL_FILE:
        print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        # strict=False tolerates checkpoint/model key mismatches instead of raising
        pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        print('expected model defined in config at TEST.MODEL_FILE')

    pose_model = torch.nn.DataParallel(pose_model, device_ids=cfg.GPUS)
    pose_model.to(CTX)
    pose_model.eval()
    return pose_model


def draw_pose(keypoints, img):
    """draw the keypoints and the skeletons.
    :params keypoints: the shape should be equal to [17,2]
    :params img:
    """
    assert keypoints.shape == (NUM_KPTS, 2)
    for i in range(len(SKELETON)):
        kpt_a, kpt_b = SKELETON[i][0], SKELETON[i][1]
        x_a, y_a = keypoints[kpt_a][0], keypoints[kpt_a][1]
        x_b, y_b = keypoints[kpt_b][0], keypoints[kpt_b][1]
        cv2.circle(img, (int(x_a), int(y_a)), 6, CocoColors[i], -1)
        cv2.circle(img, (int(x_b), int(y_b)), 6, CocoColors[i], -1)
        cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), CocoColors[i], 2)


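# The "without detection" part: the whole frame is treated as one person box,
# so no detector has to run before pose estimation.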
def inference(model, image):
    # cv2 loads BGR; reorder channels to RGB for the pose model
    image_permuted = image[:, :, [2, 1, 0]]
    h, w, _ = image_permuted.shape
    box = [[0, 0],
           [w, h]]

    center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])
    keypoints = get_pose_estimation_prediction(model, image_permuted, center, scale)
    return keypoints

def parse_args():
    parser = argparse.ArgumentParser(description='Without Detection Demo')
    parser.add_argument('--image', type=str, default='sunglassman.jpg')
    parser.add_argument('--cfg', type=str, default='demo/inference-config.yaml')
    parser.add_argument('opts',
                        help='Modify config options using the command-line',
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # update_config expects these attributes even though this demo never sets them
    args.modelDir = ''
    args.logDir = ''
    args.dataDir = ''
    args.prevModelDir = ''
    return args


if __name__ == '__main__':
    args = parse_args()
    model_ = get_model(args)  # get_model() runs update_config(cfg, args)

    # set the cudnn flags only after update_config has merged the YAML,
    # so they reflect the requested CUDNN settings rather than the defaults
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    image_ = cv2.imread(args.image)
    assert image_ is not None, 'failed to read image: {}'.format(args.image)
    keypoints = inference(model_, image_)

    if len(keypoints) >= 1:
        for kpt in keypoints:
            draw_pose(kpt, image_)

    cv2.imshow('demo', image_)
    cv2.waitKey(0)
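
For reference, a typical invocation from the repository root (the --cfg and --image values are simply the argparse defaults above; the checkpoint path is hypothetical, and passing TEST.MODEL_FILE as a trailing KEY VALUE pair assumes update_config merges opts onto cfg, as in the upstream HRNet demos):

    python demo/demo_without_detection.py --cfg demo/inference-config.yaml --image sunglassman.jpg TEST.MODEL_FILE models/pose_hrnet_w32_384x288.pth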
