Skip to content

Commit 37e5205

Browse files
authored
ppdetpose test ok (#5470)
* update pose model by ppdetpose in controlnet to improve performance * add model auto download
1 parent 229056b commit 37e5205

File tree

14 files changed

+4738
-1
lines changed

14 files changed

+4738
-1
lines changed
Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,101 @@
1+
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import cv2
16+
import numpy as np
17+
import paddle
18+
import paddlehub as hub
19+
20+
from . import util
21+
from .det_keypoint_unite_infer import PPDetPose
22+
23+
24+
def keypoint_to_openpose_kpts(coco_keypoints_list):
    """Convert 17 COCO keypoints into the 18-keypoint OpenPose ordering.

    COCO order: ['Nose', 'Leye', 'Reye', 'Lear', 'Rear', 'Lsho', 'Rsho',
    'Lelb', 'Relb', 'Lwri', 'Rwri', 'Lhip', 'Rhip', 'Lkne', 'Rkne',
    'Lank', 'Rank']; each entry is [x, y, confidence].

    OpenPose order: ['Nose', 'Neck', 'Rsho', 'Relb', 'Rwri', 'Lsho',
    'Lelb', 'Lwri', 'Rhip', 'Rkne', 'Rank', 'Lhip', 'Lkne', 'Lank',
    'Reye', 'Leye', 'Rear', 'Lear'].

    The 'Neck' point (absent from COCO) is synthesized as the midpoint of
    the two shoulders, with the weaker shoulder confidence.
    """
    # COCO source index for each OpenPose slot (Neck handled separately).
    reorder = [0, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
    openpose_kpts = [coco_keypoints_list[src] for src in reorder]

    # Synthesize 'Neck' as the shoulder midpoint (truncated to int),
    # confidence taken as the weaker of the two shoulder confidences.
    lsho = coco_keypoints_list[5]
    rsho = coco_keypoints_list[6]
    neck = [
        int((lsho[0] + rsho[0]) / 2.0),
        int((lsho[1] + rsho[1]) / 2.0),
        min(lsho[2], rsho[2]),
    ]
    openpose_kpts.insert(1, neck)

    return openpose_kpts
49+
50+
51+
class PPDetDetector:
    """Pose detector combining PPDet/HRNet keypoint inference with
    PaddleHub OpenPose modules for rendering body (and optionally hand)
    poses on a black canvas.
    """

    def __init__(self):
        # PaddleHub module used here only for its draw_pose() renderer.
        self.body_estimation = hub.Module(name="openpose_body_estimation")
        # PaddleHub module that estimates hand keypoints from image crops.
        self.hand_estimation = hub.Module(name="openpose_hands_estimation")
        # Detector + HRNet keypoint predictor (see det_keypoint_unite_infer).
        self.ppdetpose = PPDetPose()

    def __call__(self, oriImg, detect_resolution=512, hand=False):
        """Detect poses in an image and render them.

        Args:
            oriImg: input image array (H, W, C as used by cv2 — channel
                order presumably BGR, given the ::-1 flip below; verify).
            detect_resolution: target size for the image's shorter side.
            hand: when True, additionally detect and draw hand keypoints.

        Returns:
            (canvas, pose): the rendered pose image and a dict with
            'candidate' and 'subset' converted to plain Python lists.
        """
        with paddle.no_grad():
            # Scale factor mapping the shorter image side to detect_resolution.
            img_scalarfactor = detect_resolution / min(oriImg.shape[:2])
            # Keypoints are predicted on the original image, then rescaled
            # into the resized-canvas coordinate system.
            result = self.ppdetpose_pred(oriImg)
            result["candidate"] = result["candidate"] * img_scalarfactor
            oriImg = cv2.resize(oriImg, (0, 0), fx=img_scalarfactor, fy=img_scalarfactor)
            # Draw onto an all-black canvas with the resized image's shape.
            canvas = oriImg.copy()
            canvas.fill(0)
            canvas = self.body_estimation.draw_pose(canvas, result["candidate"], result["subset"])
            if hand:
                # Derive square hand boxes (x, y, width, is_left) from body keypoints.
                hands_list = util.hand_detect(result["candidate"], result["subset"], oriImg)
                all_hand_peaks = []
                for x, y, w, is_left in hands_list:
                    # NOTE: the comprehension variable x does not leak in
                    # Python 3, so the box coordinate x is preserved below.
                    scale_search = [x * img_scalarfactor for x in [0.5, 1.0, 1.5, 2.0]]
                    # ::-1 flips channel order for the crop passed to the
                    # hub module (presumably BGR -> RGB — verify against module docs).
                    peaks = self.hand_estimation.hand_estimation(
                        oriImg[y : y + w, x : x + w, ::-1], scale_search=scale_search
                    )
                    # Shift crop-local peak coordinates back into full-image
                    # space; zero entries (undetected points) stay zero.
                    peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
                    peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
                    all_hand_peaks.append(peaks)
                canvas = util.draw_handpose(canvas, all_hand_peaks)

        return canvas, dict(candidate=result["candidate"].tolist(), subset=result["subset"].tolist())

    def ppdetpose_pred(self, image, kpt_threshold=0.3):
        """Run PPDet+HRNet inference and convert the result to
        OpenPose-style 'candidate'/'subset' arrays.

        Args:
            image: input image array.
            kpt_threshold: confidence below which a keypoint is discarded.

        Returns:
            dict with 'candidate' (N x 4 array of [x, y, score, id]) and
            'subset' (per-person rows of candidate ids, -1 where missing).
        """
        poseres = self.ppdetpose.ppdet_hrnet_infer(image)
        # keypoints: per-person keypoint lists — presumably
        # poseres["keypoint"][0] indexes the keypoint coordinates; TODO confirm.
        keypoints = poseres["keypoint"][0]
        num_kpts = len(keypoints)
        # One subset row per detection; 20 columns matches OpenPose's
        # 18 keypoints plus two bookkeeping slots (presumably score/count).
        subset = np.ones((num_kpts, 20)) * -1
        candidate = np.zeros((0, 4))
        posnum = 0  # running candidate id
        for kptid, keypoint in enumerate(keypoints):
            openpose_kpts = keypoint_to_openpose_kpts(keypoint)
            for idx, item in enumerate(openpose_kpts):
                if item[2] > kpt_threshold:
                    subset[kptid][idx] = posnum
                    # Append [x, y, score, id] as one candidate row.
                    kpt = np.array(
                        item
                        + [
                            posnum,
                        ]
                    )
                    candidate = np.vstack((candidate, kpt))
                    posnum += 1
        return {"candidate": candidate, "subset": subset}
Lines changed: 273 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,273 @@
1+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import logging
16+
import os
17+
from pathlib import Path
18+
19+
import paddle
20+
import paddle.inference as paddle_infer
21+
22+
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
23+
LOG_PATH_ROOT = f"{CUR_DIR}/../../output"
24+
25+
26+
class PaddleInferBenchmark(object):
    """Collect, format, and log Paddle inference benchmark results."""

    def __init__(
        self,
        config,
        model_info: dict = {},
        data_info: dict = {},
        perf_info: dict = {},
        resource_info: dict = {},
        **kwargs
    ):
        """
        Construct PaddleInferBenchmark Class to format logs.
        args:
            config(paddle.inference.Config): paddle inference config
            model_info(dict): basic model info
                {'model_name': 'resnet50'
                 'precision': 'fp32'}
            data_info(dict): input data info
                {'batch_size': 1
                 'shape': '3,224,224'
                 'data_num': 1000}
            perf_info(dict): performance result
                {'preprocess_time_s': 1.0
                 'inference_time_s': 2.0
                 'postprocess_time_s': 1.0
                 'total_time_s': 4.0}
            resource_info(dict):
                cpu and gpu resources
                {'cpu_rss': 100
                 'gpu_rss': 100
                 'gpu_util': 60}
        """
        # NOTE: the dict defaults above are read-only in this class, so the
        # shared-mutable-default pitfall does not apply; kept for interface
        # compatibility.

        # PaddleInferBenchmark Log Version
        self.log_version = "1.0.3"

        # Paddle Version
        self.paddle_version = paddle.__version__
        self.paddle_commit = paddle.__git_commit__
        paddle_infer_info = paddle_infer.get_version()
        self.paddle_branch = paddle_infer_info.strip().split(": ")[-1]

        # model info
        self.model_info = model_info

        # data info
        self.data_info = data_info

        # perf info
        self.perf_info = perf_info

        try:
            # Required values: a missing key (KeyError) or a non-numeric
            # inference time (TypeError from round) is surfaced as one
            # ValueError after printing usage help.
            self.model_name = model_info["model_name"]
            self.precision = model_info["precision"]

            self.batch_size = data_info["batch_size"]
            self.shape = data_info["shape"]
            self.data_num = data_info["data_num"]

            self.inference_time_s = round(perf_info["inference_time_s"], 4)
        except (KeyError, TypeError):
            self.print_help()
            raise ValueError("Set argument wrong, please check input argument and its type")

        # Optional perf values default to 0 / "" when absent.
        self.preprocess_time_s = perf_info.get("preprocess_time_s", 0)
        self.postprocess_time_s = perf_info.get("postprocess_time_s", 0)
        self.with_tracker = True if "tracking_time_s" in perf_info else False
        self.tracking_time_s = perf_info.get("tracking_time_s", 0)
        self.total_time_s = perf_info.get("total_time_s", 0)

        self.inference_time_s_90 = perf_info.get("inference_time_s_90", "")
        self.inference_time_s_99 = perf_info.get("inference_time_s_99", "")
        self.succ_rate = perf_info.get("succ_rate", "")
        self.qps = perf_info.get("qps", "")

        # conf info
        self.config_status = self.parse_config(config)

        # mem info: fall back to zeros when resource_info is not a dict.
        if isinstance(resource_info, dict):
            self.cpu_rss_mb = int(resource_info.get("cpu_rss_mb", 0))
            self.cpu_vms_mb = int(resource_info.get("cpu_vms_mb", 0))
            self.cpu_shared_mb = int(resource_info.get("cpu_shared_mb", 0))
            self.cpu_dirty_mb = int(resource_info.get("cpu_dirty_mb", 0))
            self.cpu_util = round(resource_info.get("cpu_util", 0), 2)

            self.gpu_rss_mb = int(resource_info.get("gpu_rss_mb", 0))
            self.gpu_util = round(resource_info.get("gpu_util", 0), 2)
            self.gpu_mem_util = round(resource_info.get("gpu_mem_util", 0), 2)
        else:
            self.cpu_rss_mb = 0
            self.cpu_vms_mb = 0
            self.cpu_shared_mb = 0
            self.cpu_dirty_mb = 0
            self.cpu_util = 0

            self.gpu_rss_mb = 0
            self.gpu_util = 0
            self.gpu_mem_util = 0

        # init benchmark logger
        self.benchmark_logger()

    def benchmark_logger(self):
        """Configure root logging to write to both a per-model log file and
        stdout, and bind ``self.logger``.
        """
        # remove other logging handlers so basicConfig takes effect
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)

        # Init logger
        FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        log_output = f"{LOG_PATH_ROOT}/{self.model_name}.log"
        Path(f"{LOG_PATH_ROOT}").mkdir(parents=True, exist_ok=True)
        logging.basicConfig(
            level=logging.INFO,
            format=FORMAT,
            handlers=[
                logging.FileHandler(filename=log_output, mode="w"),
                logging.StreamHandler(),
            ],
        )
        self.logger = logging.getLogger(__name__)
        self.logger.info(f"Paddle Inference benchmark log will be saved to {log_output}")

    def parse_config(self, config) -> dict:
        """
        parse paddle predictor config
        args:
            config(paddle.inference.Config): paddle inference config,
                or a plain dict with the same keys
        return:
            config_status(dict): dict style config info
        raises:
            ValueError: when config is neither a Config nor a dict
        """
        # BUGFIX: config_status was previously only created inside the
        # Config branch, so a dict config raised UnboundLocalError.
        config_status = {}
        if isinstance(config, dict):
            config_status["runtime_device"] = config.get("runtime_device", "")
            config_status["ir_optim"] = config.get("ir_optim", "")
            config_status["enable_tensorrt"] = config.get("enable_tensorrt", "")
            config_status["precision"] = config.get("precision", "")
            config_status["enable_mkldnn"] = config.get("enable_mkldnn", "")
            config_status["cpu_math_library_num_threads"] = config.get("cpu_math_library_num_threads", "")
        elif isinstance(config, paddle_infer.Config):
            config_status["runtime_device"] = "gpu" if config.use_gpu() else "cpu"
            config_status["ir_optim"] = config.ir_optim()
            config_status["enable_tensorrt"] = config.tensorrt_engine_enabled()
            config_status["precision"] = self.precision
            config_status["enable_mkldnn"] = config.mkldnn_enabled()
            config_status["cpu_math_library_num_threads"] = config.cpu_math_library_num_threads()
        else:
            self.print_help()
            raise ValueError("Set argument config wrong, please check input argument and its type")
        return config_status

    def report(self, identifier=None):
        """
        print log report
        args:
            identifier(string): identify log
        """
        if identifier:
            identifier = f"[{identifier}]"
        else:
            identifier = ""

        self.logger.info("\n")
        self.logger.info("---------------------- Paddle info ----------------------")
        self.logger.info(f"{identifier} paddle_version: {self.paddle_version}")
        self.logger.info(f"{identifier} paddle_commit: {self.paddle_commit}")
        self.logger.info(f"{identifier} paddle_branch: {self.paddle_branch}")
        self.logger.info(f"{identifier} log_api_version: {self.log_version}")
        self.logger.info("----------------------- Conf info -----------------------")
        self.logger.info(f"{identifier} runtime_device: {self.config_status['runtime_device']}")
        self.logger.info(f"{identifier} ir_optim: {self.config_status['ir_optim']}")
        self.logger.info(f"{identifier} enable_memory_optim: {True}")
        self.logger.info(f"{identifier} enable_tensorrt: {self.config_status['enable_tensorrt']}")
        self.logger.info(f"{identifier} enable_mkldnn: {self.config_status['enable_mkldnn']}")
        self.logger.info(
            f"{identifier} cpu_math_library_num_threads: {self.config_status['cpu_math_library_num_threads']}"
        )
        self.logger.info("----------------------- Model info ----------------------")
        self.logger.info(f"{identifier} model_name: {self.model_name}")
        self.logger.info(f"{identifier} precision: {self.precision}")
        self.logger.info("----------------------- Data info -----------------------")
        self.logger.info(f"{identifier} batch_size: {self.batch_size}")
        self.logger.info(f"{identifier} input_shape: {self.shape}")
        self.logger.info(f"{identifier} data_num: {self.data_num}")
        self.logger.info("----------------------- Perf info -----------------------")
        self.logger.info(
            f"{identifier} cpu_rss(MB): {self.cpu_rss_mb}, cpu_vms: {self.cpu_vms_mb}, cpu_shared_mb: {self.cpu_shared_mb}, cpu_dirty_mb: {self.cpu_dirty_mb}, cpu_util: {self.cpu_util}%"
        )
        self.logger.info(
            f"{identifier} gpu_rss(MB): {self.gpu_rss_mb}, gpu_util: {self.gpu_util}%, gpu_mem_util: {self.gpu_mem_util}%"
        )
        self.logger.info(f"{identifier} total time spent(s): {self.total_time_s}")

        if self.with_tracker:
            self.logger.info(
                f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s*1000, 1)}, "
                f"inference_time(ms): {round(self.inference_time_s*1000, 1)}, "
                f"postprocess_time(ms): {round(self.postprocess_time_s*1000, 1)}, "
                f"tracking_time(ms): {round(self.tracking_time_s*1000, 1)}"
            )
        else:
            self.logger.info(
                f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s*1000, 1)}, "
                f"inference_time(ms): {round(self.inference_time_s*1000, 1)}, "
                f"postprocess_time(ms): {round(self.postprocess_time_s*1000, 1)}"
            )
        if self.inference_time_s_90:
            # BUGFIX: was `self.looger.info`, which raised AttributeError
            # whenever inference_time_s_90 was provided.
            self.logger.info(
                f"{identifier} 90%_cost: {self.inference_time_s_90}, 99%_cost: {self.inference_time_s_99}, succ_rate: {self.succ_rate}"
            )
        if self.qps:
            self.logger.info(f"{identifier} QPS: {self.qps}")

    def print_help(self):
        """
        print function help
        """
        print(
            """Usage:
            ==== Print inference benchmark logs. ====
            config = paddle.inference.Config()
            model_info = {'model_name': 'resnet50'
                          'precision': 'fp32'}
            data_info = {'batch_size': 1
                         'shape': '3,224,224'
                         'data_num': 1000}
            perf_info = {'preprocess_time_s': 1.0
                         'inference_time_s': 2.0
                         'postprocess_time_s': 1.0
                         'total_time_s': 4.0}
            resource_info = {'cpu_rss_mb': 100
                             'gpu_rss_mb': 100
                             'gpu_util': 60}
            log = PaddleInferBenchmark(config, model_info, data_info, perf_info, resource_info)
            log('Test')
            """
        )

    def __call__(self, identifier=None):
        """
        __call__
        args:
            identifier(string): identify log
        """
        self.report(identifier)

0 commit comments

Comments
 (0)