# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from pathlib import Path

import paddle
import paddle.inference as paddle_infer

CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_PATH_ROOT = f"{CUR_DIR}/../../output"
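# Benchmark logs land in this "output" directory two levels above this file;
# benchmark_logger() below creates it on demand.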


class PaddleInferBenchmark(object):
    def __init__(
        self,
        config,
        model_info: dict = {},
        data_info: dict = {},
        perf_info: dict = {},
        resource_info: dict = {},
        **kwargs
    ):
        """
        Construct a PaddleInferBenchmark instance to collect and format benchmark logs.
        args:
            config(paddle.inference.Config): paddle inference config
            model_info(dict): basic model info
                {'model_name': 'resnet50',
                 'precision': 'fp32'}
            data_info(dict): input data info
                {'batch_size': 1,
                 'shape': '3,224,224',
                 'data_num': 1000}
            perf_info(dict): performance result
                {'preprocess_time_s': 1.0,
                 'inference_time_s': 2.0,
                 'postprocess_time_s': 1.0,
                 'total_time_s': 4.0}
            resource_info(dict): cpu and gpu resources
                {'cpu_rss_mb': 100,
                 'gpu_rss_mb': 100,
                 'gpu_util': 60}
        """
        # PaddleInferBenchmark log version
        self.log_version = "1.0.3"

        # Paddle version
        self.paddle_version = paddle.__version__
        self.paddle_commit = paddle.__git_commit__
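        # paddle_infer.get_version() returns human-readable version text; the
        # token after the last ": " in that text is recorded as the build branch.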
        paddle_infer_info = paddle_infer.get_version()
        self.paddle_branch = paddle_infer_info.strip().split(": ")[-1]

        # model info
        self.model_info = model_info

        # data info
        self.data_info = data_info

        # perf info
        self.perf_info = perf_info

        try:
            # required values
            self.model_name = model_info["model_name"]
            self.precision = model_info["precision"]

            self.batch_size = data_info["batch_size"]
            self.shape = data_info["shape"]
            self.data_num = data_info["data_num"]

            self.inference_time_s = round(perf_info["inference_time_s"], 4)
        except (KeyError, TypeError) as e:
            self.print_help()
            raise ValueError("Invalid arguments, please check the input arguments and their types") from e

        self.preprocess_time_s = perf_info.get("preprocess_time_s", 0)
        self.postprocess_time_s = perf_info.get("postprocess_time_s", 0)
        self.with_tracker = "tracking_time_s" in perf_info
        self.tracking_time_s = perf_info.get("tracking_time_s", 0)
        self.total_time_s = perf_info.get("total_time_s", 0)

        self.inference_time_s_90 = perf_info.get("inference_time_s_90", "")
        self.inference_time_s_99 = perf_info.get("inference_time_s_99", "")
        self.succ_rate = perf_info.get("succ_rate", "")
        self.qps = perf_info.get("qps", "")

        # conf info
        self.config_status = self.parse_config(config)

        # mem info
        if isinstance(resource_info, dict):
            self.cpu_rss_mb = int(resource_info.get("cpu_rss_mb", 0))
            self.cpu_vms_mb = int(resource_info.get("cpu_vms_mb", 0))
            self.cpu_shared_mb = int(resource_info.get("cpu_shared_mb", 0))
            self.cpu_dirty_mb = int(resource_info.get("cpu_dirty_mb", 0))
            self.cpu_util = round(resource_info.get("cpu_util", 0), 2)

            self.gpu_rss_mb = int(resource_info.get("gpu_rss_mb", 0))
            self.gpu_util = round(resource_info.get("gpu_util", 0), 2)
            self.gpu_mem_util = round(resource_info.get("gpu_mem_util", 0), 2)
        else:
            self.cpu_rss_mb = 0
            self.cpu_vms_mb = 0
            self.cpu_shared_mb = 0
            self.cpu_dirty_mb = 0
            self.cpu_util = 0

            self.gpu_rss_mb = 0
            self.gpu_util = 0
            self.gpu_mem_util = 0

        # init benchmark logger
        self.benchmark_logger()

    def benchmark_logger(self):
        """
        Initialize the benchmark logger with file and console handlers.
        """
        # remove existing logging handlers
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)

        # init logger
        FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        log_output = f"{LOG_PATH_ROOT}/{self.model_name}.log"
        Path(f"{LOG_PATH_ROOT}").mkdir(parents=True, exist_ok=True)
        logging.basicConfig(
            level=logging.INFO,
            format=FORMAT,
            handlers=[
                logging.FileHandler(filename=log_output, mode="w"),
                logging.StreamHandler(),
            ],
        )
        self.logger = logging.getLogger(__name__)
        self.logger.info(f"Paddle Inference benchmark log will be saved to {log_output}")

    def parse_config(self, config) -> dict:
        """
        parse paddle predictor config
        args:
            config(paddle.inference.Config): paddle inference config
        return:
            config_status(dict): dict style config info
        """
        config_status = {}
        if isinstance(config, paddle_infer.Config):
            config_status["runtime_device"] = "gpu" if config.use_gpu() else "cpu"
            config_status["ir_optim"] = config.ir_optim()
            config_status["enable_tensorrt"] = config.tensorrt_engine_enabled()
            config_status["precision"] = self.precision
            config_status["enable_mkldnn"] = config.mkldnn_enabled()
            config_status["cpu_math_library_num_threads"] = config.cpu_math_library_num_threads()
        elif isinstance(config, dict):
            config_status["runtime_device"] = config.get("runtime_device", "")
            config_status["ir_optim"] = config.get("ir_optim", "")
            config_status["enable_tensorrt"] = config.get("enable_tensorrt", "")
            config_status["precision"] = config.get("precision", "")
            config_status["enable_mkldnn"] = config.get("enable_mkldnn", "")
            config_status["cpu_math_library_num_threads"] = config.get("cpu_math_library_num_threads", "")
        else:
            self.print_help()
            raise ValueError("Invalid config argument, please check the input argument and its type")
        return config_status

    def report(self, identifier=None):
        """
        print log report
        args:
            identifier(string): identify log
        """
        if identifier:
            identifier = f"[{identifier}]"
        else:
            identifier = ""

        self.logger.info("\n")
        self.logger.info("---------------------- Paddle info ----------------------")
        self.logger.info(f"{identifier} paddle_version: {self.paddle_version}")
        self.logger.info(f"{identifier} paddle_commit: {self.paddle_commit}")
        self.logger.info(f"{identifier} paddle_branch: {self.paddle_branch}")
        self.logger.info(f"{identifier} log_api_version: {self.log_version}")
        self.logger.info("----------------------- Conf info -----------------------")
        self.logger.info(f"{identifier} runtime_device: {self.config_status['runtime_device']}")
        self.logger.info(f"{identifier} ir_optim: {self.config_status['ir_optim']}")
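        # note: enable_memory_optim is logged as a constant True below; it is
        # not read back from the config object.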
        self.logger.info(f"{identifier} enable_memory_optim: {True}")
        self.logger.info(f"{identifier} enable_tensorrt: {self.config_status['enable_tensorrt']}")
        self.logger.info(f"{identifier} enable_mkldnn: {self.config_status['enable_mkldnn']}")
        self.logger.info(
            f"{identifier} cpu_math_library_num_threads: {self.config_status['cpu_math_library_num_threads']}"
        )
        self.logger.info("----------------------- Model info ----------------------")
        self.logger.info(f"{identifier} model_name: {self.model_name}")
        self.logger.info(f"{identifier} precision: {self.precision}")
        self.logger.info("----------------------- Data info -----------------------")
        self.logger.info(f"{identifier} batch_size: {self.batch_size}")
        self.logger.info(f"{identifier} input_shape: {self.shape}")
        self.logger.info(f"{identifier} data_num: {self.data_num}")
        self.logger.info("----------------------- Perf info -----------------------")
        self.logger.info(
            f"{identifier} cpu_rss(MB): {self.cpu_rss_mb}, cpu_vms: {self.cpu_vms_mb}, cpu_shared_mb: {self.cpu_shared_mb}, cpu_dirty_mb: {self.cpu_dirty_mb}, cpu_util: {self.cpu_util}%"
        )
        self.logger.info(
            f"{identifier} gpu_rss(MB): {self.gpu_rss_mb}, gpu_util: {self.gpu_util}%, gpu_mem_util: {self.gpu_mem_util}%"
        )
        self.logger.info(f"{identifier} total time spent(s): {self.total_time_s}")

        if self.with_tracker:
            self.logger.info(
                f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s * 1000, 1)}, "
                f"inference_time(ms): {round(self.inference_time_s * 1000, 1)}, "
                f"postprocess_time(ms): {round(self.postprocess_time_s * 1000, 1)}, "
                f"tracking_time(ms): {round(self.tracking_time_s * 1000, 1)}"
            )
        else:
            self.logger.info(
                f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s * 1000, 1)}, "
                f"inference_time(ms): {round(self.inference_time_s * 1000, 1)}, "
                f"postprocess_time(ms): {round(self.postprocess_time_s * 1000, 1)}"
            )
        if self.inference_time_s_90:
            self.logger.info(
                f"{identifier} 90%_cost: {self.inference_time_s_90}, 99%_cost: {self.inference_time_s_99}, succ_rate: {self.succ_rate}"
            )
        if self.qps:
            self.logger.info(f"{identifier} QPS: {self.qps}")

    def print_help(self):
        """
        Print usage help.
        """
        print(
            """Usage:
                ==== Print inference benchmark logs. ====
                config = paddle.inference.Config()
                model_info = {'model_name': 'resnet50',
                              'precision': 'fp32'}
                data_info = {'batch_size': 1,
                             'shape': '3,224,224',
                             'data_num': 1000}
                perf_info = {'preprocess_time_s': 1.0,
                             'inference_time_s': 2.0,
                             'postprocess_time_s': 1.0,
                             'total_time_s': 4.0}
                resource_info = {'cpu_rss_mb': 100,
                                 'gpu_rss_mb': 100,
                                 'gpu_util': 60}
                log = PaddleInferBenchmark(config, model_info, data_info, perf_info, resource_info)
                log('Test')
                """
        )

    def __call__(self, identifier=None):
        """
        print log report
        args:
            identifier(string): identify log
        """
        self.report(identifier)
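

if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only): every number below is a
    # placeholder, not a real measurement. A plain dict stands in for a
    # paddle.inference.Config so no predictor needs to be built.
    dummy_config = {
        "runtime_device": "cpu",
        "ir_optim": True,
        "enable_tensorrt": False,
        "precision": "fp32",
        "enable_mkldnn": False,
        "cpu_math_library_num_threads": 1,
    }
    model_info = {"model_name": "resnet50", "precision": "fp32"}
    data_info = {"batch_size": 1, "shape": "3,224,224", "data_num": 1000}
    perf_info = {
        "preprocess_time_s": 1.0,
        "inference_time_s": 2.0,
        "postprocess_time_s": 1.0,
        "total_time_s": 4.0,
    }
    resource_info = {"cpu_rss_mb": 100, "gpu_rss_mb": 100, "gpu_util": 60}
    # Writes a formatted report to the console and to output/resnet50.log.
    log = PaddleInferBenchmark(dummy_config, model_info, data_info, perf_info, resource_info)
    log("Test")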