TensorFlow Edge TPU         | `edgetpu`                     | yolov5s_edgetpu.tflite
TensorFlow.js               | `tfjs`                        | yolov5s_web_model/

+ Requirements:
+     $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
+     $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU
+
Usage:
    $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
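(Editor's note, not part of the commit.) Besides the CLI usage above, the export can also be driven from Python by calling `run()` directly. A minimal sketch, assuming the keyword names match the argparse defaults (`weights`, `imgsz`, `include`, `device` are assumptions here):

    import export  # yolov5/export.py

    files = export.run(weights='yolov5s.pt', imgsz=(640, 640), include=('torchscript', 'onnx'), device='cpu')
    print(files)  # list of exported file/dir paths returned by run()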
import subprocess
import sys
import time
+ import warnings
from pathlib import Path

import pandas as pd
@@ -239,41 +244,14 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F
    except Exception as e:
        LOGGER.info(f'\n{prefix} export failure: {e}')

- def export_keras(model, im, file, dynamic, prefix=colorstr('Keras:')):
-     # YOLOv5 TensorFlow SavedModel export
-     try:
-         import tensorflow as tf
-         from tensorflow import keras
-
-         from models.keras import TFDetect, KerasModel
-
-         LOGGER.info(f'\n{prefix} starting export with keras {tf.__version__}...')
-         f = str(file).replace('.pt', '.h5')
-         batch_size, ch, *imgsz = list(im.shape)  # BCHW
-
-         model = KerasModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
-         im = tf.zeros((batch_size, *imgsz, 3))  # BHWC order for Keras
-         _ = model.predict(im)  # first call to create weights
-         inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
-         outputs = model.predict(inputs)
-         keras_model = keras.Model(inputs=inputs, outputs=outputs, name="yolov5n")
-         keras_model.trainable = False
-         keras_model.summary()
-         keras_model.save(f, save_format='h5')
-
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return keras_model, f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-         return None, None

def export_saved_model(model, im, file, dynamic,
                       tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45,
-                      conf_thres=0.25, prefix=colorstr('TensorFlow SavedModel:')):
+                      conf_thres=0.25, keras=False, prefix=colorstr('TensorFlow SavedModel:')):
    # YOLOv5 TensorFlow SavedModel export
    try:
        import tensorflow as tf
-         from tensorflow import keras
+         from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

        from models.tf import TFDetect, TFModel
@@ -282,16 +260,28 @@ def export_saved_model(model, im, file, dynamic,
        batch_size, ch, *imgsz = list(im.shape)  # BCHW

        tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
-         im = tf.ones((batch_size, *imgsz, 3))  # BHWC order for TensorFlow
-         y = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-         y = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-         inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
+         im = tf.zeros((batch_size, *imgsz, 3))  # BHWC order for TensorFlow
+         _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+         inputs = tf.keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
        outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-         keras_model = keras.Model(inputs=inputs, outputs=outputs)
+         keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
        keras_model.trainable = False
        keras_model.summary()
-         keras_model.save(f, save_format='tf')
-
+         if keras:
+             keras_model.save(f, save_format='tf')
+         else:
+             m = tf.function(lambda x: keras_model(x))  # full model
+             spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
+             m = m.get_concrete_function(spec)
+             frozen_func = convert_variables_to_constants_v2(m)
+             tfm = tf.Module()
+             tfm.__call__ = tf.function(lambda x: frozen_func(x), [spec])
+             tfm.__call__(im)
+             tf.saved_model.save(
+                 tfm,
+                 f,
+                 options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if
+                 check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions())
        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
        return keras_model, f
    except Exception as e:
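(Editor's note, not part of the commit.) The new default branch (keras=False) wraps the frozen ConcreteFunction in a tf.Module and saves it, so the resulting SavedModel is invoked directly rather than through tf.keras. A minimal consumption sketch, where the 'yolov5s_saved_model' path and the 640x640 input size are assumptions:

    import tensorflow as tf

    m = tf.saved_model.load('yolov5s_saved_model')     # restored tf.Module; __call__ is the frozen graph
    im = tf.zeros((1, 640, 640, 3), dtype=tf.float32)  # BHWC input, matching the TensorSpec captured at export
    y = m(im)                                          # inference through the frozen function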
@@ -358,13 +348,14 @@ def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')):
        cmd = 'edgetpu_compiler --version'
        help_url = 'https://coral.ai/docs/edgetpu/compiler/'
        assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
-         if subprocess.run(cmd, shell=True).returncode != 0:
+         if subprocess.run(cmd + ' >/dev/null', shell=True).returncode != 0:
            LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
+             sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
            for c in ['curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
                      'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
                      'sudo apt-get update',
                      'sudo apt-get install edgetpu-compiler']:
-                 subprocess.run(c, shell=True, check=True)
+                 subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
        ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]

        LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
@@ -446,16 +437,17 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
    tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs'))  # TensorFlow exports
    file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights)

-     # Checks
-     imgsz *= 2 if len(imgsz) == 1 else 1  # expand
-     opset = 12 if ('openvino' in include) else opset  # OpenVINO requires opset <= 12
-
    # Load PyTorch model
    device = select_device(device)
    assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0'
    model = attempt_load(weights, map_location=device, inplace=True, fuse=True)  # load FP32 model
    nc, names = model.nc, model.names  # number of classes, class names

+     # Checks
+     imgsz *= 2 if len(imgsz) == 1 else 1  # expand
+     opset = 12 if ('openvino' in include) else opset  # OpenVINO requires opset <= 12
+     assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}'
+
    # Input
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
@@ -477,10 +469,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'

    for _ in range(2):
        y = model(im)  # dry runs
-     LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)")
+     shape = tuple(y[0].shape)  # model output shape
+     LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")

    # Exports
-     f = [''] * 11  # exported filenames
+     f = [''] * 10  # exported filenames
+     warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)  # suppress TracerWarning
    if 'torchscript' in include:
        f[0] = export_torchscript(model, im, file, optimize)
    if 'engine' in include:  # TensorRT required before ONNX
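(Editor's note, not part of the commit.) The added warnings.filterwarnings call silences the TracerWarnings that torch.jit.trace emits during the TorchScript/ONNX exports, module-wide for the rest of the run. A scoped alternative is sketched below, with a toy module standing in for the loaded YOLOv5 model (the stand-in model and dummy input are assumptions):

    import warnings

    import torch
    import torch.nn as nn

    model = nn.Conv2d(3, 16, 3)       # stand-in for the loaded YOLOv5 model
    im = torch.zeros(1, 3, 640, 640)  # dummy BCHW input

    with warnings.catch_warnings():   # scoped alternative to the module-wide filter above
        warnings.filterwarnings('ignore', category=torch.jit.TracerWarning)
        ts = torch.jit.trace(model, im, strict=False)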
@@ -510,17 +504,15 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
        if tfjs:
            f[9] = export_tfjs(model, im, file)

-     if 'keras' in include:
-         _, f[10] = export_keras(model, im, file, dynamic)
-
    # Finish
    f = [str(x) for x in f if x]  # filter out '' and None
-     LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)'
-                 f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
-                 f"\nVisualize with https://netron.app"
-                 f"\nDetect with `python detect.py --weights {f[-1]}`"
-                 f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')"
-                 f"\nValidate with `python val.py --weights {f[-1]}`")
+     if any(f):
+         LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)'
+                     f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
+                     f"\nDetect: python detect.py --weights {f[-1]}"
+                     f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')"
+                     f"\nValidate: python val.py --weights {f[-1]}"
+                     f"\nVisualize: https://netron.app")
    return f  # return list of exported files/dirs