From 6e7e51ddc536c128335781fa229c03ce1f5cc707 Mon Sep 17 00:00:00 2001
From: HeChengHui <84503515+HeChengHui@users.noreply.github.com>
Date: Thu, 16 Mar 2023 14:24:13 +0800
Subject: [PATCH] Update SCRFD onnx InferenceSession with providers

The original code raises the following error when simply loading a model:

ValueError: This ORT build has ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'] enabled. Since ORT 1.9, you are required to explicitly set the providers parameter when instantiating InferenceSession. For example, onnxruntime.InferenceSession(..., providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'], ...)

Pass an explicit providers list to InferenceSession to fix it. The list is
built once from onnxruntime.get_device() and always ends with
CPUExecutionProvider, so the session is created on every ORT build
(including ones where get_device() returns something other than "GPU" or
"CPU", e.g. "GPU-DML") and CUDA builds can still fall back to CPU.
---
 python-package/insightface/model_zoo/scrfd.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/python-package/insightface/model_zoo/scrfd.py b/python-package/insightface/model_zoo/scrfd.py
index 674db4bba..6558ec24f 100644
--- a/python-package/insightface/model_zoo/scrfd.py
+++ b/python-package/insightface/model_zoo/scrfd.py
@@ -79,7 +79,15 @@ def __init__(self, model_file=None, session=None):
         if self.session is None:
             assert self.model_file is not None
             assert osp.exists(self.model_file)
-            self.session = onnxruntime.InferenceSession(self.model_file, None)
+            # ORT >= 1.9 requires an explicit providers list. Always include
+            # CPUExecutionProvider as the final fallback so self.session is
+            # created even when get_device() reports something other than
+            # "GPU" (e.g. "GPU-DML" on DirectML builds).
+            if onnxruntime.get_device() == "GPU":
+                providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+            else:
+                providers = ['CPUExecutionProvider']
+            self.session = onnxruntime.InferenceSession(self.model_file, None, providers=providers)
         self.center_cache = {}
         self.nms_thresh = 0.4
         self.det_thresh = 0.5