From 655573d368b9121973f287b6614b410818e1ff26 Mon Sep 17 00:00:00 2001 From: linxie47 Date: Thu, 12 Dec 2024 13:53:57 +0800 Subject: [PATCH] v24.12 release version (#24) * Code update to 2024.12 rc1 * Add code owner * Enable "-Wall" build flag; fix some warnings. * Fix trivy build * Pinned some dependencies to specific versions --------- Co-authored-by: Jie Dong Co-authored-by: Xiaoxia Liang --- .clang-format | 28 + .github/workflows/trivy.yml | 4 +- .github/workflows/ubuntu-build-docker.yml | 2 +- .gitignore | 4 + .gitmodules | 8 - CODEOWNERS | 5 + README.md | 140 +-- ReleaseNotes.md | 12 + build_ivsr.sh | 23 +- docs/docker_image_build.md | 36 + docs/figs/logo.bmp | Bin 0 -> 135054 bytes docs/generic_manual_build.md | 45 + docs/quick_try_manual_build.md | 172 ++++ ivsr_ffmpeg_plugin/README.md | 22 +- ivsr_ffmpeg_plugin/build_docker.sh | 116 ++- .../dockerfiles/rockylinux9/Dockerfile | 223 +++++ .../rockylinux9/ov2022.3.dockerfile | 1 + .../rockylinux9/ov2023.2.dockerfile | 1 + .../rockylinux9/ov2024.5.dockerfile | 1 + .../rockylinux9/ov2024.5s.dockerfile | 137 +++ .../{ => dockerfiles/ubuntu22}/Dockerfile | 99 +- .../dockerfiles/ubuntu22/ov2022.3.dockerfile | 1 + .../dockerfiles/ubuntu22/ov2023.2.dockerfile | 1 + .../dockerfiles/ubuntu22/ov2024.5.dockerfile | 200 ++++ .../dockerfiles/ubuntu22/ov2024.5s.dockerfile | 146 +++ ivsr_ffmpeg_plugin/ffmpeg | 1 - ...-optimized-layout-conversion-nchw-nh.patch | 295 ++++++ ...-process-non-8-aligned-resolution-to.patch | 218 +++++ ...uest-infer-and-refine-the-config-set.patch | 227 +++++ ...it-for-YUV-and-16bit-for-RGB-support.patch | 566 +++++++++++ ...ocessing-of-OpenVINO-in-dnn_backend_.patch | 889 ++++++++++++++++++ ...E_SETTINGS-support-Y-input-SVP-model.patch | 65 ++ ...vsr-change-aligned-size-to-64-from-8.patch | 26 + ...to-do-model-preprocessing-for-TSENet.patch | 169 ++++ ivsr_ov/license/LICENSE.md | 31 - ivsr_sdk/CMakeLists.txt | 20 +- ivsr_sdk/README.md | 10 +- ivsr_sdk/include/ivsr.h | 67 +- 
.../model_guard.bin/libirguard.a-2024.5.0 | Bin 0 -> 353316 bytes ivsr_sdk/samples/CMakeLists.txt | 3 +- ivsr_sdk/samples/vsr_sample.cpp | 115 ++- ivsr_sdk/src/include/InferTask.hpp | 34 +- ivsr_sdk/src/include/engine.hpp | 111 ++- ivsr_sdk/src/include/ivsr_smart_patch.hpp | 6 +- ivsr_sdk/src/include/ov_engine.hpp | 127 ++- .../threading/ivsr_thread_executor.hpp | 32 +- .../include/threading/ivsr_thread_local.hpp | 9 +- ivsr_sdk/src/include/utils.hpp | 46 +- ivsr_sdk/src/ivsr.cpp | 486 ++++++---- ivsr_sdk/src/ivsr_thread_executor.cpp | 62 +- ivsr_sdk/src/ov_engine.cpp | 336 +++++-- ivsr_sdk/src/smart_patch.cpp | 12 +- ivsr_setupvar.sh | 6 +- 53 files changed, 4634 insertions(+), 762 deletions(-) create mode 100644 .clang-format create mode 100644 .gitignore delete mode 100644 .gitmodules create mode 100644 CODEOWNERS create mode 100644 docs/docker_image_build.md create mode 100644 docs/figs/logo.bmp create mode 100644 docs/generic_manual_build.md create mode 100644 docs/quick_try_manual_build.md create mode 100644 ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/Dockerfile create mode 120000 ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2022.3.dockerfile create mode 120000 ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2023.2.dockerfile create mode 120000 ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2024.5.dockerfile create mode 100644 ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2024.5s.dockerfile rename ivsr_ffmpeg_plugin/{ => dockerfiles/ubuntu22}/Dockerfile (86%) create mode 120000 ivsr_ffmpeg_plugin/dockerfiles/ubuntu22/ov2022.3.dockerfile create mode 120000 ivsr_ffmpeg_plugin/dockerfiles/ubuntu22/ov2023.2.dockerfile create mode 100644 ivsr_ffmpeg_plugin/dockerfiles/ubuntu22/ov2024.5.dockerfile create mode 100644 ivsr_ffmpeg_plugin/dockerfiles/ubuntu22/ov2024.5s.dockerfile delete mode 160000 ivsr_ffmpeg_plugin/ffmpeg create mode 100644 ivsr_ffmpeg_plugin/patches/0019-dnn_ivsr_backend-optimized-layout-conversion-nchw-nh.patch create mode 100644 
ivsr_ffmpeg_plugin/patches/0020-dnn_ivsr_backend-process-non-8-aligned-resolution-to.patch create mode 100644 ivsr_ffmpeg_plugin/patches/0021-Enable-async-request-infer-and-refine-the-config-set.patch create mode 100644 ivsr_ffmpeg_plugin/patches/0022-enable-10bit-for-YUV-and-16bit-for-RGB-support.patch create mode 100644 ivsr_ffmpeg_plugin/patches/0023-enable-PrePostProcessing-of-OpenVINO-in-dnn_backend_.patch create mode 100644 ivsr_ffmpeg_plugin/patches/0024-refine-RESHAPE_SETTINGS-support-Y-input-SVP-model.patch create mode 100644 ivsr_ffmpeg_plugin/patches/0025-dnn_backend_ivsr-change-aligned-size-to-64-from-8.patch create mode 100644 ivsr_ffmpeg_plugin/patches/0026-Using-plugin-to-do-model-preprocessing-for-TSENet.patch delete mode 100644 ivsr_ov/license/LICENSE.md create mode 100644 ivsr_sdk/privates/model_guard.bin/libirguard.a-2024.5.0 diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..ebe747b --- /dev/null +++ b/.clang-format @@ -0,0 +1,28 @@ +BasedOnStyle: Google +IndentWidth: 4 +UseTab: Never +ColumnLimit: 120 + +Language: Cpp +Standard: Cpp11 + +AccessModifierOffset: -4 +AlignConsecutiveMacros: true +AllowAllArgumentsOnNextLine: false +AllowAllConstructorInitializersOnNextLine: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Empty +AllowShortLoopsOnASingleLine: false +AlwaysBreakBeforeMultilineStrings: false +BinPackArguments: false +BinPackParameters: false +CommentPragmas: '^#' +DerivePointerAlignment: false +FixNamespaceComments: true +IndentCaseLabels: false +IndentPPDirectives: AfterHash +ForEachMacros: + - foreach + - FOREACH_CHILD diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 27e6f10..eb5b230 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -44,7 +44,7 @@ jobs: sudo systemctl daemon-reload sudo systemctl restart docker cd 
./ivsr_ffmpeg_plugin - ./build_docker.sh --enable_ov_patch false + ./build_docker.sh --ov_version 2024.5s - name: Check disk space run: df -h @@ -55,7 +55,7 @@ jobs: TRIVY_JAVA_DB_REPOSITORY: public.ecr.aws/aquasecurity/trivy-java-db with: scan-type: 'image' - image-ref: 'ffmpeg_ivsr_sdk_ov2022.3' + image-ref: 'ffmpeg_ivsr_sdk_ubuntu22_ov2024.5s' #format: 'template' #template: '@/contrib/sarif.tpl' security-checks: vuln diff --git a/.github/workflows/ubuntu-build-docker.yml b/.github/workflows/ubuntu-build-docker.yml index 6db9e9c..26b1d8f 100644 --- a/.github/workflows/ubuntu-build-docker.yml +++ b/.github/workflows/ubuntu-build-docker.yml @@ -28,4 +28,4 @@ jobs: sudo systemctl daemon-reload sudo systemctl restart docker cd ./ivsr_ffmpeg_plugin - ./build_docker.sh --enable_ov_patch true --ov_version 2022.3 + ./build_docker.sh --ov_version 2024.5s --os_version rockylinux9 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4392efc --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +bin +build +lib + diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 2f57ecf..0000000 --- a/.gitmodules +++ /dev/null @@ -1,8 +0,0 @@ -[submodule "ivsr_gpu_opt/based_on_openvino_2022.1/openvino"] - path = ivsr_gpu_opt/based_on_openvino_2022.1/openvino - url = https://github.com/openvinotoolkit/openvino.git - branch = releases/2022/1 -[submodule "ffmpeg"] - path = ivsr_ffmpeg_plugin/ffmpeg - url = https://github.com/ffmpeg/ffmpeg.git - branch = release/5.1 diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..6a43914 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023 Intel Corporation + +# global-owner +* @linxie47 @wangjingz @xiaoxial @djie1 diff --git a/README.md b/README.md index e45b6a6..48a8542 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,19 @@ -## iVSR + +
+ +
+ +
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/OpenVisualCloud/iVSR/badge)](https://api.securityscorecards.dev/projects/github.com/OpenVisualCloud/iVSR) [![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/9795/badge)](https://bestpractices.coreinfrastructure.org/projects/9795) [![Dependency Review](https://github.com/OpenVisualCloud/iVSR/actions/workflows/dependency-review.yml/badge.svg)](https://github.com/OpenVisualCloud/iVSR/actions/workflows/dependency-review.yml) -[![CodeQL](https://github.com/OpenVisualCloud/iVSR/actions/workflows/codeql.yml/badge.svg)](https://github.com/OpenVisualCloud/iVSR/actions/workflows/codeql.yml) +[![CodeQL](https://github.com/OpenVisualCloud/iVSR/actions/workflows/codeql.yml/badge.svg)](https://github.com/OpenVisualCloud/iVSR/actions/workflows/codeql.yml)
[![License](https://img.shields.io/badge/license-BSD_3_Clause-stable.svg)](https://github.com/OpenVisualCloud/iVSR/blob/master/LICENSE.md) [![Contributions](https://img.shields.io/badge/contributions-welcome-blue.svg)](https://github.com/OpenVisualCloud/iVSR/wiki) [![Ubuntu-DockerFile-Build](https://github.com/OpenVisualCloud/iVSR/actions/workflows/ubuntu-build-docker.yml/badge.svg)](https://github.com/OpenVisualCloud/iVSR/actions/workflows/ubuntu-build-docker.yml) [![Trivy](https://github.com/OpenVisualCloud/iVSR/actions/workflows/trivy.yml/badge.svg)](https://github.com/OpenVisualCloud/iVSR/actions/workflows/trivy.yml) +
# Contents Overview 1. [Overview of iVSR](#1-overview-of-ivsr) @@ -16,10 +22,10 @@ - [iVSR Components](#13-ivsr-components) - [Capabilities of iVSR](#14-capabilities-of-ivsr) 2. [Setup iVSR env on linux](#2-setup-ivsr-env-on-linux) - - [Install GPU in kernel space ](#21-optional-install-gpu-kernel-packages) + - [Install GPU kernel packages(Optional)](#21-optional-install-gpu-kernel-packages) - [Install dependencies and build iVSR manually](#22-install-dependencies-and-build-ivsr-manually) - [Install dependencies and build iVSR by scripts](#23-install-dependencies-and-build-ivsr-by-scripts) - - [Install dependencies and build iVSR by Docker file](#24-install-dependencies-and-build-ivsr-by-docker-file) + - [Install dependencies and build iVSR by Dockerfile](#24-install-dependencies-and-build-ivsr-by-dockerfile) 3. [How to use iVSR](#3-how-to-use-ivsr) - [Run with iVSR SDK sample](#31-run-with-ivsr-sdk-sample) - [Run with FFmpeg](#32-run-with-ffmpeg) @@ -69,36 +75,42 @@ Currently, iVSR offers two AI media processing functionalities: Video Super Reso ### 1.4.1 Video Super Resolution (VSR) Video Super Resolution (VSR) is a technique extensively employed in the AI media enhancement domain to upscale low-resolution videos to high-resolution. iVSR supports `Enhanced BasicVSR`, `Enhanced EDSR`, `TSENet`, and has the capability to be extended to support additional models. -- #### i. Enhanced BasicVSR - `BasicVSR` is a publicly available AI-based VSR algorithm. For more details on the public `BasicVSR`, please refer to this [paper](https://arxiv.org/pdf/2012.02181.pdf).
- We have improved the public model to attain superior visual quality and reduced computational complexity, named `Enhanced BasicVSR`. The performance of the `Enhanced BasicVSR` model inference has also been optimized for Intel GPUs. Please note that this optimization is specific to OpenVINO 2022.3. Therefore, the Enhanced BasicVSR model only works with OpenVINO 2022.3 with the applied patches. - -- #### ii. Enhanced EDSR - `EDSR` is another publicly available AI-based single image SR algorithm. For more details on the public EDSR, please refer to this [paper](https://arxiv.org/pdf/1707.02921.pdf) +- #### i. Enhanced BasicVSR + `BasicVSR` is a publicly available AI-based VSR algorithm. For more details on the public `BasicVSR`, please refer to this [paper](https://arxiv.org/pdf/2012.02181.pdf).

+ We have improved the public model to attain superior visual quality and reduced computational complexity, named `Enhanced BasicVSR`. The performance of the `Enhanced BasicVSR` model inference has also been optimized for Intel GPUs. Please note that this optimization is specific to OpenVINO 2022.3. Therefore, the Enhanced BasicVSR model only works with OpenVINO 2022.3 with the applied patches.

+ The input shape of this model is `[1, (channels)3, (frames)3, H, W]`, and the output shape is `[1, (channels)3, (frames)3, 2xH, 2xW]`. - We have improved the public `EDSR` model to reduce the computational complexity by over 79% compared to Enhanced BasicVSR, while maintaining similar visual quality, named `Enhanced EDSR`. +- #### ii. Enhanced EDSR + `EDSR` is another publicly available AI-based single image SR algorithm. For more details on the public EDSR, please refer to this [paper](https://arxiv.org/pdf/1707.02921.pdf)

+ We have improved the public `EDSR` model to reduce the computational complexity by over 79% compared to Enhanced BasicVSR, while maintaining similar visual quality, named `Enhanced EDSR`.

+ The input shape of this model is `[1, (channels)3, H, W]`, and the output shape is `[1, (channels)3, 2xH, 2xW]`. - #### iii. TSENet - `TSENet` is one multi-frame SR algorithm derived from [ETDS](https://github.com/ECNUSR/ETDS).
- We provide a preview version of the feature to support this model in the SDK and its plugin. Please contact your Intel representative to obtain the model package. + `TSENet` is one multi-frame SR algorithm derived from [ETDS](https://github.com/ECNUSR/ETDS).

+ We provide a preview version of the feature to support this model in the SDK and its plugin. Please contact your Intel representative to obtain the model package.

+ The input shape of this model is `[1, (channels * frames)9, H, W]`, and the output shape is `[1, (channels)3, 2xH, 2xW]`. For each inference, the input data is the `(n-1)th`, `(n)th`, and `(n+1)th` frames combined. The output data is the `(N)th` frame. For the first frame, the input data is `1st`, `1st`, `2nd` frames combined. For the last frame, the input data is the `(n-1)th`, `(n)th`, `(n)th` frames combined.
### 1.4.2. Smart Video Processing (SVP) -`SVP` is an AI-based video prefilter that enhances the perceptual rate-distortion in video encoding. With `SVP`, the encoded video streams maintain the same visual quality while reducing bandwidth, as measured by common video quality metrics (such as VMAF and (MS-)SSIM) and human perception. +`SVP` is an AI-based video prefilter that enhances the perceptual rate-distortion in video encoding. With `SVP`, the encoded video streams maintain the same visual quality while reducing bandwidth.
+ +Two SVP model variances are provided. `SVP-Basic` model is one efficiency-oriented designed model, it preserves fidelity while reducing the encoded bitrate. Modifications to images/video by SVP-Basic pre-processing cannot be perceived by human eyes while they can be measured by no to minor BD-rates degradation if it’s measured by SSIM or MS-SSIM metrics. SVP-Basic model is adaptive to almost all video scenarios, including live sport, live gaming, livestream sales, VOD, video conference, video surveillance, and 5G video ring.
+`SVP-SE` model is designed for subjective video quality preservation with up to 50% bitrate saving. It targets human eyes plausible enhancement, reduces complex details like human-eyes insensitive patterns and noise; hence it can’t be evaluated by popular full-reference visual quality metrics including PSNR/SSIM/VMAF/etc. It improves the visibility and quality of visuals, making them more vivid and appealing to viewers, so it’s widely used in various industries, including entertainment, media and advertising, to enhance the visual experience and attract audience attention.

+The input and output shape are `[1, (channels)3, H, W]` for RGB based model and `[1, (channels)1, H, W]` for Y based model.
# 2. Setup iVSR env on linux The software was validated on: - Intel Xeon hardware platform - (Optional) Intel Data Center GPU Flex 170(*aka* ATS-M1 150W) -- Host OS: Linux based OS (Ubuntu 22.04) -- Docker OS: Ubuntu 22.04 -- OpenVINO: [2022.3](https://github.com/openvinotoolkit/openvino/tree/2022.3.0) or [2023.2](https://github.com/openvinotoolkit/openvino/tree/2023.2.0) +- Host OS: Linux based OS (Ubuntu 22.04 or Rocky Linux 9.3) +- Docker OS: Ubuntu 22.04 or Rocky Linux 9.3 +- OpenVINO: [2022.3](https://github.com/openvinotoolkit/openvino/tree/2022.3.0) or [2023.2](https://github.com/openvinotoolkit/openvino/tree/2023.2.0) or [2024.5](https://github.com/openvinotoolkit/openvino/tree/2024.5.0) - FFmpeg: [n6.1](https://github.com/FFmpeg/FFmpeg/tree/n6.1) Building iVSR requires the installation of the GPU driver(optional), OpenCV, OpenVINO, and FFmpeg.
We provide **three** ways to install requirements and build iVSR SDK & iVSR FFmpeg plugin:
1. [Install dependencies and build iVSR manually](#22-install-dependencies-and-build-ivsr-manually)
2. [Install dependencies and build iVSR by scripts](#23-install-dependencies-and-build-ivsr-by-scripts)
-3. [Install dependencies and build iVSR by Docker file](#24-install-dependencies-and-build-ivsr-by-docker-file)
+3. [Install dependencies and build iVSR by Dockerfile](#24-install-dependencies-and-build-ivsr-by-dockerfile)
Note that to run inference on a **GPU**, it is necessary to have **kernel packages** installed on the bare metal system beforehand. See [Install GPU kernel packages ](#21-optional-install-gpu-kernel-packages) for details.
@@ -107,101 +119,29 @@ Refer to this [instruction](https://dgpu-docs.intel.com/driver/installation.html ## 2.2 Install dependencies and build iVSR manually -### 2.2.1 (Optional) Install software for Intel® Data Center GPU Flex Series -To facilitate inference on Intel Data Center GPU, it's necessary to have both the kernel driver and the run-time driver and software installed. If you're planning to run inference on a CPU only, you can disregard this step.
- -The detailed installation instruction is on [this page](https://dgpu-docs.intel.com/driver/installation.html#).
+Here are two guides for your reference:
+One is generic in case you are familiar with Intel® devices and have experience in Intel® developed software before, which you can follow the official steps to build OpenCV and OpenVINO by source code. You can get it from: [Generic manual building guide](docs/generic_manual_build.md#generic-manual-build-steps-for-ffmpeg--ivsr-plugin-software)
+Another option is a tutorial for absolute beginners to try to build the project following every step in the guide based on a clean Ubuntu OS installed machine. [Quick manual building guide](docs/quick_try_manual_build.md#manual-build-steps-for-ffmpeg--ivsr-plugin-software-on-ubuntu) - -### 2.2.2 Install OpenCV -OpenCV, which is used by the iVSR SDK sample for image processing tasks, needs to be installed. Detailed installation instructions can be found at [Installation OpenCV in Linux](https://docs.opencv.org/4.x/d7/d9f/tutorial_linux_install.html).
- -### 2.2.3 Install OpenVINO -OpenVINO, currently the only backend supported by iVSR for model inference, should also be installed. You can refer to this [instruction](https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/build_linux.md) to build OpenVINO from the source code.
- -### 2.2.4 Build iVSR SDK -Once the dependencies are installed in the system, you can proceed to build the iVSR SDK and its sample.
-```bash -source /install/setupvars.sh -export OpenCV_DIR=/install/lib/cmake/opencv4 -cd ivsr_sdk -mkdir -p ./build -cd ./build -cmake .. -DENABLE_THREADPROCESS=ON -DCMAKE_BUILD_TYPE=Release -make -``` -### 2.2.5 Build FFmpeg with iVSR plugin -We provide patches specifically for FFmpeg n6.1. Apply these patches as instructed below:
-```bash -git clone https://github.com/FFmpeg/FFmpeg.git ./ivsr_ffmpeg_plugin/ffmpeg -cd ./ivsr_ffmpeg_plugin/ffmpeg -git checkout n6.1 -cp ../patches/*.patch ./ -for patch_file in $(find -iname "*.patch" | sort -n); do \ - echo "Applying: ${patch_file}"; \ - git am --whitespace=fix ${patch_file}; \ -done; -``` -Finally, build FFmpeg. You can also enable other FFmpeg plugins as per the instructions provided in the [Compile FFmpeg for Ubuntu](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu) guide.
-```bash -source /install/setupvars.sh -./configure --enable-libivsr --extra-cflags=-I/ivsr_sdk/include/ --extra-ldflags=-L/ivsr_sdk/lib -make -j $(nproc --all) -make install -``` ## 2.3 Install dependencies and build iVSR by scripts We provide shell scripts `build_ivsr.sh` and `ivsr_setupvar.sh` to assist in building the dependencies from source code and setting up the environment from scratch.
```bash #ivsr environment building chmod a+x ./build_ivsr.sh -sudo ./build_ivsr.sh --enable_ov_patch --enable_compile_ffmpeg true --ov_version <2022.3|2023.2> +sudo ./build_ivsr.sh --enable_ov_patch --enable_compile_ffmpeg true --ov_version <2022.3|2023.2|2024.5> #environment variables setting -source ./ivsr_setupvar.sh --ov_version <2022.3|2023.2> +source ./ivsr_setupvar.sh --ov_version <2022.3|2023.2|2024.5> ``` The scripts accept the following input parameters:
- `enable_ov_patch`: Determines whether to enable OpenVINO patches, which are necessary to run the Enhanced BasicVSR model.
- `enable_compile_ffmpeg`: Determines whether to compile FFmpeg. Set this to `false` if you're only using the iVSR SDK sample.
-- `ov_version`: Specifies the OpenVINO version. iVSR supports `2022.3` & `2023.2`. Note that running the Enhanced BasicVSR model requires `2022.3`.
- -Feel free to modify and update these scripts as per your requirements.
- - -## 2.4 Install dependencies and build iVSR by Docker file. -A Dockerfile is also provided to expedite the environment setup process. Follow the steps below to build the docker image and run the docker container.
- -### 2.4.1. Set timezone correctly before building docker image. -The following command takes Shanghai as an example. - - ```bash - timedatectl set-timezone Asia/Shanghai - ``` - -### 2.4.2 Set up docker service +- `ov_version`: Specifies the OpenVINO version. iVSR supports `2022.3`, `2023.2` and `2024.5`. Note that running the Enhanced BasicVSR model requires `2022.3`.
-```bash -sudo mkdir -p /etc/systemd/system/docker.service.d -printf "[Service]\nEnvironment=\"HTTPS_PROXY=$https_proxy\" \"NO_PROXY=$no_proxy\"\n" | sudo tee /etc/systemd/system/docker.service.d/proxy.conf -sudo systemctl daemon-reload -sudo systemctl restart docker -``` - -### 2.4.3 Build docker image - -```bash -cd ./ivsr_ffmpeg_plugin -./build_docker.sh --enable_ov_patch [true|false] --ov_version [2022.3|2023.2] -``` -- `enable_ov_patch`: Set as `true` or `flase` to enable or disable the application of OpenVINO 2022.3 patches, which are needed to support the Enhanced BasicVSR model.
-- `ov_version`: Set the OpenVINO version to `2022.3` or `2023.2`, which will be built and installed. iVSR currently supports both OpenVINO 2022.3 and 2023.2, but the patches to enable the Enhanced BasicVSR model are only for OpenVINO 2022.3.
-If the docker image builds successfully, you can see a docker image named `ffmpeg_ivsr_sdk_ov2022.3` or `ffmpeg_ivsr_sdk_ov2023.2` in the output of `docker image ls`.
+Feel free to modify and update these scripts as per your requirements. For new released OpenVINO version, please follow the [manual build](#22-install-dependencies-and-build-ivsr-manually) guide.
-### 2.4.4. Start Docker Container -```bash -sudo docker run -itd --name ffmpeg_ivsr_sdk_container --privileged -e MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000" -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e no_proxy=$no_proxy --shm-size=128g --device=/dev/dri:/dev/dri ffmpeg_ivsr_sdk_[ov2022.3|ov2023.2]:latest bash -sudo docker exec -it ffmpeg_ivsr_sdk_container bash -``` -Note `--device=/dev/dri:/dev/dri` is specified in the command to add the host gpu device to container.
+## 2.4 Install dependencies and build iVSR by Dockerfile +Dockerfiles are also provided to expedite the environment setup process. Follow the guide to build the docker image and run the application in the docker containers: [Docker image build guide](docs/docker_image_build.md#docker-image-build-guide).
# 3. How to use iVSR Both `vsr_sample` and FFmpeg integration are provided to run inference on the iVSR SDK. Execute the following commands to setup the env before executing them.
@@ -223,5 +163,5 @@ Only models in OpenVINO IR format is supported by iVSR. Please reach out to your # 5. License -Please check the license file under each folder. +iVSR is licensed under the BSD 3-clause license. See [LICENSE](LICENSE.md) for details. diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 36788f5..88339da 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -1,5 +1,17 @@ # iVSR Release Notes +# Next Release + +## New and Changes + +## Bug Fixes + +## Known Limitations/Issues +- If the model-guard protected model is loaded, it prints the following *error* messages. They can be ignored as its normal procedure for this kind of model files.
+[libprotobuf ERROR thirdparty/protobuf/protobuf/src/google/protobuf/text_format.cc:335] Error parsing text-format tensorflow.GraphDef: 1:2: Message type "tensorflow.GraphDef" has no field named "T".
+[libprotobuf ERROR thirdparty/protobuf/protobuf/src/google/protobuf/text_format.cc:335] Error parsing text-format tensorflow.GraphDef: 1:2: Message type "tensorflow.GraphDef" has no field named "T". + + # Release v24.05 ## New and Changes in v24.05 diff --git a/build_ivsr.sh b/build_ivsr.sh index 2d24265..4efde04 100755 --- a/build_ivsr.sh +++ b/build_ivsr.sh @@ -6,7 +6,7 @@ PROJECTDIR=${PWD} usage() { echo "Usage: $0 --enable_ov_patch [true|false] --enable_compile_ffmpeg [true|false] - --ov_version [2022.3|2023.2]" + --ov_version [2022.3|2023.2|2024.5]" exit 1 } @@ -40,7 +40,7 @@ while [ $# -gt 0 ]; do ;; --ov_version) shift - if [ "$1" = "2022.3" ] || [ "$1" = "2023.2" ]; then + if [ "$1" = "2022.3" ] || [ "$1" = "2023.2" ] || [ "$1" = "2024.5" ]; then OV_VERSION=$1 else usage @@ -55,9 +55,9 @@ while [ $# -gt 0 ]; do shift # Move to the next argument done -if [ "$OV_VERSION" = "2023.2" ]; then +if [ "$OV_VERSION" != "2022.3" ]; then ENABLE_OV_PATCH="false" - echo "There is no openvino patches for openvino 2023.2 version, will ignore the setting of ENABLE_OV_PATCH" + echo "There is no openvino patches for openvino $OV_VERSION, will ignore the setting of ENABLE_OV_PATCH" fi @@ -92,8 +92,8 @@ apt-get update && DEBIAN_FRONTEND=noninteractive && apt-get install -y --no-inst python3-dev libpython3-dev python3-pip apt-get clean -pip --no-cache-dir install --upgrade pip setuptools -pip install numpy +pip --no-cache-dir install --upgrade pip==23.0 setuptools==65.5.0 +pip install numpy==1.23.5 @@ -142,8 +142,8 @@ if [ "$OV_VERSION" = "2022.3" ]; then apt-get clean fi -## 3.2-2 BKC for OV2023.2 -if [ "$OV_VERSION" = "2023.2" ]; then +## 3.2-2 BKC for other OV versions +if [ "$OV_VERSION" != "2022.3" ]; then apt-get update apt-get install -y vainfo clinfo apt-get install -y --no-install-recommends ocl-icd-libopencl1 @@ -241,7 +241,8 @@ cd ${IVSR_SDK_DIR}/build cmake .. 
\ -DENABLE_LOG=OFF -DENABLE_PERF=OFF -DENABLE_THREADPROCESS=ON \ -DCMAKE_BUILD_TYPE=Release -make +make -j $(nproc --all) +make install echo "Build ivsr sdk finished." @@ -289,9 +290,9 @@ if ${ENABLE_COMPILE_FFMPEG}; then export LD_LIBRARY_PATH=${IVSR_SDK_DIR}/lib:${CUSTOM_IE_LIBDIR}:${TBB_DIR}/../lib:"$LD_LIBRARY_PATH" cd ${FFMPEG_DIR} ./configure \ + --extra-cflags=-fopenmp \ + --extra-ldflags=-fopenmp \ --enable-libivsr \ - --extra-cflags=-I${IVSR_SDK_DIR}/include/ \ - --extra-ldflags=-L${IVSR_SDK_DIR}/lib \ --disable-static \ --disable-doc \ --enable-shared \ diff --git a/docs/docker_image_build.md b/docs/docker_image_build.md new file mode 100644 index 0000000..af64ae0 --- /dev/null +++ b/docs/docker_image_build.md @@ -0,0 +1,36 @@ +# Docker image build guide + +### 1. Set timezone correctly before building docker image. +The following command takes Shanghai as an example. + + ```bash + timedatectl set-timezone Asia/Shanghai + ``` + +### 2. Set up docker service + +```bash +sudo mkdir -p /etc/systemd/system/docker.service.d +printf "[Service]\nEnvironment=\"HTTPS_PROXY=$https_proxy\" \"NO_PROXY=$no_proxy\"\n" | sudo tee /etc/systemd/system/docker.service.d/proxy.conf +sudo systemctl daemon-reload +sudo systemctl restart docker +``` + +### 3. Build docker image + +```bash +cd ./ivsr_ffmpeg_plugin +./build_docker.sh --enable_ov_patch [true|false] --ov_version [2022.3|2023.2|2024.5|2024.5s] --os_version [rockylinux9|ubuntu22] +``` +- `enable_ov_patch`: Set as `true` or `flase` to enable or disable the application of OpenVINO 2022.3 patches, which are needed to support the Enhanced BasicVSR model.
+- `ov_version`: Set the OpenVINO version to `2022.3`, `2023.2`, `2024.5`, or `2024.5s`, which will be built and installed. The `2024.5s` option means OpenVINO 2024.5 is installed via apt or yum rather than built and installed from source code. iVSR currently supports OpenVINO 2022.3, 2023.2 and 2024.5, but the patches to enable the Enhanced BasicVSR model are only for OpenVINO 2022.3.
+- `os_version`: Set the OS version of the Docker image to ubuntu22 (Ubuntu 22.04) or rockylinux9 (Rocky Linux 9.3) to build the docker image based on the specific OS.
+If the docker image builds successfully, you can see a docker image named `ffmpeg_ivsr_sdk_${os_version}_ov${ov_version}` such as `ffmpeg_ivsr_sdk_ubuntu22_ov2022.3` or `ffmpeg_ivsr_sdk_rockylinux9_ov2022.3` in the output of `docker image ls`.
+ +### 4. Start Docker Container + +```bash +sudo docker run -itd --name ffmpeg_ivsr_sdk_container --privileged -e MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000" -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e no_proxy=$no_proxy --shm-size=128g --device=/dev/dri:/dev/dri ffmpeg_ivsr_sdk_[ubuntu22|rockylinux9]_[ov2022.3|ov2023.2|ov2024.5]:latest bash +sudo docker exec -it ffmpeg_ivsr_sdk_container bash +``` +Note `--device=/dev/dri:/dev/dri` is specified in the command to add the host gpu device to container.
\ No newline at end of file diff --git a/docs/figs/logo.bmp b/docs/figs/logo.bmp new file mode 100644 index 0000000000000000000000000000000000000000..d46a7b9f4cd1a77c99677f25693f35b60dd4b24d GIT binary patch literal 135054 zcmeHQ`JbFcoyPqK_P71GySg8DS6vlwy*Nb_JWvr7gQ9=}iXwuFC@zQ)L2fo7ksHDd zxh9iia%XZ*W-@2yJ~Q|9eIL_z&vf?bev@}!y5Fw1-uJC~yE{|!sZ_pi)wiyv>gl)M zIxoL!YzFegyxI{nJ^6e+36vC&iyYKp-Fx z5C{ka1Ofs9fq+0jARrJB2nYlO0s;YnfIvVXAP^7;2m}NI0s(=5KtLcM5D*9m1Ox&C z0fB%(Kp-Fx5C{ka1Ofs9fq+0jARrJB2nYlO0s;YnfIvVXAP^7;2m}NI0s(=5KtLcM z5D*9m1Ox&C0fB%(Kp-Fx5C{ka1Ofs9fq+0jARrJB2nYlO0s?_0jKJ)cWwV=?&2G82 zV6&8iLv>3SS5b6<02KncY}~ftJllpZ@UW+LKR}fUp(GGk!U)7?cH1_bKbNN=V|(Da zk9V!QUh0=~8?BrMCKlEy+sTko)x7i(~>qm^-{uJ1as%Ah7fhNV?kcAA=ms(97p4 zIoSL16-y6YQDT9BHUh!!mG;e-+3PRM+XZ7&$Y$KTp8iiI2O%f?liC@o9gW3wG#i;p zdAj1Wd*;X9i}XJq?tcy@GPEW(QID_el)FPRXzx6;Ak3YOWq z>j$>Ymoqmr8!s&)ARg^4FI2FkCX-%B`cv+9)K{SGLC@anZ5uv6_ui*;8T+P7JdHmI zwEYDn1w$7S9+O$p8$9jrPy74yhN_@`IxrX+d^!KKHuzE|GQ~D0;ofI#8_$z#(6RX| zvGEEl=t}u|?OVRa!;GvsH7lWHV*hL3WeX$k*cQ{__;f>{?E%M@%jL>qC^)uU=0AEj zs(vvQ;8&JR=!9$6_nq6mA>|wm56zEXa;#zwv@b7$I@JBYCr{>>#E+!L$z7Lts~ zm~^(NW;oots#5UMr0Z|~qiVLJBX7_IC+lSAZK{#O(YF3`xy1}QI9p$L<1O~BS1>oW z`mf2PD-xXTud^)z+t?D0!~T{(+17uyTpd~9j^>9>FROAnw|%o*o)_XSR;jR!C)n|D zxjdsoV*XVapV?V1ez>Cp%(gg$*wVof`_^yrFr%+uP((fXaq#Wy2wAF>S8g#Io80W$ z{vATqSnTE5cSF+EY7!89a?ZVXcDbC#rnZ`lpGWX;|MP@Aqb%k*I6uCz942s2y4!e^ zOEpdmFW9$z8^%$v8OlAYJ}Hx~RLH~`cehkaNAH-h9dK^_I-yc3=;hgWBgRze%TlD7 z35^ljGlqtW^eg^|;blUGQ4#1Xo=F_^9J<54?Mn7$>$h7T`sY&W$u92hwH2>xDFf(? 
zc@O-&f)t}n+JE#ee8Lz2O^buC8i|0yHD%kP2GlqlN=#0JLDo4Xr%}c($ z$vER<5F^#*^H3#y(ddbd_J*r@nBLXrs8UaMZ29ZIETrS+ku(!Su=7!JbyufvxbN?} zN?j}OLG-sZ=Ubt`YP6(EIOuD>$Fco;jve1)ZXbL2UuC+L6TJPKb+sji%8RL>r|~Ak z6vO+-d*Ek**84GW9GlpbbR8=DVigTO@G6;}fX@g3yYagBZZHFZ5ZMvPlAu-fk z%*?xXT%}RnC97ZTbr)d1VE)8fyjIL-Cby7Ed5>d23~^%RjNZ&ZA1iGrvr+vVL{qfa ze@O#`wdW+}_EHv^My(n+7@KNv>|Ey9v5dLh_xyh=xmHeYee^$axkVbd*y?x>+^SLC z?9>iwMz(nlaf^*vynl*xHBh}IwcB}>t z`dS}!?7Y^#oLvhmJ~2YAL?1bb^Tar5zuRO z?Yf3Chftdz(;i-~W)5NzyKBdHHOm_r#NshF>(gY+E6zdu_&9d`kcXLE_YE5MNjl?O ze=lQVfq+Rs&FYpE(9>H;`YgzjY7+A&*3oxRHKz`%mV@Y!d-mR-StG%YhnN!(uveUe zvDtl&UDxq2{V%Gl(=Nx#_9wC6!JDS>ukaUu8A&W~N{m%)vGR&D=u4z555` zgsPN-xdrdRUuYH(-^(XYa`|AXI0plrPdIk}n1^Y9?BB~VPSrg$ahTkMSowx~R%q5Z z)`np}AgdJ3$CI!h(#$gy$lprU$w7bfJ(@-H9=!ErE^8h})fg(vK@1E!c3;oKOm4hV zm1qRV$({G9Ld4b)%);5ldw$~Fb0cg=-j<9cn{pp~<$b5e5U3}6b*{wPKkk2+nq^r~iYTsyALW#-G3h>3e;IT+|!?b!2EN1lL;?T#lu zDwB&|h|F?dqwZ`dyLm@y51HTylbYoeNo3#!a=Z04{jOZnRmH*Br}Qgo8NP%C_z^1sT^XdPh=UtKJyuaO_^y5GJ%=e^dRYjH(Xii1gS zuXFz|osGX>ZV$hCxh~QbDfhgH(RGf0So5jc1~68`F;&)4M~B}~ErsNWk8GYx9a?GN4?|b!1T^KGw<=E6VLX*a8 zy^1)PTk!0^nUGt_i_>~+nzm694o1c{IuG9AI(X-4`%Z;y%Ki8%sttphd`t#sXwX}- zI^a;yjkJuMG95B{cLKtoA@ZC1AJ{iP0#7Rdl{kQI%E0BN{ECVf$O|QO%_F9K zyO`VE&wfFZT=WX*sJk3!OWZv+Ng5PIv&ZFYst{Sp!TA&GH3^9I*o*0iYF4duw4Q^$ zjwf7)?`Ccs2Y!2WIq584diF`0uFkiutcY4k3uBDVtzRQI?{YUskk2+G$H~JNS2SWn zRxVrNlPxXgU?ysF9{z*t@E@4lCpIk8%QCY5(7QL(3NNh8C$2jr)1k11P(4T0#zDCt zuASe*i9J|Hs-ADot`>7JHnY!l$iyUW!V)q|5cW7?1D}70o+cqed zv4;AhMWw^xhl?3r$J|-Q!G%=F)%+J8COG)kyrai3%g6!8>hF-7&>17(%&1K01P))y z-~TeS1}A=F`!LO_=AxFVe5AQ`qINlmlDc+&x7?hI$%bJW2jk8*cgur$(_FCG$q83} zNEydaklAF+xAn1)nMBy);5fG&S1+Z!-8c~xlj(RjVYx`4?E&AR+p(nx<9EKRA6i$O#`!12j{YaWjIh5hhw1Mhy_@t9Y`i8uIZ z!nB}XK6>`wq`D=(7~g-fPbUi)GV2NRxd_pyg1<2o(K zE@s^0&Q=-E&J~|E#xioK_NUI3ldxF8D?T<}hg&}BTZm>aI2t4!^_MQDnVmvu0SD2d+^vuDFoA(Llr14NamX;s z$N?vJ(OxyH%#Q&X1K?OIWv{!4rX^Uc<81gg@|KM`43tX>Qso?!GMV-dIJ6d}19^r$ zQVTek^bNY(9{038&fJbUT8i33`;(s}v&oom=QHOOCB)+qW0*x}Jz31M(&TV2t^erV 
z*auYZntNS1)C31nBCL&Z?OIlfhSCLRkg_@k-vSPX$G5uMp5S3JaZgbj7~guAF_vkD zip+6bRF4*p%lnj7Tq)VL{X5uu9-FAgnwBDL%|DXY$zb}xd=ji;$lnpUN#kPrNdI$8 z#z=W{5cj-$I#;;cpYpUn#nuiCyj}{pe|VQBn~W(;H}bj(BI#0K?;J1>xH5*M^C-EB3BKsN20I^(j=UA0*@Z=(*xs&;xjN+<>WfMMVl9Nd?qWhM zg!@*T?~d~3ptt*ZPvEWMm}!`d%y7%)I^eddWqpKX-UVxkL@P^|buvaT&-h)67yRiryw5`Y8&1Tok zaD5L(W)8Z$R(rbs?(T%WnxTwK-3hI0c;xUI9Q5_S?m7Of=lFAMZC{_th^VrR?&I%Ka!Rk7 zz4>`%LWX6}{#%Nwvh)!vidoG-8O|r$fHlg&VEbQ7Ye#lTy3F2K$B%>QsNHk?`2qnM z+mYEr4O|=o@^`)BRF`fGlmVR!39<#M%CuUUE_DBW9DBdm4YLNdd^3HU`dIV{78gPEAe+w-!w=M^<$%3ZS~Dzz58ttdIA zS8ed^>r6Gs+#&|kit17Mh@DEN;;-VswfY{kA0oU5BZDtiCkN-99iE<7d6-main5jA z+f6UODD^XOKV=?D)iA}NWL&9?GhRwVof$4Q`W}pqzC+;P{E4+DYlqW&Ffdy0?Oo&T zeeL}=9*bNg9cuT!H%{hg?}@`0CMBozs$tyKFziW3Qz`G1E*2m1Y`7o>0@mnzFw{lb zRERZ!2B1&|;laUdBH-Tr9S2#$w?D@u{l@C zxFQ)VX9!_y-K$19h=UD_s!92n@^qT47akmpxqH3+Z+iPq8%b8$l9$qjPCpr()Ky-r zY@wu;UNx-7(UlWf85htiYp_Vhxo5KGnJ3gJ2kmti7FCn-(R{y!2M2@WJH7pHdHUXh z?Jd}> zq8}-&DeE| z(=Bk|8)fRX$iZO8!>YfQk|U;>%ySSsrU^;lM}QJuSuNok_V&N)9e9_yoeYf2sn849 z_otU%H2TLL`A9CeK%Wg+Gd8(}064sG{En<#odWj!7M&IB+unxbIm)M-({VL_{VPf&JI2sA4!sZ~Tjy zxOZ?J4-=Z!F(RtWc66@O0KaI0oY?X^WkQrPHnXduEj#*K{)aNEil`CJy}=R0D6dwA42l9QO@v@D6QYZYKi9ch7aLI-iD9 zdUe{K_(UlJG%mol2<@Ur^V?x)vSC06b74hURX$=zBstF7`X2N(-K8vDsT}HliWLWe z+hQt&@j6`Kh@;eDV_^Vx@ssgX^$pK9d51RgFbf%ja{LQ^Dt5~7N+b=ORhd4$!x4^;DzjauO#2A#;Cfv6yVGj1Wt@xa+k zoXPNvcevg+T+iH2_>G*bt}IK()1TJMFB<)h6__3}@`sy}ie4h6kI~^bj6}^R3=Y#M z&7sl---((j*8~US)7v!(h=QXmn&q`#A!2XyjcnmzFz0BpI1>$jq~VkSo$$1gk+FpB zfU*IXll?7!%4HKK%Wgh+;Ml$8awxlFyjL@!D*qnLEoR6|L*>FUJm_8%G4YI^qHEY~SPz|>nhFsKY@rHrvb+?z?r zp5*1;^COyERr7nWaMZbW=LF@&4(r@P<$oAU0Z&C8zR`yNH)9IfoO|5Q=Fs)F$3ITP zDZM&ft1l`=gT^Hjnbd6CvO>6LrLj>mnw+TWV0t<_{Ceqtxtt}XK=~_CogBoGS&_k) zmBk^;=>X$nW+nWAq3_5YQqs8C5d(L)bW*iy$~i`z-M+EyzR~T>?QBZ-6p3mkPi($Z zFTW`Dqjue^iKHB2t01|$mA>}63$QA~%(X|Vu#`ID3NWldqrZIyM;?_%qUus@90Zv8 zTdp>dPVrS%0p$7d4XQ;m={P_3UQt4L#9}1$-sxlZkMHrH*y$TPWmw6f8QQgonpyP^ zZKvdvUbUG6k87r0P66ZjrQnlYJo|3IqEiD)n9AircE?X~u=5dJcWPsS2RJLoKo+Wj 
zgE%iVF}DvNbnZRZk#R2d<@6ayShj(iLCBEGzUg_lZ)}(E#4hG`+^6GrK+cbPgwdJ&EX1 ztD>#yNakRm{lQbWW4Jrm3K;4Pbw7={75m2XX@FJL;Y2_H6`R03Q`5L$Z_5? zkbe*6Uxfv`Ao~=uVfSp3*=?uD^XP_KX*gv-C*~e5!i4@2i?luaZY0zKjTgF(cqMrc z-G+7k(V;c***!(g7;|2jcShGM(DoO!MqDgN6D>iz^y$ z(&#hbZ1|ReFH`c|1byn3YOXyO?WjqF&+2&CmK(g@zVlaOv^EL z9Z{(i3#kYVp$zEszVaimf_jIU z+c6)P^>~?ij{$zs1nGbM8qG$iO94|+SR_MeEtYv<05)M~v$NR1=&NPLbZq_#hDWTw zXU8N5J3?G*$3q6y;c;PnN8nPtpRkq_A`syj1}I4;cJ{KurLl_C4&N+BQDX(rqB zVQm}F^X$JV-1`isY~+kpN{9}>M#y=}(?Q5sHZ>oZZt+hyGq>k`#&^vVGAwx|@u|7| zgFMXOJ3rP;-HZaJFt9xV^YZRp%Luh**egHj-@ZIy+t2&jHLNSZj6BGQJ>&e0_u#Gm zqrXQ&n4pggzJx>Rm>)wrW5`=0o$3|Q7^}i zs8p!9XOt5MN7kRQH zzwlKaG;De3LyH;OeFRKuO+>O}5r}w3{c~M`xi0@~!Dc5fE@mx2b@t$sJUECry#re< z$iGNpi6gL(O$BV-fw}I2eZ0gr?jH48G-fsL=;g%0vHD*x@#Z0lZvg_yd2hhh8AHS z#Djx~GdPB|CE`yYzzc!gVm4s!=V2mV9h9}f;9&hR9& z)2J-QL?l<`2w(+E&^f}%SYP(g#9>YxoZNN4^~Dh>E(HW|o;D^XIGKt7825~DG8HL~ zNK_yo5D*9m1Ox&C0fB%(Kp-Fx5C{ka1Ofs9fq+0jARrJB2nYlO0s;YnfIvVXAP^7; z2m}NI0s(=5KtLcM5D*9m1Ox&C0fB%(Kp-Fx5C{ka1Ofs9fq+0jARrJB2nYlO0s;Yn XfIvVXAP^7;2m}NI0s(;l4+Q=XV5ZjS literal 0 HcmV?d00001 diff --git a/docs/generic_manual_build.md b/docs/generic_manual_build.md new file mode 100644 index 0000000..93b846c --- /dev/null +++ b/docs/generic_manual_build.md @@ -0,0 +1,45 @@ +# Generic Manual Build Steps for FFmpeg + IVSR plugin Software + +### 1. (Optional) Install software for Intel® Data Center GPU Flex Series +To facilitate inference on Intel Data Center GPU, it's necessary to have both the kernel driver and the run-time driver and software installed. If you're planning to run inference on a CPU only, you can disregard this step.
+ +The detailed installation instruction is on [this page](https://dgpu-docs.intel.com/driver/installation.html#).
+ + +### 2. Install OpenCV +OpenCV, which is used by the iVSR SDK sample for image processing tasks, needs to be installed. Detailed installation instructions can be found at [Installation OpenCV in Linux](https://docs.opencv.org/4.x/d7/d9f/tutorial_linux_install.html).
+ +### 3. Install OpenVINO +OpenVINO, currently the only backend supported by iVSR for model inference, should also be installed. You can refer to this [instruction](https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/build_linux.md) to build OpenVINO from the source code.
+ +### 4. Build iVSR SDK +Once the dependencies are installed in the system, you can proceed to build the iVSR SDK and its sample.
+```bash +source /install/setupvars.sh +export OpenCV_DIR=/install/lib/cmake/opencv4 +cd ivsr_sdk +mkdir -p ./build +cd ./build +cmake .. -DENABLE_THREADPROCESS=ON -DENABLE_SAMPLE=ON -DCMAKE_BUILD_TYPE=Release +make +make install +``` +### 5. Build FFmpeg with iVSR plugin +We provide patches specifically for FFmpeg n6.1. Apply these patches as instructed below:
+```bash +git clone https://github.com/FFmpeg/FFmpeg.git ./ivsr_ffmpeg_plugin/ffmpeg +cd ./ivsr_ffmpeg_plugin/ffmpeg +git checkout n6.1 +cp ../patches/*.patch ./ +for patch_file in $(find -iname "*.patch" | sort -n); do \ + echo "Applying: ${patch_file}"; \ + git am --whitespace=fix ${patch_file}; \ +done; +``` +Finally, build FFmpeg. You can also enable other FFmpeg plugins as per the instructions provided in the [Compile FFmpeg for Ubuntu](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu) guide.
+```bash +source /install/setupvars.sh +./configure --enable-libivsr --extra-cflags=-fopenmp --extra-ldflags=-fopenmp +make -j $(nproc --all) +make install +``` diff --git a/docs/quick_try_manual_build.md b/docs/quick_try_manual_build.md new file mode 100644 index 0000000..3c66aa0 --- /dev/null +++ b/docs/quick_try_manual_build.md @@ -0,0 +1,172 @@ +# Manual Build Steps for FFmpeg + IVSR plugin Software on Ubuntu + +This document provides detailed steps for building the software with FFmpeg + iVSR SDK as the backend to work for media transcoding and DNN-based processing for video content on a clean Ubuntu 22.04 system. + +## Prerequisites + +Ensure your system has internet access and an updated package index: + +```bash +sudo apt-get update +``` + +## Step-by-Step Instructions + +### 1. Install Essential Utilities + +Start by installing essential packages required for downloading and handling other software components: + +```bash +sudo apt-get install -y --no-install-recommends \ + curl ca-certificates gpg-agent software-properties-common +``` +Install common dependencies: +```bash +sudo apt-get install -y --no-install-recommends --fix-missing \ + autoconf \ + automake \ + build-essential \ + apt-utils cmake cython3 flex bison gcc g++ git make patch pkg-config wget \ + libdrm-dev libudev-dev libtool libusb-1.0-0-dev xz-utils ocl-icd-opencl-dev opencl-headers +``` +### 2. Set Up OpenVINO + +Set up the OpenVINO toolkit by downloading and installing the key, adding the repository, and installing OpenVINO: + +```bash +wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB +sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + +echo "deb https://apt.repos.intel.com/openvino/2024 ubuntu22 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2024.list + +sudo apt-get update +sudo apt-get install -y openvino-2024.5.0 +rm -f GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB +``` + +### 3. 
Install FFmpeg Dependencies
+
+Install additional dependencies required by FFmpeg:
+
+```bash
+sudo apt-get install -y --no-install-recommends \
+    ca-certificates tar g++ wget pkg-config nasm yasm libglib2.0-dev flex bison gobject-introspection libgirepository1.0-dev \
+    python3-dev libx11-dev libxv-dev libxt-dev libasound2-dev libpango1.0-dev libtheora-dev libvisual-0.4-dev libgl1-mesa-dev \
+    libcurl4-gnutls-dev librtmp-dev mjpegtools libx264-dev libx265-dev libde265-dev libva-dev libtbb-dev
+```
+
+### 4. Build iVSR SDK
+
+1. Clone or copy the iVSR SDK repository into your workspace.
+2. Navigate to the SDK folder and create a build directory.
+3. Run CMake with the appropriate flags and build the project.
+
+```bash
+mkdir -p /ivsr/ivsr_sdk/build
+cd /ivsr/ivsr_sdk/build
+cmake .. -DENABLE_LOG=OFF -DENABLE_PERF=OFF -DENABLE_THREADPROCESS=ON -DCMAKE_BUILD_TYPE=Release
+make -j $(nproc --all)
+sudo make install
+```
+
+### 5. Build and Install FFmpeg with iVSR SDK Support
+
+1. Configure global Git settings if you haven't already.
+2. Clone the FFmpeg repository and check out the desired version.
+3. Apply necessary patches and configure the build.
+4. Compile and install FFmpeg.
+
+```bash
+git config --global user.email "noname@example.com"
+git config --global user.name "no name"
+git clone https://github.com/FFmpeg/FFmpeg.git /ivsr/ivsr_ffmpeg_plugin/ffmpeg
+cd /ivsr/ivsr_ffmpeg_plugin/ffmpeg
+git checkout n6.1
+
+# Apply patches
+cp -rf /path/to/patches/*.patch .
+git am --whitespace=fix *.patch
+
+./configure \
+    --enable-gpl \
+    --enable-nonfree \
+    --disable-static \
+    --disable-doc \
+    --enable-shared \
+    --enable-version3 \
+    --enable-libivsr \
+    --enable-libx264 \
+    --enable-libx265
+
+make -j$(nproc)
+sudo make install
+
+# Set the library path for FFmpeg or run ldconfig
+export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
+
+# Run ffmpeg to test if it can run successfully
+ffmpeg
+```
+
+### 6. 
Install GPU Drivers (Optional) + +Install required GPU drivers and dependencies: + +```bash +sudo apt-get install -y --no-install-recommends ocl-icd-libopencl1 + +# Download and install necessary GPU packages +mkdir /tmp/gpu_deps && cd /tmp/gpu_deps +curl -L -O https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17384.11/intel-igc-core_1.0.17384.11_amd64.deb +curl -L -O https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17384.11/intel-igc-opencl_1.0.17384.11_amd64.deb +curl -L -O https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/intel-level-zero-gpu-dbgsym_1.3.30508.7_amd64.ddeb +curl -L -O https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/intel-level-zero-gpu_1.3.30508.7_amd64.deb +curl -L -O https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/intel-opencl-icd-dbgsym_24.31.30508.7_amd64.ddeb +curl -L -O https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/intel-opencl-icd_24.31.30508.7_amd64.deb +curl -L -O https://github.com/intel/compute-runtime/releases/download/24.31.30508.7/libigdgmm12_22.4.1_amd64.deb +sudo dpkg -i ./*.deb +rm -Rf /tmp/gpu_deps +``` + +Also, you can download the latest gpu driver from the official website: https://github.com/intel/compute-runtime/releases +### 7. Environment Configuration for GPU + +Set the environment variables required for GPU drivers: + +```bash +export LIBVA_DRIVER_NAME=iHD +export LIBVA_DRIVERS_PATH=/usr/lib/x86_64-linux-gnu/dri +``` + +Congratulations! You've successfully built the software on a bare metal Ubuntu system. 
+ +### Optional: Build and Install OpenCV + +Start building OpenCV: + +```bash +OPENCV_REPO=https://github.com/opencv/opencv/archive/4.5.3-openvino-2021.4.2.tar.gz +wget -qO - ${OPENCV_REPO} | tar xz +OPENCV_BASE=opencv-4.5.3-openvino-2021.4.2 +cd ${OPENCV_BASE} && mkdir -p build && mkdir -p install && cd build +cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=${OPENCV_BASE}/install \ + -DCMAKE_INSTALL_LIBDIR=lib \ + -DOPENCV_GENERATE_PKGCONFIG=ON \ + -DBUILD_DOCS=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DBUILD_PERF_TESTS=OFF \ + -DBUILD_TESTS=OFF \ + -DWITH_OPENEXR=OFF \ + -DWITH_OPENJPEG=OFF \ + -DWITH_GSTREAMER=OFF \ + -DWITH_JASPER=OFF \ + -DWITH_FFMPEG=OFF \ + -DPYTHON3_EXECUTABLE=/usr/bin/python3 \ + .. +make -j "$(nproc)" +sudo make install +cd ${OPENCV_BASE}/install/bin && bash ./setup_vars_opencv4.sh + +``` \ No newline at end of file diff --git a/ivsr_ffmpeg_plugin/README.md b/ivsr_ffmpeg_plugin/README.md index 79ead0f..355b815 100644 --- a/ivsr_ffmpeg_plugin/README.md +++ b/ivsr_ffmpeg_plugin/README.md @@ -25,30 +25,34 @@ Additionally, there are other parameters that you can use. These parameters are Here are some examples of FFmpeg command lines to run inference with the supported models using the `ivsr` backend.
-- Command sample to run Enhanced BasicVSR inference, the input pixel format supported by the model is `bgr24`. +- Command sample to run Enhanced BasicVSR inference, the input pixel format supported by the model is `rgb24`. ``` cd /ivsr_ffmpeg_plugin/ffmpeg -./ffmpeg -i -vf format=bgr24,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=3:backend_configs='device=&extension=/ivsr_ov/based_on_openvino_2022.3/openvino/bin/intel64/Release/libcustom_extension.so&op_xml=/ivsr_ov/based_on_openvino_2022.3/openvino/flow_warp_cl_kernel/flow_warp.xml' test_out.mp4 +./ffmpeg -i -vf format=rgb24,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=3:backend_configs='device=&extension=/ivsr_ov/based_on_openvino_2022.3/openvino/bin/intel64/Release/libcustom_extension.so&op_xml=/ivsr_ov/based_on_openvino_2022.3/openvino/flow_warp_cl_kernel/flow_warp.xml' test_out.mp4 ``` Please note that for the Enhanced BasicVSR model, you need to set the `extension` and `op_xml` options (with `backend_configs`) in the command line. After applying OpenVINO's patches and building OpenVINO, the extension lib file is located in `/openvino/bin/intel64/Release/libcustom_extension.so`, and the op xml file is located in `/openvino/flow_warp_cl_kernel/flow_warp.xml`.
-- Command sample to run SVP models inference
+- Command sample to run SVP models inference. If the supported input pixel format of the model variant is `rgb24`, set the preceding format as is to avoid unnecessary layout conversion:
 ```
 cd /ivsr_ffmpeg_plugin/ffmpeg
-./ffmpeg -i -vf format=bgr24,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=1:backend_configs='device=&model_type=1' -pix_fmt yuv420p test_out.mp4
+./ffmpeg -i -vf format=rgb24,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=1:backend_configs='device=&model_type=1' -pix_fmt yuv420p test_out.mp4
 ```
-- Command sample to run Enhanced EDSR inference
+If the model variant supports Y-input, set the preceding format as YUV:
+```
+./ffmpeg -i -vf format=yuv420p,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=1:backend_configs='device=&model_type=1' -pix_fmt yuv420p test_out.mp4
+```
+- Command sample to run Enhanced EDSR inference, the input pixel format supported by the model is `rgb24`.
 ```
 cd /ivsr_ffmpeg_plugin/ffmpeg
-./ffmpeg -i -vf format=bgr24,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=1:backend_configs='device=&model_type=2&normalize_factor=255.0' -pix_fmt yuv420p test_out.mp4
+./ffmpeg -i -vf format=rgb24,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=1:backend_configs='device=&model_type=2&normalize_factor=255.0' -pix_fmt yuv420p test_out.mp4
 ```
-- Command sample to run CUSTOM VSR inference. Note the input pixel format supported by this model is `yuv420p`.
+- Command sample to run CUSTOM VSR inference. Note the input pixel format supported by this model is `yuv420p`, and its input shape is `[1, (Y channel)1, H, W]`, output shape is `[1, 1, 2xH, 2xW]`. 
``` cd /ivsr_ffmpeg_plugin/ffmpeg ./ffmpeg -i -vf format=yuv420p,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=1:backend_configs='nireq=1&device=CPU&model_type=3' -pix_fmt yuv420p test_out.mp4 ``` -- Command sample to run TSENet model +- Command sample to run TSENet model, the input pixel format supported by the model is `rgb24`. ``` cd /ivsr_ffmpeg_plugin/ffmpeg -./ffmpeg -i -vf format=bgr24,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=1:backend_configs='device=&model_type=4' -pix_fmt yuv420p test_out.mp4 +./ffmpeg -i -vf format=rgb24,dnn_processing=dnn_backend=ivsr:model=:input=input:output=output:nif=1:backend_configs='device=&model_type=4' -pix_fmt yuv420p test_out.mp4 ``` diff --git a/ivsr_ffmpeg_plugin/build_docker.sh b/ivsr_ffmpeg_plugin/build_docker.sh index 0c257e7..d69e467 100755 --- a/ivsr_ffmpeg_plugin/build_docker.sh +++ b/ivsr_ffmpeg_plugin/build_docker.sh @@ -1,50 +1,78 @@ #!/bin/sh -# Default value for the ENABLE_OV_PATCH flag -ENABLE_OV_PATCH="true" -OV_VERSION="2022.3" -OV_VERSION_N="ov2022.3" -# Parse the --enable_ov_patch flag and --ov_version +enable_ov_patch="false" + +# Default os_version set to ubuntu22 +os_version="ubuntu22" + +# Extract available OV versions from Dockerfile names and format them with "|" +available_versions=$(ls dockerfiles/${os_version}/ov*.dockerfile 2>/dev/null | grep -oP '(?<=ov)\d+\.\d+[a-z]*' | paste -sd '|') + +# Default OV_VERSION set to the first available version +ov_version=$(echo $available_versions | awk -F '|' '{print $1}') + +# Extract available OS versions from name of dockerfiles folder +available_os=$(ls dockerfiles 2>/dev/null | paste -sd '|') + +# Function to print usage and exit with error +print_usage_and_exit() { + echo "Usage: $0 --enable_ov_patch [true|false] --ov_version [${available_versions}] --os_version [${available_os}]" + exit 1 +} + +# Parse the arguments while [ $# -gt 0 ]; do - case "$1" in - --enable_ov_patch) - shift - value=$(echo $1 | tr 
'[:upper:]' '[:lower:]')
-        if [ "$value" = "false" ]; then
-            ENABLE_OV_PATCH=$value
-        fi
-        shift
-        ;;
-    --ov_version)
-        shift
-        if [ "$1" = "2022.3" ]; then
-            OV_VERSION=$1
-            OV_VERSION_N="ov2022.3"
-        elif [ "$1" = "2023.2" ]; then
-            OV_VERSION=$1
-            OV_VERSION_N="ov2023.2"
-        else
-            echo "Usage: $0 --enable_ov_patch [true|false] --ov_version [2022.3|2023.2]"
-            exit 1
-        fi
-        shift
-        ;;
-    *)
-        echo "Usage: $0 --enable_ov_patch [true|false] --ov_version [2022.3|2023.2]"
-        exit 1
-        ;;
-    esac
+    case "$1" in
+    --enable_ov_patch)
+        shift
+        value=$(echo $1 | tr '[:upper:]' '[:lower:]')
+        if [ "$value" = "true" ] || [ "$value" = "false" ]; then
+            enable_ov_patch=$value
+        else
+            print_usage_and_exit
+        fi
+        shift
+        ;;
+    --ov_version)
+        shift
+        if echo "$available_versions" | grep -qw "$1"; then
+            ov_version=$1
+        else
+            print_usage_and_exit
+        fi
+        shift
+        ;;
+    --os_version)
+        shift
+        value=$(echo $1 | tr '[:upper:]' '[:lower:]')
+        if echo "$available_os" | grep -qw "$value"; then
+            os_version="$value";
+        else
+            print_usage_and_exit
+        fi
+        shift
+        ;;
+    *)
+        print_usage_and_exit
+        ;;
+    esac
 done
 
-if [ "$OV_VERSION" = "2023.2" ]; then
-    ENABLE_OV_PATCH="false"
-    echo "There is no openvino patches for openvino 2023.2 version, will ignore the setting of ENABLE_OV_PATCH"
+# Configure ENABLE_OV_PATCH according to OV version
+if [ "$ov_version" = "2022.3" ]; then
+    echo "Setting ENABLE_OV_PATCH to $enable_ov_patch for version 2022.3."
+else
+    enable_ov_patch="false"
+    echo "ENABLE_OV_PATCH is not applicable for version $ov_version. Automatically set to false."
fi -docker build --build-arg http_proxy=$http_proxy \ - --build-arg https_proxy=$https_proxy \ - --build-arg no_proxy=$no_proxy \ - --build-arg PYTHON=python3.10 \ - --build-arg ENABLE_OV_PATCH=$ENABLE_OV_PATCH \ - --build-arg OV_VERSION=$OV_VERSION \ - -f Dockerfile -t ffmpeg_ivsr_sdk_$OV_VERSION_N \ - ../ + +docker build \ + --build-arg http_proxy=$http_proxy \ + --build-arg https_proxy=$https_proxy \ + --build-arg no_proxy=$no_proxy \ + --build-arg PYTHON=python3.10 \ + --build-arg ENABLE_OV_PATCH=$enable_ov_patch \ + --build-arg OV_VERSION=$ov_version \ + -f ./dockerfiles/$os_version/ov${ov_version}.dockerfile \ + -t ffmpeg_ivsr_sdk_${os_version}_ov${ov_version} \ + ../ diff --git a/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/Dockerfile b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/Dockerfile new file mode 100644 index 0000000..6f9636a --- /dev/null +++ b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/Dockerfile @@ -0,0 +1,223 @@ +# SPDX-License-Identifier: BSD 3-Clause License +# +# Copyright (c) 2023, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ARG IMAGE=rockylinux@sha256:d7be1c094cc5845ee815d4632fe377514ee6ebcf8efaed6892889657e5ddaaa6 +FROM $IMAGE AS base + +RUN dnf -y update && \ + dnf -y install \ + wget && \ + dnf clean all + +FROM base as build +LABEL vendor="Intel Corporation" + +RUN dnf -y install cmake \ + gcc \ + g++ && \ + dnf clean all + +ARG ENABLE_OV_PATCH +ARG OV_VERSION + +# install opencv +ARG WORKSPACE=/workspace +ARG OPENCV_REPO=https://github.com/opencv/opencv/archive/4.5.3-openvino-2021.4.2.tar.gz +WORKDIR ${WORKSPACE} +RUN wget -qO - ${OPENCV_REPO} | tar xz +WORKDIR ${WORKSPACE}/opencv-4.5.3-openvino-2021.4.2 +RUN mkdir build && mkdir install +WORKDIR ${WORKSPACE}/opencv-4.5.3-openvino-2021.4.2/build +RUN cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=${WORKSPACE}/opencv-4.5.3-openvino-2021.4.2/install \ + -DCMAKE_INSTALL_LIBDIR=lib \ + -DOPENCV_GENERATE_PKGCONFIG=ON \ + -DBUILD_DOCS=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DBUILD_PERF_TESTS=OFF \ + -DBUILD_TESTS=OFF \ + -DWITH_OPENEXR=OFF \ + -DWITH_OPENJPEG=OFF \ + -DWITH_GSTREAMER=OFF \ + -DWITH_JASPER=OFF \ + .. 
&& \ + make -j16 && \ + make install + +WORKDIR ${WORKSPACE}/opencv-4.5.3-openvino-2021.4.2/install/bin +RUN bash ./setup_vars_opencv4.sh + +ENV LD_LIBRARY_PATH=${WORKSPACE}/opencv-4.5.3-openvino-2021.4.2/install/lib:$LD_LIBRARY_PATH +ENV OpenCV_DIR=${WORKSPACE}/opencv-4.5.3-openvino-2021.4.2/install/lib/cmake/opencv4 + +# install openvino +RUN dnf -y install git \ + python3-devel && \ + dnf clean all +RUN dnf -y --enablerepo=crb install python3-Cython && \ + dnf clean all + +ARG IVSR_DIR=${WORKSPACE}/ivsr +ARG IVSR_OV_DIR=${IVSR_DIR}/ivsr_ov/based_on_openvino_${OV_VERSION}/openvino +ARG CUSTOM_OV_INSTALL_DIR=${IVSR_OV_DIR}/install +ARG IVSR_SDK_DIR=${IVSR_DIR}/ivsr_sdk/ + +ARG OV_REPO=https://github.com/openvinotoolkit/openvino.git +ARG OV_BRANCH=${OV_VERSION}.0 +WORKDIR ${IVSR_OV_DIR} + +RUN git config --global user.email "noname@example.com" && \ + git config --global user.name "no name" + +RUN git clone ${OV_REPO} ${IVSR_OV_DIR} && \ + git checkout ${OV_BRANCH} && \ + git submodule update --init --recursive + +COPY ./ivsr_ov/based_on_openvino_2022.3/patches/*.patch ${IVSR_OV_DIR}/../patches/ +RUN if [ "$ENABLE_OV_PATCH" = "true" ] && [ "$OV_VERSION" = "2022.3" ]; then \ + { set -e; \ + for patch_file in $(find ../patches -iname "*.patch" | sort -n); do \ + echo "Applying: ${patch_file}"; \ + git am --whitespace=fix ${patch_file}; \ + done; }; \ + fi +RUN rm -rf ${IVSR_OV_DIR}/../patches + +WORKDIR ${IVSR_OV_DIR}/build +RUN cmake \ + -DCMAKE_INSTALL_PREFIX=${PWD}/../install \ + -DENABLE_INTEL_CPU=ON \ + -DENABLE_CLDNN=ON \ + -DENABLE_INTEL_GPU=OFF \ + -DENABLE_ONEDNN_FOR_GPU=OFF \ + -DENABLE_INTEL_GNA=OFF \ + -DENABLE_INTEL_MYRIAD_COMMON=OFF \ + -DENABLE_INTEL_MYRIAD=OFF \ + -DENABLE_PYTHON=ON \ + -DENABLE_OPENCV=ON \ + -DENABLE_SAMPLES=ON \ + -DENABLE_CPPLINT=OFF \ + -DTREAT_WARNING_AS_ERROR=OFF \ + -DENABLE_TESTS=OFF \ + -DENABLE_GAPI_TESTS=OFF \ + -DENABLE_BEH_TESTS=OFF \ + -DENABLE_FUNCTIONAL_TESTS=OFF \ + -DENABLE_OV_CORE_UNIT_TESTS=OFF \ + 
-DENABLE_OV_CORE_BACKEND_UNIT_TESTS=OFF \ + -DENABLE_DEBUG_CAPS=ON \ + -DENABLE_GPU_DEBUG_CAPS=OFF \ + -DENABLE_CPU_DEBUG_CAPS=ON \ + -DCMAKE_BUILD_TYPE=Release \ + .. && \ + make -j16 && \ + make install && \ + bash ${PWD}/../install/setupvars.sh + +ARG CUSTOM_IE_DIR=${CUSTOM_OV_INSTALL_DIR}/runtime +ARG CUSTOM_IE_LIBDIR=${CUSTOM_IE_DIR}/lib/intel64 +ARG CUSTOM_OV=${CUSTOM_IE_DIR} + +ENV OpenVINO_DIR=${CUSTOM_IE_DIR}/cmake +ENV InferenceEngine_DIR=${CUSTOM_IE_DIR}/cmake +ENV TBB_DIR=${CUSTOM_IE_DIR}/3rdparty/tbb/cmake +ENV ngraph_DIR=${CUSTOM_IE_DIR}/cmake +ENV LD_LIBRARY_PATH=${CUSTOM_IE_DIR}/3rdparty/tbb/lib:${CUSTOM_IE_LIBDIR}:$LD_LIBRARY_PATH + +# install ivsr sdk +RUN dnf -y install zlib-devel +COPY ./ivsr_sdk ${IVSR_SDK_DIR} +RUN echo ${IVSR_SDK_DIR} +WORKDIR ${IVSR_SDK_DIR}/build +RUN cmake .. \ + -DENABLE_LOG=OFF -DENABLE_PERF=OFF -DENABLE_THREADPROCESS=ON \ + -DCMAKE_BUILD_TYPE=Release && \ + make -j16 && \ + make install && \ + echo "Building vsr sdk finished." + +#build ffmpeg with iVSR SDK backend +RUN dnf -y --enablerepo=crb install nasm +RUN dnf -y --enablerepo=devel install yasm +RUN dnf -y install diffutils + +# build libx264 +WORKDIR ${WORKSPACE} +RUN git clone https://github.com/mirror/x264 -b stable --depth 1 && \ + cd x264 && \ + ./configure --enable-shared && \ + make -j16 && \ + make install + +# build libx265 +WORKDIR ${WORKSPACE} +ARG LIBX265=https://github.com/videolan/x265/archive/3.4.tar.gz +RUN wget ${LIBX265} && \ + tar xzf ./3.4.tar.gz && \ + rm ./3.4.tar.gz && \ + cd x265-3.4/build/linux && \ + cmake -DBUILD_SHARED_LIBS=ON -DHIGH_BIT_DEPTH=ON ../../source && \ + make -j16 && \ + make install + +ENV PKG_CONFIG_PATH=/usr/local/lib/pkgconfig +ENV LD_LIBRARY_PATH=${IVSR_SDK_DIR}/lib:/usr/local/lib:$LD_LIBRARY_PATH + +ARG FFMPEG_IVSR_SDK_PLUGIN_DIR=${IVSR_DIR}/ivsr_ffmpeg_plugin +ARG FFMPEG_DIR=${FFMPEG_IVSR_SDK_PLUGIN_DIR}/ffmpeg + +ARG FFMPEG_REPO=https://github.com/FFmpeg/FFmpeg.git +ARG FFMPEG_VERSION=n6.1 +WORKDIR ${FFMPEG_DIR} +RUN 
git clone ${FFMPEG_REPO} ${FFMPEG_DIR} && \ + git checkout ${FFMPEG_VERSION} +COPY ./ivsr_ffmpeg_plugin/patches/*.patch ${FFMPEG_DIR}/ +RUN { set -e; \ + for patch_file in $(find -iname "*.patch" | sort -n); do \ + echo "Applying: ${patch_file}"; \ + git am --whitespace=fix ${patch_file}; \ + done; } + +RUN ./configure \ +--extra-cflags=-fopenmp \ +--extra-ldflags=-fopenmp \ +--enable-libivsr \ +--disable-static \ +--disable-doc \ +--enable-shared \ +--enable-gpl \ +--enable-libx264 \ +--enable-libx265 \ +--enable-version3 && \ +make -j16 && \ +make install + +WORKDIR ${WORKSPACE} +CMD ["/bin/bash"] diff --git a/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2022.3.dockerfile b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2022.3.dockerfile new file mode 120000 index 0000000..1d1fe94 --- /dev/null +++ b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2022.3.dockerfile @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2023.2.dockerfile b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2023.2.dockerfile new file mode 120000 index 0000000..1d1fe94 --- /dev/null +++ b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2023.2.dockerfile @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2024.5.dockerfile b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2024.5.dockerfile new file mode 120000 index 0000000..1d1fe94 --- /dev/null +++ b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2024.5.dockerfile @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2024.5s.dockerfile b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2024.5s.dockerfile new file mode 100644 index 0000000..436c835 --- /dev/null +++ b/ivsr_ffmpeg_plugin/dockerfiles/rockylinux9/ov2024.5s.dockerfile @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: BSD 3-Clause License +# +# Copyright (c) 2023, Intel Corporation +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +ARG IMAGE=rockylinux@sha256:d7be1c094cc5845ee815d4632fe377514ee6ebcf8efaed6892889657e5ddaaa6 +FROM $IMAGE AS base + +RUN dnf -y update && \ + dnf -y install \ + wget && \ + dnf clean all + +FROM base as build +LABEL vendor="Intel Corporation" + +RUN dnf -y install cmake \ + gcc \ + git \ + g++ && \ + dnf clean all + +ARG WORKSPACE=/workspace + +# install openvino +RUN tee /tmp/openvino-2024.repo < +Date: Tue, 25 Jun 2024 22:58:36 +0800 +Subject: [PATCH] dnn_ivsr_backend: optimized layout conversion(nchw <-> nhwc) + with openmp. + +Signed-off-by: Liang +--- + libavfilter/dnn/dnn_backend_ivsr.c | 163 ++++++++++------------------- + 1 file changed, 58 insertions(+), 105 deletions(-) + +diff --git a/libavfilter/dnn/dnn_backend_ivsr.c b/libavfilter/dnn/dnn_backend_ivsr.c +index d0f71e976d..80e8f61607 100644 +--- a/libavfilter/dnn/dnn_backend_ivsr.c ++++ b/libavfilter/dnn/dnn_backend_ivsr.c +@@ -37,6 +37,7 @@ + #include "ivsr.h" + #include "dnn_backend_common.h" + #include ++#include + + + #define DNN_MORE_FRAMES FFERRTAG('M','O','R','E') +@@ -126,6 +127,48 @@ static uint8_t clamp(uint8_t val, uint8_t min, uint8_t max) { + return val; + } + ++static void convert_nchw_to_nhwc(void* data, int N, int C, int H, int W) { ++ int data_size = N * C * H * W; ++ void *temp = av_malloc(data_size * sizeof(float)); ++ int max_threads = omp_get_num_procs() / 2; ++ // memory copy ++ #pragma omp parallel for num_threads(max_threads) ++ for (int i = 0; i < data_size; i++) ++ ((float *)temp)[i] = ((float *)data)[i]; ++ ++ // convert buffer from nchw to nhwc and reverse rgb to bgr ++ #pragma omp parallel num_threads(max_threads) ++ { ++ for (int n = 0; n < N; n++) ++ for (int h = omp_get_thread_num(); h < H; h += omp_get_num_threads()) ++ for (int w = 0; w < W; w++) ++ for (int c = 0; c < C; c++) ++ ((float *)data)[n * H * W * C + h * W * C + w * C + c] = ((float *)temp)[n * C * H * W + (C - 1 - c) * H * W + h * W + w]; ++ } ++ av_free(temp); ++} ++ ++static void 
convert_nhwc_to_nchw(void* data, int N, int C, int H, int W) { ++ int data_size = N * C * H * W; ++ void *temp = av_malloc(data_size * sizeof(float)); ++ int max_threads = omp_get_num_procs() / 2; ++ // memory copy ++ #pragma omp parallel for num_threads(max_threads) ++ for (int i = 0; i < data_size; i++) ++ ((float *)temp)[i] = ((float *)data)[i]; ++ ++ // convert buffer from nhwc to nchw and reverse bgr to rgb ++ #pragma omp parallel num_threads(max_threads) ++ { ++ for (int n = 0; n < N; n++) ++ for (int h = omp_get_thread_num(); h < H; h += omp_get_num_threads()) ++ for (int w = 0; w < W; w++) ++ for (int c = 0; c < C; c++) ++ ((float *)data)[n * C * H * W + c * H * W + h * W + w] = ((float *)temp)[n * H * W * C + h * W * C + w * C + C - 1 - c]; ++ } ++ av_free(temp); ++} ++ + /* returns + * DNN_GENERIC_ERROR, + * DNN_MORE_FRAMES - waiting for more input frames, +@@ -142,7 +185,6 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + TaskItem *task; + AVFrame *tmp_frame = NULL; + void *in_data = NULL; +- void *in_in_packed = NULL; + int dims[5] = { 0, 0, 0, 0, 0 }; + float normalize_factor = ctx->options.normalize_factor; + +@@ -186,15 +228,6 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + input.mean = 0; + input.layout = DL_NONE; + +- if (input.channels != 1) { +- in_in_packed = +- av_malloc(input.height * input.width * input.channels * +- sizeof(float)); +- if (!in_in_packed) +- return AVERROR(ENOMEM); +- } +- +- + for (int i = 0; i < ctx->options.batch_size; ++i) { + //INFO: for TSENET, lltask_queue contains (N-1)th and (N)th frames + //so peek (N)th frame. 
+@@ -218,25 +251,7 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + ivsr_model->model-> + filter_ctx); + // convert buffer from NHWC to NCHW when C != 1 +- if (input.channels != 1) { +- memcpy((uint8_t *) in_in_packed, +- (uint8_t *) input.data, +- input.height * input.width * +- input.channels * sizeof(float)); +- for (int pos = 0; +- pos < input.height * input.width; pos++) { +- for (int ch = 0; ch < input.channels; ch++) { +- ((float *) +- input.data)[(ch * input.height * +- input.width + pos)] = +- ((float *) +- in_in_packed)[(pos * +- input.channels + +- (input.channels - +- 1 - ch))]; +- } +- } +- } ++ convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width); + input.data += + input.height * input.width * + input.channels * sizeof(float); +@@ -252,7 +267,6 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + //1. copy the input_frame(ref the buffer) and put into ivsr_model->fame_queue + tmp_frame = av_frame_alloc(); + if(av_frame_ref(tmp_frame, task->in_frame) < 0) { +- if(in_in_packed) av_free(in_in_packed); + return AVERROR(ENOMEM); + } + +@@ -262,7 +276,6 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + //For the first pic in the stream + tmp_frame = av_frame_alloc(); + if(av_frame_ref(tmp_frame, task->in_frame) < 0) { +- if(in_in_packed) av_free(in_in_packed); + return AVERROR(ENOMEM); + } + av_fifo_write(ivsr_model->frame_queue, &tmp_frame, 1); +@@ -277,15 +290,7 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + for (int idx = 0; idx < ivsr_model->nif; idx++) { + //INFO: the 3 frames in frame_queue are: (N-2)th, (N-1)th, (N)th + ff_proc_from_frame_to_dnn(input_frames[idx], &input, ivsr_model->model->filter_ctx); +- //convert to NCHW layout +- memcpy((uint8_t *)in_in_packed, (uint8_t *)input.data, +- input.height * input.width * input.channels * sizeof(float)); +- for (int pos = 0; pos < input.height * input.width; pos++) { +- for (int ch = 0; ch < input.channels; ch++) { +- ((float 
*)input.data)[(ch * input.height * input.width + pos)] = +- ((float *)in_in_packed)[(pos * input.channels + (input.channels - 1 - ch))]; +- } +- } ++ convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width); + input.data += input.height * input.width * input.channels * sizeof(float); + } + input.data = in_data; +@@ -295,7 +300,6 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + av_frame_free(&tmp_frame); + // INFO: for the last frame, peek_back and pop_front get the same frame, so don't have to handle EOS specifically + } else { +- if(in_in_packed) av_free(in_in_packed); + return DNN_MORE_FRAMES; + } + } else { +@@ -307,23 +311,13 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + ivsr_model->model-> + filter_ctx); + if (input.channels != 1) { +- // convert buffer from NHWC to NCHW and multiply normalize_factor +- memcpy((uint8_t*)in_in_packed, +- (uint8_t*)input.data, +- input.height * input.width * input.channels * sizeof(float)); +- for (int pos = 0; pos < input.height * input.width; pos++) { +- for (int ch = 0; ch < input.channels; ch++) { +- ((float*)input.data)[(ch * input.height * input.width + pos)] = +- ((float*)in_in_packed)[(pos * input.channels + (input.channels - 1 - ch))] * normalize_factor; +- } +- } +- } else if (normalize_factor != 1) { ++ convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width); ++ } ++ if (normalize_factor != 1) { + // do not need to covert buffer from NHWC to NCHW if the channels is 1, only need to mulitple normalize_factor +- for (int pos = 0; pos < input.height * input.width; pos++) { +- for (int ch = 0; ch < input.channels; ch++) { +- ((float*)input.data)[(ch * input.height * input.width + pos)] = +- ((float*)input.data)[ch * input.height * input.width + pos] * normalize_factor; +- } ++ #pragma omp parallel for ++ for (int pos = 0; pos < input.height * input.width * input.channels; pos++) { ++ ((float*)input.data)[pos] = ((float*)input.data)[pos] * 
normalize_factor; + } + } + } +@@ -339,8 +333,6 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + input.width * input.height * input.channels * + get_datatype_size(input.dt); + } +- if (in_in_packed) +- av_free(in_in_packed); + return 0; + } + +@@ -355,7 +347,6 @@ static void infer_completion_callback(void *args) + DNNData output; + IVSRContext *ctx = &ivsr_model->ctx; + AVFrame *tmp_frame = NULL; +- void *out_in_planar = NULL; + int offset = 0; + int dims[5] = { 0, 0, 0, 0, 0 }; + float normalize_factor = ctx->options.normalize_factor; +@@ -390,18 +381,6 @@ static void infer_completion_callback(void *args) + output.scale = 0; + output.mean = 0; + output.layout = DL_NONE; +- if (output.channels != 1) { +- out_in_planar = +- av_malloc(output.height * output.width * output.channels * +- sizeof(float)); +- if (!out_in_planar) { +- av_log(ctx, AV_LOG_ERROR, +- "Failed to allocate array with %ld bytes!\n", +- output.height * output.width * output.channels * +- sizeof(float)); +- return; +- } +- } + + av_assert0(request->lltask_count <= dims[0]); + av_assert0(request->lltask_count >= 1); +@@ -423,21 +402,7 @@ static void infer_completion_callback(void *args) + offset); + if (ret == 0) { + if (output.channels != 1) { +- memcpy((uint8_t *) out_in_planar, +- (uint8_t *) output.data, +- output.height * output.width * +- output.channels * sizeof(float)); +- for (int pos = 0; +- pos < output.height * output.width; +- pos++) { +- for (int ch = 0; ch < output.channels; +- ch++) { +- ((float *) +- output.data)[(pos * output.channels + +- ch)] = ((float *) +- out_in_planar)[((output.channels - 1 - ch) * output.height * output.width + pos)]; +- } +- } ++ convert_nchw_to_nhwc(output.data, 1, output.channels, output.height, output.width); + } + ff_proc_from_dnn_to_frame(tmp_frame, &output, + &ivsr_model->model-> +@@ -460,23 +425,13 @@ static void infer_completion_callback(void *args) + } else { + if (output.channels != 1) { + //convert buffer from NCHW to NHWC +- 
memcpy((uint8_t*)out_in_planar, +- (uint8_t*)output.data, +- output.height * output.width * output.channels * sizeof(float)); +- for (int pos = 0; pos < output.height * output.width; pos++) { +- for (int ch = 0; ch < output.channels; ch++) { +- ((float*)output.data)[(pos * output.channels + ch)] = +- ((float*) +- out_in_planar)[((output.channels - 1 - ch) * output.height * output.width + pos)] / normalize_factor; +- } +- } +- } else if (normalize_factor != 1) { ++ convert_nchw_to_nhwc(output.data, 1, output.channels, output.height, output.width); ++ } ++ if (normalize_factor != 1) { ++ #pragma omp parallel for + // only need to devide by normalize_factor for channels = 1. +- for (int pos = 0; pos < output.height * output.width; pos++) { +- for (int ch = 0; ch < output.channels; ch++) { +- ((float*)output.data)[(pos * output.channels + ch)] = +- ((float*)output.data)[pos * output.channels + ch] / normalize_factor; +- } ++ for (int pos = 0; pos < output.height * output.width * output.channels; pos++) { ++ ((float*)output.data)[pos] = ((float*)output.data)[pos] / normalize_factor; + } + } + ff_proc_from_dnn_to_frame(task->out_frame, &output, +@@ -504,8 +459,6 @@ static void infer_completion_callback(void *args) + output.width * output.height * output.channels * + get_datatype_size(output.dt); + } +- if (out_in_planar) +- av_free(out_in_planar); + + request->lltask_count = 0; + if (ff_safe_queue_push_back(requestq, request) < 0) { +-- +2.34.1 + diff --git a/ivsr_ffmpeg_plugin/patches/0020-dnn_ivsr_backend-process-non-8-aligned-resolution-to.patch b/ivsr_ffmpeg_plugin/patches/0020-dnn_ivsr_backend-process-non-8-aligned-resolution-to.patch new file mode 100644 index 0000000..535fdd4 --- /dev/null +++ b/ivsr_ffmpeg_plugin/patches/0020-dnn_ivsr_backend-process-non-8-aligned-resolution-to.patch @@ -0,0 +1,218 @@ +From 2990f5107f2e531d3ac1a927f6d3551529868958 Mon Sep 17 00:00:00 2001 +From: Xiaoxia Liang +Date: Fri, 26 Jul 2024 18:57:53 +0000 +Subject: [PATCH] 
dnn_ivsr_backend: process non-8 aligned resolution to make + the video processing model can run at any resoultion. + +Padding DNN data buffer to 8 aligned and then do video processing and +then crop to resolution same as input. + +Signed-off-by: Xiaoxia Liang +--- + libavfilter/dnn/dnn_backend_ivsr.c | 52 +++++++++++++++++++++++++++++- + libavfilter/dnn/dnn_io_proc.c | 8 ++--- + libavfilter/vf_dnn_processing.c | 20 ++++++------ + 3 files changed, 65 insertions(+), 15 deletions(-) + +diff --git a/libavfilter/dnn/dnn_backend_ivsr.c b/libavfilter/dnn/dnn_backend_ivsr.c +index 189705d309..6f2f5f0f07 100644 +--- a/libavfilter/dnn/dnn_backend_ivsr.c ++++ b/libavfilter/dnn/dnn_backend_ivsr.c +@@ -57,6 +57,10 @@ typedef struct IVSROptions { + typedef struct IVSRContext { + const AVClass *class; + IVSROptions options; ++ uint32_t frame_input_height; ++ uint32_t frame_input_width; ++ uint32_t model_input_height; ++ uint32_t model_input_width; + } IVSRContext; + + typedef enum { +@@ -105,6 +109,8 @@ static const AVOption dnn_ivsr_options[] = { + + AVFILTER_DEFINE_CLASS(dnn_ivsr); + ++#define ALIGNED_SIZE 8 ++ + static int get_datatype_size(DNNDataType dt) + { + switch (dt) { +@@ -169,6 +175,20 @@ static void convert_nhwc_to_nchw(void* data, int N, int C, int H, int W) { + av_free(temp); + } + ++/** ++ * set value for padding right and bottom. 
++ */ ++static void set_padding_value(void* data, uint32_t width, uint32_t height, uint32_t padding_width, uint32_t padding_height, int padding_value) { ++ int n_width = width + padding_width; ++ for (int h = 0; h < height; ++h) { ++ int index = h * (n_width) + width; ++ memset(data + index, padding_value, padding_width); ++ } ++ ++ int index = height * n_width; ++ memset(data + index, padding_value, padding_height * n_width); ++} ++ + /* returns + * DNN_GENERIC_ERROR, + * DNN_MORE_FRAMES - waiting for more input frames, +@@ -187,6 +207,7 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + void *in_data = NULL; + int dims[5] = { 0, 0, 0, 0, 0 }; + float normalize_factor = ctx->options.normalize_factor; ++ int padding_height = 0, padding_width = 0; + + status = ivsr_get_attr(ivsr_model->handle, INPUT_TENSOR_DESC, dims); + if (status != OK) { +@@ -227,7 +248,11 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + input.scale = 0; + input.mean = 0; + input.layout = DL_NONE; ++ ctx->model_input_height = input.height; ++ ctx->model_input_width = input.width; + ++ padding_height = ctx->model_input_height - ctx->frame_input_height; ++ padding_width = ctx->model_input_width - ctx->frame_input_width; + for (int i = 0; i < ctx->options.batch_size; ++i) { + //INFO: for TSENET, lltask_queue contains (N-1)th and (N)th frames + //so peek (N)th frame. +@@ -242,6 +267,18 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + ivsr_model->model-> + filter_ctx); + } else { ++ // reset bottom and right to 0 when size of input frame < model required. 
++ if (padding_height > 0 || padding_width > 0) { ++ uint32_t padding_width_bytes = (padding_width) * input.channels * get_datatype_size(input.dt); ++ for (int i = 0; i < ivsr_model->nif; ++i) { ++ set_padding_value(input.data, ctx->frame_input_width * input.channels * get_datatype_size(input.dt), ctx->frame_input_height, ++ padding_width_bytes, padding_height, 0); ++ input.data += ++ input.height * input.width * ++ input.channels * get_datatype_size(input.dt); ++ } ++ input.data = in_data; ++ } + if (ivsr_model->model_type == BASICVSR && dims[2] != 1) { + int read_frame_num = 0; + for (int j = 0; j < dims[2]; j++) { +@@ -602,8 +639,11 @@ static int get_output_ivsr(void *model, const char *input_name, + } + + switch (ivsr_model->model_type) { +- case BASICVSR: + case VIDEOPROC: ++ *output_height = input_height; ++ *output_width = input_width; ++ break; ++ case BASICVSR: + case EDSR: + case CUSTVSR: + case TSENET: +@@ -707,6 +747,8 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + AVFilterLink *inlink = filter_ctx->inputs[0]; + int frame_h = inlink->h; + int frame_w = inlink->w; ++ ctx->frame_input_height = inlink->h; ++ ctx->frame_input_width = inlink->w; + + // input_res setting + config_input_res = av_mallocz(sizeof(ivsr_config_t)); +@@ -735,6 +777,11 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + sprintf(shape_string, "1,3,3,%d,%d", frame_h, frame_w); + break; + case VIDEOPROC: ++ // the input resoultion required 8-aligned ++ frame_h = (frame_h + ALIGNED_SIZE - 1) / ALIGNED_SIZE * ALIGNED_SIZE; ++ frame_w = (frame_w + ALIGNED_SIZE - 1) / ALIGNED_SIZE * ALIGNED_SIZE; ++ sprintf(shape_string, "1,3,%d,%d", frame_h, frame_w); ++ break; + case EDSR: + sprintf(shape_string, "1,3,%d,%d", frame_h, frame_w); + break; +@@ -834,6 +881,9 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + item->in_frames = + av_malloc(input_dims[0] * input_dims[1] * input_dims[2] * + input_dims[3] * input_dims[4] * sizeof(float)); ++ ++ int 
input_byte_size = input_dims[0] * input_dims[1] * input_dims[2] * input_dims[3] * input_dims[4] * sizeof(float); ++ memset(item->in_frames, 0, input_byte_size); + if (!item->in_frames) { + av_log(ctx, AV_LOG_ERROR, "Failed to malloc in frames\n"); + goto err; +diff --git a/libavfilter/dnn/dnn_io_proc.c b/libavfilter/dnn/dnn_io_proc.c +index ab656e8ed7..1465d32c32 100644 +--- a/libavfilter/dnn/dnn_io_proc.c ++++ b/libavfilter/dnn/dnn_io_proc.c +@@ -98,7 +98,7 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + goto err; + } + sws_scale(sws_ctx, (const uint8_t *[4]){(const uint8_t *)output->data, 0, 0, 0}, +- (const int[4]){frame->width * 3 * src_datatype_size, 0, 0, 0}, 0, frame->height, ++ (const int[4]){output->width * 3 * src_datatype_size, 0, 0, 0}, 0, frame->height, + (uint8_t * const*)dst_data, linesize); + sws_freeContext(sws_ctx); + // convert data from planar to packed +@@ -163,7 +163,7 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + goto err; + } + sws_scale(sws_ctx, (const uint8_t *[4]){(const uint8_t *)output->data, 0, 0, 0}, +- (const int[4]){frame->width * src_datatype_size, 0, 0, 0}, 0, frame->height, ++ (const int[4]){output->width * src_datatype_size, 0, 0, 0}, 0, frame->height, + (uint8_t * const*)frame->data, frame->linesize); + sws_freeContext(sws_ctx); + break; +@@ -272,7 +272,7 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + sws_scale(sws_ctx, (const uint8_t **)src_data, + linesize, 0, frame->height, + (uint8_t * const [4]){input->data, 0, 0, 0}, +- (const int [4]){frame->width * 3 * dst_datatype_size, 0, 0, 0}); ++ (const int [4]){input->width * 3 * dst_datatype_size, 0, 0, 0}); + sws_freeContext(sws_ctx); + break; + case AV_PIX_FMT_GRAYF32: +@@ -305,7 +305,7 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + sws_scale(sws_ctx, (const uint8_t **)frame->data, + frame->linesize, 0, frame->height, + (uint8_t * const 
[4]){input->data, 0, 0, 0}, +- (const int [4]){frame->width * dst_datatype_size, 0, 0, 0}); ++ (const int [4]){input->width * dst_datatype_size, 0, 0, 0}); + sws_freeContext(sws_ctx); + break; + default: +diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c +index 5208a72b6f..2b7656f21a 100644 +--- a/libavfilter/vf_dnn_processing.c ++++ b/libavfilter/vf_dnn_processing.c +@@ -104,16 +104,16 @@ static int check_modelinput_inlink(const DNNData *model_input, const AVFilterLin + enum AVPixelFormat fmt = inlink->format; + + // the design is to add explicit scale filter before this filter +- if (model_input->height != -1 && model_input->height != inlink->h) { +- av_log(ctx, AV_LOG_ERROR, "the model requires frame height %d but got %d\n", +- model_input->height, inlink->h); +- return AVERROR(EIO); +- } +- if (model_input->width != -1 && model_input->width != inlink->w) { +- av_log(ctx, AV_LOG_ERROR, "the model requires frame width %d but got %d\n", +- model_input->width, inlink->w); +- return AVERROR(EIO); +- } ++ // if (model_input->height != -1 && model_input->height != inlink->h) { ++ // av_log(ctx, AV_LOG_ERROR, "the model requires frame height %d but got %d\n", ++ // model_input->height, inlink->h); ++ // return AVERROR(EIO); ++ // } ++ // if (model_input->width != -1 && model_input->width != inlink->w) { ++ // av_log(ctx, AV_LOG_ERROR, "the model requires frame width %d but got %d\n", ++ // model_input->width, inlink->w); ++ // return AVERROR(EIO); ++ // } + if (model_input->dt != DNN_FLOAT) { + avpriv_report_missing_feature(ctx, "data type rather than DNN_FLOAT"); + return AVERROR(EIO); +-- +2.34.1 + diff --git a/ivsr_ffmpeg_plugin/patches/0021-Enable-async-request-infer-and-refine-the-config-set.patch b/ivsr_ffmpeg_plugin/patches/0021-Enable-async-request-infer-and-refine-the-config-set.patch new file mode 100644 index 0000000..4063b0e --- /dev/null +++ 
b/ivsr_ffmpeg_plugin/patches/0021-Enable-async-request-infer-and-refine-the-config-set.patch @@ -0,0 +1,227 @@ +From d50841f2a463d0d4c6ec03c6ddbb7327f0f8a00a Mon Sep 17 00:00:00 2001 +From: LinXie +Date: Fri, 23 Aug 2024 15:08:26 +0000 +Subject: [PATCH] Enable async request infer and refine the config setting + +--- + libavfilter/dnn/dnn_backend_ivsr.c | 123 +++++++++++------------------ + 1 file changed, 48 insertions(+), 75 deletions(-) + +diff --git a/libavfilter/dnn/dnn_backend_ivsr.c b/libavfilter/dnn/dnn_backend_ivsr.c +index 6f2f5f0f07..1f7dfff743 100644 +--- a/libavfilter/dnn/dnn_backend_ivsr.c ++++ b/libavfilter/dnn/dnn_backend_ivsr.c +@@ -423,7 +423,6 @@ static void infer_completion_callback(void *args) + av_assert0(request->lltask_count >= 1); + for (int i = 0; i < request->lltask_count; ++i) { + task = request->lltasks[i]->task; +- task->inference_done++; + + if (task->do_ioproc) { + if (ivsr_model->model->frame_post_proc != NULL) { +@@ -490,6 +489,7 @@ static void infer_completion_callback(void *args) + task->out_frame->height = output.height; + } + ++ task->inference_done++; + av_freep(&request->lltasks[i]); + output.data = + (uint8_t *) output.data + +@@ -599,8 +599,8 @@ static int execute_model_ivsr(IVSRRequestItem * request, + goto err; + } + status = +- ivsr_process(ivsr_model->handle, request->in_frames, +- request->out_frames, &request->cb); ++ ivsr_process_async(ivsr_model->handle, request->in_frames, ++ request->out_frames, &request->cb); + if (status != OK) { + av_log(ctx, AV_LOG_ERROR, + "Failed to process the inference on input data seq\n"); +@@ -658,6 +658,22 @@ static int get_output_ivsr(void *model, const char *input_name, + return ret; + } + ++// Utility function to create and link config ++static ivsr_config_t* create_and_link_config(ivsr_config_t *previous, ++ int key, char *value, void *ctx) { ++ ivsr_config_t *config = av_mallocz(sizeof(ivsr_config_t)); ++ if (config == NULL) { ++ av_log(ctx, AV_LOG_ERROR, "Failed to malloc 
config\n"); ++ return NULL; ++ } ++ config->key = key; ++ config->value = value; ++ if (previous != NULL) { ++ previous->next = config; ++ } ++ return config; ++} ++ + DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + DNNFunctionType func_type, + const char *options, +@@ -667,12 +683,12 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + IVSRModel *ivsr_model = NULL; + IVSRContext *ctx = NULL; + IVSRStatus status; +- ivsr_config_t *config = NULL; + ivsr_config_t *config_device = NULL; + ivsr_config_t *config_customlib = NULL; + ivsr_config_t *config_cldnn = NULL; + ivsr_config_t *config_reshape = NULL; + ivsr_config_t *config_input_res = NULL; ++ ivsr_config_t *config_nireq = NULL; + int nif = 0; + int input_dims[5] = { 0, 0, 0, 0, 1 }; + int output_dims[5] = { 0, 0, 0, 0, 1 }; +@@ -714,35 +730,19 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + // the default value is a rough estimation + ctx->options.nireq = av_cpu_count() / 2 + 1; + } +- //TODO: override the 2 values before async mode in iVSR SDK is supported +- //"async == 1/TRUE" is misleading as it's actually not supported by SDK +- ctx->options.nireq = 1; +- ctx->options.async = 1; + + ivsr_model->model_type = ctx->options.model_type; + + // set ivsr config + // input model +- ivsr_model->config = av_mallocz(sizeof(ivsr_config_t)); +- config = ivsr_model->config; +- if (config == NULL) { +- av_log(ctx, AV_LOG_ERROR, "Failed to malloc config\n"); ++ ivsr_model->config = create_and_link_config(NULL, INPUT_MODEL, model_filename, ctx); ++ if (ivsr_model->config == NULL) + goto err; +- } +- config->key = INPUT_MODEL; +- config->value = model_filename; +- config->next = NULL; +- +- // target device +- config_device = av_mallocz(sizeof(ivsr_config_t)); +- if (config_device == NULL) { +- av_log(ctx, AV_LOG_ERROR, "Failed to malloc device config\n"); ++ ++ config_device = create_and_link_config(ivsr_model->config, TARGET_DEVICE, ++ ctx->options.device_type, ctx); ++ if 
(config_device == NULL) + goto err; +- } +- config_device->key = TARGET_DEVICE; +- config_device->value = ctx->options.device_type; +- config_device->next = NULL; +- config->next = config_device; + + AVFilterLink *inlink = filter_ctx->inputs[0]; + int frame_h = inlink->h; +@@ -751,27 +751,19 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + ctx->frame_input_width = inlink->w; + + // input_res setting +- config_input_res = av_mallocz(sizeof(ivsr_config_t)); +- if (config_input_res == NULL) { +- av_log(ctx, AV_LOG_ERROR, "Failed to malloc input_res config\n"); +- goto err; +- } +- + char input_res_string[40] = {0}; +- sprintf(input_res_string, "%d,%d\0", frame_w, frame_h); +- config_input_res->key = INPUT_RES; +- config_input_res->value = input_res_string; +- config_input_res->next = NULL; +- config_device->next = config_input_res; +- +- // reshape setting +- config_reshape = av_mallocz(sizeof(ivsr_config_t)); +- if (config_reshape == NULL) { +- av_log(ctx, AV_LOG_ERROR, "Failed to malloc reshape config\n"); ++ sprintf(input_res_string, "%d,%d", frame_w, frame_h); ++ config_input_res = create_and_link_config(config_device, INPUT_RES, input_res_string, ctx); ++ if (config_input_res == NULL) ++ goto err; ++ ++ char nireq_string[40] = {0}; ++ sprintf(nireq_string, "%d", ctx->options.nireq); ++ config_nireq = create_and_link_config(config_input_res, INFER_REQ_NUMBER, nireq_string, ctx); ++ if (config_nireq == NULL) + goto err; +- } + +- char shape_string[40]; ++ char shape_string[40] = {0}; + switch (ivsr_model->model_type) { + case BASICVSR: + sprintf(shape_string, "1,3,3,%d,%d", frame_h, frame_w); +@@ -795,45 +787,26 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + av_log(ctx, AV_LOG_ERROR, "Not supported model type\n"); + return DNN_GENERIC_ERROR; + } +- config_reshape->key = RESHAPE_SETTINGS; +- // by default it sets the same resolution as input res config. +- // If you want to enable smart-patch, set a smaller shape than the input. 
+- config_reshape->value = shape_string; +- config_reshape->next = NULL; +- config_input_res->next = config_reshape; ++ config_reshape = create_and_link_config(config_nireq, RESHAPE_SETTINGS, shape_string, ctx); ++ if (config_reshape == NULL) ++ goto err; + + if (ctx->options.extension != NULL) { +- // extension +- config_customlib = av_mallocz(sizeof(ivsr_config_t)); +- if (config_customlib == NULL) { +- av_log(ctx, AV_LOG_ERROR, "Failed to malloc customlib config\n"); ++ config_customlib = create_and_link_config(config_reshape, CUSTOM_LIB, ctx->options.extension, ctx); ++ if (config_customlib == NULL) + goto err; +- } +- config_customlib->key = CUSTOM_LIB; +- config_customlib->value = ctx->options.extension; +- config_customlib->next = NULL; +- config_reshape->next = config_customlib; + } + + if (ctx->options.op_xml != NULL) { +- // cldnn +- config_cldnn = av_mallocz(sizeof(ivsr_config_t)); +- if (config_cldnn == NULL) { +- av_log(ctx, AV_LOG_ERROR, "Failed to malloc cldnn config\n"); ++ config_cldnn = create_and_link_config(ctx->options.extension != NULL ? 
++ config_customlib : config_reshape, ++ CLDNN_CONFIG, ctx->options.op_xml, ctx); ++ if (config_cldnn == NULL) + goto err; +- } +- config_cldnn->key = CLDNN_CONFIG; +- config_cldnn->value = ctx->options.op_xml; +- config_cldnn->next = NULL; +- if (config_customlib != NULL) { +- config_customlib->next = config_cldnn; +- } else { +- config_reshape->next = config_cldnn; +- } + } + + // initialize ivsr +- status = ivsr_init(config, &ivsr_model->handle); ++ status = ivsr_init(ivsr_model->config, &ivsr_model->handle); + if (status != OK) { + av_log(ctx, AV_LOG_ERROR, "Failed to initialize ivsr engine\n"); + goto err; +@@ -1052,8 +1025,8 @@ int ff_dnn_flush_ivsr(const DNNModel * model) + } + + status = +- ivsr_process(ivsr_model->handle, request->in_frames, +- request->out_frames, &request->cb); ++ ivsr_process_async(ivsr_model->handle, request->in_frames, ++ request->out_frames, &request->cb); + if (status != OK) { + av_log(ctx, AV_LOG_ERROR, + "Failed to process the inference on input data seq\n"); +-- +2.34.1 + diff --git a/ivsr_ffmpeg_plugin/patches/0022-enable-10bit-for-YUV-and-16bit-for-RGB-support.patch b/ivsr_ffmpeg_plugin/patches/0022-enable-10bit-for-YUV-and-16bit-for-RGB-support.patch new file mode 100644 index 0000000..6764dd2 --- /dev/null +++ b/ivsr_ffmpeg_plugin/patches/0022-enable-10bit-for-YUV-and-16bit-for-RGB-support.patch @@ -0,0 +1,566 @@ +From 45367468d592018fa13fc62cdd4478cbecaebaee Mon Sep 17 00:00:00 2001 +From: Xueshu Wang +Date: Tue, 27 Aug 2024 18:11:35 +0800 +Subject: [PATCH] enable 10bit(for YUV) and 16bit(for RGB) support. 
+ +--- + libavfilter/dnn/dnn_backend_ivsr.c | 36 ++++++--- + libavfilter/dnn/dnn_io_proc.c | 125 ++++++++++++++++++----------- + libavfilter/dnn_interface.h | 2 +- + libavfilter/vf_dnn_processing.c | 14 +++- + libswscale/swscale_unscaled.c | 110 +++++++++++++++++++++++++ + 5 files changed, 229 insertions(+), 58 deletions(-) + +diff --git a/libavfilter/dnn/dnn_backend_ivsr.c b/libavfilter/dnn/dnn_backend_ivsr.c +index 9bee7d1277..550e64915a 100644 +--- a/libavfilter/dnn/dnn_backend_ivsr.c ++++ b/libavfilter/dnn/dnn_backend_ivsr.c +@@ -103,7 +103,7 @@ static const AVOption dnn_ivsr_options[] = { + { "extension", "extension lib file full path, usable for BasicVSR model", OFFSET(options.extension), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS}, + { "op_xml", "custom op xml file full path, usable for BasicVSR model", OFFSET(options.op_xml), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS}, + { "model_type", "dnn model type", OFFSET(options.model_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, MODEL_TYPE_NUM - 1, FLAGS}, +- { "normalize_factor", "normalization factor", OFFSET(options.normalize_factor), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 255.0, FLAGS}, ++ { "normalize_factor", "normalization factor", OFFSET(options.normalize_factor), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 65535.0, FLAGS}, + { NULL } + }; + +@@ -118,13 +118,15 @@ static int get_datatype_size(DNNDataType dt) + return sizeof(float); + case DNN_UINT8: + return sizeof(uint8_t); ++ case DNN_UINT16: ++ return sizeof(uint16_t); + default: + av_assert0(!"not supported yet."); + return 1; + } + } + +-static uint8_t clamp(uint8_t val, uint8_t min, uint8_t max) { ++static int clamp(int val, int min, int max) { + if (val < min) + return min; + else if (val > max) +@@ -418,7 +420,9 @@ static void infer_completion_callback(void *args) + output.scale = 0; + output.mean = 0; + output.layout = DL_NONE; +- ++ const AVPixFmtDescriptor* pix_desc = av_pix_fmt_desc_get(task->out_frame->format); ++ const 
AVComponentDescriptor* comp_desc = &pix_desc->comp[0]; ++ int bits = comp_desc->depth; + av_assert0(request->lltask_count <= dims[0]); + av_assert0(request->lltask_count >= 1); + for (int i = 0; i < request->lltask_count; ++i) { +@@ -450,7 +454,7 @@ static void infer_completion_callback(void *args) + uint8_t min_x = 16, max_x = 235; + for (int index = 0; index < tmp_frame->height * tmp_frame->linesize[0]; ++index) { + uint8_t value = tmp_frame->data[0][index]; +- tmp_frame->data[0][index] = clamp(tmp_frame->data[0][index], min_x, max_x); ++ tmp_frame->data[0][index] = (uint8_t)clamp(tmp_frame->data[0][index], min_x, max_x); + } + } + output.data += +@@ -476,11 +480,25 @@ static void infer_completion_callback(void *args) + filter_ctx); + // clamp output to [16, 235] range for Y plane when color range of output is TV range, + // assume model only process Y plane when output.channels = 1. AVCOL_RANGE_MPEG is mean tv range. +- if (task->out_frame->color_range == AVCOL_RANGE_MPEG && output.channels == 1) { +- uint8_t min_x = 16, max_x = 235; +- for (int index = 0; index < task->out_frame->height * task->out_frame->linesize[0]; ++index) { +- uint8_t value = task->out_frame->data[0][index]; +- task->out_frame->data[0][index] = clamp(task->out_frame->data[0][index], min_x, max_x); ++ if (task->out_frame->color_range == AVCOL_RANGE_MPEG && output.channels == 1) { ++ if (bits == 8) { ++ uint8_t min_x = 16, max_x = 235; ++ for (int index = 0; index < task->out_frame->height * task->out_frame->linesize[0]; ++ ++index) { ++ uint8_t value = task->out_frame->data[0][index]; ++ task->out_frame->data[0][index] = (uint8_t)clamp(task->out_frame->data[0][index], ++ min_x, max_x); ++ } ++ } else if (bits == 10) { ++ uint16_t min_x = 64, max_x = 940; ++ uint16_t* dstPtr = (uint16_t*)task->out_frame->data[0]; ++ ptrdiff_t dstStrideUint16 = task->out_frame->linesize[0] >> 1; ++ for (int y = 0; y < task->out_frame->height; ++y) { ++ for (int x = 0; x < task->out_frame->width; ++x) { ++ 
dstPtr[x] = (uint16_t)clamp(dstPtr[x], min_x, max_x); ++ } ++ dstPtr += dstStrideUint16; ++ } + } + } + } +diff --git a/libavfilter/dnn/dnn_io_proc.c b/libavfilter/dnn/dnn_io_proc.c +index 1465d32c32..f51c0669a9 100644 +--- a/libavfilter/dnn/dnn_io_proc.c ++++ b/libavfilter/dnn/dnn_io_proc.c +@@ -32,6 +32,8 @@ static int get_datatype_size(DNNDataType dt) + return sizeof(float); + case DNN_UINT8: + return sizeof(uint8_t); ++ case DNN_UINT16: ++ return sizeof(uint16_t); + default: + av_assert0(!"not supported yet."); + return 1; +@@ -46,10 +48,15 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + void **dst_data = NULL; + void *middle_data = NULL; + uint8_t *planar_data[4] = { 0 }; +- int plane_size = frame->width * frame->height * sizeof(uint8_t); ++ int plane_size = 0; + enum AVPixelFormat src_fmt = AV_PIX_FMT_NONE; ++ enum AVPixelFormat dst_fmt = AV_PIX_FMT_NONE; ++ enum AVPixelFormat mdl_fmt = AV_PIX_FMT_NONE; + int src_datatype_size = get_datatype_size(output->dt); +- ++ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(frame->format); ++ const AVComponentDescriptor *comp_desc = &pix_desc->comp[0]; ++ int bits = comp_desc->depth; ++ const char *pix_fmt_name = av_get_pix_fmt_name(frame->format); + int bytewidth = av_image_get_linesize(frame->format, frame->width, 0); + if (bytewidth < 0) { + return AVERROR(EINVAL); +@@ -69,6 +76,7 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + + dst_data = (void **)frame->data; + linesize[0] = frame->linesize[0]; ++ plane_size = linesize[0] * frame->height; + if (output->layout == DL_NCHW) { + middle_data = av_malloc(plane_size * output->channels); + if (!middle_data) { +@@ -80,20 +88,23 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + } + + switch (frame->format) { ++ case AV_PIX_FMT_RGB48LE: ++ case AV_PIX_FMT_BGR48LE: + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_BGR24: ++ dst_fmt = comp_desc->depth == 8 ? 
AV_PIX_FMT_GRAY8 : AV_PIX_FMT_GRAY16; + sws_ctx = sws_getContext(frame->width * 3, + frame->height, + src_fmt, + frame->width * 3, + frame->height, +- AV_PIX_FMT_GRAY8, ++ dst_fmt, + 0, NULL, NULL, NULL); + if (!sws_ctx) { + av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion " + "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", + av_get_pix_fmt_name(src_fmt), frame->width * 3, frame->height, +- av_get_pix_fmt_name(AV_PIX_FMT_GRAY8), frame->width * 3, frame->height); ++ av_get_pix_fmt_name(dst_fmt), frame->width * 3, frame->height); + ret = AVERROR(EINVAL); + goto err; + } +@@ -103,9 +114,10 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + sws_freeContext(sws_ctx); + // convert data from planar to packed + if (output->layout == DL_NCHW) { ++ mdl_fmt = comp_desc->depth == 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16LE; + sws_ctx = sws_getContext(frame->width, + frame->height, +- AV_PIX_FMT_GBRP, ++ mdl_fmt, + frame->width, + frame->height, + frame->format, +@@ -113,24 +125,27 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + if (!sws_ctx) { + av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion " + "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", +- av_get_pix_fmt_name(AV_PIX_FMT_GBRP), frame->width, frame->height, +- av_get_pix_fmt_name(frame->format),frame->width, frame->height); ++ av_get_pix_fmt_name(mdl_fmt), frame->width, frame->height, ++ av_get_pix_fmt_name(frame->format), frame->width, frame->height); + ret = AVERROR(EINVAL); + goto err; + } +- if (frame->format == AV_PIX_FMT_RGB24) { +- planar_data[0] = (uint8_t *)middle_data + plane_size; +- planar_data[1] = (uint8_t *)middle_data + plane_size * 2; +- planar_data[2] = (uint8_t *)middle_data; +- } else if (frame->format == AV_PIX_FMT_BGR24) { +- planar_data[0] = (uint8_t *)middle_data + plane_size; +- planar_data[1] = (uint8_t *)middle_data; +- planar_data[2] = (uint8_t *)middle_data + plane_size * 2; ++ 
if (strstr(pix_fmt_name, "rgb") != NULL) { ++ planar_data[0] = (uint8_t*)middle_data + plane_size; ++ planar_data[1] = (uint8_t*)middle_data + plane_size * 2; ++ planar_data[2] = (uint8_t*)middle_data; ++ } else if (strstr(pix_fmt_name, "bgr") != NULL) { ++ planar_data[0] = (uint8_t*)middle_data + plane_size; ++ planar_data[1] = (uint8_t*)middle_data; ++ planar_data[2] = (uint8_t*)middle_data + plane_size * 2; ++ } else { ++ av_log(log_ctx, AV_LOG_ERROR, "dnn_process output data doesn't support this format: %s\n", pix_fmt_name); ++ return AVERROR(ENOSYS); + } +- sws_scale(sws_ctx, (const uint8_t * const *)planar_data, +- (const int [4]){frame->width * sizeof(uint8_t), +- frame->width * sizeof(uint8_t), +- frame->width * sizeof(uint8_t), 0}, ++ ++ int middle_data_linesize[4] = {0}; ++ ret = av_image_fill_linesizes(middle_data_linesize, mdl_fmt, frame->width); ++ sws_scale(sws_ctx, (const uint8_t * const *)planar_data, middle_data_linesize, + 0, frame->height, frame->data, frame->linesize); + sws_freeContext(sws_ctx); + } +@@ -147,18 +162,21 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + case AV_PIX_FMT_YUV411P: + case AV_PIX_FMT_GRAY8: + case AV_PIX_FMT_NV12: ++ case AV_PIX_FMT_YUV420P10LE: ++ av_assert0(comp_desc->depth == 8 || comp_desc->depth == 10); ++ dst_fmt = comp_desc->depth == 8 ? 
AV_PIX_FMT_GRAY8 : AV_PIX_FMT_GRAY10; + sws_ctx = sws_getContext(frame->width, + frame->height, +- AV_PIX_FMT_GRAYF32, ++ src_fmt, + frame->width, + frame->height, +- AV_PIX_FMT_GRAY8, ++ dst_fmt, + 0, NULL, NULL, NULL); + if (!sws_ctx) { + av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion " + "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", + av_get_pix_fmt_name(src_fmt), frame->width, frame->height, +- av_get_pix_fmt_name(AV_PIX_FMT_GRAY8), frame->width, frame->height); ++ av_get_pix_fmt_name(dst_fmt), frame->width, frame->height); + ret = AVERROR(EINVAL); + goto err; + } +@@ -186,9 +204,15 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + void **src_data = NULL; + void *middle_data = NULL; + uint8_t *planar_data[4] = { 0 }; +- int plane_size = frame->width * frame->height * sizeof(uint8_t); ++ int plane_size = 0; + enum AVPixelFormat dst_fmt = AV_PIX_FMT_NONE; ++ enum AVPixelFormat src_fmt = AV_PIX_FMT_NONE; ++ enum AVPixelFormat mdl_fmt = AV_PIX_FMT_NONE; + int dst_datatype_size = get_datatype_size(input->dt); ++ const AVPixFmtDescriptor* pix_desc = av_pix_fmt_desc_get(frame->format); ++ const AVComponentDescriptor* comp_desc = &pix_desc->comp[0]; ++ int bits = comp_desc->depth; ++ const char *pix_fmt_name = av_get_pix_fmt_name(frame->format); + int bytewidth = av_image_get_linesize(frame->format, frame->width, 0); + if (bytewidth < 0) { + return AVERROR(EINVAL); +@@ -208,55 +232,61 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + + src_data = (void **)frame->data; + linesize[0] = frame->linesize[0]; +- if (input->layout == DL_NCHW) { +- middle_data = av_malloc(plane_size * input->channels); +- if (!middle_data) { +- ret = AVERROR(ENOMEM); +- goto err; +- } +- src_data = &middle_data; +- linesize[0] = frame->width * 3; +- } ++ plane_size = linesize[0] * frame->height; + + switch (frame->format) { ++ case AV_PIX_FMT_RGB48LE: ++ case AV_PIX_FMT_BGR48LE: + case 
AV_PIX_FMT_RGB24: + case AV_PIX_FMT_BGR24: +- // convert data from planar to packed + if (input->layout == DL_NCHW) { ++ av_assert0(comp_desc->depth == 8 || comp_desc->depth == 16); ++ mdl_fmt = comp_desc->depth == 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16LE; ++ middle_data = av_malloc(plane_size * input->channels); ++ if (!middle_data) { ++ ret = AVERROR(ENOMEM); ++ goto err; ++ } ++ src_data = &middle_data; + sws_ctx = sws_getContext(frame->width, + frame->height, + frame->format, + frame->width, + frame->height, +- AV_PIX_FMT_GBRP, ++ mdl_fmt, + 0, NULL, NULL, NULL); + if (!sws_ctx) { + av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion " + "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", + av_get_pix_fmt_name(frame->format), frame->width, frame->height, +- av_get_pix_fmt_name(AV_PIX_FMT_GBRP),frame->width, frame->height); ++ av_get_pix_fmt_name(mdl_fmt),frame->width, frame->height); + ret = AVERROR(EINVAL); + goto err; + } +- if (frame->format == AV_PIX_FMT_RGB24) { ++ if (strstr(pix_fmt_name, "rgb") != NULL) { + planar_data[0] = (uint8_t *)middle_data + plane_size; + planar_data[1] = (uint8_t *)middle_data + plane_size * 2; + planar_data[2] = (uint8_t *)middle_data; +- } else if (frame->format == AV_PIX_FMT_BGR24) { ++ } else if (strstr(pix_fmt_name, "bgr") != NULL) { + planar_data[0] = (uint8_t *)middle_data + plane_size; + planar_data[1] = (uint8_t *)middle_data; + planar_data[2] = (uint8_t *)middle_data + plane_size * 2; ++ } else { ++ av_log(log_ctx, AV_LOG_ERROR, "dnn_process input data doesn't support this format: %s\n", pix_fmt_name); ++ return AVERROR(ENOSYS); + } ++ ++ int middle_data_linesize[4] = {0}; ++ ret = av_image_fill_linesizes(middle_data_linesize, mdl_fmt, frame->width); + sws_scale(sws_ctx, (const uint8_t * const *)frame->data, + frame->linesize, 0, frame->height, planar_data, +- (const int [4]){frame->width * sizeof(uint8_t), +- frame->width * sizeof(uint8_t), +- frame->width * sizeof(uint8_t), 0}); ++ 
middle_data_linesize); + sws_freeContext(sws_ctx); + } ++ src_fmt = comp_desc->depth == 8 ? AV_PIX_FMT_GRAY8 : AV_PIX_FMT_GRAY16; + sws_ctx = sws_getContext(frame->width * 3, + frame->height, +- AV_PIX_FMT_GRAY8, ++ src_fmt, + frame->width * 3, + frame->height, + dst_fmt, +@@ -264,8 +294,8 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + if (!sws_ctx) { + av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion " + "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", +- av_get_pix_fmt_name(AV_PIX_FMT_GRAY8), frame->width * 3, frame->height, +- av_get_pix_fmt_name(dst_fmt),frame->width * 3, frame->height); ++ av_get_pix_fmt_name(src_fmt), frame->width * 3, frame->height, ++ av_get_pix_fmt_name(dst_fmt), frame->width * 3, frame->height); + ret = AVERROR(EINVAL); + goto err; + } +@@ -287,9 +317,12 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + case AV_PIX_FMT_YUV411P: + case AV_PIX_FMT_GRAY8: + case AV_PIX_FMT_NV12: ++ case AV_PIX_FMT_YUV420P10LE: ++ av_assert0(comp_desc->depth == 8 || comp_desc->depth == 10); ++ src_fmt = comp_desc->depth == 8 ? 
AV_PIX_FMT_GRAY8 : AV_PIX_FMT_GRAY10; + sws_ctx = sws_getContext(frame->width, + frame->height, +- AV_PIX_FMT_GRAY8, ++ src_fmt, + frame->width, + frame->height, + dst_fmt, +@@ -297,8 +330,8 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + if (!sws_ctx) { + av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion " + "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", +- av_get_pix_fmt_name(AV_PIX_FMT_GRAY8), frame->width, frame->height, +- av_get_pix_fmt_name(dst_fmt),frame->width, frame->height); ++ av_get_pix_fmt_name(src_fmt), frame->width, frame->height, ++ av_get_pix_fmt_name(dst_fmt), frame->width, frame->height); + ret = AVERROR(EINVAL); + goto err; + } +diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h +index b030995a9b..6d077b94d7 100644 +--- a/libavfilter/dnn_interface.h ++++ b/libavfilter/dnn_interface.h +@@ -35,7 +35,7 @@ + + typedef enum {DNN_TF = 1, DNN_OV, DNN_IVSR} DNNBackendType; + +-typedef enum {DNN_FLOAT = 1, DNN_UINT8 = 4} DNNDataType; ++typedef enum {DNN_FLOAT = 1, DNN_UINT8 = 4 ,DNN_UINT16 = 8} DNNDataType; + + typedef enum { + DCO_NONE, +diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c +index 2b7656f21a..066b00a898 100644 +--- a/libavfilter/vf_dnn_processing.c ++++ b/libavfilter/vf_dnn_processing.c +@@ -87,6 +87,9 @@ static const enum AVPixelFormat pix_fmts[] = { + #else + AV_PIX_FMT_BGR24, + AV_PIX_FMT_YUV420P, ++ AV_PIX_FMT_BGR48LE, ++ AV_PIX_FMT_RGB48LE, ++ AV_PIX_FMT_YUV420P10LE, + AV_PIX_FMT_NONE + #endif + }; +@@ -122,6 +125,8 @@ static int check_modelinput_inlink(const DNNData *model_input, const AVFilterLin + switch (fmt) { + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_BGR24: ++ case AV_PIX_FMT_RGB48LE: ++ case AV_PIX_FMT_BGR48LE: + if (model_input->channels != 3) { + LOG_FORMAT_CHANNEL_MISMATCH(); + return AVERROR(EIO); +@@ -135,6 +140,7 @@ static int check_modelinput_inlink(const DNNData *model_input, const AVFilterLin + case 
AV_PIX_FMT_YUV410P: + case AV_PIX_FMT_YUV411P: + case AV_PIX_FMT_NV12: ++ case AV_PIX_FMT_YUV420P10LE: + if (model_input->channels != 1) { + LOG_FORMAT_CHANNEL_MISMATCH(); + return AVERROR(EIO); +@@ -197,13 +203,17 @@ static int prepare_uv_scale(AVFilterLink *outlink) + SWS_BICUBIC, NULL, NULL, NULL); + ctx->sws_uv_height = inlink->h >> 1; + } else { ++ av_assert0(fmt == AV_PIX_FMT_YUV420P10LE || fmt == AV_PIX_FMT_YUV420P); + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt); ++ const AVComponentDescriptor comp = desc->comp[0]; + int sws_src_h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h); + int sws_src_w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w); + int sws_dst_h = AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h); + int sws_dst_w = AV_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w); +- ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8, +- sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8, ++ ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, ++ comp.depth == 10 ? AV_PIX_FMT_GRAY10 : AV_PIX_FMT_GRAY8, ++ sws_dst_w, sws_dst_h, ++ comp.depth == 10 ? 
AV_PIX_FMT_GRAY10 : AV_PIX_FMT_GRAY8, + SWS_BICUBIC, NULL, NULL, NULL); + ctx->sws_uv_height = sws_src_h; + } +diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c +index a5c9917799..c87eb560d3 100644 +--- a/libswscale/swscale_unscaled.c ++++ b/libswscale/swscale_unscaled.c +@@ -1710,6 +1710,98 @@ static int float_y_to_uint_y_wrapper(SwsContext *c, const uint8_t* src[], + return srcSliceH; + } + ++static int uint16_y_to_float_y_wrapper(SwsContext *c, const uint8_t *src[], ++ int srcStride[], int srcSliceY, ++ int srcSliceH, uint8_t *dst[], int dstStride[]) ++{ ++ int y, x; ++ ptrdiff_t srcStrideUint16 = srcStride[0] >> 1; ++ ptrdiff_t dstStrideFloat = dstStride[0] >> 2; ++ const uint16_t *srcPtr = (const uint16_t *)(src[0] + srcStride[0] * srcSliceY); ++ float *dstPtr = (float *)(dst[0] + dstStride[0] * srcSliceY); ++ const float float_norm_factor = 1.0f / 65535.0f; ++ ++ for (y = 0; y < srcSliceH; ++y) { ++ for (x = 0; x < c->srcW; ++x) { ++ dstPtr[x] = (float)srcPtr[x] * float_norm_factor; ++ } ++ srcPtr += srcStrideUint16; ++ dstPtr += dstStrideFloat; ++ } ++ ++ return srcSliceH; ++} ++ ++static int float_y_to_uint16_y_wrapper(SwsContext *c, const uint8_t* src[], ++ int srcStride[], int srcSliceY, ++ int srcSliceH, uint8_t* dst[], int dstStride[]) ++{ ++ int y, x; ++ ptrdiff_t srcStrideFloat = srcStride[0] >> 2; ++ ptrdiff_t dstStrideUint16 = dstStride[0] >> 1; ++ const float *srcPtr = (const float *)(src[0] + srcStride[0] * srcSliceY); ++ uint16_t *dstPtr = (uint16_t*)(dst[0] + dstStride[0] * srcSliceY); ++ ++ for (y = 0; y < srcSliceH; ++y) { ++ for (x = 0; x < c->srcW; ++x) { ++ dstPtr[x] = av_clip_uint16(lrintf(65535.0f * srcPtr[x])); ++ } ++ srcPtr += srcStrideFloat; ++ dstPtr += dstStrideUint16; ++ } ++ ++ return srcSliceH; ++} ++ ++static int uint10_y_to_float_y_wrapper(SwsContext *c, const uint8_t *src[], ++ int srcStride[], int srcSliceY, ++ int srcSliceH, uint8_t *dst[], int dstStride[]) ++{ ++ int y, x; ++ ptrdiff_t 
srcStrideUint16 = srcStride[0] >> 1; ++ ptrdiff_t dstStrideFloat = dstStride[0] >> 2; ++ const uint16_t *srcPtr = (const uint16_t *)(src[0] + srcStride[0] * srcSliceY); ++ float *dstPtr = (float *)(dst[0] + dstStride[0] * srcSliceY); ++ const float float_norm_factor = 1.0f / 1023.0f; ++ for (y = 0; y < srcSliceH; ++y) { ++ for (x = 0; x < c->srcW; ++x) { ++ dstPtr[x] = (float)srcPtr[x] * float_norm_factor; ++ } ++ ++ srcPtr += srcStrideUint16; ++ dstPtr += dstStrideFloat; ++ } ++ ++ return srcSliceH; ++} ++ ++static int float_y_to_uint10_y_wrapper(SwsContext *c, const uint8_t* src[], ++ int srcStride[], int srcSliceY, ++ int srcSliceH, uint8_t* dst[], int dstStride[]) ++{ ++ int y, x; ++ ptrdiff_t srcStrideFloat = srcStride[0] >> 2; ++ ptrdiff_t dstStrideUint16 = dstStride[0] >> 1; ++ const float *srcPtr = (const float *)(src[0] + srcStride[0] * srcSliceY); ++ uint16_t *dstPtr = (uint16_t*)(dst[0] + dstStride[0] * srcSliceY); ++ ++ for (y = 0; y < srcSliceH; ++y) { ++ for (x = 0; x < c->srcW; ++x) { ++ int value = lrintf(1023.0f * srcPtr[x]); ++ if (value < 0) { ++ value = 0; ++ } else if (value > 1023) { ++ value = 1023; ++ } ++ dstPtr[x] = (uint16_t)value; ++ } ++ srcPtr += srcStrideFloat; ++ dstPtr += dstStrideUint16; ++ } ++ ++ return srcSliceH; ++} ++ + /* unscaled copy like stuff (assumes nearly identical formats) */ + static int packedCopyWrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, +@@ -2186,6 +2278,24 @@ void ff_get_unscaled_swscale(SwsContext *c) + c->convert_unscaled = float_y_to_uint_y_wrapper; + } + ++ /* 16bit Y to float Y */ ++ if (srcFormat == AV_PIX_FMT_GRAY16 && dstFormat == AV_PIX_FMT_GRAYF32){ ++ c->convert_unscaled = uint16_y_to_float_y_wrapper; ++ } ++ ++ /* float Y to 16bit Y */ ++ if (srcFormat == AV_PIX_FMT_GRAYF32 && dstFormat == AV_PIX_FMT_GRAY16){ ++ c->convert_unscaled = float_y_to_uint16_y_wrapper; ++ } ++ /* 10bit Y to float Y */ ++ if (srcFormat == AV_PIX_FMT_GRAY10 && dstFormat == 
AV_PIX_FMT_GRAYF32){ ++ c->convert_unscaled = uint10_y_to_float_y_wrapper; ++ } ++ ++ /* float Y to 10bit Y */ ++ if (srcFormat == AV_PIX_FMT_GRAYF32 && dstFormat == AV_PIX_FMT_GRAY10){ ++ c->convert_unscaled = float_y_to_uint10_y_wrapper; ++ } + /* LQ converters if -sws 0 or -sws 4*/ + if (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)) { + /* yv12_to_yuy2 */ +-- +2.34.1 + diff --git a/ivsr_ffmpeg_plugin/patches/0023-enable-PrePostProcessing-of-OpenVINO-in-dnn_backend_.patch b/ivsr_ffmpeg_plugin/patches/0023-enable-PrePostProcessing-of-OpenVINO-in-dnn_backend_.patch new file mode 100644 index 0000000..3d9619b --- /dev/null +++ b/ivsr_ffmpeg_plugin/patches/0023-enable-PrePostProcessing-of-OpenVINO-in-dnn_backend_.patch @@ -0,0 +1,889 @@ +From 001b77050d3c035b24fd2c853dab839e9d8e778e Mon Sep 17 00:00:00 2001 +From: Xiaoxia Liang +Date: Mon, 9 Sep 2024 14:17:02 +0000 +Subject: [PATCH] enable PrePostProcessing of OpenVINO in dnn_backend_ivsr + +Signed-off-by: Xiaoxia Liang +--- + libavfilter/dnn/dnn_backend_ivsr.c | 493 ++++++++++++++++++++--------- + libavfilter/dnn/dnn_io_proc.c | 70 ++++ + libavfilter/vf_dnn_processing.c | 3 +- + 3 files changed, 408 insertions(+), 158 deletions(-) + +diff --git a/libavfilter/dnn/dnn_backend_ivsr.c b/libavfilter/dnn/dnn_backend_ivsr.c +index 997c1b803d..44d603190c 100644 +--- a/libavfilter/dnn/dnn_backend_ivsr.c ++++ b/libavfilter/dnn/dnn_backend_ivsr.c +@@ -126,6 +126,20 @@ static int get_datatype_size(DNNDataType dt) + } + } + ++static DNNColorOrder map_dnn_color_order(int format) { ++ switch (format) ++ { ++ case AV_PIX_FMT_RGB24: ++ case AV_PIX_FMT_RGB48: ++ return DCO_RGB; ++ case AV_PIX_FMT_BGR24: ++ case AV_PIX_FMT_BGR48: ++ return DCO_BGR; ++ default: ++ return DCO_NONE; ++ } ++} ++ + static int clamp(int val, int min, int max) { + if (val < min) + return min; +@@ -135,44 +149,54 @@ static int clamp(int val, int min, int max) { + return val; + } + +-static void convert_nchw_to_nhwc(void* data, int N, int C, int H, int W) { ++static 
void convert_nchw_to_nhwc(void* data, int N, int C, int H, int W, DNNDataType type) { + int data_size = N * C * H * W; +- void *temp = av_malloc(data_size * sizeof(float)); ++ int type_size = get_datatype_size(type); ++ data_size = data_size * type_size; ++ uint8_t *temp = av_malloc(data_size); + int max_threads = omp_get_num_procs() / 2; + // memory copy + #pragma omp parallel for num_threads(max_threads) + for (int i = 0; i < data_size; i++) +- ((float *)temp)[i] = ((float *)data)[i]; ++ temp[i] = ((uint8_t*)data)[i]; + +- // convert buffer from nchw to nhwc and reverse rgb to bgr ++ // convert buffer from nchw to nhwc + #pragma omp parallel num_threads(max_threads) + { + for (int n = 0; n < N; n++) + for (int h = omp_get_thread_num(); h < H; h += omp_get_num_threads()) +- for (int w = 0; w < W; w++) +- for (int c = 0; c < C; c++) +- ((float *)data)[n * H * W * C + h * W * C + w * C + c] = ((float *)temp)[n * C * H * W + (C - 1 - c) * H * W + h * W + w]; ++ for (int w = 0; w < W; w++) ++ for (int c = 0; c < C; c++) { ++ for (int byte = 0; byte < type_size; ++byte) ++ ((uint8_t*)data)[(n * H * W * C + h * W * C + w * C + c) * type_size + byte] = ++ temp[(n * C * H * W + c * H * W + h * W + w) * type_size + byte]; ++ } + } + av_free(temp); + } + +-static void convert_nhwc_to_nchw(void* data, int N, int C, int H, int W) { ++static void convert_nhwc_to_nchw(void* data, int N, int C, int H, int W, DNNDataType type) { + int data_size = N * C * H * W; +- void *temp = av_malloc(data_size * sizeof(float)); ++ int type_size = get_datatype_size(type); ++ data_size = data_size * type_size; ++ uint8_t *temp = av_malloc(data_size); + int max_threads = omp_get_num_procs() / 2; + // memory copy + #pragma omp parallel for num_threads(max_threads) + for (int i = 0; i < data_size; i++) +- ((float *)temp)[i] = ((float *)data)[i]; ++ temp[i] = ((uint8_t*)data)[i]; + +- // convert buffer from nhwc to nchw and reverse bgr to rgb ++ // convert buffer from nhwc to nchw + #pragma omp 
parallel num_threads(max_threads) + { + for (int n = 0; n < N; n++) +- for (int h = omp_get_thread_num(); h < H; h += omp_get_num_threads()) +- for (int w = 0; w < W; w++) +- for (int c = 0; c < C; c++) +- ((float *)data)[n * C * H * W + c * H * W + h * W + w] = ((float *)temp)[n * H * W * C + h * W * C + w * C + C - 1 - c]; ++ for (int h = omp_get_thread_num(); h < H; h += omp_get_num_threads()) ++ for (int w = 0; w < W; w++) ++ for (int c = 0; c < C; c++) { ++ for (int byte = 0; byte < type_size; ++byte) ++ ((uint8_t*)data)[(n * C * H * W + c * H * W + h * W + w) * type_size + byte] = ++ temp[(n * H * W * C + h * W * C + w * C + c) * type_size + byte]; ++ } + } + av_free(temp); + } +@@ -191,6 +215,75 @@ static void set_padding_value(void* data, uint32_t width, uint32_t height, uint3 + memset(data + index, padding_value, padding_height * n_width); + } + ++static size_t get_tensor_size(const tensor_desc_t* tensor) { ++ size_t tensor_size = 0; ++ size_t data_type_size = 0; ++ if (NULL == tensor || tensor->dimension <= 0) ++ return 0; ++ ++ if (strcmp(tensor->precision, "u8") == 0) { ++ data_type_size = sizeof(uint8_t); ++ } else if (strcmp(tensor->precision, "u16") == 0) { ++ data_type_size = sizeof(uint16_t); ++ } else if (strcmp(tensor->precision, "f32") == 0) { ++ data_type_size = sizeof(float); ++ } else { ++ av_assert0(!"not supported the precision yet."); ++ return 1; ++ } ++ ++ tensor_size = data_type_size; ++ for (int i = 0; i < tensor->dimension; ++i) { ++ tensor_size *= tensor->shape[i]; ++ } ++ return tensor_size; ++} ++/* ++ * set layout, precision, width, height and channels info accorring to tensor info ++*/ ++static void set_dnndata_info(DNNData *dnn_data, const tensor_desc_t* tensor) { ++ if (NULL == dnn_data || NULL == tensor) ++ return; ++ ++ // set layout and width, height and channels ++ if (strcmp(tensor->layout, "NHWC") == 0 || strcmp(tensor->layout, "[N,H,W,C]") == 0) { ++ dnn_data->layout = DL_NHWC; ++ dnn_data->channels = tensor->shape[3]; 
++ dnn_data->height = tensor->shape[1]; ++ dnn_data->width = tensor->shape[2]; ++ } else if (strcmp(tensor->layout, "NCHW") == 0 || strcmp(tensor->layout, "[N,C,H,W]") == 0) { ++ dnn_data->layout = DL_NCHW; ++ dnn_data->channels = tensor->shape[1]; ++ dnn_data->height = tensor->shape[2]; ++ dnn_data->width = tensor->shape[3]; ++ } else if (strcmp(tensor->layout, "NFHWC") == 0 || strcmp(tensor->layout, "[N,F,H,W,C]") == 0) { ++ dnn_data->layout = DL_NHWC; ++ dnn_data->channels = tensor->shape[4]; ++ dnn_data->height = tensor->shape[2]; ++ dnn_data->width = tensor->shape[3]; ++ } else if (strcmp(tensor->layout, "NFCHW") == 0 || strcmp(tensor->layout, "[N,F,C,H,W]") == 0) { ++ dnn_data->layout = DL_NCHW; ++ dnn_data->channels = tensor->shape[2]; ++ dnn_data->height = tensor->shape[3]; ++ dnn_data->width = tensor->shape[4]; ++ } else { ++ av_assert0(!"DNNData not supported the layout yet."); ++ return; ++ } ++ ++ // set precision ++ if (strcmp(tensor->precision, "f32") == 0 || strcmp(tensor->precision, "fp32") == 0) { ++ dnn_data->dt = DNN_FLOAT; ++ } else if (strcmp(tensor->precision, "u8") == 0) { ++ dnn_data->dt = DNN_UINT8; ++ } else if (strcmp(tensor->precision, "u16") == 0){ ++ dnn_data->dt = DNN_UINT16; ++ } else { ++ av_assert0(!"DNNData not supported the precision yet."); ++ return; ++ } ++} ++ + /* returns + * DNN_GENERIC_ERROR, + * DNN_MORE_FRAMES - waiting for more input frames, +@@ -207,62 +300,49 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + TaskItem *task; + AVFrame *tmp_frame = NULL; + void *in_data = NULL; +- int dims[5] = { 0, 0, 0, 0, 0 }; + float normalize_factor = ctx->options.normalize_factor; + int padding_height = 0, padding_width = 0; +- +- status = ivsr_get_attr(ivsr_model->handle, INPUT_TENSOR_DESC, dims); ++ tensor_desc_t input_tensor_desc_get = { ++ .precision = {0}, ++ .layout = {0}, ++ .tensor_color_format = {0}, ++ .model_color_format = {0}, ++ .scale = 0.0, ++ .dimension = 0, ++ .shape = {0}}; ++ ++ status = 
ivsr_get_attr(ivsr_model->handle, INPUT_TENSOR_DESC, &input_tensor_desc_get); + if (status != OK) { + av_log(ctx, AV_LOG_ERROR, "Failed to get input dimensions\n"); + return DNN_GENERIC_ERROR; + } + +- switch (ivsr_model->model_type) { +- case BASICVSR: +- input.channels = dims[1]; +- input.height = dims[3]; +- input.width = dims[4]; +- input.dt = DNN_FLOAT; +- break; +- case VIDEOPROC: +- case EDSR: +- case CUSTVSR: +- input.channels = dims[2]; +- input.height = dims[3]; +- input.width = dims[4]; +- input.dt = DNN_FLOAT; +- break; +- case TSENET: +- //INFO:for TSENet, dims[2]==nif * channels, and nif==3 +- input.channels = dims[2] / 3; +- input.height = dims[3]; +- input.width = dims[4]; +- input.dt = DNN_FLOAT; +- break; +- default: +- av_log(ctx, AV_LOG_ERROR, "Not supported model type\n"); +- return DNN_GENERIC_ERROR; +- } ++ set_dnndata_info(&input, &input_tensor_desc_get); ++ if (ivsr_model->model_type == TSENET) ++ input.channels = input.channels / 3; + + input.data = request->in_frames; +- input.order = DCO_BGR; + in_data = input.data; +- input.scale = 0; ++ // ff_proc_from_frame_to_dnn: uint_8->uint8 requires scale == 1 and mean == 0 and dt == UINT8 ++ input.scale = 1; + input.mean = 0; +- input.layout = DL_NONE; ++ + ctx->model_input_height = input.height; + ctx->model_input_width = input.width; + + padding_height = ctx->model_input_height - ctx->frame_input_height; + padding_width = ctx->model_input_width - ctx->frame_input_width; + for (int i = 0; i < ctx->options.batch_size; ++i) { +- //INFO: for TSENET, lltask_queue contains (N-1)th and (N)th frames +- //so peek (N)th frame. ++ // INFO: for TSENET, lltask_queue contains (N-1)th and (N)th frames ++ // so peek (N)th frame. 
+ lltask = ff_queue_peek_back(ivsr_model->lltask_queue); + if (!lltask) { + break; + } + task = lltask->task; ++ // the color order of input DNNData is same as format of in frame ++ input.order = map_dnn_color_order(task->in_frame->format); ++ + if (task->do_ioproc) { + if (ivsr_model->model->frame_pre_proc != NULL) { + ivsr_model->model->frame_pre_proc(task->in_frame, &input, +@@ -281,27 +361,28 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + } + input.data = in_data; + } +- if (ivsr_model->model_type == BASICVSR && dims[2] != 1) { ++ if (ivsr_model->model_type == BASICVSR && ivsr_model->nif != 1) { + int read_frame_num = 0; +- for (int j = 0; j < dims[2]; j++) { ++ for (int j = 0; j < ivsr_model->nif; j++) { + if (av_fifo_can_read(task->in_queue)) { + av_fifo_read(task->in_queue, &tmp_frame, 1); + ff_proc_from_frame_to_dnn(tmp_frame, &input, + ivsr_model->model-> + filter_ctx); + // convert buffer from NHWC to NCHW when C != 1 +- convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width); ++ if (input.channels != 1 && input.layout == DL_NONE) ++ convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width, input.dt); + input.data += + input.height * input.width * +- input.channels * sizeof(float); ++ input.channels * get_datatype_size(input.dt); + read_frame_num++; + } + } + input.data = in_data; +- if (read_frame_num < dims[2]) ++ if (read_frame_num < ivsr_model->nif) + av_log(ctx, AV_LOG_ERROR, +- "Read frame number is %d less than the model requirement!!!\n", +- read_frame_num); ++ "Read frame number is %d less than the model requirement %d!!!\n", ++ read_frame_num, ivsr_model->nif); + } else if (ivsr_model->model_type == TSENET) { + //1. 
copy the input_frame(ref the buffer) and put into ivsr_model->fame_queue + tmp_frame = av_frame_alloc(); +@@ -329,8 +410,8 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + for (int idx = 0; idx < ivsr_model->nif; idx++) { + //INFO: the 3 frames in frame_queue are: (N-2)th, (N-1)th, (N)th + ff_proc_from_frame_to_dnn(input_frames[idx], &input, ivsr_model->model->filter_ctx); +- convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width); +- input.data += input.height * input.width * input.channels * sizeof(float); ++ convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width, input.dt); ++ input.data += input.height * input.width * input.channels * get_datatype_size(input.dt); + } + input.data = in_data; + //pop the (N-2)th frame from frame_queue and free it +@@ -349,10 +430,12 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + ff_proc_from_frame_to_dnn(task->in_frame, &input, + ivsr_model->model-> + filter_ctx); +- if (input.channels != 1) { +- convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width); ++ if (input.channels != 1 && (input.layout == DL_NONE)) { ++ convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width, input.dt); + } +- if (normalize_factor != 1) { ++ ++ if (normalize_factor != 1 && input.dt == DNN_FLOAT ++ && (input.scale > 1 || input.scale == 0)) { + // do not need to covert buffer from NHWC to NCHW if the channels is 1, only need to mulitple normalize_factor + #pragma omp parallel for + for (int pos = 0; pos < input.height * input.width * input.channels; pos++) { +@@ -387,43 +470,47 @@ static void infer_completion_callback(void *args) + IVSRContext *ctx = &ivsr_model->ctx; + AVFrame *tmp_frame = NULL; + int offset = 0; +- int dims[5] = { 0, 0, 0, 0, 0 }; + float normalize_factor = ctx->options.normalize_factor; +- +- status = ivsr_get_attr(ivsr_model->handle, OUTPUT_TENSOR_DESC, dims); ++ tensor_desc_t output_tensor_desc_get = { ++ .precision 
= {0}, ++ .layout = {0}, ++ .tensor_color_format = {0}, ++ .model_color_format = {0}, ++ .scale = 0.0, ++ .dimension = 0, ++ .shape = {0}}; ++ ++ // ivsr_get_attr can only get precision, layout, dimension and shape info ++ status = ivsr_get_attr(ivsr_model->handle, OUTPUT_TENSOR_DESC, &output_tensor_desc_get); + if (status != OK) { + av_log(ctx, AV_LOG_ERROR, "Failed to get output dimensions\n"); + return; + } + +- switch (ivsr_model->model_type) { ++ set_dnndata_info(&output, &output_tensor_desc_get); ++ ++ output.data = request->out_frames; ++ output.mean = 0; ++ // ff_proc_from_dnn_to_frame: float->uint8 require (scale == 255 or scale == 0) and mean == 0 ++ output.scale = output.dt == DNN_UINT8 ? 1 : 0; ++ // set order based on model type ++ switch (ivsr_model->model_type) ++ { + case BASICVSR: +- output.channels = dims[1]; +- output.height = dims[3]; +- output.width = dims[4]; +- break; + case VIDEOPROC: + case EDSR: +- case CUSTVSR: + case TSENET: +- output.channels = dims[2]; +- output.height = dims[3]; +- output.width = dims[4]; ++ output.order = DCO_RGB; + break; + default: +- av_log(ctx, AV_LOG_ERROR, "Not supported model type\n"); +- return; ++ output.order = DCO_NONE; ++ break; + } + +- output.dt = DNN_FLOAT; +- output.data = request->out_frames; +- output.scale = 0; +- output.mean = 0; +- output.layout = DL_NONE; + const AVPixFmtDescriptor* pix_desc = av_pix_fmt_desc_get(task->out_frame->format); + const AVComponentDescriptor* comp_desc = &pix_desc->comp[0]; + int bits = comp_desc->depth; +- av_assert0(request->lltask_count <= dims[0]); ++ av_assert0(request->lltask_count <= output_tensor_desc_get.shape[0]); + av_assert0(request->lltask_count >= 1); + for (int i = 0; i < request->lltask_count; ++i) { + task = request->lltasks[i]->task; +@@ -435,14 +522,14 @@ static void infer_completion_callback(void *args) + ivsr_model->model-> + filter_ctx); + } else { +- if (ivsr_model->model_type == BASICVSR && dims[2] != 1) { ++ if (ivsr_model->model_type == 
BASICVSR && ivsr_model->nif != 1) { + do { + int ret = + av_fifo_peek(task->out_queue, &tmp_frame, 1, + offset); + if (ret == 0) { +- if (output.channels != 1) { +- convert_nchw_to_nhwc(output.data, 1, output.channels, output.height, output.width); ++ if (output.channels != 1 && output.layout == DL_NONE) { ++ convert_nchw_to_nhwc(output.data, 1, output.channels, output.height, output.width, output.dt); + } + ff_proc_from_dnn_to_frame(tmp_frame, &output, + &ivsr_model->model-> +@@ -458,22 +545,25 @@ static void infer_completion_callback(void *args) + } + output.data += + output.height * output.width * +- output.channels * sizeof(float); ++ output.channels * get_datatype_size(output.dt); + } + offset++; +- } while (offset != dims[2]); ++ } while (offset != ivsr_model->nif); + } else { +- if (output.channels != 1) { ++ if (output.channels != 1 && output.layout == DL_NONE) { + //convert buffer from NCHW to NHWC +- convert_nchw_to_nhwc(output.data, 1, output.channels, output.height, output.width); ++ convert_nchw_to_nhwc(output.data, 1, output.channels, output.height, output.width, output.dt); + } +- if (normalize_factor != 1) { ++ ++ if (normalize_factor != 1 && output.dt == DNN_FLOAT ++ && (output.scale > 1 || output.scale == 0)) { + #pragma omp parallel for + // only need to devide by normalize_factor for channels = 1. 
+ for (int pos = 0; pos < output.height * output.width * output.channels; pos++) { + ((float*)output.data)[pos] = ((float*)output.data)[pos] / normalize_factor; + } + } ++ + ff_proc_from_dnn_to_frame(task->out_frame, &output, + &ivsr_model->model-> + filter_ctx); +@@ -531,40 +621,27 @@ static int get_input_ivsr(void *model, DNNData * input, + IVSRModel *ivsr_model = model; + IVSRContext *ctx = &ivsr_model->ctx; + IVSRStatus status; +- int dims[5] = { 0, 0, 0, 0, 0 }; +- +- status = ivsr_get_attr(ivsr_model->handle, INPUT_TENSOR_DESC, dims); ++ tensor_desc_t input_tensor_desc_get = { ++ .precision = {0}, ++ .layout = {0}, ++ .tensor_color_format = {0}, ++ .model_color_format = {0}, ++ .scale = 0.0, ++ .dimension = 0, ++ .shape = {0}}; ++ ++ status = ivsr_get_attr(ivsr_model->handle, INPUT_TENSOR_DESC, &input_tensor_desc_get); + if (status != OK) { + av_log(ctx, AV_LOG_ERROR, "Failed to get input dimensions\n"); + return DNN_GENERIC_ERROR; + } + +- switch (ivsr_model->model_type) { +- case BASICVSR: +- input->channels = dims[1]; +- input->height = dims[3]; +- input->width = dims[4]; +- input->dt = DNN_FLOAT; +- break; +- case VIDEOPROC: +- case EDSR: +- case CUSTVSR: +- input->channels = dims[2]; +- input->height = dims[3]; +- input->width = dims[4]; +- input->dt = DNN_FLOAT; +- break; +- case TSENET: +- //INFO:for TSENet, dims[2] == nif * channels, and nif==3 +- input->channels = dims[2] / 3; +- input->height = dims[3]; +- input->width = dims[4]; +- input->dt = DNN_FLOAT; +- break; +- default: +- av_log(ctx, AV_LOG_ERROR, "Not supported model type\n"); +- return DNN_GENERIC_ERROR; +- } ++ set_dnndata_info(input, &input_tensor_desc_get); ++ if (ivsr_model->model_type == TSENET) ++ input->channels = input->channels / 3; ++ ++ // hard code to pass check_modelinput_inlink() that requires DNN_FLOAT of model_input->dt ++ input->dt = DNN_FLOAT; + + return 0; + } +@@ -648,29 +725,29 @@ static int get_output_ivsr(void *model, const char *input_name, + IVSRModel *ivsr_model = 
model; + IVSRContext *ctx = &ivsr_model->ctx; + IVSRStatus status; +- int dims[5] = { 0, 0, 0, 0, 0 }; +- +- status = ivsr_get_attr(ivsr_model->handle, OUTPUT_TENSOR_DESC, dims); ++ DNNData output; ++ tensor_desc_t output_tensor_desc_get = { ++ .precision = {0}, ++ .layout = {0}, ++ .tensor_color_format = {0}, ++ .model_color_format = {0}, ++ .scale = 0.0, ++ .dimension = 0, ++ .shape = {0}}; ++ ++ status = ivsr_get_attr(ivsr_model->handle, OUTPUT_TENSOR_DESC, &output_tensor_desc_get); + if (status != OK) { + av_log(ctx, AV_LOG_ERROR, "Failed to get output dimensions\n"); + return DNN_GENERIC_ERROR; + } + +- switch (ivsr_model->model_type) { +- case VIDEOPROC: +- *output_height = input_height; +- *output_width = input_width; +- break; +- case BASICVSR: +- case EDSR: +- case CUSTVSR: +- case TSENET: +- *output_height = dims[3]; +- *output_width = dims[4]; +- break; +- default: +- av_log(ctx, AV_LOG_ERROR, "Not supported model type\n"); +- return DNN_GENERIC_ERROR; ++ set_dnndata_info(&output, &output_tensor_desc_get); ++ *output_height = output.height; ++ *output_width = output.width; ++ ++ if (ivsr_model->model_type == VIDEOPROC) { ++ *output_height = input_height; ++ *output_width = input_width; + } + + return ret; +@@ -678,14 +755,20 @@ static int get_output_ivsr(void *model, const char *input_name, + + // Utility function to create and link config + static ivsr_config_t* create_and_link_config(ivsr_config_t *previous, +- int key, char *value, void *ctx) { ++ int key, void *value, void *ctx) { + ivsr_config_t *config = av_mallocz(sizeof(ivsr_config_t)); + if (config == NULL) { + av_log(ctx, AV_LOG_ERROR, "Failed to malloc config\n"); + return NULL; + } + config->key = key; +- config->value = value; ++ if (config->key == INPUT_TENSOR_DESC_SETTING ++ || config->key == OUTPUT_TENSOR_DESC_SETTING) { ++ config->value = (tensor_desc_t *)value; ++ } else { ++ config->value = (char *)value; ++ } ++ + if (previous != NULL) { + previous->next = config; + } +@@ -708,8 
+791,24 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + ivsr_config_t *config_input_res = NULL; + ivsr_config_t *config_nireq = NULL; + int nif = 0; +- int input_dims[5] = { 0, 0, 0, 0, 1 }; +- int output_dims[5] = { 0, 0, 0, 0, 1 }; ++ ivsr_config_t *config_input_tensor = NULL; ++ ivsr_config_t *config_output_tensor = NULL; ++ tensor_desc_t input_tensor_desc_get = { ++ .precision = {0}, ++ .layout = {0}, ++ .tensor_color_format = {0}, ++ .model_color_format = {0}, ++ .scale = 0.0, ++ .dimension = 0, ++ .shape = {0}}; ++ tensor_desc_t output_tensor_desc_get = { ++ .precision = {0}, ++ .layout = {0}, ++ .tensor_color_format = {0}, ++ .model_color_format = {0}, ++ .scale = 0.0, ++ .dimension = 0, ++ .shape = {0}}; + + model = av_mallocz(sizeof(DNNModel)); + if (!model) { +@@ -775,9 +874,98 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + if (config_input_res == NULL) + goto err; + ++ tensor_desc_t input_tensor_desc_set = { ++ .precision = "u8", ++ .layout = "NHWC", ++ .tensor_color_format = {0}, ++ .model_color_format = {0}, ++ .scale = 0.0, ++ .dimension = 4, ++ .shape = {0, 0, 0, 0}}; ++ tensor_desc_t output_tensor_desc_set = { ++ .precision = "fp32", ++ .layout = "NHWC", ++ .tensor_color_format = {0}, ++ .model_color_format = {0}, ++ .scale = 0.0, ++ .dimension = 4, ++ .shape = {0, 0, 0, 0}}; ++ // set element type according to bit depth of frame ++ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); ++ switch (desc->comp[0].depth) ++ { ++ case 8: ++ strcpy(input_tensor_desc_set.precision, "u8"); ++ break; ++ case 10: ++ case 16: ++ strcpy(input_tensor_desc_set.precision, "u16"); ++ break; ++ default: ++ break; ++ } ++ // set layout for Basic_VSR ++ if (ivsr_model->model_type == BASICVSR) { ++ strcpy(input_tensor_desc_set.layout, "NFHWC"); ++ strcpy(output_tensor_desc_set.layout, "NFHWC"); ++ } ++ // set scale ++ if (fabsf(ctx->options.normalize_factor - 1) < 1e-6f) { ++ switch (desc->comp[0].depth) ++ { ++ case 
8: ++ input_tensor_desc_set.scale = 255.0; ++ break; ++ case 10: ++ input_tensor_desc_set.scale = 1023.0; ++ break; ++ case 16: ++ input_tensor_desc_set.scale = 65535.0; ++ break; ++ default: ++ break; ++ } ++ } ++ ++ // set color format of input tensor ++ switch (inlink->format) ++ { ++ case AV_PIX_FMT_RGB24: ++ case AV_PIX_FMT_RGB48: ++ strcpy(input_tensor_desc_set.tensor_color_format, "RGB"); ++ break; ++ case AV_PIX_FMT_BGR24: ++ case AV_PIX_FMT_BGR48: ++ strcpy(input_tensor_desc_set.tensor_color_format, "BGR"); ++ break; ++ case AV_PIX_FMT_YUV420P: ++ case AV_PIX_FMT_YUV420P10LE: ++ strcpy(input_tensor_desc_set.tensor_color_format, "I420_Three_Planes"); ++ break; ++ default: ++ break; ++ } ++ // set color format of model required ++ switch (ivsr_model->model_type) ++ { ++ case BASICVSR: ++ case EDSR: ++ case VIDEOPROC: ++ case TSENET: ++ strcpy(input_tensor_desc_set.model_color_format, "RGB"); ++ break; ++ case CUSTVSR: ++ strcpy(input_tensor_desc_set.model_color_format, "I420_Three_Planes"); ++ break; ++ default: ++ break; ++ } ++ config_input_tensor = create_and_link_config(config_input_res, INPUT_TENSOR_DESC_SETTING, &input_tensor_desc_set, ctx); ++ config_output_tensor = create_and_link_config(config_input_tensor, OUTPUT_TENSOR_DESC_SETTING, &output_tensor_desc_set, ctx); ++ + char nireq_string[40] = {0}; + sprintf(nireq_string, "%d", ctx->options.nireq); +- config_nireq = create_and_link_config(config_input_res, INFER_REQ_NUMBER, nireq_string, ctx); ++ config_nireq = create_and_link_config(config_output_tensor, INFER_REQ_NUMBER, nireq_string, ctx); + if (config_nireq == NULL) + goto err; + +@@ -840,20 +1028,19 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + if(ivsr_model->model_type == TSENET) ivsr_model->nif = 3; + + status = +- ivsr_get_attr(ivsr_model->handle, INPUT_TENSOR_DESC, input_dims); ++ ivsr_get_attr(ivsr_model->handle, INPUT_TENSOR_DESC, &input_tensor_desc_get); + if (status != OK) { +- av_log(ctx, AV_LOG_ERROR, "Failed to 
get input dimensions\n"); ++ av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor description\n"); + goto err; + } + + status = +- ivsr_get_attr(ivsr_model->handle, OUTPUT_TENSOR_DESC, output_dims); ++ ivsr_get_attr(ivsr_model->handle, OUTPUT_TENSOR_DESC, &output_tensor_desc_get); + if (status != OK) { +- av_log(ctx, AV_LOG_ERROR, "Failed to get output dimensions\n"); ++ av_log(ctx, AV_LOG_ERROR, "Failed to get output description\n"); + goto err; + } + +- + ivsr_model->request_queue = ff_safe_queue_create(); + if (!ivsr_model->request_queue) { + av_log(ctx, AV_LOG_ERROR, "Failed to create request queue\n"); +@@ -868,25 +1055,19 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + goto err; + } + +- //TODO: assume batch_size==1 +- item->in_frames = +- av_malloc(input_dims[0] * input_dims[1] * input_dims[2] * +- input_dims[3] * input_dims[4] * sizeof(float)); +- +- int input_byte_size = input_dims[0] * input_dims[1] * input_dims[2] * input_dims[3] * input_dims[4] * sizeof(float); +- memset(item->in_frames, 0, input_byte_size); ++ item->in_frames = av_malloc(get_tensor_size(&input_tensor_desc_get)); + if (!item->in_frames) { + av_log(ctx, AV_LOG_ERROR, "Failed to malloc in frames\n"); + goto err; + } ++ memset(item->in_frames, 0, get_tensor_size(&input_tensor_desc_get)); + +- item->out_frames = +- av_malloc(output_dims[0] * output_dims[1] * output_dims[2] * +- output_dims[3] * output_dims[4] * sizeof(float)); ++ item->out_frames = av_malloc(get_tensor_size(&output_tensor_desc_get)); + if (!item->out_frames) { + av_log(ctx, AV_LOG_ERROR, "Failed to malloc out frames\n"); + goto err; + } ++ memset(item->out_frames, 0 , get_tensor_size(&output_tensor_desc_get)); + + item->cb.ivsr_cb = infer_completion_callback; + item->cb.args = item; +diff --git a/libavfilter/dnn/dnn_io_proc.c b/libavfilter/dnn/dnn_io_proc.c +index f51c0669a9..8dec6d97be 100644 +--- a/libavfilter/dnn/dnn_io_proc.c ++++ b/libavfilter/dnn/dnn_io_proc.c +@@ -40,6 +40,59 @@ static int 
get_datatype_size(DNNDataType dt) + } + } + ++static DNNColorOrder map_dnn_color_order(int format) { ++ switch (format) ++ { ++ case AV_PIX_FMT_RGB24: ++ case AV_PIX_FMT_RGB48: ++ return DCO_RGB; ++ case AV_PIX_FMT_BGR24: ++ case AV_PIX_FMT_BGR48: ++ return DCO_BGR; ++ default: ++ return DCO_NONE; ++ } ++} ++ ++// bgr<->rgb ++static void transpose(DNNData *input, DNNColorOrder dst_order) { ++ if (input->order == DCO_NONE || input->layout == DL_NONE ++ || dst_order == DCO_NONE || input->order == dst_order) ++ return; ++ ++ int H = input->height; ++ int W = input->width; ++ int C = input->channels; ++ void *data = input->data; ++ int a_index = 0, b_index = 0; ++ int type_size = get_datatype_size(input->dt); ++ //transpose bgr<->rgb for NHWC layout ++ if (input->layout == DL_NHWC) { ++ for (int h = 0; h < H; ++h) { ++ for (int w = 0; w < W; ++w) { ++ a_index = h * W * C + w * C; ++ b_index = a_index + (C - 1); ++ for (int byte = 0; byte < type_size; ++byte) { ++ uint8_t tmp = ((uint8_t*)data)[a_index * type_size + byte]; ++ ((uint8_t*)data)[a_index * type_size + byte] = ((uint8_t*)data)[b_index * type_size + byte]; ++ ((uint8_t*)data)[b_index * type_size + byte] = tmp; ++ } ++ } ++ } ++ // transpose bgr<->rgb for NCHW layout ++ } else if (input->layout == DL_NCHW) { ++ int plane_size = H * W * type_size; ++ void *tmp = av_malloc(plane_size); ++ memcpy(tmp, data, plane_size); ++ memcpy(data, data + (C - 1) * plane_size, plane_size); ++ memcpy(data + (C - 1) * plane_size, tmp, plane_size); ++ av_free(tmp); ++ } ++ ++ // re-set order ++ input->order = dst_order; ++} ++ + int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + { + struct SwsContext *sws_ctx; +@@ -64,6 +117,9 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + /* scale == 1 and mean == 0 and dt == UINT8: passthrough */ + if (fabsf(output->scale - 1) < 1e-6f && fabsf(output->mean) < 1e-6 && output->dt == DNN_UINT8) + src_fmt = AV_PIX_FMT_GRAY8; ++ /* 
scale == 1 and mean == 0 and dt == UINT16: passthrough */ ++ else if (fabsf(output->scale - 1) < 1e-6f && fabsf(output->mean) < 1e-6 && output->dt == DNN_UINT16) ++ src_fmt = AV_PIX_FMT_GRAY16; + /* (scale == 255 or scale == 0) and mean == 0 and dt == FLOAT: normalization */ + else if ((fabsf(output->scale - 255) < 1e-6f || fabsf(output->scale) < 1e-6f) && + fabsf(output->mean) < 1e-6 && output->dt == DNN_FLOAT) +@@ -74,6 +130,11 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx) + return AVERROR(ENOSYS); + } + ++ DNNColorOrder dst_color_order = map_dnn_color_order(frame->format); ++ if (dst_color_order != output->order) { ++ transpose(output, dst_color_order); ++ } ++ + dst_data = (void **)frame->data; + linesize[0] = frame->linesize[0]; + plane_size = linesize[0] * frame->height; +@@ -220,6 +281,9 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + /* scale == 1 and mean == 0 and dt == UINT8: passthrough */ + if (fabsf(input->scale - 1) < 1e-6f && fabsf(input->mean) < 1e-6 && input->dt == DNN_UINT8) + dst_fmt = AV_PIX_FMT_GRAY8; ++ /* scale == 1 and mean == 0 and dt == UINT16: passthrough */ ++ else if (fabsf(input->scale - 1) < 1e-6f && fabsf(input->mean) < 1e-6 && input->dt == DNN_UINT16) ++ dst_fmt = comp_desc->depth == 10 ? 
AV_PIX_FMT_GRAY10 : AV_PIX_FMT_GRAY16; + /* (scale == 255 or scale == 0) and mean == 0 and dt == FLOAT: normalization */ + else if ((fabsf(input->scale - 255) < 1e-6f || fabsf(input->scale) < 1e-6f) && + fabsf(input->mean) < 1e-6 && input->dt == DNN_FLOAT) +@@ -346,6 +410,12 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + ret = AVERROR(ENOSYS); + goto err; + } ++ DNNColorOrder current_color_order = map_dnn_color_order(frame->format); ++ if (input->order != current_color_order) { ++ DNNColorOrder dst_color_order = input->order; ++ input->order = current_color_order; ++ transpose(input, dst_color_order); ++ } + err: + av_free(middle_data); + return ret; +diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c +index 066b00a898..9580f81cdb 100644 +--- a/libavfilter/vf_dnn_processing.c ++++ b/libavfilter/vf_dnn_processing.c +@@ -78,14 +78,13 @@ static av_cold int init(AVFilterContext *context) + + static const enum AVPixelFormat pix_fmts[] = { + #if 0 +- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAYF32, + AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, + AV_PIX_FMT_NV12, + AV_PIX_FMT_NONE + #else +- AV_PIX_FMT_BGR24, ++ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_BGR48LE, + AV_PIX_FMT_RGB48LE, +-- +2.34.1 + diff --git a/ivsr_ffmpeg_plugin/patches/0024-refine-RESHAPE_SETTINGS-support-Y-input-SVP-model.patch b/ivsr_ffmpeg_plugin/patches/0024-refine-RESHAPE_SETTINGS-support-Y-input-SVP-model.patch new file mode 100644 index 0000000..0a0274c --- /dev/null +++ b/ivsr_ffmpeg_plugin/patches/0024-refine-RESHAPE_SETTINGS-support-Y-input-SVP-model.patch @@ -0,0 +1,65 @@ +From be7d4bd47ed80fe4a0590c782b09587c4819a396 Mon Sep 17 00:00:00 2001 +From: Jerry Dong +Date: Wed, 18 Sep 2024 12:44:34 +0800 +Subject: [PATCH] refine RESHAPE_SETTINGS; support Y-input SVP model + +--- + libavfilter/dnn/dnn_backend_ivsr.c | 18 
++++++++++++------ + 1 file changed, 12 insertions(+), 6 deletions(-) + +diff --git a/libavfilter/dnn/dnn_backend_ivsr.c b/libavfilter/dnn/dnn_backend_ivsr.c +index 44d603190c..e7e1d5ea2a 100644 +--- a/libavfilter/dnn/dnn_backend_ivsr.c ++++ b/libavfilter/dnn/dnn_backend_ivsr.c +@@ -950,10 +950,15 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + { + case BASICVSR: + case EDSR: +- case VIDEOPROC: + case TSENET: + strcpy(input_tensor_desc_set.model_color_format, "RGB"); + break; ++ case VIDEOPROC: ++ if (desc->flags & AV_PIX_FMT_FLAG_RGB) ++ strcpy(input_tensor_desc_set.model_color_format, "RGB"); ++ else ++ strcpy(input_tensor_desc_set.model_color_format, "I420_Three_Planes"); ++ break; + case CUSTVSR: + strcpy(input_tensor_desc_set.model_color_format, "I420_Three_Planes"); + break; +@@ -969,25 +974,26 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + if (config_nireq == NULL) + goto err; + ++ //TODO: reshape setting follows NHW layout. Hardcode the batch_size as 1. 
+ char shape_string[40] = {0}; + switch (ivsr_model->model_type) { + case BASICVSR: +- sprintf(shape_string, "1,3,3,%d,%d", frame_h, frame_w); ++ sprintf(shape_string, "1,%d,%d", frame_h, frame_w); + break; + case VIDEOPROC: + // the input resoultion required 8-aligned + frame_h = (frame_h + ALIGNED_SIZE - 1) / ALIGNED_SIZE * ALIGNED_SIZE; + frame_w = (frame_w + ALIGNED_SIZE - 1) / ALIGNED_SIZE * ALIGNED_SIZE; +- sprintf(shape_string, "1,3,%d,%d", frame_h, frame_w); ++ sprintf(shape_string, "1,%d,%d", frame_h, frame_w); + break; + case EDSR: +- sprintf(shape_string, "1,3,%d,%d", frame_h, frame_w); ++ sprintf(shape_string, "1,%d,%d", frame_h, frame_w); + break; + case CUSTVSR: +- sprintf(shape_string, "1,1,%d,%d", frame_h, frame_w); ++ sprintf(shape_string, "1,%d,%d", frame_h, frame_w); + break; + case TSENET: +- sprintf(shape_string, "1,9,%d,%d", frame_h, frame_w); ++ sprintf(shape_string, "1,%d,%d", frame_h, frame_w); + break; + default: + av_log(ctx, AV_LOG_ERROR, "Not supported model type\n"); +-- +2.34.1 + diff --git a/ivsr_ffmpeg_plugin/patches/0025-dnn_backend_ivsr-change-aligned-size-to-64-from-8.patch b/ivsr_ffmpeg_plugin/patches/0025-dnn_backend_ivsr-change-aligned-size-to-64-from-8.patch new file mode 100644 index 0000000..efdeb86 --- /dev/null +++ b/ivsr_ffmpeg_plugin/patches/0025-dnn_backend_ivsr-change-aligned-size-to-64-from-8.patch @@ -0,0 +1,26 @@ +From b058f5bcd7d6d68f9038a1016476fdf1b3566300 Mon Sep 17 00:00:00 2001 +From: Xiaoxia Liang +Date: Thu, 26 Sep 2024 22:47:25 +0800 +Subject: [PATCH] dnn_backend_ivsr: change aligned size to 64 from 8 + +Signed-off-by: Xiaoxia Liang +--- + libavfilter/dnn/dnn_backend_ivsr.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/libavfilter/dnn/dnn_backend_ivsr.c b/libavfilter/dnn/dnn_backend_ivsr.c +index e7e1d5ea2a..246fe5eb2b 100644 +--- a/libavfilter/dnn/dnn_backend_ivsr.c ++++ b/libavfilter/dnn/dnn_backend_ivsr.c +@@ -109,7 +109,7 @@ static const AVOption dnn_ivsr_options[] = { + + 
AVFILTER_DEFINE_CLASS(dnn_ivsr); + +-#define ALIGNED_SIZE 8 ++#define ALIGNED_SIZE 64 + + static int get_datatype_size(DNNDataType dt) + { +-- +2.34.1 + diff --git a/ivsr_ffmpeg_plugin/patches/0026-Using-plugin-to-do-model-preprocessing-for-TSENet.patch b/ivsr_ffmpeg_plugin/patches/0026-Using-plugin-to-do-model-preprocessing-for-TSENet.patch new file mode 100644 index 0000000..076f9b1 --- /dev/null +++ b/ivsr_ffmpeg_plugin/patches/0026-Using-plugin-to-do-model-preprocessing-for-TSENet.patch @@ -0,0 +1,169 @@ +From 163300648bc861dc781c43e5a5d64985e5992386 Mon Sep 17 00:00:00 2001 +From: Jerry Dong +Date: Tue, 19 Nov 2024 14:10:45 +0800 +Subject: [PATCH] Using plugin to do model preprocessing for TSENet. + +--- + libavfilter/dnn/dnn_backend_ivsr.c | 40 ++++++++++++++++++++---------- + libavfilter/dnn/dnn_io_proc.c | 9 ++++--- + 2 files changed, 32 insertions(+), 17 deletions(-) + +diff --git a/libavfilter/dnn/dnn_backend_ivsr.c b/libavfilter/dnn/dnn_backend_ivsr.c +index 246fe5eb2b..4a0fab99cd 100644 +--- a/libavfilter/dnn/dnn_backend_ivsr.c ++++ b/libavfilter/dnn/dnn_backend_ivsr.c +@@ -103,6 +103,7 @@ static const AVOption dnn_ivsr_options[] = { + { "extension", "extension lib file full path, usable for BasicVSR model", OFFSET(options.extension), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS}, + { "op_xml", "custom op xml file full path, usable for BasicVSR model", OFFSET(options.op_xml), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS}, + { "model_type", "dnn model type", OFFSET(options.model_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, MODEL_TYPE_NUM - 1, FLAGS}, ++ //TODO: replace "normalize_factor" with "scale" as defined in openvino backend + { "normalize_factor", "normalization factor", OFFSET(options.normalize_factor), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 65535.0, FLAGS}, + { NULL } + }; +@@ -323,9 +324,10 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + + input.data = request->in_frames; + in_data = input.data; ++ //scale/mean can't be 
retrieved, so they're 0.0 by default + // ff_proc_from_frame_to_dnn: uint_8->uint8 requires scale == 1 and mean == 0 and dt == UINT8 +- input.scale = 1; +- input.mean = 0; ++ input.scale = input.dt == DNN_UINT8 ? 1.0f : 0.0f; ++ input.mean = 0.0f; + + ctx->model_input_height = input.height; + ctx->model_input_width = input.width; +@@ -370,7 +372,7 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + ivsr_model->model-> + filter_ctx); + // convert buffer from NHWC to NCHW when C != 1 +- if (input.layout != 1 && input.layout == DL_NONE ) ++ if (input.channels != 1 && input.layout == DL_NONE ) + convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width, input.dt); + input.data += + input.height * input.width * +@@ -410,7 +412,9 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + for (int idx = 0; idx < ivsr_model->nif; idx++) { + //INFO: the 3 frames in frame_queue are: (N-2)th, (N-1)th, (N)th + ff_proc_from_frame_to_dnn(input_frames[idx], &input, ivsr_model->model->filter_ctx); +- convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width, input.dt); ++ //NHWC->NCHW was processed in ff_proc_from_frame_to_dnn() if input.layout is set ++ if (input.channels != 1 && input.layout == DL_NONE ) ++ convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width, input.dt); + input.data += input.height * input.width * input.channels * get_datatype_size(input.dt); + } + input.data = in_data; +@@ -434,8 +438,8 @@ static int fill_model_input_ivsr(IVSRModel * ivsr_model, + convert_nhwc_to_nchw(input.data, 1, input.channels, input.height, input.width, input.dt); + } + +- if (normalize_factor != 1 && input.dt == DNN_FLOAT +- && (input.scale > 1 || input.scale == 0)) { ++ if (normalize_factor != 1 && input.dt == DNN_FLOAT && ++ (fabsf(input.scale - 1.0f) > 1e-6f || fabsf(input.scale) < 1e-6f)) { + // do not need to covert buffer from NHWC to NCHW if the channels is 1, only need to mulitple normalize_factor + 
#pragma omp parallel for + for (int pos = 0; pos < input.height * input.width * input.channels; pos++) { +@@ -490,9 +494,10 @@ static void infer_completion_callback(void *args) + set_dnndata_info(&output, &output_tensor_desc_get); + + output.data = request->out_frames; +- output.mean = 0; ++ //scale/mean can't be retrieved, so they're 0.0 by default ++ output.mean = 0.0f; + // ff_proc_from_dnn_to_frame: float->uint8 require (scale == 255 or scale == 0) and mean == 0 +- output.scale = output.dt == DNN_UINT8 ? 1 : 0; ++ output.scale = output.dt == DNN_UINT8 ? 1.0f : 0.0f; + // set order based on model type + switch (ivsr_model->model_type) + { +@@ -555,8 +560,8 @@ static void infer_completion_callback(void *args) + convert_nchw_to_nhwc(output.data, 1, output.channels, output.height, output.width, output.dt); + } + +- if (normalize_factor != 1 && output.dt == DNN_FLOAT +- && (output.scale > 1 || output.scale == 0)) { ++ if (normalize_factor != 1 && output.dt == DNN_FLOAT && ++ (fabsf(output.scale - 1.0f) > 1e-6f || fabsf(output.scale) < 1e-6f)) { + #pragma omp parallel for + // only need to devide by normalize_factor for channels = 1. + for (int pos = 0; pos < output.height * output.width * output.channels; pos++) { +@@ -890,6 +895,9 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + .scale = 0.0, + .dimension = 4, + .shape = {0, 0, 0, 0}}; ++ ++ // Through the setting of input/output_tensor_desc_set, we can config where ++ // to do the pre-processing, in plugin or in SDK(openvino). 
+ // set element type according to bit depth of frame + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); + switch (desc->comp[0].depth) +@@ -904,13 +912,19 @@ DNNModel *ff_dnn_load_model_ivsr(const char *model_filename, + default: + break; + } +- // set layout for Basic_VSR ++ // customize layout for Basic_VSR and TSENet + if (ivsr_model->model_type == BASICVSR) { + strcpy(input_tensor_desc_set.layout, "NFHWC"); + strcpy(output_tensor_desc_set.layout, "NFHWC"); ++ } else if (ivsr_model->model_type == TSENET) { ++ //For TSENet, it's not typical N'C'HW, so do the NHWC->NCHW transion in plugin ++ strcpy(input_tensor_desc_set.layout, "NCHW"); ++ strcpy(input_tensor_desc_set.precision, "fp32"); + } +- // set scale +- if (fabsf(ctx->options.normalize_factor - 1) < 1e-6f) { ++ // set scale for non-float type of input ++ if (fabsf(ctx->options.normalize_factor - 1) < 1e-6f && ++ (strcmp(input_tensor_desc_set.precision, "u8") == 0 || ++ strcmp(input_tensor_desc_set.precision, "u16") == 0)) { + switch (desc->comp[0].depth) + { + case 8: +diff --git a/libavfilter/dnn/dnn_io_proc.c b/libavfilter/dnn/dnn_io_proc.c +index 8dec6d97be..09fea90e20 100644 +--- a/libavfilter/dnn/dnn_io_proc.c ++++ b/libavfilter/dnn/dnn_io_proc.c +@@ -285,9 +285,10 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + else if (fabsf(input->scale - 1) < 1e-6f && fabsf(input->mean) < 1e-6 && input->dt == DNN_UINT16) + dst_fmt = comp_desc->depth == 10 ? 
AV_PIX_FMT_GRAY10 : AV_PIX_FMT_GRAY16; + /* (scale == 255 or scale == 0) and mean == 0 and dt == FLOAT: normalization */ ++ //TODO: compare with "255" doesn't cover 10-bit case + else if ((fabsf(input->scale - 255) < 1e-6f || fabsf(input->scale) < 1e-6f) && + fabsf(input->mean) < 1e-6 && input->dt == DNN_FLOAT) +- dst_fmt = AV_PIX_FMT_GRAYF32; ++ dst_fmt = AV_PIX_FMT_GRAYF32; //float, 0.0f ~ 1.0f + else { + av_log(log_ctx, AV_LOG_ERROR, "dnn_process input data doesn't support type: UINT8 " + "scale: %f, mean: %f\n", input->scale, input->mean); +@@ -296,7 +297,6 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + + src_data = (void **)frame->data; + linesize[0] = frame->linesize[0]; +- plane_size = linesize[0] * frame->height; + + switch (frame->format) { + case AV_PIX_FMT_RGB48LE: +@@ -306,6 +306,9 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + if (input->layout == DL_NCHW) { + av_assert0(comp_desc->depth == 8 || comp_desc->depth == 16); + mdl_fmt = comp_desc->depth == 8 ? 
AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16LE; ++ int middle_data_linesize[4] = {0}; ++ ret = av_image_fill_linesizes(middle_data_linesize, mdl_fmt, frame->width); ++ plane_size = middle_data_linesize[0] * frame->height; + middle_data = av_malloc(plane_size * input->channels); + if (!middle_data) { + ret = AVERROR(ENOMEM); +@@ -340,8 +343,6 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx) + return AVERROR(ENOSYS); + } + +- int middle_data_linesize[4] = {0}; +- ret = av_image_fill_linesizes(middle_data_linesize, mdl_fmt, frame->width); + sws_scale(sws_ctx, (const uint8_t * const *)frame->data, + frame->linesize, 0, frame->height, planar_data, + middle_data_linesize); +-- +2.34.1 + diff --git a/ivsr_ov/license/LICENSE.md b/ivsr_ov/license/LICENSE.md deleted file mode 100644 index 14ac870..0000000 --- a/ivsr_ov/license/LICENSE.md +++ /dev/null @@ -1,31 +0,0 @@ -# BSD 3-Clause License - -Copyright (c) 2023, Intel Corporation -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -DISCLAIMER - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/ivsr_sdk/CMakeLists.txt b/ivsr_sdk/CMakeLists.txt index 30fead1..4f19a39 100644 --- a/ivsr_sdk/CMakeLists.txt +++ b/ivsr_sdk/CMakeLists.txt @@ -3,20 +3,32 @@ # cmake_minimum_required(VERSION 3.10) -project(IVSR DESCRIPTION "Intel Video Super Resolution SDK") - +project(IVSR DESCRIPTION "Intel IVSR SDK") +include(GNUInstallDirs) set(OUTPUT_FOLDER ${CMAKE_CURRENT_SOURCE_DIR}) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_FOLDER}/bin) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_FOLDER}/lib) set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_FOLDER}/lib) +SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb") +SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall") + set(SDK_PRIVATE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/privates/include) add_subdirectory(src) -add_subdirectory(samples) -#add_subdirectory(tools) add_subdirectory(privates) +if(ENABLE_SAMPLE) + add_subdirectory(samples) +endif() + if(ENABLE_TEST) add_subdirectory(test) endif() + +install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/" + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" + FILES_MATCHING PATTERN "*.h" +) + +install(TARGETS ivsr LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}") diff --git a/ivsr_sdk/README.md b/ivsr_sdk/README.md index f7396c5..53cc435 100644 --- a/ivsr_sdk/README.md +++ b/ivsr_sdk/README.md @@ -147,8 +147,8 @@ IVSRStatus 
ivsr_get_attr(ivsr_handle handle, IVSRAttrKey key, void* value); |Key name|Description| |:--|:--| |IVSR_VERSION|Use this key to get ivsr version.| - |INPUT_TENSOR_DESC|Use this key to get model input shape.| - |OUTPUT_TENSOR_DESC|Use this key to get model output shape.| + |INPUT_TENSOR_DESC|Use this key to get model input tensor description includes precision, layout and shape information.| + |OUTPUT_TENSOR_DESC|Use this key to get model output description includes precision, layout and shape information.| |NUM_INPUT_FRAMES|Use this key to get input frames number of the model.| |INPUT_DIMS|Use this key to get input dims of the model.| |OUTPUT_DIMS|Use this key to get input dims of the model.| @@ -191,7 +191,7 @@ The method deinitializes the handle and releases the resources allocated for iVS ## **VSR Sample** -There is a simple C++ sample to perform BasicVSR/EDSR/SVP inference on OpenVINO backend. You can reach the sample from `/ivsr_sdk/bin/vsr_sample`. You can get the help messages by running `./vsr_sample -h` and see the default settings of parameters. +There is a simple C++ sample to perform BasicVSR/EDSR/SVP inference on OpenVINO backend. Please add CMake option **-DENABLE_SAMPLE=ON** and build/install its dependency `OpenCV` component befor that. You can reach the sample from `/ivsr_sdk/bin/vsr_sample`. You can get the help messages by running `./vsr_sample -h` and see the default settings of parameters. |Option name|Desciption|Default value|Recommended value(s)| |:--|:--|:--|:--| @@ -205,7 +205,7 @@ There is a simple C++ sample to perform BasicVSR/EDSR/SVP inference on OpenVINO |save_path|Optional. Path to save predictions.|./outputs|If use the default value, make sure default path exists.| |save_predictions|Optional. Whether to save the results to save_path.|false|If this option exists, results will be saved.| |scale_factor|Optional. 
The ratio of the size of the image before scaling (original size) to the size of the image after scaling (new size).|2|For image enhancement model and if no resolution change, please set to 1.| -|normalize_factor|Optional. Normalization factor is equal to the value range required by models.|1.0|255.0 for Enhanced EDSR, 1.0 for other models.| +|normalize_factor|Optional. Normalization factor is equal to the value range required by models.|255.0|Must set to 1.0 for Enhanced EDSR.| |precision |Required for inference precision setting, but runtime precision you need to check with your HW platform.|f32|f32[FP32], f16[FP16], bf16[bf16].| |reshape_values|Optional. Reshape the network to fit the input image size. |None|Set the complete tensor value of the shape. e.g. --reshape_values="(1,3,720,1280)" in case your input image happens to be 1280x720 RGB 24bits| @@ -225,7 +225,7 @@ cd /ivsr_sdk/bin ### **Enhanced EDSR model Sample** ```bash cd /ivsr_sdk/bin -./vsr_sample --model_path=[your EDSR model.xml] --data_path=[folder contains low resolution images] --nig=1 --device=GPU --normalize_factor=255.0 --precision=f16 #need to set normalize_factor as 225.0 +./vsr_sample --model_path=[your EDSR model.xml] --data_path=[folder contains low resolution images] --nig=1 --device=GPU --normalize_factor=1.0 --precision=f16 #need to set normalize_factor as 1.0 ``` ### **SVP models Sample** ```bash diff --git a/ivsr_sdk/include/ivsr.h b/ivsr_sdk/include/ivsr.h index e62746b..11fe1b2 100644 --- a/ivsr_sdk/include/ivsr.h +++ b/ivsr_sdk/include/ivsr.h @@ -24,6 +24,7 @@ #include #include +#include /** * @brief vsr context @@ -39,7 +40,7 @@ typedef struct ivsr_callback { /** * @brief Intel VSR SDK version. 
- * + * */ typedef struct ivsr_version { const char *api_version; //!< A string representing ibasicvsr sdk version> @@ -47,7 +48,7 @@ typedef struct ivsr_version { /** * @brief Status for Intel VSR SDK - * + * */ typedef enum { OK = 0, @@ -61,19 +62,29 @@ typedef enum { /** * @enum vsr sdk supported key. - * + * There are multiple configurations which contain resolutions, + * INPUT_RES - it's for patch-based solution + * RESHAPE_SETTINGS - it's to reshape the model's input tensor, NHW in current version + * INPUT_TENSOR_DESC_SETTING - input data's tensor description + * OUTPUT_TENSOR_DESC_SETTING - output data's tensor description + * + * RESHAPE_SETTINGS carries data for BATCH, WIDTH, HEIGH, in NHW format. + * We may extent the type from one vector to a structure which specifies layout and different dimensions + * */ typedef enum { - INPUT_MODEL = 0x1, //!< Required, Path to the input model file> - TARGET_DEVICE = 0x2, //!< Required, device to run the inference> + INPUT_MODEL = 0x1, //!< Required. Path to the input model file> + TARGET_DEVICE = 0x2, //!< Required. Device to run the inference> BATCH_NUM = 0x3, //!< Not Enabled Yet> VERBOSE_LEVEL = 0x4, //!< Not Enabled Yet> - CUSTOM_LIB = 0x5, //!< Path to extension lib file, required for loading Extended BasicVSR model> - CLDNN_CONFIG = 0x6, //!< Path to custom op xml file, required for loading Extended BasicVSR model> - NUM_IFER_REQUEST = 0x7, //!< Not Enabled Yet> + CUSTOM_LIB = 0x5, //!< Optional. Path to extension lib file, required for loading Extended BasicVSR model> + CLDNN_CONFIG = 0x6, //!< Optional. Path to custom op xml file, required for loading Extended BasicVSR model> + INFER_REQ_NUMBER = 0x7, //!< Optional. To specify inference request number> PRECISION = 0x8, //!< Optional. To set inference precision for hardware> RESHAPE_SETTINGS = 0x9, //!< Optional. 
To set reshape setting for the input model> - INPUT_RES = 0xA, //!< Required, to specify the input frame resolution> + INPUT_RES = 0xA, //!< Required. To specify the input frame resolution> + INPUT_TENSOR_DESC_SETTING = 0xB, + OUTPUT_TENSOR_DESC_SETTING = 0xC }IVSRConfigKey; typedef enum { @@ -87,15 +98,23 @@ typedef enum { /** * @struct Intel VSR configuration. - * + * */ typedef struct ivsr_config { IVSRConfigKey key; - const char *value; + const void *value; struct ivsr_config *next; }ivsr_config_t; - +typedef struct tensor_desc { + char precision[20]; + char layout[20]; + char tensor_color_format[20]; + char model_color_format[20]; + float scale; + uint8_t dimension; + size_t shape[8]; +} tensor_desc_t; #ifdef __cplusplus extern "C" { @@ -103,48 +122,50 @@ extern "C" { /** * @brief initialize the intel vsr sdk - * + * * @param configs configurations to initialize the intel vsr sdk. * @param handle handle used to process frames. - * @return IVSRStatus + * @return IVSRStatus */ IVSRStatus ivsr_init(ivsr_config_t *configs, ivsr_handle *handle); /** * @brief process function - * + * * @param handle vsr process handle. * @param input_data input data buffer * @param output_data output data buffer * @param cb callback function. - * @return IVSRStatus + * @return IVSRStatus */ IVSRStatus ivsr_process(ivsr_handle handle, char* input_data, char* output_data, ivsr_cb_t* cb); +IVSRStatus ivsr_process_async(ivsr_handle handle, char* input_data, char* output_data, ivsr_cb_t* cb); + /** * @brief reset the configures for vsr - * + * * @param handle vsr process handle * @param configs changed configurations for vsr. - * @return IVSRStatus + * @return IVSRStatus */ IVSRStatus ivsr_reconfig(ivsr_handle handle, ivsr_config_t* configs); /** - * @brief get attributes - * + * @brief get attributes + * * @param handle vsr process handle * @param key indicate which type information to query. * @param value returned data. 
- * @return IVSRStatus + * @return IVSRStatus */ IVSRStatus ivsr_get_attr(ivsr_handle handle, IVSRAttrKey key, void* value); /** * @brief free created vsr handle and conresponding resources. - * + * * @param handle vsr process handle. - * @return IVSRStatus + * @return IVSRStatus */ IVSRStatus ivsr_deinit(ivsr_handle handle); diff --git a/ivsr_sdk/privates/model_guard.bin/libirguard.a-2024.5.0 b/ivsr_sdk/privates/model_guard.bin/libirguard.a-2024.5.0 new file mode 100644 index 0000000000000000000000000000000000000000..3c896867c6fde6b005e9dfdd47c74caa29b0bb0a GIT binary patch literal 353316 zcmeFa3w%_?**|_Z*$v@(0;0xCb=6f95t`+i1(Ynw!dcm1G~8OmB_tb?5|Wslg`ixb zyOA8%Ro<$t)%LBuysfQ$t8cYYQ6b^3tqNF`YOQ#~tc!wL5HFDb_nDb<_G}U$+^YWm z&4*>?%$dtG&ph+YGtcwPoD0U6)Rk6T+2_0V6Htw zs!QtXY8HD+*~?Qi|N~+7M0(IU=^GZEai`87ezbv4ZR8~2ies}ef z!V2O9ipT}h={kG4I&c{#?<{wc<=6Ex&GIT#-9&#x${J0>Fj`r1B;{T}6?SXWZIAfSFd zT|b`YzsBBk+CFImf2_Ty=6lxr;UoOTwf@qYV6_@3^Vh0%U_!N3C8dER&NL6q7!~MRR(gpt7nySjuC7$6PPr-1{B0kyw?JYZd(DInd zy7|G9x-#eJ;wANJU|~@mEhlPapq_Qlc~ZJ}!i^X`8NDb_s@BwbOM!c=nhE4~eqoe2 z0@hC$tehSo*t8s6-k&InyKC;0NnVVBd<}AeF8NXWr${cK zCJYw+HFZ&M{}gxe>l{UW89GH>xEeo|ojaL>cg&qT#)aaT>Oa|&@39(n6Oc?Q#botT~o?UYt|NAI&~`QJQZ3fUECrTe-s_bNkJ6aIq8V% zQYR2~gq_YQ*@QSpLW4IyphDSM6;Me}NoqV&reaH7Uc)E?N<9!vN5Y<4 zEtx;cU+h;aJ&TBV@QAF_iVu^O&z2+f?S!`&N9{&WiSW2s-BBp;l(pRhHqq0zkA+L= z!bcA#->GcAXT3iH)t#CdMOy!|%KF-x`T#c91v7`$1N*#V6>=U9q~Ylkt1MS^=XPCtQC32DUoF-zaV!E3K)nS4*nZdS?!M<)^j9 zQ#3``hYaxr_OQ~D>S~zSN)f1tj!1S?UXbYTh8Q(+x~m{NtMsZ#ad=!XI; zP_pn^XO15A*H>0zRt+EH4_4Jw&-be}{_@HOELSiIsuliuOVmJp)wNU1x?aqq>YBQR zC8!k3UkT|Yi%N^tPO3U5T3B~BP0xh_?xo($$N%3N$-u>9~P6I$5gDQxhU<0)+DL7k(`YJGqn@E%z& z&vpQHkX(>eS7de?IWRo1<3M>yuuAop)Yb;7%RCD~BhFEjT19vVUdjV?B=~eHdu_4P zU(WMF80uNxsk$gLI#^u`(-RY@y3{+|52FK7>L@N%VZ=)TN;Q6Ehli(lgujYlLP4V$ zr>m-_qzq(U6(Hi_eLw;rtbv;Zb{$O?G4Z#3RKlQNKk71Z5DdFD_7cHeEmkYQmDyx%#V><-_ z6%1kEu?hY~9n*o=htD~to|%RyZ@C|?44_OhX-%7P#65>m{5UO3NtBwyD0N{Cwi-mB z!)q3e%&(~nkhjG#4>nb765m|@{F;Tem;*=g(C~}~T5D@+b!K5KKQ6zgAy675_J6e1 
zVG3lkV+WX78mEFZUG>3vnDlTb@fJ@61)}?-x)Jr5_A5dN4vZ3@ zT``K)QN;oQ0jZl_m|hwsb;hi7#;Rh!*&J@7#GcT+6I9wmR!mA6;;;oo-xQgQNCN0@ z7+eOv4BAZ+MoEA$38N*8Ip;L9pQQZ6xlQ@8lvFl!;WtSoXbpm-TP-BfBWETK#xhq{ z`{%*ArHkOR5C~rg?I^(w`M9d8rc|sV-m9*if;XN;M;q>i5Hc`ewO3CwRzTqmmjw54 z(bLkZlKOh?aA)GlNm)svH$x4VoFjLWcF`qei%O~!9qe{^@`uxA0+JENqwAg{w|+^r zTG9abI_BfY_8t&&fRd7`K&Lgt3~#>k1Z&kv+_Y32<8;e8_UV?7L1PL%NP|^=j7jDE zYR^Ilw)tWhO;8!gOfU{MQ-@O>-H}c)y*WDRAX_m+$8L$IC%zMq%gLpX31iqesw5ux zO)z#Ev}3KZEVyu?;qzeNpeMBqcbWhT&I>fu*7<2OAsmB_t*#MW>zWssUs>IywjK*M z*gF;aOM~PIrTQ0@R0RWU%WCFQJ+F(ps@5%moLExV1*D!Ji&2mm#d+8TK?TNkU1o4F z(#PeTmsm#(oF8{pU0MHeRn@&tx_cuj(X0b&_x;)KT4tNj;)Znk|Ap z>jJe{u02&%^NxqN5UUgwF`*tN6s&>e!mFHllZ)N0iTNJ$c86prcqw)xLzxzs9*bv2 zZ6{$!xgpL&>pO&vGK073z13yUL1&hres?Xybj)NL{)Af9AciS1USXHW(u*}>Tz~;PGU#v!z z5oWvsDQu0c4c0@Gj5W<)5^Nxe8rbqIGRexKjCrYp`IJPN;=m2&{Z}{k9Y*dn(aj!)eDMxGhtE3zqBUj**~l&!W!q zSdXQiqf}1D&Qnr3Hw>M^%1;n-(^6T2^GgEd%qVkm@P70LPH7KL0{)NO1F=M&2=$nx z4D%Q;-NMNkgdVhTf}?Ph7EZ!2^q>WEA3BRDU$1)1%<%<}!|DQSW$`4e2iSOHTkS1^ zCau^@;*eQ4^KZ9KkW>O=DU5DK(7KG!%_GoXl?9kxg|D_paJ@KD!veGXCA=({H9#NF zBvG@oP+}H5NVXi&X|YY9ehPcn1C0j`BQe?BJS0M0*5k!zgh?YnD_cn^tkGP2(K#;W zd1p=nnmPLU)#Kb|Lk2m{^Q*_TwD$}FLMk>`=_x^g-pDr*0iC(zDM5hV$TtxIV&8x2 zDS(8@6F~qsMkn#29$HU`nS0{bZ~Emh^>@ zkLXKD73c@lQ@Mz&2j9;AFkOS_kI+zd-<4#K{cq^(AW5RK-oLn}t_-IQM)>_gAM*|8 zNwlvkE3g075)lOOLUFMP!@YkVj?9;p)Ghhe_J~aj%m+Ul=Aaeh?nB+qaHuA~$nR@sk!v)prpOyp71Nq9L_N1d9)13Zk zv8zMNC|(bok>TA=p7bPW>dF*s6}opghU(~eymGIFGl#I74EGoC`y!atgE=SS z!W40bgtzE;wmHyg0YNU-!V&FAxw;*xriy&9&6cE|`^cavH#Kz>8`H^ai#&1QZEJwc zB%UMoa|~`0tFAA{X#{_5UFAY7(>RJibZ|_)I#K-+H=8kM{1uCPCWvd0>De{TMgl?89iqgG7H<{}^aoXHD~aW-;Cf%AYd5s?-CL z(UkytzmJFA5N(s177l~Zsq3!&C#6QGQDdj8`;J%j3O;p$HR(D`&c5|t>WtvH@Mm(y z;5gJd%}KAF;gi*Vf>_X3;**0ttON1lW~ZCvlNZb~bTU6-oKDVza9QAl=Ll7!|ZlH-}=mTwZ_?R&%*w`t@q|PqCX_(aE>VnB0^9ZJD(U}jqZgW{h7axtG>2ZbR zY5?=Z!sD-jIqJiL{R81DdV)2Xb>7GA0>u;waMq*CBWq5;86ENg9m*JuU8ix{0^FkO zcDB3o8MIg*`Bb85@6i)yU)`zh$Y+Uxmx5Wc~8L6OII$vUIx6Rt@}_R~%bCS9^SahPQ#E|p*jBR>K^}88-9vLCT8ZQtay&Ji9GDJ|q+^B2 
zL_^Lo>|zA$R!VX9w2XvAh%rS|DvolClXEz3Y8WwmUZv`;pGa#m2x(MlZLqSe=m*8P zC9Rkg&vao6Ze8%RbRnS`XC1yp_q5<9iCSD>Wk%Am+7{R~A)cZNV1==6T~Tcog?f6_ zTvx&T=hlzzZv7Zp1d9n-6eiZ;8y$7dk+>zFJsVemh^ym=mDbh{tHFnw4)<-Q(IZE) zFEWkD89jo3oM~>K6&~ND6pJNQ5`PxyO39)>n8NfltMQb_VtLa2DfgvN-q-P?@E;<4 z1K~eK{7n(=MEI77zb(RdMEI@<-xJ|35xy_Nb`i!9?!j-bc>Vz4hxmOYp5q92i}=R~ zKN0ay5&lcW_aWRb;-4WrAmX1R{6fUPMEI46A4GUa#5)jDN2#wC{70Rqcq;z0intA7 znuzy8m@eYI5%v-Bz6kq?_yB~mh-V-iDB^<H){PyGb8GZ-w!$0YK{AU;O3lLt2-$mltf$(DdE)ma{BFw@sTRaazcsYJU z@f#-6Mj#xC-zfY>i?lHabMae*-(r#GLO2#b6~CZJ8;5Ybh+m0tf{5oKbc^^zg!v*~ zfY2l2lMqf8aRs4Q#IF+J6oftzpNjBm5ide`jffW`oF?Kk5Y80w?;-rYh+l_rj)?ma z&K2VY7&92yYYd2*TS%Jc{rR5x*1RDiQxLgg+GV zyAa+j;`bo@k%<2o;k_b$AHw@Z{HF+iCgT5%@aH1_0K#90_%9LuO2mJS@P9=7Hwb?# z;twMHorwP);X@++2ZaAC;(tW=u!ui`@c%^oPY53s@y8JUS;SiqJ}%-3I;d3JX7leNm@#hh~AmT3~+=|~e z{I-jqE`IOf zw@akGkFZ_DV+cPG@i@ZWBK{%5k3@VA!o4E?F~Uzo{8NPg67eq(ektN#Av`GJhY)s% zxRe4OML+PV6ojcFZbfJl@ic_JL_8f~ZxQcsL)c%$2OyM1JOklC5g&~3bP+!T z;h7?yiSR5DKO5mWB7QEy?}+$$2+tRBJHiV@{6d5miMRvd#Ug$Q!b?Rw3t_g1Uxsjq zhz~_LOvJy7Fh|6l2#1UKNQ9$Ad^Ex_BA$!TCE{0#a00?S5qFDlq6qU57KpezO5#hB6i$#2z2&W^QA>uPdI1AzTMEv_AoQ?20 z5uYQ%>k;}ze69#f5Y7|vQW2IR42XES2KUL(RZ!jK5VB5V@jts-10 z!e$X_BD_t65rnsk_#GnrFN8nDkN%YC#18iiul9;p+v}e0)}HffFT1s8-P63<26t$e z#oM-f!gQ}?L$P<;S3cJ+IeI?|d9_dDz02kE3n;pNN>n{3MTt)8P@?CKQKEx;$t^?V zmOgp8u@&iBOwnFbw7nCX`TZxzE+Q;?nqN z`Fts_dAJh225+}2ZS8owRoQkx(bg&27O!i|vPwnU=8K*yQ!8${Wkf;l3rn9bXdX0s zj{7?I9QXC>SK-YVv=RKkr?txG=SkkshbcZlfu?+x7@AUCTjZvjIywMtfn-sl{Q$0$ zTGgJ5l_6O^zmf3b(_TxeS3W;X$|==ob?=^tem-w zma1r*<6HIi71tZ;IMzzjcJ(wR^o2z|m*itKhL$7CBx-Pz^-MgEi{DqiN^Z%|Xzr!I zjK8I5pT$Gv<-_#)XzcfUvkzhHr%f;Rh7M)OchGw8)xH8HP)D1#%i%1Rvez4}a#(!Y zI=B4zNQXymQ96KLi!b|aj72{O#;m+;Q@R*8Bu#TzFnT`APPrvFFZYXO)djgP%D1-x z*n-^Wmj0mJt&MabTWyC=+pK8ucwdaa+%mgx9+CP07h-Z_cz1jZep^lEi>|KPF<;x43K2$IAYCB?IBGuhI z-qD=SE7I$%?TC#40ePdsx<5mT@am#D^UGCF9QM8YI+WYNydLtfKY(cW$BkC_<0~6PM>ni z)g5E6vZ!Y&+80W6g+*~43dS)3*X#Imy`t{Lq)!3K25oUWCMp}9&(-w2<{N?fb@;WS z*j@k`MS^xBQ+ 
zcq7vs>At8HguhM6#`Jq$UP&7}pkeuy%Ib%H&;JyyU1>Xzs>lzoV|Y;_AnwhX7}rci zo7oNm-ivUr+){j?dFBBc|KL9gwE_obj+pvl{xp3khkF4&<%t6r4E(K7TpQ(;w4YP7 zJ#4CYBjX(mQZI%%`w)i5i&=FcYxh&RrY~zWv@TN~coiC+0{(N=0ptk2LlfA>swj+F zhZTZ+351O9;4ttVwf0q-o?Y%&))-xF`<(jug6Jn`SMTRyG*^#)E=E7=59Bp3Mn_|o z;Ok!WAUptlDPINnz%bSUF|B}aA_^!1zKy|mk^y1$dz|}Y=b$9ey@Z3#;9sTF3;5ad zjGt+*fCDm)=K6~wPkIUSUhU2DHnj%rN_PAShRd73q+o@t+)TUV#wX}Mxp2MC!&bSc zd$oOFC5rYks{K;Q-p9G*1oSL})5M|$07h=6ow)mH{3odw=_BLQJ_6ZePGa?8ZG(&H z1r6N$EboU6XQ>p zuTr!ZecIcY{yeMJ7XO11y@Gnp5`Zta_Pl$VTl?4aV(lfu6-x(O0L&YeNM26)MdudL zu0#r+<7{<@-m|EuH7?7Q)bGeGP4^<%ox4H4yVcYQPFI4~m52vRK~at}?sRJO2|xZ* z|GI_(AiZOIywxLTAdjLX#f|ll z>UYni?(hX`da-iz?j2NRsX}J%xE{Z#E^$p+{9hpIhYcyO6KJ_vP^0h`6=3rdIU~=XUK?tgi)F z@~`wX7hE9~xSp%qt!M>~icJO9S}K_;-})4lj9&BA6C|PgqK`RN5wxy#KJ76_BSqy& z>qzdC!;i2wAfEjR5nptw)vc{VJEx0wAfQ=Yo@VQn1+MLNA1Yed(Tw7o!d9w~8nek^ z+BRT~Nn6bud)n~`y>e~yX-_*Ipr|})8}ATpUZ_J&q-9XFyB!bG>j*HiDLj};afXIr zb7Zb&1gGsOh}bvN8{}WFV6|k#ri`m{WGa!74mnJln#gIPFD{hBw0C?4IUCnveSvJ= zFH3QKF0Z65trEH2(TG=@nUcC`dx7kEduUZJNfOPd4iM&xiUt|4z%i1y5rH^5&PVOw ztsL%?Xo!>}R6TEp+8~t-wediwHb7>g1-*jjcXYAu6^>#awL%K zd>^V+qDviiKzF&QfFl=+oX^#v4pbsxj`wJ-#mLZzVm{Yr{4FDa#M`Bgxjxq(bpUbH zT5P8DQk;Sf7ArTMM?l@Yf#}_*?bJmjw?8pI3bc!2*8^r<9>6{>&8=-Ez|Vx7>wH#w zA=GA(TgK-=tcl1WT|HHzFWrBhDfw_6hln z_>G*kg>9_(u8QQKn|C);?;?w_0-6@o1~9rIW8`L1!4Q-BRFN;+^r`&4Hu<}ZRm&lw z*H3-&)eiBwH%82=OPgbtwiIY`wT_C_Y9dn0Ekk?R~;iq@uN zZ&O@%hewYzAFif)jxrz4C%V7bJRfdDDfcr- z>4+Ufv}f~S21xr^u1*nquh8zIq;*13B9qhQY@Y*)L&%;$oTLotC5OpGCO4CTi53~! 
zH*aJ#t3*bS&X2apt>DsEMXgy%HrPTHo`me00d?&_Ib6-kzQ{@ejO~hc9aaW3l1Gim z+0!88x^}49O0>8GQs^SP&L7g{mf?&$V1>zRuI*^P3XEdi;$5H08H}TmB$z{jPllL9 ztt77GxI+gzl^`MtkygQpWf-*Z|b@-1Zm}L=@ zw87R7k~e5?!}Km*?XTG3fD}TzvwRRXd_%^pO($Z}m=+E_NFu9DA(}b2E9f22c?co{ z2@cUb%VrP8h)PyDGRn0gD--NC<)!IzzK5rv3@s!W4L;7^2r0bt)4fBcgG5t-))dGK zXq|SVOb6JexL#PixGtbXkVAvYoRKOi1Pe(c0%26q-)rjq-x4ek5IqV>#E&VBxSr`We=*@4Z zIh~MLDj1cwdU#Vviy*Dbb!5)F{=ll zJ{_?%vC;00RJQ`DGQ5(2f&S=ukWQE2z`}vyK_IyQ_aeEi{6L_lmlA| zY(UwcD6YW{d1WT@%eRnsj}jRSn&KTWTBjB-Mi%UP6h@E%S}I_KHa>5s3loa zOvr<@vyBcO07Z%%rWM8udr$0Nh;Y748*3l%m;DNBgHRWH1#fkFjx4r^Ua+!vv09`P z4+N+UG*;$!Ygw5*Nvvg05yi_U>Zk511n|&s%NudAOV(`4UNcao(-2VXxZZS zz7olS@|M`3XhqCbBJsCT4kVt1>_p=4>OEx8=Fwaw==4-fXD$o~ssys=c?;8>XM+Y< z13DafDXiX5({ftR6o?V*1l{#%YJ$uKbtfPa zJeRYH%^o}IV0>HI?8#+u4Z8TK2L$EG!Vg2svNq;Qa`9AR|Z9DAO5Kb^c5VGp_62qi9#F%zY`@VKQwL#!>Gkg$wFjg>BL0b?e4+|ew z`y3yXce!y`Pe^ZC7%6jPlA3v@mv;8BfqIe0?sLU;V^BY=^o#^x54r$5k|zLRqc1zA zxZ0MTt}yEm4Fw*T|4G}@)Y|ZeLTwjSOU(v<=W_u7IZS>@-pJG}Z)66R8BCgv_9gFi zbbz3n$&JdrX;Pl#8#mrTdN1@3?@jtGuw=dS01o;Kt~~|Lsq>Jb+7G^e*nST`a=)kb zq~GWHUGjM1tnEOjA2xUT5594%)8t&%sng+id8dDRv`*_>gfxf@{cddo zu+?-Cw4uJpP@=2Vr{OAcVQzMUM?Zs>OM9AUqpp3h5Inf*WWWkECC|_q= z*gS{{z7&cb!`PPKVxW6tF3U}?0-F%HDhiU-imTQt)R^`!R+leYkpmm6Pph!gXu|#g zTX&e;$-LSmmXwDG2oZ)28binw-slWFBLXIn7HZ2eQ>Y|1By0|CqKyd$R9jDmJoEPP?n!@`{kq%!e8l=Q`R!xt&;5!1BxJU< z&SoJwychY9ftiM4@8F?))n4K_;8035$avGs#FXG10cjdcX&twOGzDN4W;qFyb{ett z*mR_ouPT>ou5;wM<$>4d#0G;&)3A752i5-2BZ6@W)ioMjLxcnG3pTbfl`R3_OFTdJ zVn@zD7@UXkF|=34+AGgtmFQFC3>tQ>mDVYq!YHPpSVB_KU)DJLkJbo%UFaM)D}ePp zx_m`>&GshdAfdprd-HC#924S9iwT5#0Oo9EcqAgv!J|ZxB8o8ElH9_U36hjA!31XD z5eN7g9!Nj|zF{-v24+fsk)_klRV)-b3(?lIvdZd5?|$Wy_P)aglK%Fy50tjgCn#-S zoDG{DG^FzFbtq43gQ7&kj;#a^9#Gj&9XshkBVPL&wz!J+Q~o*|(XitIdKo$(%PSS) zv@ZBzP>;#^N@#(xRzL|Hb;G5F6v6+;SjYh=f`7|nNPC)M+6$xz-UUT4=?iOoqz7i2 zNa%q#kskO(CG`GYtbbRa|0Bo3kS~X$)Oz;kKGe>%z~?{eS5gaW_rROntX1d^Lk(Hh=#WPa}M~iIq zd?ucmMmSoOM$b?R=P{))gqy4>0FP6gFCxq(WM71a10tyq zA_^R|*#S!S__DFahnb0z#$^jj)#bKA?SK#dbi(Qa*8~XC4TI7xrZ9|#zE3jV 
z>EsdNfD!Kp0K)EQXgQwdy>PwBEmAwFeSP{8ug)`6BSi_-2-YkkIVX|Ksou1j_|Zv7 zhn!x_(%~wF)B@NgQ*R8f06~`666#0>W(cBK6XSH?8%mTdjr#IMn`z9A@LF=Hid=!L z$RWfihI=_%V4MZ?p3fD5oY<`>Nc5P{kE=E~D~=Qj@-4xSVtv4~$c;&R#n};C20#?C zJfF*2<3p??8{xp69UW^)Wa2G}%%i(zj&IXzlDod-u6-l)j;?0pq7wq#>bO2Ieg|uW zOTG!47G4{>G@X-m<#s#|mlK(551d0HSLczMDKZ&M{F-*`M%9ZkRR_>UNgk-QE0G!4 zAQv5g-$st`@dyKUP-JAjLXI3mW@drsq>{gg658IO44Iq@)5Z^(4_I{YZOW5|x0L*F zn_|I8PqrgdhHw%?(pF{lbpB^!n_;S-w4|)|GpTYi`M}V#X!cstpa=xiOvfri%Y7t2 zM;-`5hhL^vgfJic4qL5AUb>uc zkqvrGZ2MO}NU1_xE5w2;)Vvm0bxFTF2P43gCh3KdLNMYN)GR zb$=+lCGDO@u8uq#qb$f{Nq%R1eZ>Bs`VD6dGLtgrQ*t|k=JGE}v_JJ9xPk~ZR*BS^ zm&*c27M2F>JvkeTkOj)h4`_wtdspHVFx43RCPU{po-mf8S5T+U`8dkK2vGstDn4Vi zVad&)hS^Ax7z0v(atZ3`OPIrnYwM!#px?}2GKafK!UwgvuA^BY-F00;@2&k)pNxEN z_Jza_mx|!&@pDPP?UY-_DzK^NH81HPzhkERes1wD#_4pevjIFol4Wqp;MtJ5bCYFm zH|b_N;#}WzxCw6f`}t^wjGLUHo*}oSxtoFK$>|^*ll|}$IKQ%?BWy5x9lS?b{BY0k z6wH7_N)Q(5F;3x!qpx6EFQgcyd42F1X#AqTd{1lR7k%V=)?KjGlz}>Mvg<=nb0=Hi zB!_mWe)gPheug=F%oB`rf`r>XNK;N&{dt!KU{g@OPI#{7NHobSlEzJGJ@E zAX0mspM8;Dh42d+4DSQ%dN%vA`S>z<*_>DJ-RNx;ONmI05@z>A-`=j>$Q=ZMIe+&NP$J5GB1pip{)8v7@;(tHR z@jDge-P7P=cgb`iG{NJ;BZc7>r$oL(Ko;a)G#itYJD!3Lg-Vsb#5BQPYR{%rz7Cl3 zn)-Jt>&GX=t7}wyDK0Tp1NM5gZ0y)Yfl_=#!(LTVH$PBkSMfms`$BvVyB^;;%*qyE zN#=`E$BxC#R;4x7^=e(PRJB(JtE%i&DLyG&StbfxTtC+C_v7PZ_N>Jfm8BK-%6j|7 zHwLrqSE7Z=`k`0y>N~x~S=ow!ojxg1MFnbts=cP%UItKy*^8wYtB;V?3|&)=Va@Tvz^1|TvxVq#OxgB9HUDN zQsM{7I3_#0r}?7uGt`Tf$a1XBT%3l3-06)IE>pR3x~cUED&&(rJ7EZfXL0!|kMms~ z75zRH^+9fd-?rT12Urv2HN&_FL5|Ck_EATXJ$Jp~puL{jKi2$oGKUG>5w}ZC~HWfG77kJch7d?aMV4m?%a0Fg5(vL;QRNt2Q3LsLCl?A%-|(W zY>}5~AYAd_fcO|%FG3sPhY(jU2`!WQ20w;dJE{8AcIX!15WF#Gbt>!Qp$>R?#e=W$ z((ud5po1Q>Fr}4jt|1|{HZ$X&PH*faszOj?s~yS5OtkNy)2RH9+twLIU{WUyI~A70 z_ag_%W*M*`L2nM_>U^XWMwa6NugBxT8<`9VngmJOdG-?pKqD2IH*5LsStU-PwfJE z>8w>HQX|A}K#$#L6*PX|K@Kw3>+Qc!z1Ph~h|z%koVJ;nETdn~CpN%<6Gt`PjBQ|Q z!t3?7!YIz){xkLy+GW!i3NZXS7_X~v_=6QM7N8uV4Ox5_;=Ug4u6!)%e}ldwH5i;P z$#w5sU*uwX*U{xg_!3-sO6WjF&0>PMLtA>LUMHxK%cO#yDEPFp> 
z4`QP(UT4NPGWm+^A{~3>Hcxu|VWKbNxnKMR10Htvj-X)&3Yp@f0DOcmT)}?&a6z>V zT{(jlvz`rZfr%5ibD>?o2bweUK26War@wPQIEG$@w#VHptnBfJ2`?BLy(xXGgqbj4 z>_AB>&1kkr_gM_(2!YQ27(TyE@N^QJWys2RF~1J$!=S|f* zZLi!!$EqQNrRO*Ed`cv8FV)^MFuXl@PK@*%od3uz*FdG#mJ)_prxB^SI4_ddcyL~D zbm*yOJg2BbmB?LN5mTa1Z9(9Eq89%<_iqlZLP*2OPK?Aa#&2$vz`YLK$zT*-Uh!*~=@$=IcU&8l^FYWwYiZoD?o5tbQ?u-y0b+e8;pM|ej!r4I-iUq*s z7{3F)-0~fVz=ETmHRJ8_%1?tD9 zOl%J0_{n2ljRN{$8e<+Cft;dV=k@{(D1EOB3y+0EGsplPdL z5LKlV9g`gIa(E6$Lo4!{FbVx#K{6(N(OjU`~ zx=Ny;83$eT;bbGy?QrhkMY{sAmR@u>oLyRH=U#a0FjL4Y>Ee#VwC)ZaN|D1U$e<%b zCo4Lo)0M1%j4?fi<8xN6AS=K@7s?zS4r_3EXBhsEKk+_Rfp?u| zTt`8(&mp&?G>r>!08Cq?#2yEMuO`z#>_SWc9`nLp!K(u&kYW=sKYLo;E>zdkyc3NE zv%4u}hBbD-k=1I%mLn#tRz%4{`O2*qM@T@lp1VFc4;yZ&UahH=oWq6>b2=sc z5<>sH%4%A;rH0(me&hN!JyB1<0*%WB?X|ePS?ZKlUQ$^VD7!#9;vRbv%Vbw;>{T@- zWp-(pG!jpWXsB9&C(hG?h_N*C1QlS2VXEtw}(2Nu(( zJEcW*d30$>)lf>LtLKK62LfeMX-QpOWy$=&(7J#ctgA-VQn6Pm@Oh+yd?$Xx@f(5P zNNK8jid6i)sc3Yfl2kiD0zzWC2#Ra{6^q696zVTTMsmpR+ZqpNVu|kD88Dr zK)NAVqnaM8gA3;c>W0==&aalr=z}~WUK^+@4OFZAMae=u)|XV5NhMHhsC6~96#=O# zP_7QGEvZAUia>p3y;N5@zalxMq#oZpsjOcBtOx6PRk)maVTn|+1T|DksI{`DOscOc zsjrZ34Aj*~`2LNgF0PT(in>4m7z)-&<@i#iRA1R3)dv=#On_REstKF;fKW{}k{4F; z7=5T`D5-IHQvrMmr%E(CpQ(N7OmA-p;+stdRtPNgxDrECqDpQF*p&(-k2Fb|EGd## zx=NZN`J_T=s&utfBwZt2D-}!Ar0LQOX{Izw`kwTCj*faXSr-^8E>q`3z&giq)9H0- zWnF1-VR==cLBf{@f#FhovQL^{SF$KDw4{{KFO^91q*AF&0xqQaQiW70{XkkERY?n_ zYN$9iu`HOmYsjvZ^c~6?lB0U(f{p zudP7iRVCF6q|$1&u1YGaoL{LDbhQ;yRn6i+orDiD0mNFmqg@KtVlbr!o+&g=3Z~)! zqyo$L17IkMw*I)zV`^{EhP2xwnYwln+f4HbFx|;`v?6!a*g?KHE;K^akp)Nl&EoKVX%9g8FQh|F+tY zf?gv0_(yh2GMC*=#MnrT?+n*7cVDnf7D~UQ_-RJU_+z##3!~ z*rc!xN)38kos#o>inK3<=ji+E)Zve(O0iV_c*bL?w$D-tGq~H2MehzwUHtMrf5p_{DIK`Mrsgk;LlY8m|(oAk33+a9a* zRqD`PR?s*yY_i#qOc|cBjNW3C?n|NR^m*#gcdXKVlwrNihUA{q(B}%CS%p(s!RFeRgIx^fs9`L?6qfJnx@{>}Vz9Q^MMl zDYkv7(vMRY^I=G;c+jnu$9v-v(!RFl0n$_bFq~BG%32h=C&jihL;CMj+aX!H$7=g< zfOJcm?N73_y0`6jvhi#%bY9BNf=}(?! 
zLww(9^!9gyd4+=>wA_il|FQ3s#(_ZPGe~7~-~!7TKeE_fuv)%MEymFltL-N?>A?hn zusqoo`}dJwOR@bZ)$(Nut_!(6m7)))+MsrNm1dLRkbrz_GY!Mz7Mqra>qBke{VgfB z-`S*=RNJ3y(yca1UuC0|`)qxX@@p3VlA1J^{s}7$UXIdFveN5p(vNI5q=S@^@_Sn! zq&&vr5r)b8M_DeX7t|4X&KZngZ%3TPj*QUVq`B;1WEY|?g957Fu{9p>L# z3Fd=VnhyW6N}E$`k6NW4S#3yvz}g4#Jz{*0y~NeU52b=YthW9ArCV$hBe(y0KpfpzP>xLzX@LZ76g50F^iNBg@r)=tT|vs+aU}YR-KS9ny-B@+!$9 zKwvJ(3=3_~^u;Z-shj%Jphfyg&!*UZ+E02YHT6$@q^EmP{I+z;^JH(^y?vzr?Nh|c z7BIy8*n07U{iPogke}LIYx_$dq!EzM_OaiI1*gwdSSR{ki0Cc-?gfy)>Tg4q)%|TK z^SAy28+Ti-fqVcU-?0&pui2zVTM<(36Z7mufHX$$tA57l-PJ!adWh?z_fCI|ULF`| zUmK!p`#ubKKkH{hjyw9>Q2ap?yxSSP&oX#h3Et62X%z6DbN=6ffnTTD?g#1#(SPnm z(TxJ<=L~reHQ#Qv-JT}BMr8S0+lt?$Nt@G%DcxUUnN?}~T?THFwmpzRlkcv9(mzve zp$yBPt$$7VGgjx`wzfgioqbVocYoVKS-Ne2?PFPbM!pO$@6Vu=Lm8;*kwKLF;vm~w zgDk%}&Gyh~rnx@mhZf}6VzI64C52Nk3_DY7H}{fmNkuBel`qqz2dz`sQu273^z$^^ zAJe3l(=K77o_+xuo@ZN5)MGstarQ^{4dHvlZ0O8%gS)j>z{4|tmg3q>r@RqKxH7Yn zF#F~54NL0mn5e3AeYhpf7$i52GoipcwW3!o+QYYvn7jXgkw^^P1MakMxa z_vxS>>I21FDK1>h`N2sd2i3_h*Qkwy32$tzdkksO%irbURk z;a1ozYQUZne#pOOw>pu<1oz+B*{RmK?$|?kRZKz1ED?H=QZK4N@ok}X%C+si$IyYilZjVduci_z&fqLXQqHK$A`&u0*q(_ zIO1a-b2GIx+eb6d;C~_x3H=#w1hha({25{WjbF<6jJ7d8Rgb_Op4}f6uN9HlFQ_+S zfDI*wJ|4$|V}@J!%7D8cZ8@t*-&{BeHNi51}xla=uv#-|j) zZpgKi*iSc%it#As4U*tVIw}NZ{+oD`uk&>W-*%pq15wVzlkUb`;XJ7hv6I1*6616@ z+6gUnSYcYOM1vHCIdD0mY$oaqEy0oQ!mF=`BjJ1~@l4Ki?@rVw_}ly> z{>Cn#(fJr-ac~jhERG_K#ZiQ@IJ)KQ7wEV?R^|By}>*NswRHOy&mGn>$s(pVV;h%Aj1%B=4`+#7nq% zWby(XJ9U4r$?5TyPX1qi<&M3~TemDwUJ|TQ{UxwZzuf-M#o={L!Rt%db&T5{BsI%xjfy;H*~;5@O+%ieG(Nm`fyniKACVD>){IOB6_&|x45I=SExJ_z6r?vn?h}G zUU+}7iYmih0H};kFX7xQlOW-rYIOjBZ0Gi-R-deV;*-7WFr08j-8&l}0$|6%#9)P~ zyHa8H9`DW6*oqZph!7x4OAE9E@n^U{mM@BHgo!o!T(z01(?B(@wE-x;NCTv>3Q)r) zU!)Ql&!WDf$LMbCd#H64GSQXh4Tv*3!V$9A0z6P+DMF5KB|15s%PjmhYc{U6=47gK^}%3gZL#^RMVA_=ON^*6#hTgu_Y8}4XJl8`vjXCx z@Lt-?t6=5OK{SHVE9CgcF|F1_NlX)te0GYTfzv+r!%K;BQzzzR$ofH7LHW@v~!D@^i#>*RO$i!6-dkbYxn-aPy zQ}W85zXJ=g*~q2WkMZ$_mS>Vr{(AgAja|uJ(ml7V~W^x>;%)nB}v3|*yZAlR6BU`fi(wE 
z6opppWED2j%~{Ah*ue@i{J>F%H4~Y50DOrZtbALEHd6uOX*(1f)j`oVei$@Fz|nIn z8b;Cvd0?m(NqY0VKf-^1MT5yK7b&e!A`9kvUAxuG6Z;u_!UO)|9jaf61hI&0Bug)Y z4Ecg@QvtFW%eWN3mR3LE)DG`(g-Z29`YgQ!Tz@GI=OyadJ?ltk$_CDVQ8iV0CtX_) zdLvR=4?GdPOi@A_lEc(^8mm#Sj?e4aR9KV`P3WukqAP|`FFUsj4zibxWoUDT$x$C8 z#Lh$L$73UOfa1I4RrT<#!zWV6pX%;p$DPm=pdSs#zTl<23~ z8JApdacZTaqz!1}e9n?wVt0A8eG}!DyDxTu611&w zafLZ|I`*)8F(-AuSh8*_P3B2 z`XILvcb4D!GUZD+>A@*>2c;NZlLpOUMJv zpB3~Cf7WLWcfUA95$^6c5A}3$qysEm{=ij)Oit|N**yWM1;J$bbkFXCaOd7vpvCj$ zHII!X+9lE8Qu61fa|ULY?smC}?%xwG-7K#XdFBBmaW;w>Z^NxI!P-dE?MQsEQEvJZ zLM3t!r`~6XmVReP6YAMYB-)B6xdlFzUQndll8(Y(<^)SOH<3CIncybfF6fZgA8LAp z-slS;F&n=AC5ni)P?G$KS%a(!Rzdt3fmfd2+O>)J{hVI_=VBhQS>)V~8PKr}-%en6 zj&;GgnYC1R_Vcf-w&U<#4Mco+`57K*jOBQ%+37rcJq`)Ly{r37fYMYyJJ)A0E;}m6 zk%Ayxo_9yq!7pi_w4T0f48Ez8s#7 z4DhSIP7ZHFI$~LJ_~$&9Er;ndhDe(&hri^po8<5f6r;2H7LQ6(Bx;VED^OF-T+u}&rI~H9gUw`7WHoY;wG7qEMF{`Q?T5U6?+#mSgeyQt#Ryx z;H~&P6NV+(z4(08FSkszj33MI%N&1&9DV~WjlV!{x)|?hjSyb*oD<>u2sj%-e)7pa z$gww&l#n+zhnn$>iX>jFA7ZgVOh#)&6#aZ^FAG96w2I};2Q4aA=`$h2r*!BT>44;; z&p(F9bmY}v^6zr2wj;@yI_to~_#y^6!u8|kq{ z3}XUXf*ZV<^AxrsV!F>}(|nF|KmRb4zW53Lz~F-k7Z}6M<{Znh3s5PChxjaH|2-XB z>As~lzzMa+BM1b)NN$b8MNq^wUY@7G&4iB#U)N$(4}ZU((FjyzHzP};cyhJ0sToZi zz579}rP`Am%VX&#{!f#D1`F%#C;Rk)uI^;k2DojrZVVo5k8CMp>K$G8AP=CkfO$#Q`8rFDTNAN=!#jAW&7Z(eBA1o_h#D zU3mH1Vpo#za=7C|!P}SwY3?&v2{0vffBNSrl?oBD$qA@qe`M1)PvT-r1(J9_F)+)5 z7bS+mGz`i3*NK|=TN0JioMQPH$fARYUP3n}`p}nfch; z;=Y`nw9%&z($PJ;>?llAE5BPwJBY)BitO3ye3mXD(ms)Y*yh94#Pn?(c3%%E(f*94 za6b-?!}cn~C$geT&t!W^+{a`+b4t{DDeV}=-K#h(*R*}brBJ}EmpV#Ig3!ZaQ;FDaz)R#LDRpCZ>L*AQU2NZIx(9ZdvN+mzSFxycjnD;g(8VHYYeZ_|Tl7 z+-aEcrLTTtg9CM1r>V!A;~d?|RhZO{iSHhkUoQx#T{yO>T&IEKqNg|;ptE;siv4l@ zfIcb-7JV;Z5W@^mOH zzD6e_E1npDT@vs@{L%Pg5csTN3$c26WMwnnVoNuOzLax%2y34M2P>_|4oDojz?NY2k|XtNgStDS126{swhZ#{M?QS#X-gN)DM{iB4AN^%hZuvgUf! 
ze+GV9geXv1Jpum@qS)FY`2Tc@X(Qw_=*+|#2cFJkDfM`|3^9JkcYUU^W+|T76}(z; zmcsP@{OKmUvX=V5pXzfITv;$Ly}VyFE-EO_Q&wMi;dj4#-E}3s6b;TsYwP-}y%&KX z6s=xS*6@6F{gHDPow&WJNL*W3t`%3|G{J=p&e8NL$DE=&{jlsiMQ!@c+lNYHi!1&iT)UAv&{u>BxHOPvXQ-xu9(Fga`?bg zBQNf6v&&86AxW?|``hy5reXA?-*<}9-oFuMtFyW!rpQfn`jC7j(y99WZH;pH6G-Q* zrdYcnrwc&Yvd92q{}DC9B!o@T78F)+k8Oq$;m5T@n=C8~_Bbr0+VRIqJfUA^=$@u% zd%3w0OSgh+Ve#!~obkY|d2|_~{4j1WStsA}Dhgt4hlS{4`eZJ?7mHW;($$tl6$);= z!Ix_B`ZV_4SpIEXtS*(pSN^Z#}&S)HP73&GxaKbC7V8%3Y*{oV;_X}$%hCYNc81tt%$zO zo6(FbA6;$1w|o|SR{4KIv?A zJ{!je$*!Ol%>zZx_+$JA-|v8YXbhUJK7*j*i_}0qyp$P&c0xY1(g%x`)#4+@_^2`j zjUUr)B_S=69iy|f_%@Rk$*&0+<)*7pf?a0*xgPm{vA*AANTc7Kl;F+=i0q9(h5Nf5{32|4>4z&bG%)oCJ{WsSh%0*3s+~exhR#>L z4h?7Fmq&2da=0%e=;VQQ4DJSk``HBCR{`!D0XMGYhRNbFWpf_u5Ca{!Kv7`o#=WRJ z_-hl$**kEvW6ohfF33w&+I9luO~i8A=)V`h)!N8V3VD?T@Av8ANMH6YMn;g@=>qd0 zfMgbL(~S}!e#yILCr8K%2<(cMB!9QT#0-bgoR|dGB=F&l%oLsEeZi%w=wuNrc{9;T zT#ms$!U%d`Bd8VGVaS?nXADevGC__^g!e^9VjAzV#=~9dekwayOZSmXo#e)SW4Owz z9>QWN&imHet11^(s>Ae~y@I&itG=S94ptIzc?r(g;#BkHcHBVgkq9>h^lE*9FvU9v-pa}{l?n73`p&0k1YdZoqR zrwcx33%(bbC?>@uMnm$zNx=Ig5Z6TDy;c1Unwuw(J_!?YB7Wr=@jDIv7G06c=mA_Y zj}XpQ(6zk*F-zw147Ue{cIQ!_O#X5Oj$CK!W-H_%Ob}Y5h-P@7$^Qd1*3W@{B7`$j zXuJcRo&jKR#X7)1ze`xZ*^EX1P5sVCzt>@np~<-ohHG?NUWxscM3wlW^E%XOvdZV0 zoG#M(k1xtjmyD+T~7td97RBiT?GIbsGbfOnaWu$G-7-oMdJtiJw^fF zC=s95mYcQ^?4b>{#@HG7>~8!ZU=e|nLm7=9X_o+El_M`Pe$}jhMX|yz;|CJQp=8te z)h3N!qFPcgP~JRK-YZ#CWGALi{=k*J(@1{+hL|+cxVAkpp;(ztV=c^U#+WCK_l8D{ zck+G04H)e#zH!BLgBX>Ck;As(c3PXYG$phNiHp;L5Bd;Zq&|-XMLVWuMmlheWFM#= zkD2HRVGS>c2#Sat7$wYA#?^ymook|mB8*{Pf)eI3O2GU(21-anb*vUn38Z(zV@6-K zh7fN4_$ZSlL%SPwQV{D)cXUvp)4^Ou2jo`6=%A=G9n6hgjd#X8&LPIQlE&6<8rym* zzKX!%)zQ?aJVmBF3Tr|ie{k%Zl4rWGF|k;8NjqJ^4m-VUdwLs)__8H89wQ~kJT4HkIR*A z`QeR}uC1?i!dS}QS$5Q33>OaE{QWT>T;|@5Kw+hF^jY)ym;zc!IS6TXK9?1@YMh8r^K_p5wk=tY=Zy110+SR=oB_ z=YyO%Kh$x(6g|2@E-!s7!n{UvaXe^5B)T3*u0aj3N zoI@M?5LCj)hfFG$F$(1Tqcf^a`Y<91TyY{ug6yibHC0QhYZj7SmhL_!A!TCcOl5%v zLvF&I6=dDD<4TyJ30thav?f@s3JIrEdDdKApp0&B5hh|hB$sC;4Sp8dU7I90VBy+7 
z`7Ep46ay|uT5>JUl5hPBJw$LmX=XZIWq#T!Wn7LNzJpRjt&|jm?;tK3VEoO_*jWZ~ zIjnz#A@llYb!^Jdv28V|^ks^^z0HP0wMEc}bgf>E- z7fJF@_by&Hft_EOWdVSMvsw@Q(taLqWq4zvCpxG98T3#4fo@#X04iLF7UU493e%G;16UsH zhuKL#M06NUJ&?mcL;a!`ScQa&m3pz3D${#`#oy?K87EFNJwP2H@KSnQc)5bq9@K5! z!H%!9p^bTfM{XJdl45L7jDzswTN!`wxbOwf!T*f+|8wI_+KNr|-*eMRXTeXt?#5=L z({2-s26#T(Jwm7g(}yC8gj%Zu7OqT0u)Pr+e;SFibeNCrz`tW~{4!%>Q%$CxX8}NllCLWu&p6Oo{0{AuG7H{o#}v? zAxv}tq;vgp59om|&<2>;gC*!;xuA!sc$Z8MO`YlC1}feSJrpCoGd)b8ysWo+Q#X;Do#OQ6W}6=0jMheZ)+3 zUK}#DK)@-?-sP2_*`$Q_PRL%Tlx|ln+u7Xmh7M)OchE=k3TfLHKMS7;@@2m*H)X;_ zg!W^e=8+DsYmYh(X8WMz89UOk^dEA|bc~*-!=3wz9G!xU&Ey)n307rzVYOS3y!1mV zThKfjMS~l0C`HYMGog&%%nomKa!2kqIr;(`<>Q%LJa^mDkGwdk)I0#?f?Ir%A(%)w zAU)Sf%^o<%*^-ME52YC0H*z<97da$at`93F-o zKGcZ*+v%J-kjm8nYXLQC!=lHUU}suzfDvEHwd6HJ$u8kE5c8wi+)rf?P%i?0`Zyhf76;hy(q9f6rQbpM8>Ez{|Y){yCpE zXP>p#eLd@0&w8%6y&qlUMHBp-9glkXuaG*itoy&e|7a~~P+Us!fn{3NM!FNM9v0&x z+a3i&f9`~|4&TEin4|`q`u`aVu#*z`t$KchivgSJdNA}1I5ZF5YKo2%ZeEF5mKh<% z=ZDHwDaVLXAsQ708~N9@Sw z-kqEJ6sP^hai@L0(%qOFe6cyW_LHcDc76k70rJ{V`1799rs|u7s;52?44u{_47Z8n zdpU1^QJ4Y#d4Jb&%NwqEhy&wH4)B>fyV_p}KDmU9y8s(kwnO~LjrsBs?(~jj4XM++ zd%CCR!ZMy^(c2dj^5Nsi@f?j2yjoXpFDVzU$vUb98;8kUOoLMFIEpT#%m zoepUk$%4P}GK3by6)k;XrP!h>{K3zugK%2-dW!z^IgkTsYX|qqP0BKtgoQaJS5i{m zCk(?QM{(YDR1&GFd+mJXoX&ag{8Ajp6La3(Fzho6naNp>5WKq7i$SwF%n1pds|LdI zHSVSCJp5sLr+b4>foA8LW|fupH+gNPJ{`7G+{=ObmqMQNBT&BqrHdTy>3AB$;QLv*0&C!;{hWeFU<2(9t)<6jeL#iUBQ2ht+!m+ z#9@8yH%ZQy`w_79XX?$2sm+0qt1UL$-eI*}r`p!kYTMyzldhQ{!UzxxE3gfusRMx% z=<#IxM&BgW=)C8*8a#*|s|Sgpr>vmcb}8B6GH6&Rvn@1~qRis+(d~DTc`)=JoCv|{ zQ$HHqHft`&dSCfir&K&VsRi4Kp|?Yf<;xesbP(-@-wkHlK*vxmY~L7;M05qKMd_3t z*M2QMJ=~4BU{Z^bUS3$i!f@i@#@FSbc)oOt!GNn~45+Y9szD{%>Om$C;SbH?VK1gx zJoI~d+-R_Q&|qu;)1fjl4BL2A2VQ^-8-)zJC-~&afDLH^nq?x#a+RYX;pr%PiPdh0 zAA4}s#7TG&#C&m>Db??+enW|^^1tWmH)Pldzu|jBhMfo7rT8Cu-9ccb{;!M5ctfsy zJPIaHFh4eAfadc=Va?BAt17mm+s1MP77t4Gv^(y+K*k_ET5o`XX82QuLlp~)Sx;3N z3_0WN25Rmp{i6Cg<@>qvW$W@4eyoA`jt`0L;pK|T&$%IY*y(x;|5h8=&^xs~nA?oH 
zFP$Og6OmR>yiF5oa;*jK37SZpCiOgHR!J*SzX^)p{k1C4-Hy0ea_vlC!j!WoLmEs3 zZGf2EQlmity>2t7>OxJJ@+fuN`u4h~*0W`ZyU#rmQ1 zBv%gIn$Zz*rHY%zG^TL|uTA~!R@04pt|=&9PKN6Kt{ULDslWlWArrYUWrwxoEB(#> zR4#3<;yB_3JCN@+Sm#E34lw>w;k}#)Z$0juhAV!|gTGJT8&rNsuKLrs{oX5{9)6Qh z8pZnW4$FtNa3~Xf4f6(y9>9Nhn(I0)dfKnwuI}TT>^}aB{>{-;O514v)hIdbvt7;` zappzTJ;*d0N_!d_GT-RDg+w<#Uzh60yyy^s9it5q5W4;UA{aV@l;}3g{Hx(N+3kbi z?a^mQu{DYb!qel2kLdx7S3NLJSr(^c!t!2DLQgu@ytC_oez0(KUhl+W|{TmqmI_w&tLVV_QZI8k8|x;WBGiST(6Af(+?drhUIhL%Vqhz2I8?E zMs~^a`7_|Fp5;@f1@?7)*?NA@x5n~&5@yfzr!WlyM@ygg@`W57laRbWl^wpQC1aqF z4(}$KQ(C{$2~PRH8M<5ipzk?)^0-be%#~$OSm+JsWG*)tHKJ=Oe$f4eruGTJCkL5U zj^uXaS4IJ~8WGU~mNk6D_>OJiPnTG1JF~;Wapk28hZ9bTcb~((xpu4Emws}mBDS1+ zAorA9d0&F$$=Y^GyQB!$ex8PTY6`B=`rCmkvX&F`iF!^`jifE)5@wU5T|B=j82S!7 zP#Aoes#3JeEJwSjfjZhH+mPlZ<;4k0;u}k1Jlw-=?54n~e%Co1k;Fm9Spc z=H-2`r!EwGYF^BqdVB(=_`$9+Qpw>V>kQ|wHaQAl%BOlcs@U{a zxtI}iSsxMG9P7`8c-dMQQ%NdmRJC!bxE^?t+7>J(SgpT^cg3v8s^ZrC)a|BWnBr1x z&>3!ADvsNHBk#LfGF5L!ldBNGQ{C#BF(1kmu6 zve;FgU*(`#rwk0xUGS?OqFg<{>RY6+N(QKc(J{17P+ZTPd4ARN@T+d+rxA35gqU9i zjX05d#z6#hk9zw!>rpFHR9mbIWY2tKSZ&Ny|F?Thf*4j*cPmB4RfpaH$<^=FI96|q znvG-C;A9SC_*K7vu-hHKO0;M_sngm13i(xUp!#}#)oG+eSMn#}2iagAXU=vpn5`ry z466y062t0pfXx;oraclOE!ERfyujErCO$tOATMV(4O^g|;`2bu)q>){ez+>q;d zxNsbcHLdHoi{TPs+yaODB;-T)1(mDGH+HS=lYB@;r&6+ilbB4B57iT-@1>xl^LIrr z#PsNvo-$5XE`|T#*aNv4CnFrnN#D5QZh0oi)szWgW$m8N7USqyf$(I_b`sk;gPnd| znubuVIh;2b4Pp2I;$R&8K-806(GNjUJXL6jYp>>e6POaW4LZ;{#oIk+>cgBfYoFLR zymwo!e9Z#`beSI#g^i}p`f=?Q<4=S0%QZ-B(@{D+;e0uI)#EdxB~%QqR_U@vrmqx34J!^c8Z z3^i`0e3=yefDPteJ3iJm48;3a<%tEB&u7capt#-K)N->5aiX_iJXy|LA6NiDpRP;|!2>0{r460V1!^azbbO zFIF6rZHL=&#WW8aF%iN|0|_peQVU+MIpdMVCQHl~^GbUES(Q1xq==H2JYi!(jt^P!vkJ}y=B zQy&dW4~A=h?fFVS3O>1`ku_fXD0ZO`DEaa&2CeXv@{NyHyF49{<}0<5Kh0MP{qLBs zB#(KH{qiV&NA%LTN_UD^vkR`$O`faN>bOdRxUpQN48nBISIr&xSj?quJ zSp@vdJW29M77l{>2!QNDPd<5;$xN|KS{=*eTR?(XCWyf3o@bjJ2bRET4zC2qgX<>= zIN+N^pSPqU{bHUZZ#Jazwk^3*(NK=StSQDTk4j|o1$aE3AGbTc*By+9b{yX;`fvNy 
z#Z-|W`f42i>wz@?>z^$5fcm5S&{u%I11Eza)Z_nGyZ}Se;-D4nNpQ>dItExLqjwCj zr!+QG>;3{yxzdx-1(aLh_+OtkmKEIX>QSO^XB(SSR>GSc$a;(=8jPs`GdL8LBKbt# zP|RVrsGIscT#0_$Zb$#_`A4VuK}8?pXAGyF6LktZ*f=T@1J`q+98DJ;Nd z{WSU{dFsm-wTE)>ksPSbu&U1Ab5%icCK;+hT}_3JswG+=gkrLxT*rSRSNtptA9w!Q z(cizcYq;NzeqIN~D|idQ{%O~7{dGq_sv+0Yudzozsv%v&)2k!a$#q6sr8!7-VVi_$!#&yy0}$DgqDA5qbNUrKSSC??Dj*`iTs z?keeMHK3+X44^TWJP^EtAP$~Yj5H-esYhyc2NPU4cm5^^Q+0LRg_Z7MeYGu4F_({F z31@G6X~QGVu=J3dof8>ey}a`TX)5>RfTS=7DuNHZ5S{jiE@?^A2Q$_*NcJ_j-kXnf zJap+30!Ya;%_0NK)QPg4;JC~hk+EJs{71cx`B%BpFQjyLFTMrXQ!q6+aSuyLFzLqh z5$h`lV4Eq=x2hVWF+^5yP&L_UB_LcFZPmN6zRR@Ti$9W6$QzHAgEKK!ksWcwnai9I zc7GH0#8@NsRQfS3x&PAqiNlAv9Dq7p8)8G8X#NmQjvzOP-4H5k5E|O?4PbA_Ic3q+ zUaHE^)g?@AX?^uN{Kr%f2{`KsIr}jk7zb3=hw616Pf+16g>i6{RazgNN_o$}F26-X zwrax4zwF@I?X&Wq#$-R(FvoHk%_;Xj5KRGR|HJ1}XoGVRg38(%r{QeW1i(8QjJ)G-! zdg-D0($5#Hyi34nS;G=_trxsr`UK|PSMHlDAHZpNLOA#oe7^1m`P2UH0(5vEIvX9{ zk?^XuDtZofzK}plUyE)0c0FvBj&Hb+UfbJ@y(Mv-{cO|E5q_?>pIL9dt-+c+bvo?IZijm6J@X zr=8Ut-o;et6TtnLs23fhLGcT=7x76ji+kC&{M3HDt6wYfoD{Cy;~vN+c;kbu-? z)5AcWMPE+Dzxlc*t;IKOLEM6AX@kipC2O>nA6eks+lqPL+F3T#$0wXxne-m%>|K2u z6NLoQu}y>Sbtsokn2_uE@8D{Y4X0F&|EZ?ZtxkDqtEsHfJ34%2s1i}C^g z296r&gMvI>wnsk{nK1QPuv8O!gPv@KB2oJt&s}mz9;2|ʧmQ!Du|bCu;yX-Uf> zEzVm9G|1fAC*S9zK)V`BK#{oG=Mz6c^dG zp<$G|eoRt79<;O1*0S+i@p8tE?|MUqEKv^^3x}o36Mr6DIWbq#;n`T#H_JN8P_%Me zSUGv{H|E{Mu(C3KF`~C}v*QplV{7)s^rs*FN>W!fS1WHZS(1 z!TXP4mc(2@F2b~v*>P;*R@-mdw}V?@#vC-j|CR9u*E@{c*M^PjNMa+O3@0K^+FIv! 
z#7W}G#biGcPJxCaiwC-Y0{Nzx;u$4Xg6O?P%>54y6xRBFlM8c6(SpQ_YWJ ze%^+@tPw8ps0!}7lkFcy}1A7!f}S7yuD{c3OFYHXN;EfdGOTwA~` zXmzltiv;)mr@8&>Qi{6xVmZ4&--I7r$=kVFOL;x(vX`$@Wo|wY%{PaYJKB_>V@!0D zft1fmA(;=q-dhEuQ42|7*OuDBkLQ{V3myAsy=bglm300Ivy>BUF%lwSISa9#l!kgPnG zXS-VY6v?N+E4{XH#;*G=%BIJuvFc;%f4U4kVtX0JYy+v=31i0!IIqS41FAdTIDy+5 zjPq?aU_Xcyr2n5EY%<=lV~qzfZp?0B2L%J-DYt6M{7Bi-c31hs8m(K;SI1Z4X2kyd zBVE6jvmb5u*#Kkc)gj|{;U5-EYSYA@SK;$WArWDYKVta&za78)U%vlR`~aECecf*J zJ9kc;1woABleu^{ahcuZcW_VG$2|AV7NgL6?#q>)LJBR4fNqIALV|!X)9QS|v&gf1FQsUp??Xh1zTU}FI;m+;m z-QB+FNAyn0z-+$kKzm_w)jY|Eu~^9x-EM0Kn6Y|9lG}{#(>`&f)i0*6c{(I$$(6+3 zxv4>-HbkdlQ#$h4-l3xb3xGIK__l=8?HB{qpSa>)GF#8Sbo6cv{|4^@Fs2#HKAbuR z#kZk@rs+cFNt8k$ns8HafNvG(+p_uOW=lTVCpQzBZHl7(6@Lt9NQ7yMB!9t=oC&g| zbo><{Jzu_#%BQdS`5EQORQ{#a#* zJ|<6RlGRZd_Pj(0R{Q{QPR5mYcPPpi&L;p#v=3!{&EHFUQ8Kv5+_ z2aK}5U4o>HE1POpj&E|bo1?sJPn*X!{w~?ws&T=L6D5u7!C?v_*BnWj3$x!|$0O_i ziE0I!>xWcVtAqSq*V+_)WH+?}*s}9XuRpcA^*8voF8Er#%%9n^zD|jLhBOY?7}M7= z*aL8hKM8$@Bwub}K9H}R5Bljx^}_>5Y;YG({2mvaNN%Nvs(01oRd&~n8{^yd`qx+- zecl>NqM%O(pKK+_ALj_pXhG1jqQIbgOHhyJ~FXYeB&2QawGg300t+IS`9yL6jQ-Lwg7PFZ874D&|ES626$ zY_R&^u;hc|Vnm`BMDcbqd9{Y7FB;tC>!$cpbCsiSa9d;8_-rt>*fKz#VJF(x&y^}N z+1YdHdkep9VoK}QKGFO|xx4Rw%>p6&9|%O$g9Rnm8YpP^`j*ef1vI?7Zs3=A0&RSQ z*8hd#_Pbrb)su`>j^WiR5;%u(eW`--bdljFmhk-jaEtXi3jt8-olC-bQc8YCrhPZ7 zp2>2pcuUKGtKwES2u%c3!ksO*lgz_4HWtW(g#^_zZ7a3!rX!w+cn0Fj;&5+Yd|8}~ z9{S|jnPBoef?VbNGKoN8GcRs(qnf->TjdrT z8oBxe-!^>B=gR@|We$8N_W|0=0PWd;_9`_VOukvu8Seq=`<=nJ)}S7~1w4Of@SVob zuLActZ2c;5L;ZNMQ;cney#J7an_c!Q{(kuc$*Um##|PqqZ`0x7qKLgkn_cHkk0!qN znuf|rb|F3x(?lGQVOm?$-%Bg+#rwqqc$kkAiz#03EDv@ZSM1VHjVkSfakQz|hzx$) zH>x2#qBZB(FN*7Is&m8?eW$6`5%;~E1Rw1`bNWcu=84ZQIB;QA4KGluFqujAHx@Ci7rnJYM6too|I0woT2A&2?T?vott9am1Q>Xr;iu~BATgB z)#az+E8|{b6QmiH8-uI=8b^aMl`31p_OSeMDsFT%bfaQ4jYw?(iVvzik7mew@+i)f z*1|`F);vbMNjQb5H+&rap?6BXqW_{JCMir2D)>-^_Vfy1vD3G3DNDIIWkOiY-PNl(q&__is`0 z)>P007u{v8IYWd<+rNczU}6zWy{2UwHDZY;F7WVXO-PAoM@39qc;o^nI#-`??N2A^ 
zj$K@0c|1Hp2wJO543}bN<`yfUS<06`md#K5SbI`g;wm|}_Qgr~NPZ&R>}nnyY2KVw zHE7yw?~m-_CG(A2u*z|rX!D)Wgk*U30J2u9M|iYbb~x;zmwZUW@1gawGZ*ySH{Dqf zn#$2NE$tNRd~U-)VGAsXqt3v!wuL3DC(8!goe#EZ&Hgm z3qQd^&{mWA+Yly^mQd)KggVexmPiq%@3gT@e@+nSm{+VQkE%`+#r!>`m5V1$eg!mf z12m=n0z3YKJ@ppra?C#lXo!A9{UhY07I@#c8Z{qGz24HmT~kzet7_VO(iFa+&0&DU z&_$`uIaQNgH$BmOBWeKGv~?4C`|M2$jO$^va@2T5aKs!&(Z*dj6~L-x0g$ zpYq91bquGf0sU3#1~6_lV5qIooo;)_FSW!#hcfQ#ot#eQ*4<{ z9mz#182W91|~*R(T4q#y2*B)Ed88$UJUu8|?I{~(Mkf>>9RpSWRq!F(^DWKW=gQ?C01a`e4*e{a-v%i_m(k6?SsB_cI=EhuU zW~?^mIF>nLvry&Dl+ZhEo7T?Zna$A=X3N9~US2_d0v- zc*rjDmm6|tX~eH_XFF2hFUr#F8ZUoI@avCAjZxsxi}qvpC{MmPhTeqvl|y;O*PEdi zWR5^{VeNF2@l<;_^?n)h5*?6(ZVS9QA`lnVoa8?PS4?PbkjRuhe*A>65fx&b{9Q4j zX;UX=yQRy-#>Y}`Q~&+Z%@nEObIt{!#b9V1$ziM-H zrx6DJ7{?UF`CE#vC6kA5)=ad*%|vIsjMSS4f}v|c25}Wpo*(KzV5xl1ts|nvR|Ut( zaWs$Qs~#C(t%9LPY<^zPGx$H>(iXjktOkd^zY_}o98IR3WPCmy!@_gO8hy^7h4B#{ zr017fDTXfpZ^^kP4es5XYwoBpw65@j9&Y>*RKCSXny|#7shD@~wK<6EEpTM&BYiJJ7mqi~8a zjlRuTt-tZayUwSS=bu~09Ca`~ey)p0^BjE~c(APg;!&L(-$48UQzaPdopuQO>F~!U z$EiOtbrkNCyd0r#G0DUn>gAj4WtGd_6)mAX$2dkQbK_GqRgXtOai2ED5C5o<|8^f$ zmE4>LpUi~i58%eQJSov~n!?5-TH`=_L;uo{AU&mDDJdFaKb;nX8!|ENKop<>^~P$N(VYp3p9Q~n4*YjxVc&2@cWeKefo~}K zZA1URZyM4AXIy=a|8D2O@~mn_wij1yH*V46^|`NBZgRS@)MfVjsy_c&uQ2~R_nQBV zDdCJW80RBEKBI2anLXyr+$Ow;WbG$_s>G5QRDQuA%~qp|a` z-PeD<)qhN_`u=~8W+_d6p2mz{wd`@emU1+v&YP1&Q`mXaNR8H8dL8B1`NErAH+Bg} zv+Ts3#HB|5g6u+L*PrewIJUt%K&RR4o7Ixvo8&8rAVx!^dszMhI3}B-Ydn7T@WecP zLMhOn`0|8M2iY#p3pIC&O)uH&Rh8Q#Mm8;AX|nt!uDw*O6Eo>;wS zwF4PvV+zDpo`Va~ChcmugPnLHJdd-cpbxN5uHy#_Rk>Nm<*SG;6evi4g;Og2MbVka(g&& zcKg8PQyL1V4%}s1xo{v5kLz2~Kd)~D?i<@du$Su~-|WkiS=c8i#uJdUiYIJLmH`;uqPtzR>3n%%bFOkG z)B^_38+&8i&o_>AOKVdkwj2q7A3M5R3&&HB?e^(+`?4r3u@Smj_QzKsl{jdCio)sY z&bnHrZwyx3rs`_nj?%+mohj4G=dKn!HU&k6cTmRe=}#F^zrFtlw)x24C1CbbD1$wl zqL|+KJC&ypluZBMYTSh>G4;|wm449C@jTm+yO@Xg$d#{Nvzns{|1o&*^UVqIp)+!8 ze=^DRyyUCx@VcW368hx!{KUEV2+>|QDje7JleP{lYi6H*i7K5+;bJ%xFNWngXq2>x zj%vzvJXClGwr*1`(DAd?18ewi5Jo`9dRP~P6Tnw1X52!tea+-`Y@krE<&&e;FblgblQM5H?>cX$5phK@A) 
zZFXu;WsTVvTibc_WA=eCZFdnzXyr*jLw5lrAY$?<_%6?#IHkG&U@r9w+3HK=8oB?A zhL}s;3$av6PVH9cC*ScaSl^9k$8AA78>8Xs0gfI9pDK(7_;g)AaZVl> zi%(AohGyF+u$vSD0LP5#3Iy+%YIevTeCWMu9t@kY5jxy@0=V@Arr=QP8p|*8E#H9- zW>6IUn=dV=yLD5e2>~H?Pl@C{IEE+Gk7x9=)StIr?2?|ojqJgiOX}$9FOUXIv}<9s zK+y@R6L{M#J^OJoL`TuJmq*7UL`VqPUIWkF5VFcxa+GYCqGTud{xCJ%gMy;yN71ol z_k`>$jTq)A9Ma31JstaPa)pE4Q@AG8&)wj~QyL?=53ZXx>iuiGKkahfR;bA}?}J>QriE7l{z_6?8EbfQV{E?DaCLUxz&t@)AG#CH0>8ex7LH9 zufQ9zKtjp{P?59$Poe=gF1={ei&m$iDl%$tEm)LXNM*i#wZe4KaJM+LL>Ov(q zTK+Tom=I&L&17-}V1=%jK>a(mNWN^)6+Of=>qB{P!D}e0m5IJ#TrKdTOCO|)>SH!j zM$G=4LNPHbr<+kXf9!RNr12A-kTgao{HA}5KK?&_U#r<)T=nEABhfZw4OzR?sqQ2k z$gQ2F#dJ8P+Iys-o1gD)kXIc2o(n2Qa*ZHgGG`M!zo0C!j1&JRhTcy>lzK{gxaME}U-!wE zoJy{aEhi7(BtjHU^YBtA1!UmCWFMRZ_TtTc)pD`i`(X0K7dS~8hRKkN<=%;x2SsvF zQAxnV^2g|clYrl#sYC7!dpnBh)A91}d@TP4jjEg+qtpP;fCQV`ILPy{nK@@;P z9SEFVCt#2NC*>M~19z}qS8mjwO)Sfmza-`n@-F}(jo+M;>sZQNlL4e9+@R?@O-EW1 zZqHVsJtXJwT}NmO2>FSYzYX^y>4}lIsBAFxCa0PT|H#X;aS~4vgxLgn2&SUS?Znss z-JWFF)1OO}lqE)hZT)3U)j~O~!U^b|0x=Mu`k)DN=e(EC{@jkq@jUT|&6=pnTNnbs zQhurvD#WvxOsj@^J42ONNNXJ&xZO=JGy9ZgcIeB7{h!Kr>?pjCo2$eBvHbecbo(X9 zg+zw^^RPUrr5{;~(@d6sZ9m&4)is!^HGZQtj=PrvHVavL-kg53 zsxPkI+7=DabkWvy{n@oX3hs_Tcy2Qa-n20KXc9Hpx}5~juy004W>GirMK-|0ZB{y> z0yWTrIJm`fX%*BDtwOUpdb55cau~%qh^6jxwjX?iQCxcCvFcbr)Bn#As-pCOd19uEo36R%-+v5(Qjl6*SUXeRB zeyQX_0AVns@Ep+%{PFZooIX+Mt^DzqibvM4I}sV_T;U_02}%J>qAf$@Vzyv@=U2ro zqpgLIS=`a+pBRP~TnnR*Fvt`qeA4ph6a84Nrt)ac_?f(aH`E1b(Wg~cN+ji)P9=CE zqKZkWF-WQwL(O$D!9<>}82dXe@~82`h8TzY5qjXN!BtA&1bGNX1UGt-)n~0t^}4vO zTlG*~#bEW&Y4$>xTEzazu6j~q_3C$ZB!Q+)YIootR-&0-oUy;?nYtk;9;j`aSO`l} zeDxxdl5r`ZXnCTJ_mN!V2A!pHXw0E6{i#g_-0w-{4*U*xIM%nrztA4_?bwFw5WJH~ z-LCYQogmb3>JByUl3%88c>Ioc45v+bMJJNtXnQ!Kv9yQ<$GG4prlk~_kjoZEP2JxOr z*H{0{%?dr%grv6^vqGu_>#0<~nVI+4Eok*VwVr&`K(fyyAnAC%aHR1k@oVR_hN+ha z5h#gDF`g(NcKpmlUSb5yL&)6+#Ux!HH(>iYrtibjedsEG3qcIv4QnP8ea?k0wWYgu z;}cX=1XoY!Lv1nsKrTJhe^I34PwOJRl$us?FeCt!ThNn+Z+wSy3woZel9U_o7L@SF zxsLlTJs!WH8Ox%8nW9GJ7Zh!~Q1K@U=jfK*a1x$CU4^Abiz9_$jM8pW`{Vq8V&3N0 
zRE<`@;MX4x_%Gxp^m+3G`WLl(b=idA&JQSFt}Fknt4Q_r%sbKreC(S2Y1%1h|4+iH zxY_&l#LfYNr4$wftA9l0ryq{m3fF13-6~v`WMSIqpvyMY%WxcezGeZ<#0|4kN3S~% z>ABU#da@_t{zT9f@_qI#_D={$=x1p9+S5MC&tBLF6tY+ zytN@Ho+iMdo&1a?nc$OIesMs4fUo3y99)K^oKOHySU{bXu%|QAcQPm0;Kt3-d&%k6 zmGLv;=m0B9{nJOP&(IAAKi-}KhoaNXr6}h3S3POU+nfSTIQW#;;zUQNXzE;26M}2N z5{0UV%_)pLTZd+^67-A9wLeYu&}aF0FoH#zrW14gNDjXh&F%s zq7-}AQR}Gva~`tbk2L`!WpWK}WNo@fQS(|HU-)PgzCV9@%I^UGWF@o8P5f|6J7nVC!YrWN(L)G`N~XCTy1q2hj;`VAs%wmG&aQE zYkxf{7yR3g2{Z8iTmufF_Y(|O8{=Ao;)f<`W(vB6;sK@Keqp`>E)u>lH=gC0u!&F0 zTzRlC%#l)CmUB;^y5b4#w*!r)xWD?nnBFSAl<#<2x?87ZmcLb8!mQv5#T3}kY!-ou zzoV5lvF@6^u=J7EO7G6W7aD`VI*(Nx9NjM{o^8uX$y~=wIM6>}q`E_IQGVFCLGds3 z#3IuFk-}-#?JlK+^pJ4&;0X2m9`$=`I#vC?Tm6Qgz9rRfT>ETSzxy)F(T^Dekqr_% zmgIt85b0po@JGDzahxu1-5`5{b9IfZ+-dMrvw}|mcIX4VUn#!(Y{gpW?SW5tY;>4l zg!wN|sDwN5+Z_DW-!e{{kqQq4x^R1_1k&D)e3P_XIg4+a15E0-_7PNnJ_vB1!shwa z&cf|zTL#7Bv5I*n@H-XzrsWGKv6m{~!fIP`FL@S^|mo{p%{l$(*u^aT0u z;N_+@ES=bt^y+F0pIJT;I(3w_m=79a1!%1Qfa);@KRByJ-H7^os9!R=Zt6F>_&dt> zxZ>P4N#iCL#4Pt;J{F&;3qMx~`6@Mz2 zVYLOQP(bA~4SZKT%7B96x9sKf)&FvMA6U{xb+B}u@fjQ}1;sxhXRTh8miM!w-!wpy zW8KieOZC#|Q+7ISPUm#c!;g1jol_{!{ZyV{>&c92Fh=nmsi;p3M|EG(KZiu$cuM|5fEYEJWy(9JXT=_KEhfh~uFM9Ew z0-8+h87Zr=3{oHM$*g#s31WYe>8Ego;NS^0_GFg7-Jx`bWi2r_QPINc9xO^5F@5W1 z4WTr&`~;`jSJ7Ph0E3NTX}@6d$xR|@a~kXI8Y^eE*h+D5j6?nY+*Mo*;rnH3FYUr# zQGPfCepGlX%xp(+%J)wB>}I3P+#C%3s2_UQkva*b9}KOs4(N(|)dmqrDY`8zf21|K z2TlQK(_ly#f#+1 zD3x#iqVqC~7wj?@AiU_3b1%B!yv&kw7o9(Uca>3}#*^|&Rr)oQ@F?kBIXHS?Q2a0816lOo=>9?Rm)hfilZvIlX;f5J%ts$d z0aJpX@|D{J3Hi#u2{(t8#cV{0jrD0uwUpqbV79LVh86j+`Nf{699Asvd?19gLH-I6C|&sD#OvCbAugXfvhx;$i<*f z#GRg?2)s;1M1a-Qc{@!tM5Dl`?bqLETyNghxOzC;L##EdAK0$Pb5pL3?s|tX+yMQ+ zYEjT!!KdRPRSrTM*^@=|UdQ4px}F||nJ7B`Y%-4jRkuE#PmuJbetkX!b%;Pf>+?>( zKK+N=x<2lLI=Mdj(hk2dGA*9(y^SqjzNuv(?phd+H>z9Bgy*3ZbgrQ47Lpho=lPMl z^^;jgCurI&Y;7HhLwN{b(8bvU?>np3LOOWKd%$Tom5HD(!(J6^wjuggWfe5t3wCRjB`^?w6JIn<$tJ|&c6&NzeNf75szfW)m?^`dwriF`(+j{2aFSh@Vv zWauM`XqEr|vi~0O-&$JNPuGxBc&>2QM+8QPIP1j&!R1e1!5Z3@R*V~CBScTTO 
zre%Pd#MTYVOBkLk1mk^J>N=S0Qu$DnKlIO(lM@iNCC{F50~2^G>k`+-JL`)n_Ofq^ zUbKdl({RP`&0wfts1Dv|ynZ4TTBD0db_llfPThj>p{L3-+ry6U70$xM0;a<=4AoVZ zjK5LDWfr$`n?@Rg$yvC~|J{Fg((8&YG}I%FX?lJ`+>s ze;7!Z^JsI5S@RohHxlnR8fsq5W&B1@e@<-uMo9lBvcad&n_~2`hkzY4)DP6?iZpZ! z4;*dW6yFGtPY=r<#QN;y=CFkLtF#}E9t|0|DV1mDf(lB}X$Kbj`Kj2up90c=$547L zu9?G*p9RIW48wlHsToYXH1FVA&5Yv#8a@aro`v^;KQ_S~{%N7xZWuelsn4*#DqO%T zr&3?DoKmp$i=VOj_&Li-%~WTpm-=Q=U+;D~dAK`le93gr1ndtB*yXSpzpUP2Ga~4z z&a((tO@81XTYf(G>nSa!&N^409`4yBVl;uT{Sbo5r|fXu-xv(-%eq*^vX$hcTPTQ{ zK##n^WV%MT%(=z8ly5G768f;--!sF6DI$0*9ptXAPJT*?xW5aG`Wj&T?7yd@Z|lB6 zf!nE2pRtafRa^DTX}N`iPRCM$aqdlEVj`5QNerY8xXY7T!ZPTM3q=7n5>_=n*QyVD zKzA8Ef~8+dmeEyMdNwStfZ?;exp0Jop#3c7Sou9+BNMXD6&&nt%AA9Y699P$Fgj

E&| z9VPXV{xh)f->+xtlZXZnF&9CbhbJ8ZS@llA&K6(?k2}o6&^;^8<{rD^N+7BKBY+KZ z?b3zK!Q?kc1W-@Ff;rMqSb7|l;QibpmnT;L(Vpm& zTTjs~wV3LrhJycKZs4?@$SEK?5K>G#JluS8vqr=I42mtnH#HyWg!aAqt%ibr?Aj6v zw_#}YMsT~!0@BnR+ur$dKQGS#*fvTRsrYZyDhh{7OX7Zf zxhfNB!3FXxi&N>li|7(SVBYBi9wKaY?Ui^aAd=+`(Z^Ze1YhJzi*0-?H=yo$xhRK0#P?ms$`E-mS0M zQz3*Z{Tgo5`{45zdIw*aP?$IP!o>c!gynaOQ~bvG99$zpHU4q-^bxb_?EHvKz-+#_ zIq3a<)06wP;S2N;#-t4>W4`wu~FDZnNezK6dt z(nB3dP_s7I_#HtFCVtHvg>wI#W7Q;o#mEKcDZ?t$809Ni2?FdO>mEo&hedm!V%x3V zX;LBC+?`dgsG}_?wh2&gXF2p)ks~qw?h}z+JA(k zm_!=W{%qP0)qbzq57qu*wf4*VAL3dk3ikc|KQo-(-P_3=&6O*ae9JO@ISuPl_gqch z!eBDy?4|C7>@jW_!Q@X5)B13CUs#$JO|+ve+c5p^Xc>^$Uqc z9qg9YMjs`+{Qx;GJ1BligTJLYkY8Ut%s;Tz+7Mi1>mWN`RqaE0TB&;p%o^F%># z!~U5z+`TONMa-XJzbqWHKYlRgch{cxi-M)cyX5!2Usf@ySH54;{vtiG0F&V(oKmn6 z>L`ZR@f5>10ip2r5kq>4WCO*pQAu8p#O^aXfTxH}V}XogvLP%H_yfKC$q+_8oVpiN zNUWxg?*zpz0Vc>lQ~kK%V-bQVE#jYaN&DRe9oL{)C+v3>iw18 zqi1XNUOHWM2E{oH9x5al8nw6EBWYW@^Hs#jb=5fuK9%kqwy7~SHPKTinXsh6L3=v4 zLZVJGhy=YtN)|He@{E=BtBdr_(KRZ`XvUVjJQKZ%POS8#&uk-VT5Cj&jKR|Zb7J~a zbj=h-(GZ2e?H=fwG(Bv60*gh*(WFDno2v)92gG1Aavd-Bx5p?pobh3AlBa$#UwQ_sHT%e zHGN1_(>c4QnpROf>GI%)rW$#K8h`D+5IRqE&`Q<+aB^PJP1Wzj^C@~sRav+9cpe-B z#0JA3HMot%7cY-qnxZl~5Bk&eQmpB>yiG(DAmXu*^Z^_ZgW6I(0dtqeoI=p}dVPST z_ToaO8k^Sqd8<<4@QBrc>eia@STk2JK{67qjNx+;F zQjj?=-q80E3~ULCM}ll%>+CJu(5=9`F+AnsOV{TuzUZQJ3&`j%nt$>9MTK`IV)ywU@4o;6{QN}-@aJ80?$V`k7^1l3(!#k* z3KHMf3Ft4JUl=3rkMr*(TdiTI6tK7KN&fTBUAiz<4;bHioxVXWu#(qJ*j4^r;lpKh zBL@+ptlIQ|}FIV~nb{K=BC^4?!=iun%p!f~|Y;ZIPif=@aXUJW?6?ESyUnlm* z;0uQZ#jo*h@Pz{qQ%d=;@>RQzRPK=6)rvs&C2WSyf#X{s?A~v3K_BO7`NGaklUgQB zzdI}sibbJtPS_801-D5=XHlEr-!JIA>L(3OI9XZ;x9l)tF+I1gv9M@Cw(pV_ztwLJigz${^{cUS z_`@xY)muHka{&U!!y5Ykx;{jZ5fCY;-53(<5`@@k9>lpomuf2do7IM7umt8tT(au ziT>7+2;z6`xxaqhPowIj>(*midP&wZ8jA!d@esJxpb3Un<;bMe8oCPJporGNHc9(>s1VKPPO25Rv0yG?G=H| z@p!?w%>&Lispz?h;Ezv}aC_s?$ZR>2TB1JsWUv&heyafN5EV|gR`gkVvXVO^Cju=$ z5^sg5-vL)q!v((RJ|4J3>s0g>8zykKyLtFq8=lR>F6U_xm3zK9?*Sp2tao8} z3gg{Q58MPC77Q(;`)-ozCdl>V_@jes|BMW(3s7xGRQr~rK@8cTcn_5riUKom2+u@t 
z)ZeaoV6J!-ordVPHk?CUhYqziZ1$op^w~A6b*gXN@TwNJ@WHS9nLnt-s{=aB7Oc*L zdRM=B`aQ$n9Dca{TfsFIT3x_VY#mh}3*8G!2dj5U&!VrPPfDi*O9&wZLu70&95+n9 zZrm$=Dqa|;8RHIazgBHYHw|j4Uvguyw>H~u3=SUU-Yu3SH_{v)cx0_3aUfcxJiktNvXbnkYP4k0Pq~(&g00&g0mA+3qJ!zb|zp zJ1xDieDwg}5Q6J^v%|Nxj8KWxV!rGv1E3B-HwHAUKXdg5Hk(sXm1e{M`SiWgf3MOv zT5((a_k(=f+{)@`wOT3<>L^jYfQJA#u*QXy4@lkjoUc*GU@4CDKaIK%1&Uu)6@FfH z{Y~N>W@!jMQWEbl)NggyK}y;ABS0XDl939UdKEv>!Ewq+Jbzf~xQlOU8K9_-5~r;g zM?2IQd4FrmDkW-85M$qIL*?Nb??S{yI~?&@+>GsRrhcQShiz&)Ym@3#-Rwp$G&aLBmC5=R+Bd=?*p zbyg2;D>|i|NE(J#!fl!Lj@u-gJ+trE1HJ)v+40?kShOKph{d*=X+iZq+Ye`Ho;K;v zH#^Ui#JW=wU%xwsS+LaZK%qL;do_|({;4D$FWslv&luf$HEBk-zMlig=+@WAbZcj6 zgAloloMU}ENY_mwT}L5ZzqLEkbsnUv)9~vYSl$4<|7n_ZHF~vkQ+LAzcW)~%C-UGc zpQ(YtI~qLo>Xg7V(a`K(UW#U&$k(ejXkrq>b?;d)-o>tytEa^DYj*?m>p76G zo^D-Pj~LVZwwQiBkpa?h9sMfxv~lz+n!dXtVWVy`(~hTK5xb}8+;}n*BCx+t^ec4! zIQn%q^y{5Q`ibN$FJFl4K4Y+H99pK}F{Jjw8X{~#TZbjOT~NQXMb&mw#Vk*p$n%Y@ zl+bpss5=zd8O@97PJ<#(&%RI9fCOx-MW53Ytc=MbPp?j^m3#T~@Ohg$PyfDx z`8&}-+S>8+7w?$ov}4T6vi6Ic?Z+LhTbnDJoZ<)=GZ>PujO3^Z8RuB;t){P0MDJ&L zsY=HXy{f=4TRqX+&5W6%YjlX2CZ#7O?yR=APN;E|aIZPW7~7MvOSJ~d)zda~@{Gn{ zHL`}`t;64L|9)`IlXPnV=V6WXZCyYll!Wx9f6;PP5R#5UdfX4SuiK>$7*T&~dqpxvT!4qO}sXfrmRFmM0f@$((6AW0r&FjO6 zF0(Tk%{bX+OLrI&X?#@UQHF$lKFHXJ)-9Ue;G(;YSWQ!}Mx^P_p2;vd!!tq;LUt>4 ziCo*px6QQ(*X4$IhqUFPS&eoD_yX%dSov_v0d|p0j51R6>L4ZKQ^@GmRn!|Ioy+nK zz54GOqkf0_t*(xtS4FAfkPeJu#45BaWU39PmuqAaoI8ZTb+l?TIc{kgpf;mbjWC}) zTd6K;4-eNn(>@>Gc!aDrN4&Yw#;spa`7W-pCU@`jD-w6QPL$+KFnJ;0edy&Z>kTHa z98mGeH&{0ghN5t7#pLU!HT*RB`i(Xv$i!wZzn987+l`6(kEuMKjMI7xp*b#FF!b+wV6Qc+(XVIu15uf)Oga{I zJrZQLh#%$SOI$Fqfx`X3p-+ek!JgN}5B)#nn zDQC4+|55xb3T4AGO7HSk@=qGt)Msq}Afa!(w>l@`cRM}z5?b%&aFkihgr)2bvF}MEWmf6LRB5o=Pmu8p?icQ z{fp-6XZ$>pAdK|Uw(4DOf0gdFZ)v@65ZP{kBr>(~C z@5H4AM}WQP_;F#faINb?4CxRaeR8T`uLe{>d`-biRm=X#KvM(M*i=dbbYqUYU)p42 za>EGeyc>r0J4D}p!@lwQA+-JehM>8#dmUTC&9J$@SYY_aErPP}*7uAb?K&{vd5Nf~ z#0)=@nVJUWyFuUNlds||w2O0dYO!3O zdWNIepm@2}=MqQA5tgWc4C)TgD2;WuXLVq=2{;aLvlK&y9po}-v-G(M#HfM?sUUTd z|E=1&YJ8dG>zhew0O 
zK#9o1RZ73nMg$#Wq@=RqlAe$f%6`hovAl5c*bD^XwvTI_Muo0T-HwU z7c;b8e*YdexA8&u0mIv@dOqlPXe7l4mCst^gTBaxpq`s-rSg?=e9&nWNmxX`H6L`Y z_@MotFiycZ9;ih4&SoSSI!hfK+<@dYd zf67#;j(-*+zuz7IQz7J5zB2wNh;JPKQ}X-&1rgvdA?N!m;(vyF;XG^FQBBVK@J;U>^Pe_@AEzPbU1&pYa2(qhnk; zyy*3;aj@Fo`1PZCAK$Sx9}Us_UpaF#8-0cCHoIxi}ufGh#pO|H))-QgEj7$ zbZsl{@GF3#>KAN8FN1TqE1u>boW$ME&oT1$Cg5}r+{;AdzJ+!Cy>c(*mct)d#=0CY zc3)|(XCJHSpd|A4^(vc|x33b^)%cftmA89IJ2)5G4=)(}t?@4(`(x){n&*`?|MHHP z%fG}iGrpokvW+sMDU#!_G{IMG5cGHvJbae~DV%JFWm@TR&+_*}$Y0O@J8umCFP3|7 zC6idw?deG2SZ#+o^6~d4nP;?o#?YUy@+)5DSF8doL|47auXvST5pzFY7R_w*zpwE=f{|D?1BwiJ>BE_jl6y~H!AF{IZtQ>K&^`Sh zG(2PGRn;#U|Cz+oQ;9bu^w^%|0@I(Xe$L9*%0_?gghE9vhr0qeJ1&Xzde2>g9rD8Y zOD|ZO;oqXg1J0X`8G-k)P4vGk|HLl-;cm1cAhi)Kc zkobRQH7>zj@!K4bgQNQf#n*8b4vy{<6u-%XuIsZ?tgLs4J{#PykZJPn%FJ;g{HXf5 zo6I!z*ifqnJGlw2J&v`6L_S_$^S4DbQ8RM6*b!Q7zN+6}=_mq-ddcBc^zxPYs4keAp5=&DJc-JoOmjZ`|WRFb&mM{NzZiNOv=S%Jw`Ack(Ge zh4K^va`@vXV64A)s&>O(CHbFLwf_Q3@PJ;D050A?=@+uEP5+)d=6COy-`O$0+s6FP z#J^)gjBBg|4!-@z<8SY2rLPWoE%ed#q^SK9@`vR+<_b{DgEE~BXWX%nFZ}EB7j?k1 zw&Q-9h+NV)DWA-7(`*Rx}E%Hi|7p$Q@ifnu#G($W>5d@p5P1&wP2+c6F_MvwG+|=KxcRu!Gw?`vqw`em=< zBLoa%ztxj*@*eb!u6j}uuhhnNf-%3h9sT)+!n=1NFq1!kI-=E#6y5m4%1H$FlLek_ z@XDZwn|$S_mJz-U2*ZkL^5~geD}2inYH$Wt=@2n7WD!g|2`y6kvHYK9-Lfh=gV#E9 zyD^WYz+7yp)t1|pMKcx#NKzZ~Zx}Z7?;_>}li$U&i@_C4zQkE=Y@>WYV7oZ3j`N3r zbwU;8Tior{3U$MJ^ldsKAHwC5Wkk3m+VT ztmfMvG^o8R11^QY;8^Jm4hF9%7{;lUF5LX3qGQnO8|}sw_>d?hIA``eT;H;aIu$f< zn|(ViLPZozXic~G*pNR3V1V@v+Fov6D^A_deo)z@7BIUyCYXdhXcAW~TAvV%Q`q(3 z5)6F@D2?8o#viFjQZEY8P`f%&+sV;mmYm*F-P!1~RtF0;c0YMDdLN%l<1;kz5%Sa7 zTdeZh##&_Yxs)psnzJ#*8|cmH4OI8$Ey*TI&qchWl9zsm&0D-^!9^F$D=7ZZ`Ag2d z_~LVyWES@oh$OU>cHPI$Q~c@WO80l3fn9Uw=>$iOO`*4tr*0$OPC{2{r~NfdoDGX z-#|2p=2CAfUtutHe+N|Xp<;7Z=ZwR;k|Hn7sg?7+SDR<7LOreSzg11;s~LdN%Cq46 zx8zIa+r&Z2;1u5Jb61u>n+BCtGtlOfd2HvgiPT_nSx^nm&$_Q(_chmj3Gw^+x=uyH zl2vk-4h@26d>SHZXE^xCXg-d3p*h=v$wGrW?6j>&8S$G(1`rQ1n_Q6yaWT29Ho+zk z0fTCn+6VaJ#n|Ta)rq<1)3)#WTe)z$+|maWBG@0oUy!hA;I+Fwlqb#BRB^`fJ1o>bol 
zBjHY8jqHe^c6zDt5gx-bI0>^4PxIv_8j=kC{lx-P9U2tRpzXV;%CBNAgp)dC=#@XF zPIF4>^wtMuf{+ErH)(!!q$V>%98X6$Fv)Yl{%i_O6@LlC!e2}mYSZP6-h!#l_OC_` zK1jdar1>22VY7;+{9!Y|BSUOw*Xyu}Z9$SlXPS;0B4ci>O=3KGVLW-CSK+$JlM%b0 zyk7x996u|Pljdn1DarhC9;pla^Itmu!+Z-eY!}3fflKS4SQ0?GP&0lquVm`ZWHm2p zeK72Jq0nP76^2HZadw>6UOiwXbR-ZuxMi^YwS`BMaGrlvp-BreT2vnf z*P-$GK>t@og9s(2X!IKd23Kv^LTXC-_Pzmr2gQ3~PhWE6(b_g&+@W)%xK_$?UIYCA z^4O4MPc^um5NmChJ{l63f$aPbgDPlGV=vlB(T^kC;5w}sRKKg)?lRAG@t8@PpNgbB=Z;2kV)>FM+YnkI-kIOG5&IC}6 zE`Gu$R{$KHl6q-DJ_*KRZ{@u*8|^^i;LvuOU@lt7WJfcn`k~;W3CgijMmaCA1733+ zUH3|=`+H`N>RgDdT6K)&gV{Nigo5`N*cqxt%d55TW8QAV)<;Wmv3Zi^n|asy(!KXDMvrfo2JQ%}cFFPWOd4AHKJ zIe58Ntok^IUB-szt-+#GU3oh|%Z_qP$IOflwXI`T^cG4o2BmZlql?Mp=d=1+dt-m% zen&mE_x2(gaoG-g=BUj&b?ipw$R7veNP^^*Zz;N*J)=bXDDjNacf!gCnr3z4OxrF{ zH&bgJ-F5>B@U|0(xqJ$c#fiVTXklaH;JLx%lRzA`CA_mm{VPpV+6T3rm#Mqu8}V|Y zrCo???}siq!ryae*qq~k+s!$zE`1^_yDC)HraC>xdMO|ZE5#OD9Du}(eCk$8xyFK) zqtsPy8qqw^{iV7|0<$ghNM1!H(F-O8WcOw}H?Tt8_y^h-i3-}+8sVZ!wL+G7@(QkpSH>fhkf3>h z9J*FIlF?bC+Aw!HMTTIa@3OB}Ch;vT{wVhV^)%klsr_q!Tlz5_>I|+ot3NLqgF$^D za8P%Hz!EyWmY}odgmW-7tZNh%q{$~?qj;U$Ja;s zjybdX=3IR4h4beuU5NPZyg7w)&rdS){ylm>Z*l)3f&>3%jU=^=f0$J`=1MMNIXo%Y@fmYm;z?vnGSzh~BEOAGTa?p?CDZ~l_P1@o77&pJ0ZYxc9Xsc=nVq@QW)5Mz6ZT!P;RsVDw z!|%8L-&g-#-?aw6pZ>@7|33Tw`<#E(?f$>~`8j6JNnof~v;p>5IpA6cpRc(7y-faZ zRsYMF{NK9%SJ?kQn{y%|;c>hEyIX>~`TtGo-^&nwtNLHY68zTn@1_6aG3u)+ug<`$ zGw|vR{Ljq5r45(9yoQ>_WZR!hX!?^$|KM5o%=AYLh16$EX+=h_|BPvG(%AaO7Rc5Y zAJbezL#F7?T_t!I3cYgB(``t)}l z`>wX5r}x{>=~o_e%)8o-o$8ZSyg~g_le#jVeA*cr|A^9%V%X;(|K0eB4>dF{Yi>Mr z|NWX)(xA#5=HKbwt&h^$?5Uw4oz~^kj`ZKkXVOZ(|3CK51iY@Q>ieg;1qvylGFb*C zjRne_v`L#n!464hN@xp|@itA;HZ&7SIx$G0P%4x`83kS}2m%Vq3<~mzF={w|B>t~MRwH)l@9L$Oue1!mgh3b4}rOF04saS5jGPduy9IPb= zJ1Ymnk{?2O{N`-AB?nuTgFTyry_ti3nuGPz^3L%NR5qM!DbK+sdu+1EqzbrMrpm1sLX%bO|GqWOpmVFd4TvyeN>rl>?>vgsA z8KFg9S1bED?4Jbt#Xsk#oc$Gv{dwQ;d^avX@8pgt8u!TKPyOoGaVgh3Y>Oc1Yf-{tD=qyj!~2*n;Ady-Ottu6=q|uU)0r zu0R=CmXd3i_8uo@ov>9wq1Ys;laf}M@2=^i6}+?8pluwX*Gxb>hoX$MY*o-_O#b+S 
zKCbONZav?lEaT^t5a#2Lq;W-_Smo?IDK5&D1$`z;ng+_(H>qqfWGhkbMRqCDRFmWj z)b%BQ%l&8to0YYPOh5ldXp`#!ezTy zpMo|8HX>XqFPBX&ZfO&iq3lT9t#Q&NXC~(-nIsFJB7ck8@wk?`3sD~~Kz%sB_vnH_ zD^Q=8Z9b}?eCZYy1v68(_9`eBBBeznl9omExb&;874pBI{>H?F@%ba<>ot&(fUQQe z`FzB!<=vG>6s$_GKvv{OxV}H^mC&8*3+1{QdOcQCGx59nHUagaR`?UzA>WO3iBH$- zo2tCOZosuN{V6J|M!s(684JU9IH^~b{U1EJ)5EID3z;{_d;!tYb6nu zjtld~hVpP+!9{tgb$Mcs^buYzqFO|S@@iL|C(381Q!ZM%#j?#;Y<7O{3sM*6^%wq{ z+A>}~N0STsEzMgfHB|Bi|8I#uQFJ8!$al-~q#=_pVee|yeXiLnb@al#Nnv|e_D+44 z+uljA7di)Oo4&S#&h_{}bZ(Z)>4OQ-i9Y91O~a}3Ic^(VpD1mEv~=1fj>&IGK980Q z+eP$Ky-tp1MA6Y5%S>g*`ZTd%WZsoIOUBsz8M%tmSl8wJW_-a&&W+dygIq?*_)>h~ zKDzQM<@thCWsbT$v0zoNRIl8Al^Pb8gHu%}^3;8li#TTZex=Iv?Lxf&=#|s=NxUC! zAYPQY{@T_Kmh$ApCA}A*+;d;<#z7SYOM0d5mne&+59RTY^onshYgO0KB%MooWqNff z!oruU%u>l$i;vp_m$wq1ww2=wX6AhuN@bzwTci508|CyB%w*TPa{Z~w zo|bPde zKb@0H9PrcC+CP*_+A?KgT*yxq1uJ`{cFNuEjIpw~PIWem{_}~j+Qvbxq;Y3j5{zW^oL>Gd_C3XL>aGk8myELaZsTt7q zlB6nMv9B{0h0` zYSnceY^Xwgbag`tmCO?^PwJy&`gr+ADxPHbgnXJo$fyf`c}?}ao#g%ce4N?`-BNay zsL03srg%H~BVjfExVr8tJEoqlKKItP{A95?lh(}K3Z1P~MwHKZ{wa=wzenhGe+gc9 zQLnt0_`*r~xUOBV>)*k*9N)@bdH>|=#^+L zxTsfZOB9;KlWt4~pFFF!+%2DBONVxRc~WjVwBxw0F9tXgcGom?xNa?7S8tgY=Xr%* z*VXt=?Ph#)we80Z#F;WC^O9v4KVywU%F2`Mn$fPTtkmm9bnvNoUgzHKx=;DKiSD|A zx;Q%@&T;;9^)T+Eu<91~QI*_BRf%XnZF48eIKk}Xai}+EcjNp|BujHtJ~!q-9^Iz4 zT$;-kDPC&JB;S^+)s_#XFYc5rOY%}TU>uU&5V(04(zi}^4wUgor*y9Fm8#C&xX3D0 zJU6P)Hhed^zb>P5J>yxKBd2ofbaPJl?6oh$Sy5DnKYsqnnuWGRhw^$0(zjY|d5g+f zR{rrkLaO9qt60~9k)v@*dWNdu0^kVsl#;{pCi7NIj*~@o7b)F=5_tGoqwi-ogLG6Q4Tw+ z^t!I5Z+SPbTh-0$KJG^PFa{~sw)|u2kFweqX@8e(et10hZArbIyZ%jdW16QhK$EbI z)}5?91B_`N^oo8({&XP|X-wnVVzdpre#NPaj=-9ow41qY)Ai<-RCy{_Zy}cslGj|- zm1%=B#HcVwFD0%2NV)tHZTs&cAD@psI9|J+h?Ukt z=|wQ*qDDckhmJL>ZeXTE$xsT~t z&sz7yf|aS%%FS}-q+1WWMReY>oBWaRn3hMjD>E0DZYuAlY`ezi=Z(!?R~(l=L*TfM zNtoCLn0H^U>!S$$;cch>Xv% z-k7&Er!!=ZC>!oMoFOF|S4_+rY@))T{JmHte-*wVH6;|gxe zOZDA6r)_S$J6UvYuWk84DR-H6U~;l9)2f83peKO(A}68{+q5k8$0+<`VHMH%IquX`uLJw zd24cJ$;5o5Yn`U+7NpDCq@^iISKfj+VX}KaCSMka;vH~|0|xF6VQHVl28_wMPJ8iJ 
zN!yWoFGW&>GB!}#@TLy)JGUM#He~H%k1JT0mm098Bq)0xnykGnQk_qAuG~(Pa+_Bj z@DY6egzCGsLq4u&&1`1ssNC5&-mRI{?jcGgRN#**OU-WGaGdnH@b6jreHr|lt8I2= zW|HlHoR`xZ;@^I9`D)d*k9pcejimdp`iHi=cBRVDc;@Lu6 zByCBD^5@o7y>kaz{${2|-~ecgqHU>e=I+Pa#Ufd1O9mF>@(+z|Oh`rk z1doh&aV*z3@T5Ph|2-K8{PM2G@w*OX#M{@&)b$E!*Ivu*mkDm{KBCCEC0f=&-+J2K zk>a27FY9E!K5rhrV$=X}zsis!zyy zqH3hPPF0=#D9^dgm+>LaWXbp%%iA4_x@&9EHmy<}-|b+R>#Jl(C97?6>vcQ9F6qC# z^&Ynt3%_lz?d?Zo7w>($ajRUvbaU68t`*~QjHvbe&)%X?LZ#@6+llqINf(H&9L?s0 ze5CajwQJDknL5Jly0-}>`4Kezwfdgn+{IbC%gNdC)_ zuQC!j#Lrik2e9i3)zOn_eOGn%=|WmJRE9@tJ6*WJv>xcE)$QF6lC-YgSN=$N34g4A zUxxl&<_Q?AxILn{jrde$??EFevRQ4UTPwqcBJWwquv501Iv|OwA zf62Q1-*}-X-is;M>yD%A)>9U-M}B(FqOD_$<*M)A?6qz;_V&(yOe|QEmpU-_+|!LQ z;HE+QyQD>UxJKNE%h`dYvVXTKmD*X#u#N+re1(YgMP^7ipVE}&U+Eu~Za%SKU9UXM zLS>4_6X|hoyf$6+oGZGJO>zG%{qG8##a-4LhdGnoDft>h_~m*Hx+r%I1XrZam#!rk zPLzJ5cl7?ha|xuP-{rz`hW_~ge<8qM4Dde$xGTk3-=(6pSqvZ*y%ONB2KZ|M{(6A_ zDZt+d@HYeetpI;Jz~2e*KL_}G0sek~e-Pk*3-FHu{2u}SNr3+=z;hNu$=_b6eOVzD zr2>4j0N)~u1LRRTcjxtmovyrCuXs#jf5haqQn@qs`dup8GD|yGq&# zgxdHmx<`4jtH4p@&cMre6%wDbQ~GTLcTQd2FH&x2#@xAd;d!DL@=LaWbQGN#zeN?w z`}K*zD&_YoU+r;xor(&w?2mcBERH9IBi@Uo4MvzBzq!Au2v63?4ykDS0N*jd?PPIB zU8!j2fc&lj{*^2~*V&)p?##7DlZtlBlE+u6Xh48}Ex-o`_#OdnrP5FDUIF=i0(`#! zw^HNlx6<$P!C88K>-?6HKanm;MMJXW@pUS4GhdR7Kc=E#S@L*RWgc;yO}3DVhG)qi z>*OuPjN3xPk)tuK)KZ`<9}9urOFM}_d=<+c~Nn%)%w3~ z9YrT7e_i=(egCEMNovut%JG(p(0XKtR5Ut^A1L)7@qS;wY1YeT3hzbzxK{h<7s~so zoTaN+1bRi8p@g{;XW*fK2Wf{8&WhjM-&9nQWe>hcMPmcp?j|E8PJUuQ-n`?>PYK9Z z1^Bc8KQh3N4sdq@t)nr{{&Yazof^s^UmK909pG~Vd|rTKRWUoHqUHcUDZtwUd_jOO z4)9X~{Imc+J;2WhaGVIw4yowu06!^eqn&)5z^Tq6&FL;{Q_7 z4+7lo9PM~jD*91C{*36pix@-yrf)w6$GsIQoHd>^E}w zvEv71$={{&b6HqE|3u|&USwZC==g3vk?cQ!$Ne^TXbeqvv*b{wlE9mD^s9!@_b zUQ|A~qj>a+@^K!2Q#sGmAo3UGt)Bdcj&I#-rN{rN@)vqMe%{Lgy}s-5&HDm;C(F-$ z9ltP7hGiYXwvO-JYi0);MPF9#;b>RogS;;WIDT+eyn8r)Xcphs@u^vSuyVirlq>h` zJjC%MdinAd$~`lqiH_Tydvb<2AkX7K0>$xylouADWc! 
z=zY?aaH556R+xb$shyYdl3C$K0b))Vej0-Xuid{?2i~6w0|D z$M+z~_~V{Vg*(vyBA}8mi$qUPsrleIX*Fq|Jm_Fv-m;ozKp}N_@$0d%i?!A-ju~Zb^PosK4m)~ zzx2pHyyI78$q$gT_C?VP376$K7e+Xq*+(;ewBwn5wB1$yufm;6^?!ToBOUC3$evh~ zk(nyIDBeAGqSih=?f6=|+_w6o6GdP^G$Sd`7S~BAMQBzjnN-Ecr3gAs0oxlK#&0 z|6cfjs5VQ!a3JJUiTrSl=^o(&qB&Xe|8nw~{YsO+SPJ%l=)^4f=Y!v-)0cv&9}=V zi}wQI*GA_g`Yqnq9ABPryRdw(-0>a~;Ijh!lmI_7z|Rfvn*#iq0RJ$+%lFQmt|J0` zw(ug_pPiuj^NH|lqYDzhSsv{nUH*V*RKokK{Al4OqsYBY5MC7ZGq*>k{}%!I=K}oI z0RP14-!{=-tdTF=7x8BHg^hpUcxGRCuFALW2l>ptu<`dD&+H3ds`B&q7eC~EEAfMs zw{Hp0<%d%P{5s)9v@g6u_5VH~zb+uZ=K;_&Ao0U@RnPZ@UmGpWvh!2N&q}ymSS60i zW&f-IKS_8|l-ZBAc-INPHaa8GZ~hru0)A$~E#51I7sXFIbe7Li`_~B1WzYKo-gj_r z`N9An7T}Wt{HOqL5MC7RW66o!^TZ?#{}YgZGr$iRB4?asov`0lu>+)hA>@tE`Qbqcte1<1^8tFepi71uka$;-ya{F8Eq#C%w^|c z0e-6RTzY=ucv+G!N2>n62lN~~G?+Nfv0{oz1x#OK2;NK4L9|ibB0iGuV zlA`!@qVvqD>Ys6rACmaZc9>2Qo-1D#2}ilD+9pO4s|F@msh03dTin*otCE>aB=O37x z4-4?R0KYlFUk~st4$2+x4go$Sz()sodw_p8z<(OxZwB~31AMCyxzjaVIP$SY<}nCY zYd(H2Ape^H|0KZs9-KSggM=5+xtbQOInM;-Q-?r4bKd4om0u-1SNT~d-1Q4GA40(M z$Pm8tb&SVON%~VM_Aayu&!y+u0RLft|0BQ;8maYNC(w9K8NwwabI1FW0DmgL{}$k7 zqjKy2c7R_W;CF*3UW=0Oa6tYy0sdrw|6X_zonKn5G3-1VJac~OR^^Wf&y`pM=y>Kl)i}MfY8>o2F41G3PH{YQp6Y*9 z;ugoN6Zs=F;mupWfzZ{>J_+fRN+^A|I9*4~710 zewyQ%^JAv}i2&bj3iO+nt&PB8r1@v9SlgMK<-I`m(Y@Xyqqs~umN@G|8eIc~LC_Bm8y z`Vr8RITvUAcE>a4;$Bld1xG?Yb1v>>2NFH* z88u?l8pj_@xRtAM$3lKp!fnU&X~!Q)_+6@h_;HZWoX^4i$_T%8{INtH&tgG1@Oa2) z&h6mdV}xfM|9K*h=f5BvbOPiv=XPvG^;yR==jcrTh&1Fg=Xp&2ImdsQ=pP-ckG?V! 
z^4Yx3@n0qKe^>c|)sX*9!awU9fAM?ax$5;xPCj$~%`E%jEa=Icf4f%opHc&!IbU~~ z^0Bq3zvrq~aJEW=WL#17Ok$6F=8Jr_N_Z}NUUu@Cb9&ZO-Z>k3ewXMuS=E1I4*1In zH$NOa7yQ+PLLG5!tu-rgwsw$y1tnt$36_32YG9~q-%H}fGba!(6Mk)Ux<#iq07lVvO_0x=$FmW@sg8e; z=(#0U6aB&Qj}ngOH6Ro=L(e}Gj%O|))I0uZ!f#azK6CuDgv)T$g&8f-lgVyd*}22< z%sIOARle#Z$Y;*cy{HxO(e2>dCmi?0BRu1H=G-0bYeyI|AM!Q|liyTO`9~ekoV&yQ z<_H59K;C*Sy?0#rBaUaz-QnJ7gk2XxK6CC4_i`iL>Uied9qz|Q*m@DJ(mkB57a z5x(Jgd7>Zp-0d4L)<-ff&7$2-uM#7 z56|LHIzBy%Z+`~#9F@gSaQv7oev{+JXYs!}J~NAtI1~D75l|;*;_o^>KZ_4N3;Gvj@hE+a{FL>Obh^6s#QAOQ>A7{)Efe^+ zvgLQ3o0?l2s~ghw?RBly?ai$ds%oaS4_#1KgU>bc+s^Jco*QbWOe;>8rPIwVZ6#&t z2_l?ssB3F;msM6Sn4X?qoSrr+J-sA7eQ} zo92jW`3>(H`C;44Z%N0IRZputY+5Pla6iVKu5W5@ZEkCBZBI8eH+M4;8%o2p;Yfz0 zA(H_2L)7z|>Q9<4zsyZPGTvFoQIx}Dx7D3AU)EDOkk!`tT1 zlAkP>AHv?CDU)1&l??8j%keAT?q|Hmq$?}M|5GPTt0W&xnbh@M99GlZ*do8H4qcp? zlrk+`Qale$SGTp*&uL0m&tJ5`q}AXT&?QKjn>H*hzhquKySa6t{9bxegz;1Ew7P9xC28n-^5sY950zh!Z<4}3zouR4)CTQ! z!Y%Ss@7=98ew33m&hk$9Xj*$|T~lpCu1Z-s6)i|yj+!Mob=8d%Ty3wMHk8W=E=m;3!|nj31-Os%hslr5-k zm@g%yUZf>MGkMgxi;}ib+RfU!*$uFL*8JJC>sqC1B-LCi!&E7Sv&|~GpuVXq7fcye z-#TZ$RE44Qn_A>2t5Nx~V;G#CR+hFn2A4^TGOrG0wMHy!l2>imylF$y4KUHJP9iKB z-q2iKo1R_YP=`#3(snDZAWERWP^(rwn}%Uu^>Lgn=`RpGypA2R&*u9 z+7`F9*EPaNv(q(m>%_4OOlsVb$7GyP?3~ajWwrrnAKbj4bWF3ff!*;!Wl8a@`X)`I z6w&`sG%$LYRMy72ruNB$=a*Nac3810>B$6|2gNyMwbCg{|LQv1+Pe1Y`UdF}+odc{ zt2+reGluZmZ|%3rB;QH~*EO~@RM$wkkfJ|fq7-uZcB;Hpv>Z0QV^d}2%i5`iX=#q8 ziyf_W99`{bD_|QMzi~M%pV}bxc5H0@IN$ooCBxEFE5}vNNLS9NnlN?L^a+Pe;XIOt zH+RLCs?^o`i0Uv^+GonDjcMcuqf@j@nkxgkv<|E$qMaIC(o$bnQ@60b&9z2x&zWwQ z($PM@rJ-)ZL^M3&w2JhEiQ+u@)DBm~T~5;XYyUDh(}lQ3yW{ua0%2nkuPk8J)C_id%A9aHqv8cN-@rLnxbwlucO zU0de9EsKehFLT!pb>9w+KReB3PD7c~Fx15|)Wtj0T{YC{89EI0K?>B6^t2XfwB@&_ zrB7>-;X-|rYalu_G^Ob&V@Gl2FBu$cVj-FW!0n_89GXK zmz2dN&^5}{P3>(Z7<<|!vG(Dr zJt^wL(_>@-MW!_yDC1VUhRWbBsWy_@&Q&xdNiHjMYamip%WIp{3+rp^nksAjuZGK4 zP1UWf%?m4Q+?Q_nHFkbejjOkFt7ZK_j2%0x2GgA+9g@$ZnihAdy1FVW0ct zV`TZdOGW$hi@OKY-OWz8r`+D1=s(BQ$7Q?6GQO*+NA#Ub{eOJAG`JJfCF)6faH3Yy 
ziI_KfqSl$@ZFD3_N^DqSv%xs_+J>qHjd6?W3nW*rC*L>Pyy8wcdeda_=N7Z;K5Oe_ z`KozwCsMz$F3uHQj}K4cX_Pi3pTqcoI?+P(Qe4^_l&Mc#${>$3iy%)D+z zzNr#tg})UO{k?a9>@?Zk$L5-a4YL_*~#yI6Aa@sLYc|-FYIk%ZHR*qiDCXAefnJ4zj ztq`BfZfi-Ym^`PhUG^Ot>f|`8JN`2q+ebJw)htF~4_Fd0ttCF;)Fz8{vJWh01IyCW z((Uz?3s5EHUlE-?Eg%}nJPz5op(JkfG;V{bnKG?;NXz{8cwv58bJ^^M`E7Guc8lpl zEvrySYpUDYbtAe{Gr?|kK%d7Ns`U>&6n9rTjX3Jhb%DO~sb9~E4s~Wdx1Q#Pu(kDV zEzNR%b3s&b)c&n?v-eM@=PX*3mW`RV=BDa~`u4@?1$rV}H+L(NC8(se>ePt^x3raR zsGlWg$>k(DP8`marmMEjo#dn=1)b8C7}2S^(v>pPVM$>V4>V9Xvih)%9BAayNrQ^& zS#lUZN*~d*u)e8Q&P}(?Z`_n4j7}E+jUHiWJUu$X$f?uyINHO;Cl_1~n{)5B*nJfr zj&S1zmk-q#X-Joyu3J=7=Z;Ur{Zz@2Ryp%j-zZ;TfH7t;&R?LHJ5u5-p}16>kygs- zG6wNPU6gJ>pkvF~a@5<8IvFbS0T67|re$oAu4}8Qmj1b}rm?!a0~Ocpx7Ni6J>_sCW!lm=T$a{{oFHKM&*GnZya=Q!1)#6Q&=|^_&@=A%=RiR9bQaAz!zmu$869awFsOtBhR5fLrPy)|CFgSP*)ACrS>)uWw>-G?q{i<-_V|}ZfTMGhbkK}-EFIrQNb~CD6VriyF@BB z0WGfmWABws7}JTa&fNzSsaUK_|1lWrO73OY@GUYDbNB9!kqeV(Hsy^m63_JMQUgl0 zKF$tTt++mPURFw--SmZfYXWUfWu{_uM!HmvX*IdCpt!|rf;23L3Bc_#@Yn|1m9iRH z#cIW||H1BJQGm`H*UBQDK(te`PyvIbEtFgPa4xJ#%Wr*IdtF1roaDc@y1IFDD&45xbA2_><#8=TFmn``%>V|*8+oT{CLa@*lP$nwm6kT`XVBeH@jW$YBMYt=Wk$&CSei;Uc6 z&?eo4JA^8V61$|4m1R3OFzSGD-=yod<1&`WcRVm1*O$0kUQdVIy>wv4i}g882i@=E z5+j4W@I5->h+K)SZ`yETt=Q(gYY(MO1~BVcAW1Io=KUG)ccP=TO78ZQyOSG7x5{bG z){;`0wz&W8E<%wO=Ss(nE>kj4*72lK!<<9imMd3q7}i=OmCoFeTQv#yXgY0!hg<2j zJdiWG>DIbg)eY56HFdKvcuER}yVKT{7`e}X>NGbPNz&1ixBbCEa!|QO*3_Gthop}j zTr${huap_Zydh0;z}T%`;zq=2GGdgoG}g@sDw{S}rjP<~+__{ZMuGJ*K{y%DBdC|# ziyP6?;U>nR)9ae#aRE+JZhf(P8cV0+Zg1QOmyT6cRxNP-_;o*BHhfrB1dq zx7d)56nr7?+KETNwbfO+oc%?&H<>1Ukqwpo$jsE@i*9JPN9lH`WP+5Wg1$_yw0Vp5 zVxRB5z8Tkl!R$)>l{mA*5*}uIw1ii^Q0{^ETA>c|=>17vhA;Jt>H5^t65@_t=G`aq8rNZjd*m9Tbz~|UxPl+rm|5+DPweDhpb9+In!Rb z@jsq~lC!9~XQXVnsEQaKm{FOluy$syevxxVg35%9SP?y-e$>%G&vjjmeql z#0QC$)G1wf-jF3yb-9^ymbOk0t7P0R$0z3F2J`lG{InRX z=1ciYQrUT3NNRiQVwobyVuvQsig9P3GlAAOexwq{$)&cmXg6th#!apqs+;gY4cXF_ z^}UXX>hhhh@j>of$ir7HKRsN1S~z$)E-%?uclvead(OjQqA)kD70T~V6^ zQDx~=`9z)5@+td)GS;YUXqe>{&`v4B*gzGzhpWk1BC%z*-r4HzXmx#|bZ|PVppK=p 
zHPW^3?&j)b)Y>E3dLT=+Qesqs$!^?2%W7=qJQ}4}p6k|tM_6IAN!pgIvuPP8*?5~S z?!2@3p7AV)^_b=!a-P(Oj?UwTzq^$+OJim)N}6=PMekNDx$Mq;Uw)DWxxZFF%8E>; zqw*fcP$K3x&zmp1P#7P}u&{1|4D#^M4)3vh@>IW}Y1Cu4s^3%k9-+YKK%aZ1Pd{*@h!#23C(Cehyb{ks zlsSrQ4^}Rq0bS2LD~sksQki25wejqk2;54OpBDFYiJni*@Wg2t?Uea-qtin#_k1et z3EY!OwL6%QxgBJIKjQ2utx#ITXMuz?tQAR9Un1E;};cQ#u2eAgXwE-PdnG`L`j z%n~P5$>Rv8P4E{G{^MJq82W@C1}l4m@hE>2?&7)gFPRg~9hMY3jA#8(4nIH`JGw(c zD>~9q;k!<(>Bv*;+`It~vGk@lo$h>w(3%H&HaT}B8)j}iWG$}LbH62#GPB{w+;sJU z6c$OhZ=d7Abq_m0=gCP^x=9>hk$e#y;J1>S!~qt`7tsN_Cb8*DfXuzR{fH|}9X9d- zX~~3cBlTove#z?8@bnjPFF9U8Zj(a-O>)0TU29{#JnFi+HEW~hi)p>gUJu3Z9?Ywj zUvQ|c#Q6@HLTB{`Uqr{dfk%9p!Od&2jyz@B__WNN-J`SI*$S7Ujt%=4F)gkTX%a10=;#vHN|C~*LEHZwPJ4P9W{2%TZ#rpMEo-cSw68k$}lD_Z-DaRP)3*T)Z z2R7Jk$OuL@#^h&#($x)faHK+R#zo4ZF192+XUhAb642LbKiEjT{`|DFYM3cc$9EitVe#DM(%*bd0LD_ z?Xl(35U!Fq0U#NF{M52?_H?MTQmDbo!imD3XJ;&F2vAbN625< zDYyNrc>j}kusZ+q9}?9lEaLpf;;NJ*Npjkp**jl;I)}+!X-#c7w4QEht#6cW z+WmT!6tqt7-X;!3n`NtqUH?s}_1+0llD!tz$sdft>Z07M(^O}NYp8?nWnp&4boq51*EUKEIO&Lnno21$wpL*2-k5BblbW&u>_1a}qcpl(BAiC=S?Aqd zp_O)OBQ@DlXWV?7-!^LTiVlXvKC~z2_qcZFPp5q&y#^Mr z9lA$RBtK!%^@6mXj7HV$en+$EG8%SSMkfjGvM03}+vJ|K=JJc?NwX`j;^uUA%_IJD@dL5CcFB=bBwcdqb5ZrI`UNFeMv6~dBoC>?K+f4B z_t9?t z33xk!_>j#UcKjgXe<7YG{sHls#CO{~qopK!e6MXJkrLp4F6VueBOlohTa!G;dlkw5L9VfUzl-EABs-rW&h6*F zNzWfiPw#&6j|A9xE9u#l_>YM1NBqactG?vaWc3SYDMxzQ{u@dDhomQO+l-zc5TB|X zdbk}visZQ+K8`s1|9;ZL_B>1S?9UH~vp=`pF18c)us?TG4u9TG_75X|2l4U5?<78V z`&d8paJzA}a^Ii#l05r!+Z{4`*q^hM`~I9m^6bwB;_T07h_ipT+%Xd`_rFIfN4zh| z%gS>d$=^nPc$oONh_4OsJ-(cYm;H8dfPafPk58^6&gsqTpV80hEhT;t*?DMy-%FhB z{D?T`SHVu1c-f!32lx=;Z2yJCze{#rrX2n0O5(pDelPLOcFx#y9r3=(q31s00|UH_ z^!$Y6M-pfKRm6Wv@(slACw?mN2Z*mA{xjlN6MvBSzR}mjc@(YO{Px74a3rYS)l7E3Xm(P!gv;Vi-HBJ}o zXTC3SE}yp(=kj@^S4>3TK5KOoNKZL6AADe;RyD;A}<|oMSBj2xv%GH;;iR-;y0829})j8 z@m0!^-e-tEr5yIWDlhZTv&v!5vn0P+kpKzcJdfL&cr(fGN}TN+7~n?}=X|_}_^(NS zudg|CoITGG--|fwSw@`oJWZVSd`O(qtJ3e#V|+gap{XhWK*jW~b@@j>mO49X(8XeouOSL-N0qI4r%dko@x` 
z|99dq5dVz$i^Mk{1nXn_|3Lg}#JT*83h)^L-W=dRCeG_lFDN&^S$bb3`J>6szbi*M z96|hFr00*sd++6v>)Jh(AIq2g0N>W*I&6-1QI33niS&Ps_%X!O%3++qY zw~h30eL0)>%cN&TK>rn_|4*cUCFx=PHwX0GO?uuSJwFTRc`Cryke)Y5&#M9Xw~2E* z@D3a)0siN9b^JaKJN`8(MCQ*Ul%rg|B`@R0kUY;rA0W>9XYZTQpC{zoY}LPr)*ryXq5MZW?}mTC zFH`Trt}a}R`lJ$}FPa*w~Ryu#yuSN?ac*QS3IK1j&Z zdTl(dyujl-Dc{-SUsAq@#|J7uj_T21VOD;ANA+d2*m0@Kzv}Tjm4A7EXRd2!jwL-h zJ&h3kK9W|`&wPL4S`3{Yu6J9->Ngo@dq$Hz%qs$XY=Dms@QDFFIl!j`cvXO-PUQ0Q zbmF>v?(*d*;#w^npFzBo{LkZ!gOnqGS$-nPbHC5=9!L5)-s6dPCSH~g#k&RBc_Qi8 zWg(Z|dBn9ky7JaQyr=YNGwk&9er1SjbDkxy!`Li7lKjv5drFVZk1^ihb%sWYm+SBH z0RLWqKN#S@5Ac5l_*W$F5MU?UGbX?>_D1mK&kFGC1Kip%)C*p(81A&k`q^(b?}7Yj zqSNZH4%?kQT{-#av8(uVuuMPY8HFhj@vjF<(yn>%_lFe6n)nbBQJ~hLXt5cdx0e(R+j=m6~ zPyCkf2e${RfXsPYz% zKcT$c<4-AHr+C_IxGrLgkR>{hdL?*`ED~ zql}pT!NeaU{&nJy6SsM@?>C#T`}WUKJ-+>?kbbrweL8}#-`WpwwjX^N0yx`$CGlU% zXG^cm?|uFElRTFb>=hzFp7&F*XMy1JKLog~6Zr9d7?9sa+ZD*OJ+>YI{-RuK>9To0 zIBYO(>kYmhTW|3BftKo6I<9|q)YUBQ?CML_;(;;es-a=c-E+uG$3_}20n!RO_~*`M4lp>H?+ zO(f6x^^tP;{|1uJqjJJ_?n<2f#`W&|qz7wg2)_Mo0lthl+sWfij`v3-&wluS%KdWn zBFV2LJO57d+&)*KvPpm*%&EAMJZ|t9KmBat+pRbWT*SoPK&;B`v&%#5vwmiL;$&5x<(!btH_JfUmjU@py^r zU7eGT%N5&s1#zx-&nkz0F3&HJJlDG~iERjexg9~A{duHv=;wOZO7dKuKOuRpcbii= zWIGFqv!BNh=X!U%a^L>>0dDg%$g`a{lYWl(Hqyg>c!=b=T>X*cIln$8c`jEosGM`X zJB2vcyEV$8j_chYNS^E6hs3$w4W;tQ_3jYjT%J!L&i<)W?$^5oB+vf&4#~5BZY6p4 z&(Dc-yuTsNc0Ny>>s>$b7Xs`&S<0lfO9fOue@J}K03SIvVq8;`aym*GSK=NdEAE{5ix~{(qFiPP{(Vb)x42d>4(^ zm*;uMZ^#~NPki|YRX_ZIxvcr;6yoe>Uf+9-eJ@|A4r)C*bRdZwFxs zu=5}CGCjPG{ZHc7enS2e;#;eosJ}SVVS1)1N4y^qUrh2J6UW#C0eb#H{2b-b^H1XD zAISfiINBlv$Ybru?CA@`CHVFqK>Snr-sCHYV~^GNDasM=3etbJa>Uz0{9NLn5Wkf; z*RQ*Y^LVUOu0?=;u3z6#j>}MI&CY8`p6k~#;v8=o#moFe;vDbG%JIha`cEX!@!lL0EZ^>t~X901} z7tF;Fe0%OB&h|W_+_%Tdvv1Gyq=)T!CBWY%&UW5Soc;f6;_u66OE0g}vi+~Ayl+3+ z9P)oBoQB1iWJxuak&Uyci%Q^Q)%&lG{ zAGw@|+6SwrkY_vXe6DY2A=NLoa}aU1r#QgNiL;#t2FmAg0iIS4b$ETMe(Xnn27f^3 zdp4d#dTT|-+P$S@5A$ymKY`?V-2i=}>3Lq|@rGC5KJrJxHZX z=f!qD3U)GomE>8^X1Xy6c`pAY#M#e>1o&9uT%L!DUIgf8J+|NL^94>iTmE?ER<5j^ 
z^Ed?GSvh}5@&W<+uc3T-mN=K&R$MH>x97qDe}*{wa{!f3=Dfeget1vip$@N8)sNf2 z5D9qW`nW4`u8(7gbGmAjn|`a0-y?aphwBCNBLn4g5pkBknK=95=>X^Rhb-Sn<(BtN zE+WqBJpW6a{WgNiC)eLe%KiF#9?7#k&jt7++RnkPEdM9sTpvFT@B_9)uE+V#_3>2V z>}P&{6!V({^1lr5Hv`|3iL-ol7wgLuD(9TRnUemlQ`ueqK!l02vPeC4L!>glgYp6%g%Wag&^>dQ66 zS^kg2+5VFP<^0+Je>%WNQn}@Ru!cDIgDZ%0J-te~U$0*$d9FvrRGyhX7vLWg=la`^ z`WNP-h;uzXo;a7QlZmtc?YWD{X0CVF1>_$N@Xfap1rnf#_3s$q2RrF(`56H|m^hac zTdzaD43fHU{ov-*4)A`Ht(!sqt5Voa{xFi?jkxV=`009ptRH0(0sdkAO~hG$XubU+rzO_I ze%O`tFdt8x>&0=z*-l>fV0rAFA;5m-Tc9D70G<{Z^E0n+&m_K`%H!=Vd6|5F(!={E zCyNY%A208xaC))lh~UfHJ{>rxcX=RPmlNl7T}zzPb-s%=j+gCulQ=(z0eeOWz8|pX zhyXj;54;|Tv)tx4TR%i|gEejAqo70rcq?(<59Rh@34|pe-hQP2hmK~(;rpUNknl?U z=H$6v_aXmq{bf6`H=WDQlZby_cJ8d{g5S{AnVl8HONiULf$z7oRUY|^m-+3d$|28w zTT7h%R;e8J97^#{CSFYZ7}CS}c&2jSo_k52?fC<7wuk*QiQ*lr^02d=IQLU*&k~jQ z?csXO_VBrgkUbM9Ubd%=_3Wyq<#FSO#JS$xAvPl*-XS7m_T%g~f^QFxTR7fr0)FQ4C(HAEfq7{B z$>Uy@|3M($djtGW0scXN7m?pMUf$1QJ~tq5_Y3&`{8m8z!2thdfWHyo+XvP|c)x({ zKR6)&mAE)$l@p$Sv7YfH&-^FEdEMZ3;w;bSmAl85a4I7cegnj^RLK7 zR?c50jyZ^No)>VuH>iG$XNHmdkBJYK&!&gxvpmkhSri1k;br5%(aK>zk8}9`%041v z`oBT)T(4Iu$6Jc*;rqYNBL0xbAs}A%kDaRl$6U$&i+}Y9B=HOpOZZM=RuJ}@cqN*H@+;_TD-eZ zy4XKIR}MemY>dguz##wg1LB^fB61fEk%JIhj z;eLw!^AMCv@cqN(XJ?AHGv()ClIQYMM*i6p<)?t+?Wywf)!04GKit0AI0}Aa&imKA z9)1YvsU<%gN}Sg%8zMMGMFP^uu z{Q0?P#2tHRimqTz{ z?8_ao+ng}%OQA$<`b7g@Q+n~g1*B~Tpn5ioX15hznJ7F zij9^pO#%LG;;d(ia=)FgBYE!c7Al8Z@Us5#2Lbt?1?0zQe){q5pxpP*Bc#7tWGuZO z66bVbO&9@sc>EbUPsR0x$Dg=&1_64wKIUn;0B3p3O%TAj-R5;Xw(|fd9m_N4eIn+~ z0eOre5fCrSTe}C&emDfe62QL*<8Z!j@kX)9^7kAVDFJp)7T+2#r+i_4T7dJslFQE( zlwYjpW#W@4-agb0Pa)n&ob@~#;M{Ms{v(l438)u`k$!%@3fISPsyxia%j)AJ%JIhX zYXb6H(fIsOigy5UPS@}Nzm7PUw_AvFdApxD*Q2|19O3)nb>&C~*T>#8uIB!62jZ;% zE5x~;?xf|%{AvB^y;QE)KVpUpkpI5CE{gwkA@&oM41ehulrjyT)N_Yu^Q zJjy%*;@y)t=R5oP>y(eIA8jK7$|3u6u%j_&|L}O2%kylK=l!@diEmH-!Q3AK(c@+3 zoSz1i0DIWZmmSUKUr|3m2baSl<LO$Blzhu|AQBj{_n$)67Y71yewXR{vqq< z=bkYiO8v-ZWKSJ&&fjhNAlB@7PYrNBzs-8yQ+YqVdAh!ZbPbc2rE5FozI;7#?2(!L zH39z90DqP^m$%oH!ydFPrvL9Ge-`@_e(4 
zHI5g1&1UEA#6Ki^-XM-X%jCC`&j_%88`8t=B=%}e{tS}mc)v-U<9&=c$NMUAju(4~ z2(X9a#o8JIILCW9agO&K;w*n5ahAW9INQn3r9tI0KRiS7Z2t)A@7R96|BLNeK=N$Q z&BR&$cH%5=^9lHe?SF#g*?zk}6!Ixb*M0yJd_R{F=Xj4J&UV%kXZiDpA4T!LO`Po< zLgVB@lK(Mr*7G=V&X>Ot=XRqXwR>!Tf8uQa4~cXAeSrAxWapoWb9>l8<5IT&+r-)a zmx!~SZxClYx2N`&<##2{@_P~I@^Cb9)>BQK^~@vA?P{xXaZS`)>b}(@>qn6N+^()5 z`OQfFGSbiT*AVA?`95)OKW`=dxEIFkxtHWw|AQpI1<5}VkiQ(2NCMJ}myM%B=ifdR zoe1y)k3%YPu>`#FIVh}YBS4BC*<4^X_yCgr0;*H3kY^Uu9K|ha&_f>f$oBfP4{|LUH zXT{j@T8j5{<&3CeIbGZ@dMWdhks%VGhs!gMXE-1Ed?|B2_r>L5 zCFy7R8;G;~lf>Eoza_q{d^W$mL7e434De5hhw`y6>F-QF&LsKID<99Lba6gjPMrPE z?I-gel04_jZNyog$Ez&=Jjrvuyh5Dw@gw3a-I=7TcMxZJTbDz9VR_s424_F)Md@O{6)VRZw})jU&+Xxefc&U{JkC}jz#eWtn}~D! zc_wkJ5n27ZlsLDaTuwM&?jU(?SAR*I<#}Gi`B>_#kL|}EuGuq+IP0lVj&`+%^PoXZ<*M)2F!FB9kT0N}1|T z@}Yd;bFH1pms+y(zxfhBcdn=M<^H(%xbnvB(i_A%e|i0r%kA-6o{{}LE=8Y$fO5j) z|LKm#yf@|VImCG$cQJ9E$6?JD0s47=9C0DQo|JsHa(*J&a~5$P2Xef3l03)z3~`o! zfjGg71^=m0{j`uv`9PiJFbG)w+KSDm6f8JA$H@s}V!Tlze z2YbGd$=m$#XzFKKo`3Iy?O#grZ2ymmbN<@?sl{u4`!XCR!RM9A@y78UN}S_;kT}op z_<1SlbIos`sJtKVF1nuMb31q8^Rcx4!}Ve|akjsSINSeZAl_F=p7p##ob|NPx=KW6I{&Z%3TRKipqfd%LAvH;m-(mNX)O^ZjeDDu+FM{}IL# z2zbNG?ms$7Ipn#V^SqVI`E{NiYfo+=JzSpOBF^*o_ldKgd7qB`d7apY;M@757(32( z{+&3tho`825Iy&IXOiEz9R8X(`{#d&bG&xnlW*rI0eRjZ;eMx_{8KB}TKSow9B+7; z|9KwD8^1cb%GoJdXW8?x$S3Pl-KaV)u ze=%{k|6$^s@Bbvu{tWeldm{rS;Emh_n9V ziL;+?CtgSXe>A{-=5^T z-~4Joejk$OezPnf&(B@tdU_1Wb9-J*`gt5OkL20@R+8s&4)?z-{~gl9_Sk+6su`D0 z?w2t)xAx~{kwbudf8wEj`F?HB(fldFB-sEV^`TW&p0Xqu=oc9-4|3s2! 
z{Z9~QKkzt^`GMqT=6v3dIX_>W*J;<19$pXO-(zL@VmM3!-dO&p#7~q=FhAS5eSFRK z{FCIF_on`Z%hf>T$lpHVE7QL}$#Z+c&u`~-+M`LH<2@JtrwvU zM?iYHT`E_OH@vJ}T1fKj&x?t({Z|oZ`+r58$IrZ9#^dLIt32NDvhnk-RKM7sLBx6d z96E15kL20@HsWkQ-*?aX{yfREKR;4#_M1PqqI$~mdn)(kCy_kcIgL2m`6O{3C%;0R z3W+#rdta`m#4j^(jNVZ1Zr;e$#3O>O5fPA;Q-?CN;<3^-TX)`Eegonb9x}p^4}$Uc-(b!K>jw8=W*9h z1M>X4GdzxZiR8K8xBX3joYt9fS2L9p9(Um^la-&&jJpmZc^-ES`45h}?0g#X5ic8G zJ%E2Cz&~6*|3RG3q1k<}kY_)Sff5ORKQ$@Fj&psPOMH&RW&S*kcs=p&5$F2&L*lHT zpO1<&U8bMUw>J_mAw4_}zn(bGG@72fiQ}xA@wbTYN&J1{gNSdZ<8a?^yAbDeeVup- z>EZh{_anZPM!@OkphN&gEfFDPCo**i`sKd)vVlHU@75|G{lh~qPY z?+4T!<7_A2e=(HwgytjP4CuL(I1*s_cn|S5;=d-&{(O-*+xZspSHxGQzmVoByOVxi zFJk?NlRWF^`@Zu?&*>!3_Fqn%_1{38_1{69?dRWz>P7L|zAf^R(=`Y{0^YFJXMPw( zd|%>+6W^P7HSuENClTMD_!-0xB>pYpBZyy1oZG{jl#6Jz4do;67jwV-O(a$VF2h+4 zOK%~d1n~L9>m7}GJMrzQKDH9Ka}I(!)VtS6eg?@SPZ1C=bJR@)aPCi^2=LvhJbXre zIG^}HikHh9`}0PXM>Kr@?>(f4{rMpAw!}%s;a!AGzN-jrd|p?{eZ?Uw(!JNx&P|mt{0x>qqfkMDs5$ zw|rkP*9%^!W&Yq6E?O5a#xRzy7l?Da@hWj{H~vhV>)p0=e=E0pe7`W;KZNdk<@-ve z6Tg}4Tu7YT^V5lQ{W^y@my-*Lv;0cp+z$L8z+WcL_IwcFq32QThJ|VgD0b|(66Gi- z?6*n8A0_`(5g$P1ke^G;>Eh>Lu>VgYJ*+?Ud%ic4JjZL#C4>DZlRez-asKkWmHm0I zT#o=f%>UqMHh+&e>)%0qh2ZNMMx4ttuS0UZ_+G5W>EU|u3*ua_d0(6TZ|5|S?|4~% zw-4FF@gnUAu%F}Q`4Pt(njc*p(DN>F)`KxH0^()+F=s>oXFD->L-6e!N1WSF+%s$P ztOsip#yP#G66bdHM&fM0tq=J2^E`#+_ojB6%Y!|4*4HzI$*66bd21mY~u{R_+6 zb1#uR9-l8HJ={KMb#!G4A{EYyQM9eTL(d;2udV&J-?0LR{BbH*r0+;Cd~Wi4DaZKX zG~(qX{}SmRN&I@^>Bk*r{Iv@g$HT+!iXOO&o5BbxHLkGh8^qfWVtmjPP zv6l7gSw`}m(G&8+a?-=`hU9JS!esG|^oHbZ@3ph?=XKGZ6{lCiN zw*EfD<9HST!c6x!Zr@XgumT;Truk^S*7x*TR+w*a5^Y~X)|9u`WQohRLdnkXx<9jP#?eYDTukpA& ze`l@7aW)9y&G;=zuhrpo9=AIDvByWL9?Kv6J4#-a%KPZM@k#nW=J*K4kJJDAdwib$ zU+8hnX%Pl_+{XR3u7a;e$?GDOv*)@Szf$=~eK&rC{y*O1c8<2n<4@@SGd%vZ{y)>> zFX;bsJ^r%(-{SE%_5Vd4e^38k;_<)f|I0mY_j{W^@bxHpVXXw=3Qrz;4+twg-beXO z9=G+-+dRIl%HQYl9hI;0_^!&I@OY8()gHI?)-@i-o*BYgj}KFBe#O_L<13Y0 zI*ebZ+~PNWy>h$G`1h6Hr0>S<|JywN1O5L#kKdyIuk!eh^#3P3ZhBXH{0{wpjmPiO z|JQol^uFow-|GMCJpP9M|FOsas{gymGnMCmDYtzJ^Uqe8)Jo{5@5b#p4z_<`yg=n_ 
zy~4O%KgiRwt^Qx^al4MX9v`UxFY@>v`u`G-n_rfD+}3w4^7y{`{}mqJU;kg}ar5s@9v`g#-{x_99{PPA zAFBVa@_4!a|AfcweyY_TxBJT0c>ECkf33%D-~COG+wW$r^SIr=@v+Aj>;G}7&a~(K zl=sswtUOqH3q5XrF7~+XuZ;A#J(r})|l+|f<1Q&T(^?kSL0OxT#uMJ zV(Wk4y4B;j`3<~}Bk`ZJ^fS1vJp$Q00(>g~4Y%k!xc)?;Bddt_)z5f)miX4hZQKq$ z1;lM16MP%u{nS_B{fG}F{w3n&#BJ>qUr#2!9m&rmzCH1F;yVzxaVhNCk@(dl|7GGf zo`igV;@G1>0N=@divQR+5Zu;y@r|wXgYN=h0YC!Yc6FcPKid=k3h_b2ze;=r@!g15 z5icZe<41h0BJNx3w~ZHTI;|f!ZWmeq13r-SPuF?_zB_UMeqn2Wt(`ahb|2~XTB(d% zr5WLIyHDwAk1x^6WcQhxo_<=H`fBAgzFPT6;`%69mx`Ih_os9%Ar4&^X{EH^hXN}m zdApwryo9*@-Xr*6;)ArY03Sj;O?-jo6~4Cnupn=35%_5&Uk0Fo&&>`sDDR(=d^zz! z>R0F=PP~NpfyCz$w`YOk>ywC&Ao-h!A58pq;)f7_iul)wmuh~(PQ3bQqd4B0g;?V!Pybx|C z4*8KP|0r?jZ_&=`SHvMdUFG}hpaHVbZ|iRZh(msb%8wvEO}K^S#HSmS_jidSj(*zd z{f#*EuT=dbb+BUgd|&wl;*hs>pIYL$rfPTjBcYKve5% z{1_)0|LLdo2A3a8d_3{vh|eW{Jn@r=BVOyT&mwMnzWDlq|KHuWz{gQl|8L*nVH4yP zpukcCqyk~nXTr;*&9=LAQ({w~yqs({n`Y_eL9%I6C^Xm9 zFgeCyEM^F)m&qHH$o?K=@)(m(r%5NmFK6;aOum%KFJp4#J(PIYF?pQn-^ApoKbiC& zW%5R*|63+Uef|8$`%K=%^g}c`#%!CJ{B|a9Ve;=VIp#l~?AglXS1|oN>Mv2PmB|B4 z-p1s*e?|QSlW%7F?M(hKlcPORvS%lgFJt=uVsg|^kbWUe`tfi%)30Ij4knK=Ip(9^ z54@MjS1|n@OnxPkzr*C1k6tG%qj3Q3xr*rrnH=?>Bl|Zn`AVk$btXss4@rNJ$**Sm zUb>h;wQHDs3X@|#`uUp#ldod>S2H>4FCL>#Zf5dMrazX(CFH%9$@O@J@-H%ZC(~ce zl)*B>lBa zzLx3V$mFQ6-;3yD@+8y$6_caBzHZvZj`qv#?N{X+fUkv%>p?`Ha?Opf}`lYRq} zuVeaGFgfb~jr4n&d_B|u9+RWKUeDUfDQB02d`noZ| znAUdxuvYp!{2E*Z!_LVMzXMCclWu(Vi2>sgs+T{7$BS*kpBp`G1GWk7aVqNB=(Z zrA&So(~mMa>dzzI0Vdzb^z%NW4o1V~drW>5lVd)YQ9gA{j+mBH6{wNPQU4pHe=n2I z2Kj9Wg7ga}M}7T!SX-I=Zd+4*z0BmOe?R3vsX(2_Z0}LK_IDMN|A5IaVsgyqMat(+ zCjTMRzlX_D{~x6PHj{5+`h_Q|1GMKyOrBu!pD_8=Opf^dztl z{F9Ut^8S?RpTOj(e<|rVF!_B$H}bGQs3OXZlw&IqKg{`dgX&0jB>llcWCMNdK%trG)nUg6aF29QE~j);cDCkm=vV z;DgT>J+7f_VlS;`@4h5A7=7vnH=-c|3~>rCjTYV{{xew z{*gKAWFF0rkoOU$e-x9WehKNHKR2z!cZ+uA&~pvby- z!TjpPj7VZdOEehqm((>|%VTxXCVxa$Cf+JX;N04XA5N86R$Vj^j>U_Mt@5T7mElCB0nWmiS(X*)=qN5Op4u8+678@O zt>IXr4UF-JYJI7X-|sK>S(R2}xGB6O+Uj3eohZeOEc9sM{OS@5=u50hb=qofiMEE( 
zjv+VpgL%c=yi%|(*0ij7Y19w)Fze3y2m&?Cl22}+ilG?&dkhp-Qx}zKsx_w}-0G?} zecTp|cnoG(vs=TFrP0L48HT+v;$x45YBeKVA>2j+jc^l*sne|L7Aw-+-js;eSuKfH z=yh7+;Ybua(^NM*-C8iWe9zsh-(MXLR?o2fp#`OuwWO)t?uVyZq2|a^tED*}i>&ZZ z_4_NP`j_!ubPxQqVy#Qs!>x72Gpkp$C8CX0t=JDIV$n96syI6;V^Ep7EE-8Pw+16% zEes0E97AV++T=s`v*+7CW$;Nb!X8%YqA;syUg0h#`&utDFz>f%hutjqax^t+%zm*k z)qj1$ZTuw1QO}${={A1S%UTU%G?aaW^>OcJfS+UoKkAz3DuN$JHFRy@M;*bvxumsi z+~cLtM=^q^3+zlY;MQim%Epz2;-Zp|eKJ%$9j>&OL=&)}5RWEsEdVdKaRmcr!u8E= z?AzywiAz(mngzJ7?mj5LJzrJtO;vJj#>Zcy^%%5&0^+8XYfD_mWDi#|`{l~Q9ti7V zZ7t1hQJmB-iA1buMK#AQ=+Zd zXP>fgMD$n9ht+4n#eK?;<^%Sce`>`ER^TJyrY2a_kN9yV-in43RX$XOCay!B9xSe9dz|VBW~hz;rbV%)CAIDKutb$8E{?#0^w3R= zJuF)7O+M-CKrr`7Wn|t#nXxP!Z;$#Tu_C*2w7_y)TE~_h-l0z{p2^oDAso@*o);7C z;RY&r6gyyCS5xI-$JJQqgD#;Grg-(?_ISbyx3ol?>imtW3Joi{tg&eYIW&w6yA8!l z)wxJjXfnpV-GH|;^+e+|5QC;j5W2v%Ba=e6uY853ahPca7SyF^@%VM z2NIlKlZg$nreStgCtx-}<1Z{jL#JcKU=|+6HCpu1!4JCqS8B~^ZfuF+oxx#2H}C!V zXMzMk@P0g*1;rNhU&Txj2kZ~|`)i|g*X$?CQcueNxxvan@S zK*bQ!tV>4Sy{N##z{WKXcF085rKJ2AHaKC&!Um`C@}+ugO7B>*&p~LrROg40W9wq% z(DGv)W=|-SV%Gt~X%p3MZFBksd6w|uy{`nrL)B^?2T{s`k_?Km>LygsNhsj^ja^4~<(>O7*Y{Na0BRQ4IJrWPM zwfQ6NqG*7#;lmjiFFCkB?6&O0EPz`-PNJ?2Wq0vejd0zva1&OUEbN^#6{o7z(=2gD zThg|oDG}~~$2#n1OYE6#bz&yekuY4M+UrtTHr;*TN%LG+T{g71?D2f=d%QVN#qAZa z`dB<}L0yV1Y4SJ164;#d*#k2$RZ2rWdg_u`Y;*R-a^^M%YSq(qt8*ylZ0ug&WK{!j z@S;jw3y@J)WjgtvRi3P3*_R=3*8`S=>*--j`yoqz8U^eAhn5Jk zoM7Xe8m?P2#KU#j(fuZzS->Q(PU<4H)%u!(jjmZ=9#qd%m)x`Vj|Whv_vFFeHyEng z`!Dkhmczrj6;WJ}fLel!>tR?pm#V;?7LAHwTGqU@9Uf7`nQl|KF&c!K9bPiS^_|aJ zh!6QzhTE3<7g{@R|9a^r9wnw6t_O=Fa z1s-P$w|C%W1bE|L#x4-jdu4aIq%}E$L!C`kf>Lz}T^+rGUm6Sd4D9(BwmCi@gPD+W zHa$=y!-c(7TAb{rPm$U`K9;69@%HVE^tmgGUzRIIQ}HT^I$nDbmU_ydx=8|6-@j}R zQ>U!w3mJIu=Hve6Q8#`*8lI8!hYgHjcygbL$G$l*;$@VX>!b2(WXo#Le$ntcLiLYL zI(T{6OI9OZU|EojT#02M8`&bug3Q4)b1L-lF0b-oiTYU>Q{&Km&xJt?mI`1YAXo(} zvDHDGcHuHy+Su_i+;YLml=BULxM7iI_^f8Q%LN6}iZ?Bsis(PfEA{p^tkK!) 
zm;VR6f;n9!y_0S&Z*Hx#YT*USx^U}?{}V-qml#xJ`o6>eg)+k%Gvrn{4r3^cps{7y zG_p_l^Z+Ji|2L)w;7I2F{6A%;kd-t48QnOzvaf>^TT4_q**ICidfu@YFfI^Bt_( zn+rHt7=p4WHJ5_+cn7IO-$81Q$KeJDzlW6l12e!{?uvC5{PIjwTRl8FYPGb+8ewh_ ztg`Q0x#CRo6t7hG{&kf-J}_Kt`nQi<_G4zy(BU~hi#_&>ch2Cc-{O{7G!k7NYm2JK zep}lk@RAVxdQhUhB_0JO`|-Q#3M+UnEC<44nfQA^`a$8LM}+OSa`x~DF`QR-=p3Fo zXBTVsv#FVHtlw7=&!ce7kMLWD^5vz~EP58`DL+*XnY2AysI+*P` z3f<3_7jf|p9F$nbCoDk?o>F&O+#Lz~GTs-|B=mD2qCe1TGy4)5dRlaNr16)mSS8Ey z?xT9;1s>6Y*Uj+vuW-qSZj#{Yel3he(=Dhz^jwZzb^Rc;(x0KIo@OmxWce4{!xRig z^Use*{7{31wIdykz3G|tBOdTbB(!hg^6b z)P*!sPqe5469$_75)3`18lhn97aR4`^9b2!7FZ-=KbwJ1mxKA~&!^ixN#-60)Szh( z^`855o5i8O(f3vMzP}l|AM;7JFa61#5$m$|J%q!@lf4>Frn2Xu@x=>xd|$eImi|=G zh=oCSf_q<+_Int#iObT%U^d~s8X(zIgmSybxlk7EeimK;4YhlRhfC8Z0>g|pZUfV+ z-*{PvbJE7}(x?U3FH7Kc&pqB^+V|-UQ0~KY<|C;TXrfRlQf`v!->HU6Y#vwB-66-;<%suk8@hy|aNw%})) z!z)0br*Vs_@#hJw%63@rQxACW`CWa=%l_#nxaxw8;Dx_MLD!iK{^iWlJGKCl&Fd-p$@RUgPkV5Ae;G0q2R=K8 z*0FcLP3};dBbE0ET8lHI{beoXJwn#%$^HJYmgPNy*6y#^qO~IzTjjfbZ)e17f|2(K z#8&6HKVoa-Jwn#1Tt~i@VZJQy5wv!H#a3BtN5a8cSHCf+*JZNl5cW3Xk+L(}kh->W z&;5X_>{w;OX{0&N8#Z|DZN?)JPgada-p)P8b8l~MWK%`=A#aDM?oE!_R@8mSdxWfY zQ(fI!%kmyUYxh@d_uL>Zuz#QnuCl~`5wWi?u*fxg&z$>f5P`%Xq39D zH-8bYc;p@}!Czs47^j|4fq$t4|5phtn0J9~L+bk8^;HFxFW&I=&ww8@Ylmm4dX_f^o@;}D|C{)>lVZ9uz!DZRex%(9cd^T|C2OCm>8x}xV_>)%e;E^CJ5%Vv@ zu8J?_McNY6Q+PPgcWhJhOyx+$h44O9eG&WvqK*y={##I6a})f;Lt=%s41R(c)><2* zP4Iu~nwk^QqInC>KMmG7BTMNR{$)`S{-!CchvGUX1}fTM)wjY=PQcH&wl>3Z=su@B z_-FEm2)UGzjQ&ld=T`Xtd?%0cc*c@kZ=LV~0bl3>Uq$$XX^Z?*T;MMi_)m3#uM_xB zbAi`I!{c|l3w(!Qzt07}Q@|Iyz^@hXQ(fSD1pG7?c>TYudHklk!1oILGhE;|54?nAL;_Xh48%oALau8tbqTV3;ck9FLQz4A>hkh;CBi5IWF*HK^GH`pWg*O zPr%P}fuAJcgD&vcSDNKr{zw=2M+JPo3w%G}`TX-J7x=9L|Ise+uL}4bF7P`A{Od08?+f@h zUEp)ZsD{t$KlX{b-RZ4U`#Vg)|J?=NE8zd(0)MK2f6E2FM8Lo80$(BEk8y#o67V0m zz+WohKXid_5b(QQ;1dGg19ei@8@+X^Kb-=8lneYi0YBOW{#F4$-Ua?n0gu-d+Hbve z@_&w-Td;Jq&Jiv|2-7x+2>{}~tf76D)2 z0>4tgpXdU=R=}U+0>44PpX>tPE8q)V;O`djQ(WLT3;55vz&|SBPjP|o7w~vZM&i`g zY5aIzz@O#ymX$ICzD0)H6c 
z`TVoe1>P&*t6bnu74QpO;7bJjLKpZ70blI`UnStrcY(iDz+d13-yq-@yTB&|{6#MC zodW)17x;Ao{t_4XTLt{3F7S5>_yQOG_a*^9%LV@f0>0D*ev5!V(S`lb3ivZ!@DB+1 zvt8hK2>7TA{4N1s?*cz|Li+r1i3@z5fX90Z*^b|n1boZ|zEHql?gBquz%O-yFB9-_ z7x<8XZ*+lQEa015;Ohi@vkQESfNybuUn$_PaDiVd;9FhbHwgHI3w*DDZ*zgaTfm>> zqW{_~;IDGQ|EPd(cY*IGJimW*jSKu%fq#_?{Hp^0QWx>tA>gld!T+{k{})~0Jrp<| zzb+T}90KzB>lzpM!vy~8T;L}O{9kf`FBJIko^ZC~hfm-?#YOp_CGaO*@K*@@vt964 z3H)Dn!M|AGuW-R%C-8T>;ExOZb6xOv2>k0@@OKLQMK1W)3H<9_@NW?K11|V`1^(+@ z@NX3O&vn6nufTtU3;qWL{*Vj)Edqa!3;uqAe}N1B=LP;7UGTpu@K?Fu-zo6_mka(~ z0{=o6{9_ML^MA_5etzjIF8B{7JRg4+x!}(i`0<&ZZ2Nz&z;C(0pDOU*{&oR>wF~^60{$8o_`3!Cg)Z>-3V3`bI@|XDfPlZ$1^!V1f0+yXlLG!c7xm{^ z0YBFT{&~Xl{`WW+_(6gHau@#Z6!@13crOeyAlA$EYS6*&1J&*^MMEOo)6es2=`pZ_ zb)rlQXj}LVa2C?w(MoVD96krTx;^GAvF9Y%B0oK@LQ;?Cn3B})upQLp=~OttJWafp z@L{eD`(ER?7>+T|@yguPS&gU5$duglf1v~3NBo)aS32O!2!CRVS?!qo?I5<}Z`hxA zzY^pV9^?N4?3?k&XG$giTZsP%!t0I6|7{0+PnP!I0eH#&Lc#t4!T#Ccmt=ommi9m4 zfFB_IMC~|jP5*;X@g)A8S>k`=fcKOt!pRxl>dzk>@HJWDFNXVs5`RyY z_?H~;eT0Y8!;%?)3)a~s{{AfSPdVTR2!Fyb_VoVm*MOJmZ<$bkw?kfL`+LiQKkp1h zoT>i3;lRIG;D24e1ZQ1f&WC9pi1%Y$&%j-c!}R9@b4D*f91g6pC$jJ4*UUu ze-t=s#{cq9$^Pe^1;#?6VL%fgj(WH2Htwz~4#yH<90ZWBR|@fqzio$M+{q{(10#BZxit zZWd36m#ZW6jVILQ=@r78?Z->_#|?mK{|kVZ;^%u%8MKwiu>IouSEl_J9xj*P&e;mM zjvEX6CjUafOZ+(xDSjX0$M>&H{@Wb*JLf3iH!|>VaNsW?e(pcMzhv?s2MZh+Vzc~? 
zuTaF$JccFI<>_9+o8?zV_)PZy5b#p`^7_)t51;=t?Jqr2w*RFp?SG2!ru}~>{1Wag z?3@1o8t{_+gJgdn;j#YV?>CwDPlApV^EBhX*sp*vy&INH{y5>M4%6~@8VG;AA=msm zpC1w4^xrdA0e=eD*F2f^|8Brb{%?F(m0tr9q5t^&gXRaAon9w?)BnvOb-s)A^ai$cvqRB;c4|} z2H@@Zr8Lw|;o}OpiyTJ#eS-Z#;x}kKZ&#~){tDM;Jel#U(e@h|417VM0vrtcX8B*D z?Kkk=-zY&1;nDtSg8dsE_AHQRsYQP=dDwQ4`kp!1@Nd+X2iecDJAG&{G|f_QU`udO!2Rz`00)5 zf4u|$;M0me3vS_zM6pmET6<*X9r*j>ivK4W_@8j#-%0#^%zu1- z%xpg!CdvNyG$~*|;q}Ih{|$hb{NMDfia)PE`23j3Kj6S$)2tZp(ZsYh`Ty*|zlHdD z|AEhsnfx1$mHp3aQG{n?;J*RzlK*9YQ2zS}kL8cgZ<+jaj+6PTh+pHi8F_k#wo4uG zorG^U^i6y{;3fN81pDU;_8)M(vfpg~?+`zPXIO$P?4-Aqgiq@~uTXk)8BM74@hbp$ z$^KrlegZTE>mLrv1ykvi+TeuU2NH zaxv|11-w-Lg=jBx?j4R_|5n~vrl=;!e^@C&>2iBs~7+Ay20L zM*?25f3sl!MUawV*Ly!hkcFLX8nTjX8WrlJU;WnlWG5Ez)SWA{-om1 z`(MmU^XvG(LHwrueXA5}n3Kc4#zR(inl@REzpqoB|D#q^hiUWQBEU=b_mTY@35WH^ zg1j`pw!hhdKkthQcs}9v#*E+P4*U)O1B8$Q1VsL@z<&qe(M2kp0j5k^=rMga6|IFZtgm@Z+_#>Hl@aZ-qRHRCtvV88bT?ehtLmLpUry{QXYtzixlG zJMizMg}*m4@ZaLVzl-=cGX7?P|1Agp0b01p&rpBfaNzHKQ33lHKmPux>HoZwGbo%`3TY#6!ujVCXe+}WW{1SrwA3E^&51$Zfb0|N?JLPQw9 z6@vZmIq>hiNg4iA!+tYO-1zoPj0_He=0_}_Qn@4Z#=XY&7_4*Z*len|2w~~DBDxaYR9a9V*xMuzmfQTgva{#C4s-zfxqPr1w41ca05*K%N+Q(5`Qnn z5BZY<|3eP^ebjMLH;^*IU@#($&0f99!j{5Snl5k`m8r(OTd1iTdg#cz@Sgva=QMc|J*@OvLoz-@rx$&7!E z1OHm$-%4QQze(VK$bmoaF$K&t|GeLUzvgY_e-7D?{I>}F?-0K^fA4u*@%y;*u&=A5 z4$Il6$>rBa_({r)R4!)xN&qjFU&A{JxRa1*|5pY3S32+yY*E1PXhSIp;;NSUM#s6xC_}}QjzlHdF2#@i^5jpDz2~|0k9I z`85C3v?l*8fS3I5-L3cw)y97R{{ex&90phrr`3Po3yLw*{PirrOZ;0#DZ>L~IQIV! 
z3j9BJ;O`}Vd}oy>)BpP%`13|9{tm|9C-Bdosr-j(JS-hZ4Ids)8J4s;diw<7&GjcA z;qRsR>r71he+hUgerw5oFA-t<9ue$6W0qWgJ6~3ebQ@mNr`6vXfS33;j#2UB{ErFz z_c-wPzp5yIpTYl)4*Xljrt?1`@Z)>7AWo~loBo=e|M;G;sccewrvIk_UgF>KDFwu57BHdz&j|b<5WngF;QI=f z$^Tsr{Q0@*{{KPX|Mgi8{(qqO=`jLz0NDTe+}>we<$(RF#bOY{L9Xf`OE0S8Si=WWb$7Dc!__2_<8&Lv%p{dIhlXcZpAtg z;>MH7zX0$O|Jp-T{I?Pq5FYto68OiJ z%ls{)6@RAw&jWagzmNEN{9hLMpK{{Im-Qf>kYWA#*ADy*hbsR!Q~9C)e-ZerPwSbrS7aytkeXRWdD)66OA@f%q zr2PMs?yxBzv;LI;UgBS?`IQ-V|BGv>I=g(z?@GVSUvsG9j}V^J)9Qaa;3fXeM=M~b z%Esp3F7Tf*SLW|OLID?RVh8_c0AAuRnWXr&pIHB17x=&H!0$U!@y`bwPiFkT<-lJ> z{9cuf?f;ts|FQFA|2vOTz(&Gr8%+Luz)SwO9IO22_5bez|BVj(JC9bxcWYwWn*8e= z_%|Mx9{;xl{tq1Zd-0#GL0YFa#Gc;&?sDMoBmN#jV)?%#@V^w0NQq6cFS0o?!o<9QgB!75_aM+Rw8N{P`y z0`QXmC54LLM>zEVLxKNA2mZY2iXY!O;mM5upB(rbh@aPA57?lKQ}=6g=F9$95%GFX z4*MqmIe?e^-$ndTz3qhl;}~u7?{MHBApYkw`2SZ2{<0}5{`~r3tiT@($^Q4vQp7Y3 zA}`YYuK>K{f6r$X{}!?z{l_+D`v1BE|G*gvI9X-m$Uo@7znK03Cg;x)_;0L~{m(l` z@jnL`p3L%J2YAVUZ&AAc2MYY}Iq>f+Q~cLu;D5(~fADn0e=Cds!2R!<{68jsbNpB~Pw|`c zX9}lBZx>c6`%U~N!aq;^dZYc<_( z@PBoo?0^5oivOP~8%O_d0=(q^mNEq#pn^vJqXqtP)eih$P(XhMeh=U!{*AL0kk@~F zmOutH7Ne+8Onbm;3fb2 zC~*0#{JjGI4;}b>@L^*}RE@OzY2*L*9r(A-SN@Y5_VvqTf&b|9W&itIvy1=XfS3F) zt4xo7fxv%_1Hb*SD|nrgA^s~I_y^8Y{4H$$dy>F^^aZm2d7a9CugbI zf4ac`tOI`^UHIz%1Fl10`u~&zzwa`|zlHJR|Iak*Pr+gpf3y51^(f#ZO-x(LE;WC+ zhw$e3Uq<-_6=yx&GJ>s{u8>bIy~=UkG@Ke=99~ z^${4$Z>GTiT?hWMuPNg78S2ls9QbdoQ^3u@i3#~j1pfClzfwzm_urxTznqe&9n=4J zG{2!&wM6mr^`kQc{!d*Dzl#M}!%a`kHx$3FZ#sK@oZ153j=xr*?b-&FqaNqc|FZ=C zqX3Ug=K66b@totxqBK#Q0o2N~LH{+%4*1C(d{RY05?Dvr(+K=yYXuS68HV6Cr zh+pezKGU9GIN%2eKV1W9>otx?U!vk?`k&vc%FpE2_UmjuaPYsM{11>K`aehTe;)Ch zvi&v056j!bl4*Yz;Z6HD65gz@nqOzS3h*7r*?$_~CHuDu_Ma!%pCo>>{(Ff(Q~dFFdL;WB zvb6ut4)*62K z$rS$$fS2OGSg;@ef2|(>^!WR_gZ*COpJT+^jDLBZTz?v}wErx?OZKl7?7v*F|0d!$ z>rZEv_HT2re@mA3zv^KBX2E{^|FvfP&yLFVXMp%KwcibdH|tL!zAOSMQ~kLC@KXE- z1^e;;*P8Zkcd);V_%qo*yI!t8YqPZfY`{zQ=f}s`{`>52b^Y2V*nb=GoAswROZ#^^ z*gu%1{o5Vv4+!=z6YQV2M6N%c?`GHk-$8h@{sai0ss7vscq#rJg8eH5``>fmUrhWZ 
zRDOD+>z_V;$ANz%@%OOuze?b5YLM$+PnP^Kz)Sv@G^+Z?`L7oE|LMR#K>T**+F96o z**@<6!-0Pb@p}$d0KEQPCGdB}RGT)-f8hVA`jcbuna3|>z^hz_GBJ-!F5fr)uVlcR zeCF|v40zLD^LXCUJ=<^cna8~u@Ft&m{O=5Slg~U}6qoxCUH(vIh9wh!M5Byz@Z-wu7{D7<+pC-wJrC{cTyH+8@7v)GgHi^P0$hgJ&o4W8OTO#}{h6;pi^H zXX-z{O!!Z864=kVNBK|9H+5F?>+LNL_Eq?>>6X3uw-?NzT zUad%5ZNC(h$Z~ao0arP@Q)`G-?pO7iZnOX z7e!iHikeUN`n*F)?W^yZ)2G`fic4qBnrZLV9JtCKnmc;bP>!QK=h$meoDcW=<_t;7 z?AzlSH)>8~F!^X;^{(99 zFFghs!rrLdHJ1P^_rCKzYZn2s`!E|(_&7Yi7U+%_W=0F)?Zps6-ZuMP*%C-)!Z(uj-ljVxg}31 zOa6^!+m_%fALY6C9aaFQ0Q7&5EubtxZ@L#16o$I%z=|^$6$Em-?g!#v>GQc=SM1)s zJJ4NGPz3~m^(~_m3E8)%2F~UlPu?Wov`<#h7Q)?)S)X}UL85RiL<@59rLxGqZ*0N# zLjvp11|k)F$Oj=C?6Rs0oN}05c)4pHMIWwx6pQqd%gQe;zpVTV<<=v)_A@Y9@HrYJHegAA%ZLIIidA+1vU3$rPEO??h7SfdGO?< z$D_T$dT}`9BA@v;P(yUbmsAwA=B_zg9nBpDhi$oQis*1O942zt98ZU1;IKV+O&%SN zg~MgJYev!GI5=FMyXGGsVEA=a)W&QEmp$9Q4yxK;@bQ}#(^7l8-mPpLmy{ zA1XL3uzpcN9_tLiOsHoNl!LL2=R5(e4h>P}vHDnAQIMCr0czx;g8Woal)k*&DP09#+=ng=vj`|f6#G!z-I5dN4&()%=qkWx1Th7= z=MDyv_ZIZwelk(e5=gEaYaf8p77$_zbYC-A{B9r_D(DD}+RbgithOp85AF}JeOnLF ztNWlbbtwvE5vuNFs=M_5>b^3=cvQ7}c$HHgp6Z(V17-{Z7uw*8f(C@=!$BxH7#cN@IG_^y-3uVZq2!$f*slavzYpE^|HAMOd(dq^ z3473-{hqa51$HjbTpCc>3%L~ks`%;IkB{|2`Q51u?Y`H}p3KQXb98SE4G_vzbwudU zCOG-H+SrA-7LL)pUfjZYdyDN74{$1n(b)(mF$I!wXwJ#81>5j9F|d7N4T4>0r*>$t z!UVd{Dky$8)P0(bp!#tLWCC?B7q**W8|=O;52F?AuFOY*TR~>4gC&?>abG>!1kynF z9H>3FD^sB2Xp;h}VC+#ADecNBi z7RwJ0(S*gYW0xy#!M`%QFiB{iOg@hC#gx6WBLFp~7jnQcwYv%`Z-kW-)AjG*oZ3VOk! 
zwW9(Kam$^uP8HCZD+|hSj8qP$#_Cj5-qcMDJtOmA3KZy0!0Z9W3LpjRq2!YH+65Ja zzGtZP*Nw$87*?aYFNIl9@^)3KLp?{Tc2SGQ!c}yup|>7|d{sEM1@1osd?iEj0^JJ- z->t~=j?3-(%lkNNg5Ma5Gas<06yz96hUPJLKZ}~!KY({>{g}{Skx_OZ1rcjd8QcAY zkZ~sxGDs?HEdlVp1Kp^>O4EmWcI<8+tM-*`P!YGlDNH&W?D;7hHKxGuG}fLiwJ(R6 z5)M1VW=bBICAQZFx}g)>HuK-RclV^~5%eGEcxrZ_`bm3;r+NgIp=U7AhiIS=b>B%< zOlfcX%X_=^kO9%y{wyE#?U6Y7h&|M&@2e?*lDpmKc`B^Kcw8M=y`v)lV=Ii}sVf2; z6}l&2Uy5_cuB)KK$4PhzP6^72pANvRpd=4Nu4dZMhG7n7&$zKQdnx*h3QC}jsd7?{ zA12qihd?=ERydPFZJK7|47edLw7w-L1jBHSJ->elC_+#J%0h5G5ZIoVnzuE=l>ha_ zLYVAB!}?pG`)Y4x>5gFT+-C!8o=zMbN`7ctu%@s5_{u3eHbX9*!O};sJ|eIgrl_mn z1GC*nui0FY99YxW@uOh!4Y;Df%Z>IQgi3)sac;1?2{R6MFY|^3q2xQZL80{>zF_k4P_hF$1?&w= zx8`=;fg?_Gt}Us89q5rl1p&JcOTH0Ye}T9B@wsK7v7iupdHA{%fKULKvFC9Z3#qav z9}6bY%-k+~r7}>u2ppLR#fRAy=5~DvgbzVgh3G@y2hN3(@7hK|sY7oG@4?xThH<^A zELi$d>RJG%xO7=l*7bT~70%D%1<+ccz`=-GG@>3#u^uM6fz7jn3o5M2@;RxhPz5@R zt@4TrF!a)ew%K?k3rCmfHVVZX0KSPB5g6C?H56pWW}9753-L}ZDnTEbDDXlp)R8E` z3t!u!PmQ0b`!iD|*^MwM|p%Qv?R-*(m08Elycz%f>}yTDAUYOhmOYk{ivSSZ=z z9ja<=pdC#5Z3$F0zYjeNlt1n-gV7o4+WBw>FNy3+=j7FePSp&XS#H-m81>S_3K9p{ zoy3)pIaRQO3s{GU^f38k6|8X&73@&k`rP~aRu7Ka4ns6v$KrMGqJoChRYvZb7;wRr zMu*XlCwB&?3T2KV_*6~Tk5{X8!X+O_!JDlFlnF~ z7fSx~!2kfDJFsoowgW~q3 z2l(@S+rA7o1iCLT43usQK+lys_xISd>-P=~fo(<1t6q*~3#iMoJM6-8EG~takC8DkM;YidQ zuB&T}wzYZVv9<(Id1}LLv53`{Xoco?#u-*6F08>(dnDm)YLCagP0b1KvT!_B=fUcz zmPqXGVJO(J&P1W2xyfG6G`2%eNGn57O<~Rfmw18X3&Up|(gh*SjBs{BvzMW1N6I#K zaWHHu2a}f698OK^aC%7j49(`UnW3ewiBqI2Q?qG%?ukQ&J#SK1CphOsFY~jXcrt6Z zRYIk}p~(*Zwu|AY2g^wA!F3Vs+xgwQM@{M(Gwz2IJbhyz>f6Vi1G}$`huz)@D24N5 zC&3YHXFfG*+$$438^(_N>O{{s$Bw&eqG$E^aX+5u`O)}s?@jc4J7?U36Fq;-nepq1 zo(~Tk_s5BzUw&%w8xs*Z?v_zk+F5wRkk#9x$31zl=S$cpX|?PXNWNs(5}Yi-(!$Rm!_Lr@U4haqS05U)_9Ct;C&#L47lH{37%-*y={0?!+`vCVxxrt;U;s6G=Ov-! 
zFZm*9mmSi|DQ^a^MY2_^>R<;ASsAHF?%MtfHNJ$_FVC?@sZeQ4exeu#0^9IF_rV2V zN~pU7T2O-*CvM#_P&^8K1&_hqZ5KhVJ)q<-es^03?A!glzqoJPQaC_i1hy*Pf%Wrp zg30G-86L{jj(ni>@!T$42M=}E=0LPze)m?WdvvIK0ZwTvlWzdc+jfcE0d|LyzY8Uw zfC)zkrh?FQv516lS_e~sHP_pE10mhXuR26^@GGx`FfW6w5=Vz{4hnfZH7o}RJ)9W~ zO&L(5!>Ysd$Z!hS2U)NE3=X@=tuXb1a1R@MJAo9e8)CiY%iDjVS0q>503cjcFTE^3 z@hH@bAzHX@>TZDWc3^lbOJ9S*%bsdOgPE>M8?2gCIpwwBwXkZ66YYNh5gfp`J<%S- z+0Tn`RNNO_eNDb6kn4XWm|FoA=6|+bU>8Uk%oJ7?dUCsP5*FyTyrJ&Pp%kaWni$Nm z;Nsybe(`XCuCqY8BX}*0tVnvbP5Qn~I>EkdSkZ_v0G8rD76?_Jz)x>tAkEJf@UsQ{ ze5|tcf5Fc`;vfxvQfCT&9$^P?Cq@5YDz+1#*p5~8`$K33Lu|A_T!!h%L3~U$6ce-00|7}a2Fo_ zKTJ4c;h1jK#+q;n-BB{rYKeyv_06q~C_^1P&6_jl3~%B2we3xbc5iWU(extUX~pgK zVevInrxy99Pa(0bJ{G?5suFrx&%6cadth0k4`nd#w-esi!Zhm2s?b8usE)h|U&F&= zjyZTj4!%ZJ1qTHvf%J0#A=Wc@V%`m-XHU$(e$4EN-t}W=Pb^$Fu6&~Jy7A=`OIA;q zH?e=zm}f>EII#p|P#{_M}wG&J&xZQI(R7|WI{TMJ-O!QR@FNNSio|OQvqr8?w zrBHbVg)+76$Gq+rtq&YD`X6v&Zdy^#J81NUaAIEC31C+-T#3Ap7LU&qL%Uv8Ox!eT z%;vG86{#5ZQ=T=DuZrQOVf6~@&ZVSt3haBJu3R_zV!O=o&?!9(C&qgIjssm<8e_Mv zG%>U>s4PMn<2g`}Vw*Id_)zzuM>G8s2ss(HqdZ5a^1=44(-;q~2c)Ml_MPtGa(u=D z^Vj)e-P37|eK%Ym4QoTFdusK*;9i;N@F}n=-t%z$FMh=$?dWL+3(;p zip7nY1MwO1S7-Esy>oy*Ol@Bwd0|STcK&K_=)nqhZil^ZchXGi@%)|ivESk8UHgDr zr_SO}|9qO&2kbL3od@z0M(dty=vj=VAUE?yeSC#ol3(p0?{bi1pCaXRtAiY0-IVn4 zkz`4Jw}bp=4su-0kn+K2kR&<&?u#Tp*+G7RL;fWW`tv}J<<&>!q{kF|Uj*elNv>;5 zoaA|S=K||=SXhwo`Oe9#-C3)z)_G3RK$qxoTNsd1WBFWDMxx|Z0lapC{x5qHB z=TVTOU!7z>j$fEQ4aZXcCpyUIJIK2oCM`7a#gA3Dg-fp|}5{i;2VL;U{;a><_YFoaBI{jEJ#gZ?8Rm-OEPIhJo84JA0X zVmbl3NJ;->2YJjveyxN2Hk3o1Hq%`W`X1}z;#rY~Xk@9Sr@pY9RW(0SHNT-K(j2b? 
zh0qLK?~2>1Z7uOwf~zi^zo0rX%d#RJ9mU1PQ~CN=FjAc;E{-&WTdhQEIF@J&M*P)b z`11R!QkgH96>DA69&W8Go?b=s?Afi&OQWsD(^KM zu&6a!Uu0QJIy$VDXlq+@6D;;6R#?k?p0?)5(rCg1>xZ#=yu@hpz`|fW+5{Qa#jBPz zdg@!FQGF^BZ*GfvAcxlGR!?KJG1A!L0hbf4k(L!!q+zKi)*MM-k#I+{jD9IDOSVSC ziKu^JW22pW%L>Q}N~_ee5)G}*%dL3x5?IHy>7g>%)M2P2z--mSQmDr|ziByKAz2Hf zZS9RwjLB4RY}(Y?Sfadbw%^8FJR{Q59;>UmyxPJ)RD#-oRn3-TB)Ro2ieV}i1J#4` z=FY8#3&BO@vqOI5SaiYOW$L%F5Q8B*qTxn|>VV2XGc(>C3CE)q#dfvWJKlW*`=~;W zSi4k?mBgCcENB+dId-TasMU#5)lA@w)IjX;E#e{2`f4#8z9F0vlDY&%tU(SW)3IIq ztK?EDwp1DJvk_FRbM~cJXF(%tThSEpS4E(WLA4wfn-bpTWD}aY^HS_B2{hh0UV8hdorf&5_MCR0#_X6GuZL--PF zVJ!@B(P+@waOaV*Ix)Q^9BU2E2ghpt5PhFj9gRmLiQ=|Iu(|{SpO_wpAvp-=ar8uy zf9iCoT@Yj{Nvpjnc13&CYH5a2sfO3_?Y=x^msJ;q?!GbFl)#2FByMW1i`tEx%JF0H z1BW8!M*~bWxHHv>snfJo>Q0&!gFA2GM02Yh2+7%4Q)XwgpL0NHnwDex1yf2>rl@kYFTw{%~k_Y#G^3$MmoY4 z4A>notFcoBP4HkkZ(oh$VlF25DIYtb9IutM99MZUp&YM%wR{3xr(rVXaV9?&&TIW7 zlOMsRea80=l?L%_cHlWG^a&7Phj#{OwRMKWb(;O-(vF5 zFgd;#i3#)P@>iLh`@4h5xxXjEbsZ+mrvN_MFZwA%TYe&w(@#vL>36bxxc==VM|ylO zLg)WYrhgKX-^Ju7Gr1oPgoGPh^U?W)NRIjAI~ZF2d8YqaCU0eOoX_jkpF_^b2iKWK)5c& zguFZsev+e|JPskI&*N|>)93a)$K>ZgUfQo09rBp~*QJ<{_jB;k`j;>{Z`ZXXH|<}> z^ov{Ccly7X1{bZ)93wCE3J!| zysJo#`QW`LZO_|GpZEWjw8W12aQPZ053+n-X7agAel)&-1_|>y7e1PI4wLh8zm>_S zGyP3W&h_snIr@dG+dBV;n0`K!|C7mix#ZwWa*!~eBjBU+Sx#~@4$nBqcQQGTPk>7sTmq zcMbX3Bp=P%7ux?bl7}b`Sg%mtOZF@_s$S z@-+2#Iq2IR64+tt(=^16|LM$5>=Q7dKDTq4Eln@?3X-EfFZcbO|3b#g?Yx}HPk?=0 zzCU-6C`8+tU?KuHE z07#}jzV@T#6|kfAX?I;8vtv?xskdUSlK3aY<3L&9A z3*e*WD8qzq&`@a~>o1fS>I2x~wGt-G|0sJ;eQ6z(<3}f~W0+7zmjd?jO8Z;M(I%bN z(2myk((z^{$8-Ar=97k8-`^ZGRSSl~fL&qFdy-a=pVR3U7lOv{+jP)})`lbEH zXN)kRE~=h@2as-Ka@4Q2r5+FdA7)IbPeYA;tb>Sh8ft8LCki0p20!|~Sdz&x|E2K3 zr2pqDo<@80^9(N-`Zu65q+LuttUeUAt!PYyYduAR`F(PhC_f1%j>PKYu=)Zw8i(pjTO!<=Xm24A-ng*Bt*v3GKdOr9ZBe8RR_960 zu6vM2k$q>WXixVO;dW>vF6w%U;H83S5j<6R8c;@-((#g}_M(>7X1IlzKuWB}(FUu& zH4L}wik7rCgJmO-@Lwz*BIHv>GQ5Vv^cnbI->>^jy>-GbLJ%a~hakQJJ{qUDPWT$o z#l-P=J)?1Y>x7Tn=;3%AH#AOfy&%J`a6Ei?>ZF4PnkVaXdR)}UxJHY4m1zNOJ8-fD 
z>DB|)7X8QmQTP3Lt%&I)_#i(N!>}}H@amJ)ZRJ7fwk0yNF-M5Bag5G5PU1 zIf>tXs2KG1b2_kVC)0nN-$?vrXDj_awXypztdb^w7|IR#&HBHb9+4b27Xdxg#iY0G zgg5Jd58+Qy>Zxzj{uaR7_NO#x2Nz!<@$$nWG3{Rqc(id0Dznr_{90e1(#Lwc+JV1= z_;owO^22?N)A9}EKWv7jEyRy~9#1-MT6R0(&F-$B@LsJ*TOB{W{UPDa-(K69!GE3Y z_W>`}pB};gNdPka|19X&UPHrXSSlpE zcbIg@el5o5Br#7DA0WK>zt^QvKaX@zd)KSbt7{kH+izBXMKii;u%^_zgB% sASi%+ss7e`c71$pYV$+5Sj7AK1ss&mMJ{_3tn{q-zNV51Lk&dMF0Q* literal 0 HcmV?d00001 diff --git a/ivsr_sdk/samples/CMakeLists.txt b/ivsr_sdk/samples/CMakeLists.txt index 548c002..67af763 100644 --- a/ivsr_sdk/samples/CMakeLists.txt +++ b/ivsr_sdk/samples/CMakeLists.txt @@ -7,7 +7,6 @@ set (TARGET_NAME "vsr_sample") set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR}/lib) -SET(CMAKE_BUILD_TYPE "Debug") SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb") SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall") @@ -17,7 +16,7 @@ target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/. target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../src/include/") target_link_libraries(${TARGET_NAME} PRIVATE ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libivsr.so) - +add_dependencies(vsr_sample ivsr) find_package(OpenCV REQUIRED) target_include_directories(${TARGET_NAME} PRIVATE ${OpenCV_INCLUDE_DIRS}) diff --git a/ivsr_sdk/samples/vsr_sample.cpp b/ivsr_sdk/samples/vsr_sample.cpp index ccccf8c..865a1fa 100644 --- a/ivsr_sdk/samples/vsr_sample.cpp +++ b/ivsr_sdk/samples/vsr_sample.cpp @@ -39,10 +39,11 @@ const std::string keys = "{save_path |./outputs| Optional. Path to a folder to save predictions.}" "{cldnn_config | | Optional option. Required for GPU custom kernels. Absolute path to an .xml file with the kernels description.}" "{nig |1| Optional. Number of input data groups for inference. }" - "{normalize_factor |1.0| Optional. Normalization factor is equal to the value range required by models, default is 1.0. 
255.0 for Enhanced EDSR, 1.0 for other models.}" + "{normalize_factor |255.0| Optional. Normalization factor is equal to the value range required by models, default is 255.0.}" "{scale_factor |2| Optional. The ratio of the size of the image before scaling (original size) to the size of the image after scaling (new size).}" "{precision |f32| Optional. For inference precision.fp32:f32, fp16:f16, bf16:bf16}" "{reshape_values | | Optional. Reshape network to fit the input image size. e.g. --reshape_values=\"(1,3,720,1280)\"}" + "{num_infer_req | | Optional. Number of infer request number.}" ; bool checkPath(const std::string& path){ @@ -188,6 +189,33 @@ bool commandLineCheck(int argc, char**argv,std::string keys){ return true; } +void print_tensor_desc(const tensor_desc_t* tensor) { + if (!tensor) { + printf("Invalid tensor descriptor!\n"); + return; + } + + printf("Tensor Descriptor:\n"); + printf("Precision: %s\n", tensor->precision); + printf("Layout: %s\n", tensor->layout); + printf("Tensor Color Format: %s\n", tensor->tensor_color_format); + printf("Model Color Format: %s\n", tensor->model_color_format); + printf("Scale: %.2f\n", tensor->scale); + printf("Dimension: %u\n", tensor->dimension); + printf("Shape: ["); + + for (uint8_t i = 0; i < tensor->dimension; i++) { + printf("%zu", tensor->shape[i]); + if (i < tensor->dimension - 1) { + printf(", "); + } + } + + printf("]\n"); +} + +using IVSRFunction = std::function; + int main(int argc, char** argv){ // -------- Parsing and validation of input arguments -------- cv::CommandLineParser parser(argc, argv, keys); @@ -203,7 +231,13 @@ int main(int argc, char** argv){ bool save_predictions = parser.get("save_predictions"); std::string save_path = parser.get("save_path"); float normalize_factor = parser.get("normalize_factor"); - + const float NORMALFACTOR_MIN = 1.0; + const float NORMALFACTOR_MAX = 65535.0; + if(normalize_factor < NORMALFACTOR_MIN || normalize_factor > NORMALFACTOR_MAX) + { + std::cout << "Invalid 
normalize_factor value! Please enter a value between 1.0 and 255.0."<("scale_factor"); @@ -245,7 +279,7 @@ int main(int argc, char** argv){ // 1. set ivsr config std::list configs; - auto add_config = [&configs](IVSRConfigKey key, const char *value) { + auto add_config = [&configs](IVSRConfigKey key, const void *value) { auto new_config = new ivsr_config_t(); new_config->key = key; new_config->value = value; @@ -273,10 +307,40 @@ int main(int argc, char** argv){ auto reshape_settings = parser.get("reshape_values"); if (!reshape_settings.empty()) add_config(IVSRConfigKey::RESHAPE_SETTINGS, reshape_settings.c_str()); + auto nireq = parser.get("num_infer_req"); + if (!nireq.empty()) add_config(IVSRConfigKey::INFER_REQ_NUMBER, nireq.c_str()); + // in format "," std::string input_res = std::to_string(frameWidth) + "," + std::to_string(frameHeight); add_config(IVSRConfigKey::INPUT_RES, input_res.c_str()); + uint8_t dimension_set = 4; + std::string model_path_lower = model_path; + std::transform(model_path_lower.begin(), model_path_lower.end(), model_path_lower.begin(), ::tolower); + // basicvsr has 5 dimensions + if (model_path_lower.find("basicvsr") != std::string::npos) { + std::cout << "\"basicvsr\" is found in model_path." << std::endl; + dimension_set = 5; + } + + tensor_desc_t input_tensor_desc_set = {.precision = "u8", + .layout = "NHWC", + .tensor_color_format = "BGR", + .model_color_format = "RGB", + .scale = normalize_factor, + .dimension = dimension_set, + .shape = {0, 0, 0, 0}}; + tensor_desc_t output_tensor_desc_set = {.precision = "fp32", + .layout = "NCHW", + .tensor_color_format = {0}, + .model_color_format = {0}, + .scale = 0.0, + .dimension = dimension_set, + .shape = {0, 0, 0, 0}}; + + add_config(IVSRConfigKey::INPUT_TENSOR_DESC_SETTING, &input_tensor_desc_set); + add_config(IVSRConfigKey::OUTPUT_TENSOR_DESC_SETTING, &output_tensor_desc_set); + // 2. 
initialize ivsr ivsr_handle handle = nullptr; auto res = ivsr_init(*configs.begin(), &handle); @@ -285,6 +349,12 @@ int main(int argc, char** argv){ return -1; } + tensor_desc_t input_tensor_desc_get = {0}, output_tensor_desc_get = {0}; + ivsr_get_attr(handle, INPUT_TENSOR_DESC, &input_tensor_desc_get); + ivsr_get_attr(handle, OUTPUT_TENSOR_DESC, &output_tensor_desc_get); + print_tensor_desc(&input_tensor_desc_get); + print_tensor_desc(&output_tensor_desc_get); + int nif = 0; res = ivsr_get_attr(handle, IVSRAttrKey::NUM_INPUT_FRAMES, &nif); if(res < 0){ @@ -318,9 +388,9 @@ int main(int argc, char** argv){ /* how to check image size and model input? */ // refer to cv::dnn::blobFromImages - cv::Mat inputNCHW = blobFromImages(inMatList, normalize_factor/255.0, true); - int sz[] = { 1, nif, 3, oriHeight, oriWidth }; - cv::Mat inputImg(5, sz, CV_32F, (char *)inputNCHW.data); + cv::Mat inputNHWC = inMatList[0]; //blobFromImages(inMatList, normalize_factor/255.0, true); + int sz[] = { 1, nif, oriHeight, oriWidth, 3 }; + cv::Mat inputImg(5, sz, CV_8U, (char *)inputNHWC.data); inputDataList.push_back(inputImg.clone()); int outputSize[] = {1, nif, 3, oriHeight * scaleFactor, oriWidth * scaleFactor}; @@ -334,18 +404,20 @@ int main(int argc, char** argv){ #ifdef ENABLE_PERF auto totalStartTime = Time::now(); #endif - for (; id < inputDataList.size() && id < outputDataList.size();id++){ + + IVSRFunction process_fn = nireq.empty() ? ivsr_process : ivsr_process_async; + for (; id < inputDataList.size() && id < outputDataList.size(); id++) { auto inputImg = inputDataList[id]; auto outputImg = outputDataList[id]; ivsr_cb_t cb; auto startTime = Time::now(); - callback_args cb_args(0,nif,startTime); + callback_args cb_args(0, nif, startTime); cb.ivsr_cb = completion_callback; cb.args = (void*)(&cb_args); // 4. 
inference - auto result = ivsr_process(handle, (char *)inputImg.data, (char *)outputImg.data, &cb); - if(result < 0){ - std::cout <<"Failed to process the inference on input data seq." << id < outMatList; imagesFromBlob(outputNCHW, outMatList); // save group - for(int i = 0; i < nif; i ++){ + for (int i = 0; i < nif; i++) { std::string filePath = save_path + "/" + filePathList[idx * nif + i]; #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "Saving image: " << filePath < QueueCallbackFunction; - class InferTask { public: using Ptr = std::shared_ptr; - //construct function - InferTask(char* inBuf, char* outBuf, QueueCallbackFunction callbackQueue, InferFlag flag): - inputPtr_(inBuf), outputPtr_(outBuf), _callbackQueue(callbackQueue), flag_(flag) {} + using QueueCallbackFunction = std::function; + // construct function + InferTask(char* inBuf, char* outBuf, QueueCallbackFunction callbackQueue, InferFlag flag, ivsr_cb_t* ivsr_cb) + : _callbackFunction(callbackQueue), + flag_(flag), + inputPtr_(inBuf), + outputPtr_(outBuf), + cb(ivsr_cb) {} - InferFlag getInferFlag() { return flag_; } + InferFlag getInferFlag() { + return flag_; + } double get_execution_time_in_milliseconds() const { auto execTime = std::chrono::duration_cast(_endTime - _startTime); return static_cast(execTime.count()) * 0.000001; } - + public: - QueueCallbackFunction _callbackQueue; - InferFlag flag_ = InferFlag::GPU; //Default will use GPU to do inference task - char* inputPtr_ = nullptr; //input buffer ptr - char* outputPtr_ = nullptr; //output buffer pointer + QueueCallbackFunction _callbackFunction; + InferFlag flag_ = InferFlag::GPU; // Default will use GPU to do inference task + char* inputPtr_ = nullptr; // input buffer ptr + char* outputPtr_ = nullptr; // output buffer pointer Time::time_point _startTime; Time::time_point _endTime; + ivsr_cb_t* cb = nullptr; }; #endif //INFER_TASK_HPP diff --git a/ivsr_sdk/src/include/engine.hpp b/ivsr_sdk/src/include/engine.hpp index 2c46daf..e2a3272 100644 
--- a/ivsr_sdk/src/include/engine.hpp +++ b/ivsr_sdk/src/include/engine.hpp @@ -1,16 +1,16 @@ /******************************************************************************** -* INTEL CONFIDENTIAL -* Copyright (C) 2023 Intel Corporation -* -* This software and the related documents are Intel copyrighted materials, -* and your use of them is governed by the express license under -* which they were provided to you ("License").Unless the License -* provides otherwise, you may not use, modify, copy, publish, distribute, disclose or -* transmit this software or the related documents without Intel's prior written permission. -* -* This software and the related documents are provided as is, -* with no express or implied warranties, other than those that are expressly stated in the License. -*******************************************************************************/ + * INTEL CONFIDENTIAL + * Copyright (C) 2023 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, + * and your use of them is governed by the express license under + * which they were provided to you ("License").Unless the License + * provides otherwise, you may not use, modify, copy, publish, distribute, disclose or + * transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, + * with no express or implied warranties, other than those that are expressly stated in the License. 
+ *******************************************************************************/ /** * @file engine.h @@ -21,65 +21,92 @@ #ifndef COMMON_ENGINE_HPP #define COMMON_ENGINE_HPP -#include "utils.hpp" -#include "InferTask.hpp" +#include +#include #include #include -#include -#include + +#include "InferTask.hpp" +#include "utils.hpp" using namespace std; -template +template class engine { private: - // Function objects for type-erased calls to interface methods - std::function init_func; - std::function run_func; - std::function wait_all_func; + using InitFunc = std::function; + using RunFunc = std::function; + using ProcFunc = std::function; + using WaitAllFunc = std::function; + using CreateInferRequestsFunc = std::function; + using GetInferRequestsSizeFunc = std::function; + + InitFunc init_func; + RunFunc run_func; + ProcFunc proc_func; + WaitAllFunc wait_all_func; + CreateInferRequestsFunc create_infer_requests_func; + GetInferRequestsSizeFunc get_infer_requests_size_func; + Derived* _derived = nullptr; public: - // Template constructor binds the provided methods of the derived engine implementation engine(Derived* derived) - : _derived(derived), - init_func([=]() -> IBasicVSRStatus { return _derived->init_impl(); }), - run_func([=](InferTask::Ptr task) -> IBasicVSRStatus { return _derived->run_impl(task); }), - wait_all_func([=]() { _derived->wait_all_impl(); }) - {} - + : init_func([=]() -> IVSRStatus { + return _derived->init_impl(); + }), + run_func([=](InferTask::Ptr task) -> IVSRStatus { + return _derived->run_impl(task); + }), + proc_func([=](void* input, void* output, void* cb) -> IVSRStatus { + return _derived->process_impl(input, output, cb); + }), + wait_all_func([=]() { + _derived->wait_all_impl(); + }), + create_infer_requests_func([=](size_t requests_num) -> IVSRStatus { + return _derived->create_infer_requests_impl(requests_num); + }), + get_infer_requests_size_func([=]() -> size_t { + return _derived->get_infer_requests_size_impl(); + }), + 
_derived(derived) {} + + // Default constructor engine() = default; - // Public interface methods call the type-erased std::function members - IBasicVSRStatus init() { + IVSRStatus init() { return init_func(); } - IBasicVSRStatus run(InferTask::Ptr task) { + IVSRStatus run(InferTask::Ptr task) { return run_func(task); } - // The templated get_attr method delegates to the derived class's method + IVSRStatus proc(void* input_data, void* output_data, void* cb) { + return proc_func(input_data, output_data, cb); + } + template - IBasicVSRStatus get_attr(const std::string& key, T& value) { - // Using CRTP style static_cast to delegate to the actual implementation provided by the derived class - // For this to work, the derived class must implement get_attr_impl with the appropriate signature + IVSRStatus get_attr(const std::string& key, T& value) { return _derived->get_attr_impl(key, value); } void wait_all() { - wait_all_func(); + return wait_all_func(); } - Derived* get_impl() const { return _derived; } - - IBasicVSRStatus create_infer_requests(size_t requests_num) { - return _derived->create_infer_requests_impl(requests_num); + IVSRStatus create_infer_requests(size_t requests_num) { + return create_infer_requests_func(requests_num); } size_t get_infer_requests_size() { - return _derived->get_infer_requests_size_impl(); + return get_infer_requests_size_func(); + } + + Derived* get_impl() const { + return _derived; } }; -#endif //COMMON_ENGINE_HPP +#endif // COMMON_ENGINE_HPP diff --git a/ivsr_sdk/src/include/ivsr_smart_patch.hpp b/ivsr_sdk/src/include/ivsr_smart_patch.hpp index ab0e17b..8886eb0 100644 --- a/ivsr_sdk/src/include/ivsr_smart_patch.hpp +++ b/ivsr_sdk/src/include/ivsr_smart_patch.hpp @@ -35,9 +35,9 @@ struct PatchConfig{ int nif; int dims; PatchConfig(int w = 1920, int h = 1080, int pw = 1920, int ph = 1080, int b_w = 1,int b_h = 1,int s = 2,int n = 3,int d = 5)\ - :patchWidth(pw),patchHeight(ph), - block_h(b_h),block_w(b_w),scale(s), - nif(n),dims(d){} + 
:patchWidth(pw), patchHeight(ph), + block_w(b_w), block_h(b_h), scale(s), + nif(n), dims(d){} friend std::ostream& operator<<(std::ostream& os, const PatchConfig& cfg) { return os << "PatchConfig [width]:" << cfg.patchWidth << " [height]:" << cfg.patchHeight diff --git a/ivsr_sdk/src/include/ov_engine.hpp b/ivsr_sdk/src/include/ov_engine.hpp index 74e0340..147e5d5 100644 --- a/ivsr_sdk/src/include/ov_engine.hpp +++ b/ivsr_sdk/src/include/ov_engine.hpp @@ -1,16 +1,16 @@ /******************************************************************************** -* INTEL CONFIDENTIAL -* Copyright (C) 2023 Intel Corporation -* -* This software and the related documents are Intel copyrighted materials, -* and your use of them is governed by the express license under -* which they were provided to you ("License").Unless the License -* provides otherwise, you may not use, modify, copy, publish, distribute, disclose or -* transmit this software or the related documents without Intel's prior written permission. -* -* This software and the related documents are provided as is, -* with no express or implied warranties, other than those that are expressly stated in the License. -*******************************************************************************/ + * INTEL CONFIDENTIAL + * Copyright (C) 2023 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, + * and your use of them is governed by the express license under + * which they were provided to you ("License").Unless the License + * provides otherwise, you may not use, modify, copy, publish, distribute, disclose or + * transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, + * with no express or implied warranties, other than those that are expressly stated in the License. 
+ *******************************************************************************/ /** * @file openvino_engine.h @@ -23,22 +23,24 @@ #include #include + #include "engine.hpp" +#include "openvino/core/layout.hpp" #include "openvino/openvino.hpp" -#include "openvino/pass/manager.hpp" #include "openvino/pass/make_stateful.hpp" -#include "openvino/core/layout.hpp" +#include "openvino/pass/manager.hpp" -typedef std::function - CallbackFunction; +typedef std::function CallbackFunction; class inferReqWrap final { public: using Ptr = std::shared_ptr; - explicit inferReqWrap(ov::CompiledModel& model, size_t id,CallbackFunction callback) - : id_(id), request_(model.create_infer_request()),callback_(callback) {} + explicit inferReqWrap(ov::CompiledModel& model, size_t id, CallbackFunction callback) + : request_(model.create_infer_request()), + id_(id), + callback_(callback) {} void start_async() { - startTime_ = Time::now(); + startTime_ = Time::now(); request_.start_async(); } @@ -82,9 +84,10 @@ class inferReqWrap final { void set_callback(std::function callback) { request_.set_callback(std::move(callback)); } - void call_back(){ + void call_back() { callback_(id_); } + private: ov::InferRequest request_; size_t id_; @@ -93,62 +96,84 @@ class inferReqWrap final { CallbackFunction callback_; }; - class ov_engine : public engine { public: ov_engine(std::string device, std::string model_path, std::string custom_lib, std::map configs, - const std::vector& reshape_settings) + const std::vector& reshape_settings, + const tensor_desc_t input_tensor_desc, + const tensor_desc_t output_tensor_desc) : engine(this), device_(device), - model_path_(model_path), - custom_lib_(custom_lib), configs_(configs), - reshape_settings_(reshape_settings) { - init(); + reshape_settings_(reshape_settings), + input_tensor_desc_(input_tensor_desc), + output_tensor_desc_(output_tensor_desc), + custom_lib_(custom_lib), + model_path_(model_path) { + // init(); } - IBasicVSRStatus init_impl(); + IVSRStatus 
init_impl(); + + IVSRStatus run_impl(InferTask::Ptr task); - IBasicVSRStatus run_impl(InferTask::Ptr task); + IVSRStatus process_impl(void* input_data, void* output_data, void* cb = nullptr); template - IBasicVSRStatus get_attr_impl(const std::string& key, T& value) { - static_assert(std::is_same::value || std::is_same::value, + IVSRStatus get_attr_impl(const std::string& key, T& value) { + static_assert(std::is_same::value || std::is_same::value || + std::is_same::value, "get_attr() is only supported for 'ov::Shape' and 'size_t' types"); - +/* auto extend_shape = [](ov::Shape& shape, size_t dims) { if (shape.size() < dims) for (size_t i = shape.size(); i < dims; i++) shape.insert(shape.begin(), 1); }; - - if constexpr (std::is_same::value) { - if (key == "model_inputs" || key == "model_outputs") { - ov::Shape shape = (key == "model_inputs") ? input_.get_shape() : output_.get_shape(); - extend_shape(shape, size_t{5}); - value = shape; +*/ + + if constexpr (std::is_same::value) { + ov::Shape shape; + std::string element_type; + std::string layout; + ov::Output node; + if (key == "model_inputs") { + node = input_; + } else if (key == "model_outputs") { + node = output_; } else { - return ERROR; + return UNSUPPORTED_KEY; + } + + layout = ov::layout::get_layout(node).to_string(); + shape = node.get_shape(); + element_type = node.get_element_type().get_type_name(); + memcpy((char*)value.precision, element_type.c_str(), element_type.size()); + memcpy((char*)value.layout, layout.c_str(), layout.size()); + value.dimension = shape.size(); + for (auto i = 0u; i < shape.size(); ++i) { + value.shape[i] = shape[i]; } } else if constexpr (std::is_same::value) { if (key == "input_dims" || key == "output_dims") { const auto& shape = (key == "input_dims") ? input_.get_shape() : output_.get_shape(); value = shape.size() < 5 ? 
5 : shape.size(); } else { - return ERROR; + return UNSUPPORTED_KEY; } } - return SUCCESS; + return OK; } inferReqWrap::Ptr get_idle_request() { std::unique_lock lock(mutex_); #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "idleIds size: " < 0; @@ -157,18 +182,22 @@ class ov_engine : public engine { idleIds_.pop(); return request; } - void put_idle_request(size_t id){ + + void put_idle_request(size_t id) { std::unique_lock lock(mutex_); idleIds_.push(id); #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "put_idle_request: idleIds size: " << idleIds_.size() << std::endl; + std::cout << "[Trace]: " + << "put_idle_request: idleIds size: " << idleIds_.size() << std::endl; #endif cv_.notify_one(); } void wait_all_impl() { #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "ov_engine wait_all: " << "idleIds_ size:" << idleIds_.size() << " requests_ size:" << requests_.size() << std::endl; + std::cout << "[Trace]: " + << "ov_engine wait_all: " + << "idleIds_ size:" << idleIds_.size() << " requests_ size:" << requests_.size() << std::endl; #endif std::unique_lock lock(mutex_); cv_.wait(lock, [this] { @@ -176,13 +205,13 @@ class ov_engine : public engine { }); } - IBasicVSRStatus create_infer_requests_impl(size_t requests_num); + IVSRStatus create_infer_requests_impl(size_t requests_num); const size_t get_infer_requests_size_impl() { return requests_.size(); } - ~ov_engine(){ + ~ov_engine() { requests_.clear(); } @@ -192,11 +221,13 @@ class ov_engine : public engine { std::vector requests_; std::mutex mutex_; std::condition_variable cv_; - //configurations for openvino instances. + // configurations for openvino instances. 
std::map configs_; ov::Core instance_; ov::CompiledModel compiled_model_; std::vector reshape_settings_; + tensor_desc_t input_tensor_desc_; + tensor_desc_t output_tensor_desc_; std::string custom_lib_; std::string model_path_; @@ -205,4 +236,4 @@ class ov_engine : public engine { ov::Output output_; }; -#endif //OV_ENGINE_HPP +#endif // OV_ENGINE_HPP diff --git a/ivsr_sdk/src/include/threading/ivsr_thread_executor.hpp b/ivsr_sdk/src/include/threading/ivsr_thread_executor.hpp index 62d3d39..26541be 100644 --- a/ivsr_sdk/src/include/threading/ivsr_thread_executor.hpp +++ b/ivsr_sdk/src/include/threading/ivsr_thread_executor.hpp @@ -30,22 +30,19 @@ namespace IVSRThread { - // using Task = std::function; - using Task = InferTask::Ptr; +// using Task = std::function; +using Task = InferTask::Ptr; - struct Config { - std::string _name; - int _threads = 5; //!< Number of threads. +struct Config { + std::string _name; + int _threads = 5; //!< Number of threads. - Config(std::string name = "IVSRThreadsExecutor", - int threads = 1): - _name(name), - _threads(threads){}; - }; + Config(std::string name = "IVSRThreadsExecutor", int threads = 1) : _name(name), _threads(threads){}; +}; /** * @class IVSRThreadExecutor - * @brief Thread executor implementation. + * @brief Thread executor implementation. * It implements a common thread pool. 
*/ class IVSRThreadExecutor { @@ -54,6 +51,7 @@ class IVSRThreadExecutor { * @brief A shared pointer to a IVSRThreadExecutor object */ using Ptr = std::shared_ptr; + using CallbackFunc = std::function; /** * @brief Constructor @@ -68,27 +66,27 @@ class IVSRThreadExecutor { /** * @brief interface to enqueue task - */ + */ void Enqueue(Task task); /** * @brief interface to execute the task - */ + */ void Execute(Task task); /** * @brief interface to create task - */ - Task CreateTask(char* inBuf, char* outBuf, InferFlag flag); + */ + Task CreateTask(char* inBuf, char* outBuf, InferFlag flag, ivsr_cb_t* cb = NULL); /** * @brief interface to sync all the tasks - */ + */ void wait_all(int patchSize); /** * @brief interface to get total duration - */ + */ double get_duration_in_milliseconds(); private: diff --git a/ivsr_sdk/src/include/threading/ivsr_thread_local.hpp b/ivsr_sdk/src/include/threading/ivsr_thread_local.hpp index bc4172b..a039d95 100644 --- a/ivsr_sdk/src/include/threading/ivsr_thread_local.hpp +++ b/ivsr_sdk/src/include/threading/ivsr_thread_local.hpp @@ -28,7 +28,6 @@ namespace IVSRThread { - template struct ThreadLocal { using Map = std::unordered_map; @@ -85,13 +84,13 @@ struct ThreadLocal { auto operator*() -> decltype(it->second) { return it->second; } - auto operator-> () -> decltype(&(it->second)) { + auto operator->() -> decltype(&(it->second)) { return &(it->second); } auto operator*() const -> decltype(it->second) { return it->second; } - auto operator-> () const -> decltype(&(it->second)) { + auto operator->() const -> decltype(&(it->second)) { return &(it->second); } }; @@ -110,6 +109,4 @@ struct ThreadLocal { } }; -// #endif - -} // namespace InferenceEngine +} // namespace IVSRThread diff --git a/ivsr_sdk/src/include/utils.hpp b/ivsr_sdk/src/include/utils.hpp index 3a9c9f6..c1381d6 100644 --- a/ivsr_sdk/src/include/utils.hpp +++ b/ivsr_sdk/src/include/utils.hpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -68,28 
+69,29 @@ inline std::vector split(const std::string& s, char delim) { }; inline void ivsr_status_log(IVSRStatus status, const char* log) { - switch (status) { - case IVSRStatus::GENERAL_ERROR: - std::cout << "[General Error] NULL pointer exception " << log << "." << std::endl; - break; - case IVSRStatus::UNSUPPORTED_KEY: - std::cout << "[Error] Unsupported keys " << log << ", please check the input keys." << std::endl; - break; - case IVSRStatus::UNSUPPORTED_CONFIG: - std::cout << "[Error] Unsupported configs " << log << ", please check the input configs." << std::endl; - break; - case IVSRStatus::UNKNOWN_ERROR: - std::cout << "[Unknown Error] Process failed " << log << "." << std::endl; - break; - case IVSRStatus::EXCEPTION_ERROR: - std::cout << "[Exceptoin] Exception occurred " << log << "." << std::endl; - break; - case IVSRStatus::UNSUPPORTED_SHAPE: - std::cout << "[Error] Unsupported input shape " << log << ", please check the input frame's size." << std::endl; - break; - - default: - break; + static const std::unordered_map status_messages = { + {IVSRStatus::GENERAL_ERROR, "[General Error] Generic error occurred"}, + {IVSRStatus::UNSUPPORTED_KEY, "[Error] Unsupported keys"}, + {IVSRStatus::UNSUPPORTED_CONFIG, "[Error] Unsupported configs"}, + {IVSRStatus::UNKNOWN_ERROR, "[Unknown Error] Process failed"}, + {IVSRStatus::EXCEPTION_ERROR, "[Exception] Exception occurred"}, + {IVSRStatus::UNSUPPORTED_SHAPE, "[Error] Unsupported input shape"} + }; + + auto it = status_messages.find(status); + if (it != status_messages.end()) { + std::cout << it->second << " " << log; + + // Additional messages for specific statuses + if (status == IVSRStatus::UNSUPPORTED_KEY) { + std::cout << ", please check the input keys."; + } else if (status == IVSRStatus::UNSUPPORTED_CONFIG) { + std::cout << ", please check the input configs."; + } else if (status == IVSRStatus::UNSUPPORTED_SHAPE) { + std::cout << ", please check the input frame's size."; + } + + std::cout << "." 
<< std::endl; } } diff --git a/ivsr_sdk/src/ivsr.cpp b/ivsr_sdk/src/ivsr.cpp index 489d1ef..a38a252 100644 --- a/ivsr_sdk/src/ivsr.cpp +++ b/ivsr_sdk/src/ivsr.cpp @@ -45,13 +45,16 @@ std::vector parse_devices(const std::string& device_string) { return result; } -void parse_engine_config(std::map &config, std::string device, std::string infer_precision, std::string cldnn_config){ +void parse_engine_config(std::map& config, + const std::string& device, + const std::string& infer_precision, + const std::string& cldnn_config) { auto getDeviceTypeFromName = [](std::string device) -> std::string { return device.substr(0, device.find_first_of(".(")); }; - if(device.find("GPU") != std::string::npos && !cldnn_config.empty()){ + if (device.find("GPU") != std::string::npos && !cldnn_config.empty()) { if (!config.count("GPU")) - config["GPU"] = {}; + config["GPU"] = {}; config["GPU"]["CONFIG_FILE"] = cldnn_config; } @@ -74,44 +77,41 @@ void parse_engine_config(std::map &config, std::string // remove the hardware devices if MULTI appears in the devices list. auto hardware_devices = devices; if (if_multi) { - ivsr_version_t version; + //ivsr_version_t version; ov::Version ov_version = ov::get_openvino_version(); - std::string ov_buildNumber = std::string(ov_version.buildNumber); + std::string ov_buildNumber = std::string(ov_version.buildNumber); // Parse out the currect virtual device as the target device. std::string virtual_device = split(device, ':').at(0); auto iter_virtual = std::find(hardware_devices.begin(), hardware_devices.end(), virtual_device); hardware_devices.erase(iter_virtual); - if (ov_buildNumber.find("2022.3") != std::string::npos) { - devices.clear(); + if (ov_buildNumber.find("2022.3") != std::string::npos) { + devices.clear(); devices.push_back(virtual_device); - } else { - devices = hardware_devices; - } + } else { + devices = hardware_devices; + } } // update config per device - int nstream = 1; // set nstream = 1 for GPU what about CPU? 
- //std::string infer_precision = "f32"; // set infer precision to f32 - //std::string infer_precision = "f16"; // set infer precision to f32 + int nstream = 1; // set nstream = 1 for GPU what about CPU? for (auto& d : devices) { auto& device_config = config[d]; try { // set throughput streams and infer precision for hardwares if (d == "MULTI" || d == "AUTO") { - for(auto& hd : hardware_devices){ - // construct device_config[hd] map and insert first property - device_config.insert(ov::device::properties(hd, ov::num_streams(nstream))); - // insert second property in device_config[hd] + for (auto& hd : hardware_devices) { auto& property = device_config[hd].as(); - property.emplace(ov::hint::inference_precision(infer_precision)); + property.emplace(ov::device::properties(hd, ov::num_streams(nstream))); + if (!infer_precision.empty()) + property.emplace(ov::hint::inference_precision(infer_precision)); } - } - else if(d.find("GPU") != std::string::npos){ // GPU + } else if (d.find("GPU") != std::string::npos) { // GPU device_config.emplace(ov::num_streams(nstream)); - device_config.emplace(ov::hint::inference_precision(infer_precision)); - } - else{ // CPU + if (!infer_precision.empty()) + device_config.emplace(ov::hint::inference_precision(infer_precision)); + } else { // CPU // insert inference precision to map device_config - device_config.emplace(ov::hint::inference_precision(infer_precision)); + if (!infer_precision.empty()) + device_config.emplace(ov::hint::inference_precision(infer_precision)); } } catch (const ov::AssertFailure& e) { std::cerr << "Caught an ov::AssertFailure exception: " << e.what() << std::endl; @@ -188,238 +188,314 @@ std::vector convert_string_to_vector(const std::string& input) { return result; } -struct ivsr{ - engine inferEngine; - IVSRThread::IVSRThreadExecutor *threadExecutor; - std::unordered_map vsr_config; +struct ivsr { + engine* inferEngine; + IVSRThread::IVSRThreadExecutor* threadExecutor; + std::unordered_map vsr_config; 
PatchConfig patchConfig; bool patchSolution; - std::vector input_data_shape; //shape of input data - ivsr():threadExecutor(nullptr),patchSolution(false){} + std::vector input_data_shape; // shape of input data + + ivsr() + : threadExecutor(nullptr), + patchSolution(false) {} + + // Define a constructor to initialize engine and other members if needed + ivsr(engine* engine, + IVSRThread::IVSRThreadExecutor* executor, + const std::unordered_map& config, + const PatchConfig& patch, + std::vector shape, + bool sol = false) + : inferEngine(engine), + threadExecutor(executor), + vsr_config(config), + patchConfig(patch), + patchSolution(sol), + input_data_shape(std::move(shape)) {} }; -IVSRStatus ivsr_init(ivsr_config_t *configs, ivsr_handle *handle){ - if(configs == nullptr || handle == nullptr){ - ivsr_status_log(IVSRStatus::GENERAL_ERROR,"in ivsr_init"); +IVSRStatus ivsr_init(ivsr_config_t *configs, ivsr_handle *handle) { + if (configs == nullptr || handle == nullptr) { + ivsr_status_log(IVSRStatus::GENERAL_ERROR, "in ivsr_init"); return IVSRStatus::GENERAL_ERROR; } - (*handle) = new ivsr(); - - //TODO: replace w/ parseConfig() ?? 
- // 1.parse input config - std::string model = "", device = "", batch = "", infer_precision = "f32"; - std::string verbose = "", custom_lib = "", cldnn_config = ""; - std::vector reshape_settings; - std::vector reso; + // Configuration variables + std::string model, device, batch, infer_precision; + std::string verbose, custom_lib, cldnn_config; + std::vector reshape_settings, reso; size_t frame_width = 0, frame_height = 0; - while(configs!=nullptr){ + int reshape_h = 0, reshape_w = 0; + std::unordered_map config_map; + size_t infer_request_num = 1; // default infer_request_num set to 1 + const tensor_desc_t *input_tensor_desc = nullptr; + const tensor_desc_t *output_tensor_desc = nullptr; + + // Parse input config + while (configs != nullptr) { IVSRStatus unsupported_status = IVSRStatus::OK; - std::string unsupported_output = ""; - switch(configs->key){ + std::string unsupported_output; + + switch (configs->key) { case IVSRConfigKey::INPUT_MODEL: - model = std::string(configs->value); - if(!checkFile(model)){ + model = std::string(static_cast(configs->value)); + if (!checkFile(model)) { unsupported_status = IVSRStatus::UNSUPPORTED_CONFIG; - unsupported_output.append("INPUT_MODEL").append("=").append(configs->value); - return IVSRStatus::UNSUPPORTED_CONFIG; + unsupported_output = "INPUT_MODEL=" + std::string(static_cast(configs->value)); } std::cout << "[INFO] " << "Model Path:" << model << std::endl; break; case IVSRConfigKey::TARGET_DEVICE: - device = configs->value; + device = static_cast(configs->value); std::cout << "[INFO] " << "DEVICE:" << device << std::endl; break; case IVSRConfigKey::BATCH_NUM: - batch = configs->value; + batch = static_cast(configs->value); break; case IVSRConfigKey::VERBOSE_LEVEL: - verbose = configs->value; + verbose = static_cast(configs->value); break; case IVSRConfigKey::CUSTOM_LIB: - custom_lib = configs->value; - if(!checkFile(custom_lib)){ // file not exists, inform out + custom_lib = static_cast(configs->value); + if 
(!checkFile(custom_lib)) { unsupported_status = IVSRStatus::UNSUPPORTED_CONFIG; - unsupported_output.append("CUSTOM_LIB").append("=").append(configs->value); - return IVSRStatus::UNSUPPORTED_CONFIG; + unsupported_output = "CUSTOM_LIB=" + std::string(static_cast(configs->value)); } break; case IVSRConfigKey::CLDNN_CONFIG: - cldnn_config = configs->value; - if(!checkFile(cldnn_config)){ + cldnn_config = static_cast(configs->value); + if (!checkFile(cldnn_config)) { unsupported_status = IVSRStatus::UNSUPPORTED_CONFIG; - unsupported_output.append("CLDNN_CONFIG").append("=").append(configs->value); - return IVSRStatus::UNSUPPORTED_CONFIG; + unsupported_output = "CLDNN_CONFIG=" + std::string(static_cast(configs->value)); } break; case IVSRConfigKey::PRECISION: - infer_precision = configs->value; - if(device.find("GPU") != std::string::npos){ - if(infer_precision.compare("f32") != 0 && infer_precision.compare("f16") != 0){ - ivsr_status_log(IVSRStatus::UNSUPPORTED_CONFIG,"for PRECISION="); + infer_precision = static_cast(configs->value); + if (device.find("GPU") != std::string::npos) { + if (infer_precision != "f32" && infer_precision != "f16") { + ivsr_status_log(IVSRStatus::UNSUPPORTED_CONFIG, "for PRECISION="); return IVSRStatus::UNSUPPORTED_CONFIG; } - }else{ - if(infer_precision.compare("f32") != 0 && infer_precision.compare("bf16") != 0){ - ivsr_status_log(IVSRStatus::UNSUPPORTED_CONFIG,"for PRECISION="); + } else { + if (infer_precision != "f32" && infer_precision != "bf16") { + ivsr_status_log(IVSRStatus::UNSUPPORTED_CONFIG, "for PRECISION="); return IVSRStatus::UNSUPPORTED_CONFIG; } } break; case IVSRConfigKey::RESHAPE_SETTINGS: - reshape_settings = convert_string_to_vector(configs->value); + reshape_settings = convert_string_to_vector(static_cast(configs->value)); + //The layout of RESHAPE SETTINGS is NHW + reshape_h = reshape_settings[ov::layout::height_idx(ov::Layout("NHW"))]; + reshape_w = reshape_settings[ov::layout::width_idx(ov::Layout("NHW"))]; + if 
(reshape_h % 2 != 0 || reshape_w % 2 != 0) { + ivsr_status_log(IVSRStatus::UNSUPPORTED_SHAPE, static_cast(configs->value)); + return IVSRStatus::UNSUPPORTED_SHAPE; + } break; case IVSRConfigKey::INPUT_RES: - //in format "," - reso = convert_string_to_vector(configs->value); + reso = convert_string_to_vector(static_cast(configs->value)); frame_width = reso[0]; frame_height = reso[1]; break; + case IVSRConfigKey::INFER_REQ_NUMBER: + try { + auto num = std::stoul(static_cast(configs->value)); + if (num > infer_request_num) + infer_request_num = num; + std::cout << "[INFO] Infer request num: " << infer_request_num << std::endl; + } catch (const std::invalid_argument& e) { + std::cerr << "[ERROR] Invalid argument: " << static_cast(configs->value) << std::endl; + } catch (const std::out_of_range& e) { + std::cerr << "[ERROR] Out of range: " << static_cast(configs->value) << std::endl; + } + break; + case IVSRConfigKey::INPUT_TENSOR_DESC_SETTING: + input_tensor_desc = static_cast(configs->value); + break; + case IVSRConfigKey::OUTPUT_TENSOR_DESC_SETTING: + output_tensor_desc = static_cast(configs->value); + break; default: unsupported_status = IVSRStatus::UNSUPPORTED_KEY; - unsupported_output.append(std::to_string(configs->key)); + unsupported_output = std::to_string(configs->key); break; } - ivsr_status_log(unsupported_status, unsupported_output.c_str()); + ivsr_status_log(unsupported_status, unsupported_output.c_str()); configs = configs->next; } - if(!check_engine_config(model, device)) { + + if (!check_engine_config(model, device)) { return IVSRStatus::UNSUPPORTED_CONFIG; } - if(frame_width == 0 || frame_height == 0) { - ivsr_status_log(IVSRStatus::UNSUPPORTED_CONFIG,"please set INPUT_RES!"); + + if (frame_width == 0 || frame_height == 0) { + ivsr_status_log(IVSRStatus::UNSUPPORTED_CONFIG, "please set INPUT_RES!"); return IVSRStatus::UNSUPPORTED_CONFIG; } - /** - * Below code only is for OpenVINO engine - */ - // 2.parse config for inference engine + // Parse config 
for the inference engine std::map engine_configs; - parse_engine_config(engine_configs,device,infer_precision,cldnn_config); - - // 3.construct and initialization - // - initialize inference engine - (*handle)->inferEngine = {new ov_engine(device, model, custom_lib, engine_configs, reshape_settings)}; - // -construct IVSRThreadExecutor object - IVSRThread::Config executorConfig; - (*handle)->threadExecutor = new IVSRThread::IVSRThreadExecutor(executorConfig, (*handle)->inferEngine.get_impl()); - - // -construct patch config - size_t input_dims = 0; - (*handle)->inferEngine.get_attr("input_dims", input_dims); - ov::Shape model_inputs, model_outputs; - (*handle)->inferEngine.get_attr("model_inputs", model_inputs); // 1,3,3,1080,1920 (400,700) - (*handle)->inferEngine.get_attr("model_outputs", model_outputs); // 1,3,3,2160,3840 (800,1400) - - // --set patch configs - int m_input_width = *(model_inputs.end() - 1); - int m_input_height = *(model_inputs.end() - 2); - int nif = *(model_inputs.end() - 4); - int m_output_width = *(model_outputs.end() - 1); - (*handle)->patchConfig.scale = m_output_width / m_input_width; // Note: do not support fractional SR - (*handle)->patchConfig.patchHeight = m_input_height; - (*handle)->patchConfig.patchWidth = m_input_width; - (*handle)->patchConfig.dims = input_dims; - (*handle)->patchConfig.nif = nif; + parse_engine_config(engine_configs, device, infer_precision, cldnn_config); + + // Initialize inference engine + auto ovEng = new ov_engine(device, + model, + custom_lib, + engine_configs, + reshape_settings, + *input_tensor_desc, + *output_tensor_desc); + + IVSRStatus status = ovEng->init(); + if (status != IVSRStatus::OK) { + ivsr_status_log(status, "in ivsr_init"); + return IVSRStatus::UNSUPPORTED_SHAPE; + } + + auto res = ovEng->create_infer_requests(infer_request_num); + if (res < 0) { + std::cout << "[ERROR]: Failed to create infer requests!\n"; + return IVSRStatus::GENERAL_ERROR; + } + + // Construct IVSRThreadExecutor object + 
IVSRThread::Config executorConfig{"ivsr_thread_executor", 8}; + auto executor = new IVSRThread::IVSRThreadExecutor(executorConfig, ovEng); + + // Construct patch config + tensor_desc_t input_tensor = { + .precision = {0}, + .layout = {0}, + .tensor_color_format = {0}, + .model_color_format = {0}, + .scale = 0.0, + .dimension = 0, + .shape = {0}}; + ovEng->get_attr("model_inputs", input_tensor); + + tensor_desc_t output_tensor = { + .precision = {0}, + .layout = {0}, + .tensor_color_format = {0}, + .model_color_format = {0}, + .scale = 0.0, + .dimension = 0, + .shape = {0}}; + ovEng->get_attr("model_outputs", output_tensor); + + PatchConfig patchConfig; + int m_input_width = input_tensor.shape[ov::layout::width_idx(ov::Layout(input_tensor.layout))]; + int m_input_height = input_tensor.shape[ov::layout::height_idx(ov::Layout(input_tensor.layout))]; + // hard code + int nif = input_tensor.dimension == 5 ? input_tensor.shape[1] : 1; + int m_output_width = output_tensor.shape[ov::layout::width_idx(ov::Layout(output_tensor.layout))]; + patchConfig.scale = m_output_width / m_input_width; + patchConfig.patchHeight = m_input_height; + patchConfig.patchWidth = m_input_width; + patchConfig.dims = input_tensor.dimension; + patchConfig.nif = nif; + #ifdef ENABLE_LOG - std::cout << "[Trace]: " << (*handle)->patchConfig << std::endl; + std::cout << "[Trace]: " << patchConfig << std::endl; #endif - // generate input data shape - std::vector& input_shape = (*handle)->input_data_shape; - //model input res might not be the same as input frame res - std::transform(model_inputs.begin(), model_inputs.end(), std::back_inserter(input_shape), - [](size_t val) { return val; }); - input_shape[input_shape.size() - 1] = frame_width; - input_shape[input_shape.size() - 2] = frame_height; + // Generate input data shape + std::vector input_res; + input_res.push_back(frame_height); + input_res.push_back(frame_width); + // Use the parameterized constructor + *handle = new ivsr(ovEng, executor, 
config_map, patchConfig, std::move(input_res)); return IVSRStatus::OK; } -IVSRStatus ivsr_process(ivsr_handle handle, char* input_data, char* output_data, ivsr_cb_t* cb){ - if(input_data == nullptr){ - ivsr_status_log(IVSRStatus::GENERAL_ERROR, "in ivsr_process"); +IVSRStatus ivsr_process(ivsr_handle handle, char* input_data, char* output_data, ivsr_cb_t* cb) { + if (input_data == nullptr) { + ivsr_status_log(IVSRStatus::GENERAL_ERROR, "in ivsr_process - input_data is nullptr"); return IVSRStatus::GENERAL_ERROR; } - try{ + try { std::vector int_shape; - std::transform(handle->input_data_shape.begin(), handle->input_data_shape.end(), std::back_inserter(int_shape), - [](size_t val) -> int { return static_cast(val); }); - - // determine whether apply patch solution or not - if(handle->patchConfig.patchHeight < *(int_shape.end()-2) || - handle->patchConfig.patchWidth < *(int_shape.end()-1)) { + int_shape.reserve(handle->input_data_shape.size()); // Reserve space for efficiency + std::transform(handle->input_data_shape.begin(), + handle->input_data_shape.end(), + std::back_inserter(int_shape), + [](size_t val) -> int { + return static_cast(val); + }); + + // Determine whether to apply the patch solution + if (handle->patchConfig.patchHeight < int_shape[int_shape.size() - 2] || + handle->patchConfig.patchWidth < int_shape[int_shape.size() - 1]) { handle->patchSolution = true; } - // smart patch inference - SmartPatch* smartPatch = new SmartPatch(handle->patchConfig, input_data, output_data, int_shape, handle->patchSolution); - // -prepare data - auto res = smartPatch->generatePatch(); - if(res == -1){ - delete smartPatch; - ivsr_status_log(IVSRStatus::UNKNOWN_ERROR, "in Smart Patch"); + // Smart patch inference using a smart pointer for automatic memory management + std::unique_ptr smartPatch( + new SmartPatch(handle->patchConfig, input_data, output_data, int_shape, handle->patchSolution) + ); + + // Prepare data + int res = smartPatch->generatePatch(); + if (res == -1) 
{ + ivsr_status_log(IVSRStatus::UNKNOWN_ERROR, "in SmartPatch::generatePatch"); return IVSRStatus::UNKNOWN_ERROR; } + auto patchList = smartPatch->getInputPatches(); auto outputPatchList = smartPatch->getOutputPatches(); #ifdef ENABLE_PERF - auto totalStartTime = Time::now(); + auto totalStartTime = Time::now(); #endif - // create infer requests based on patch list size - if (patchList.size() > handle->inferEngine.get_infer_requests_size()) { - auto res = handle->inferEngine.create_infer_requests(patchList.size()); - if (res == -1) { - std::cout << "[ERROR]: " << "Failed to creat infer requests!\n"; - delete smartPatch; - return IVSRStatus::GENERAL_ERROR; + // Create infer requests based on patch list size + size_t required_infer_requests = patchList.size(); + if (required_infer_requests > handle->inferEngine->get_infer_requests_size()) { + auto res = handle->inferEngine->create_infer_requests(required_infer_requests); + if (res < 0) { + std::cout << "[ERROR]: Failed to create infer requests!\n"; + return IVSRStatus::GENERAL_ERROR; + } } - } - // -get data into infer task - int idx = 0; - for(; idx < patchList.size(); idx++ ){ + // Get data into infer task + for (auto idx = 0u; idx < patchList.size(); ++idx) { #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "ivsr_process on patch: " << idx << std::endl; + std::cout << "[Trace]: ivsr_process on patch: " << idx << std::endl; #endif + std::shared_ptr task = handle->threadExecutor->CreateTask( + patchList[idx], outputPatchList[idx], InferFlag::AUTO); + handle->threadExecutor->Enqueue(task); + } - std::shared_ptr task = handle->threadExecutor->CreateTask(patchList[idx], outputPatchList[idx], InferFlag::AUTO); - handle->threadExecutor->Enqueue(task); - } + // Wait for all tasks to finish + handle->threadExecutor->wait_all(required_infer_requests); - // -wait for all the tasks finish - handle->threadExecutor->wait_all(patchList.size()); #ifdef ENABLE_PERF - auto duration = get_duration_ms_till_now(totalStartTime); - 
std::cout << "[PERF] " << "Patch inference with memory copy - Latency: " << double_to_string(duration) <<"ms"<patchConfig.nif* 1000.0 / duration) <<"FPS"<patchConfig.nif * 1000.0 / duration) << "FPS" << std::endl; #endif -// #ifdef ENABLE_PERF -// // -get total duration for all tasks -// double totalDuration = handle->threadExecutor->get_duration_in_milliseconds(); -// double fps = 3 * 1000.0/totalDuration; -// std::cout << "All tasks Total Latency for One Nig: " << double_to_string(totalDuration) <<"ms"<restoreImageFromPatches(); - if(res == -1){ - ivsr_status_log(IVSRStatus::UNKNOWN_ERROR, "in Smart Patch"); + if (res == -1) { + ivsr_status_log(IVSRStatus::UNKNOWN_ERROR, "in SmartPatch::restoreImageFromPatches"); return IVSRStatus::UNKNOWN_ERROR; } - delete smartPatch; - // notify user + // Notify user cb->ivsr_cb(cb->args); - }catch(exception e){ - std::cout << "Error in ivsr_process" << std::endl; + } catch (const std::exception& e) { + std::cout << "Error in ivsr_process: " << e.what() << std::endl; ivsr_status_log(IVSRStatus::EXCEPTION_ERROR, e.what()); return IVSRStatus::UNKNOWN_ERROR; } @@ -427,6 +503,48 @@ IVSRStatus ivsr_process(ivsr_handle handle, char* input_data, char* output_data, return IVSRStatus::OK; } +IVSRStatus ivsr_process_async(ivsr_handle handle, char* input_data, char* output_data, ivsr_cb_t* cb) { + if (input_data == nullptr) { + ivsr_status_log(IVSRStatus::GENERAL_ERROR, "in ivsr_process_async - input_data is nullptr"); + return IVSRStatus::GENERAL_ERROR; + } + + try { + std::vector int_shape; + int_shape.reserve(handle->input_data_shape.size()); // Reserve space for efficiency + std::transform(handle->input_data_shape.begin(), + handle->input_data_shape.end(), + std::back_inserter(int_shape), + [](size_t val) -> int { + return static_cast(val); + }); + + // Determine whether to apply the patch solution + if (handle->patchConfig.patchHeight < int_shape[int_shape.size() - 2] || + handle->patchConfig.patchWidth < int_shape[int_shape.size() - 1]) 
{ + handle->patchSolution = true; + } + + // TODO: Now fallback to ivsr_process api when patch solution is needed + if (handle->patchSolution) { + return ivsr_process(handle, input_data, output_data, cb); + } + + /* Uncomment: to use thread loop and internal task to process */ + // std::shared_ptr task = + // handle->threadExecutor->CreateTask(input_data, output_data, InferFlag::AUTO, cb); + // handle->threadExecutor->Enqueue(task); + + handle->inferEngine->proc(input_data, output_data, cb); + + } catch (const std::exception& e) { + std::cout << "Error in ivsr_process_async: " << e.what() << std::endl; + ivsr_status_log(IVSRStatus::EXCEPTION_ERROR, e.what()); + return IVSRStatus::UNKNOWN_ERROR; + } + + return IVSRStatus::OK; +} IVSRStatus ivsr_reconfig(ivsr_handle handle, ivsr_config_t* configs){ if(configs == nullptr){ @@ -439,22 +557,22 @@ IVSRStatus ivsr_reconfig(ivsr_handle handle, ivsr_config_t* configs){ while(configs!=nullptr){ switch(configs->key){ case IVSRConfigKey::INPUT_MODEL: - handle->vsr_config["model"] = configs->value; + handle->vsr_config["model"] = static_cast(configs->value); break; case IVSRConfigKey::TARGET_DEVICE: - handle->vsr_config["device"] = configs->value; + handle->vsr_config["device"] = static_cast(configs->value); break; case IVSRConfigKey::BATCH_NUM: - handle->vsr_config["batch_num"] = configs->value; + handle->vsr_config["batch_num"] = static_cast(configs->value); break; case IVSRConfigKey::VERBOSE_LEVEL: - handle->vsr_config["verbose_level"] = configs->value; + handle->vsr_config["verbose_level"] = static_cast(configs->value); break; case IVSRConfigKey::CUSTOM_LIB: - handle->vsr_config["custom_lib"] = configs->value; + handle->vsr_config["custom_lib"] = static_cast(configs->value); break; case IVSRConfigKey::CLDNN_CONFIG: - handle->vsr_config["cldnn_config"] = configs->value; + handle->vsr_config["cldnn_config"] = static_cast(configs->value); break; default: break; @@ -464,7 +582,7 @@ IVSRStatus ivsr_reconfig(ivsr_handle handle, 
ivsr_config_t* configs){ // reconfig ov_engine ? - }catch(exception e){ + } catch (const std::exception& e) { // std::cout << "Error in ivsr_reconfig" << std::endl; ivsr_status_log(IVSRStatus::EXCEPTION_ERROR, e.what()); return IVSRStatus::UNKNOWN_ERROR; @@ -483,24 +601,12 @@ IVSRStatus ivsr_get_attr(ivsr_handle handle, IVSRAttrKey key, void* value){ } case IVSRAttrKey::INPUT_TENSOR_DESC: { - int* input_tensor_desc = (int*) value; - ov::Shape input_shape; - handle->inferEngine.get_attr("model_inputs", input_shape); - for(auto s : input_shape){ - *input_tensor_desc = s; - input_tensor_desc++; - } + handle->inferEngine->get_attr("model_inputs", *(static_cast(value))); break; } case IVSRAttrKey::OUTPUT_TENSOR_DESC: { - int* output_tensor_desc = (int*) value; - ov::Shape output_shape; - handle->inferEngine.get_attr("model_outputs", output_shape); - for(auto s : output_shape){ - *output_tensor_desc = s; - output_tensor_desc++; - } + handle->inferEngine->get_attr("model_outputs", *(static_cast(value))); break; } case IVSRAttrKey::NUM_INPUT_FRAMES: @@ -512,14 +618,14 @@ IVSRStatus ivsr_get_attr(ivsr_handle handle, IVSRAttrKey key, void* value){ case IVSRAttrKey::INPUT_DIMS: { size_t dims = 0; - handle->inferEngine.get_attr("input_dims", dims); + handle->inferEngine->get_attr("input_dims", dims); *((size_t *)value) = dims; break; } case IVSRAttrKey::OUTPUT_DIMS: { size_t dims = 0; - handle->inferEngine.get_attr("output_dims", dims); + handle->inferEngine->get_attr("output_dims", dims); *((size_t *)value) = dims; break; } @@ -539,7 +645,7 @@ IVSRStatus ivsr_deinit(ivsr_handle handle) { } try { - auto p = handle->inferEngine.get_impl(); + auto p = handle->inferEngine->get_impl(); if (p != nullptr) delete p; @@ -547,7 +653,7 @@ IVSRStatus ivsr_deinit(ivsr_handle handle) { delete handle->threadExecutor; handle->threadExecutor = nullptr; } - } catch (exception e) { + } catch (const std::exception& e) { ivsr_status_log(IVSRStatus::EXCEPTION_ERROR, e.what()); return 
IVSRStatus::UNKNOWN_ERROR; } diff --git a/ivsr_sdk/src/ivsr_thread_executor.cpp b/ivsr_sdk/src/ivsr_thread_executor.cpp index 1c0f51a..6f63f35 100644 --- a/ivsr_sdk/src/ivsr_thread_executor.cpp +++ b/ivsr_sdk/src/ivsr_thread_executor.cpp @@ -40,15 +40,12 @@ struct IVSRThreadExecutor::Impl { _impl->_streamIdQueue.pop(); } } - - } ~Stream() { { std::lock_guard lock{_impl->_streamIdMutex}; _impl->_streamIdQueue.push(_streamId); } - } Impl* _impl = nullptr; @@ -56,22 +53,21 @@ struct IVSRThreadExecutor::Impl { int _numaNodeId = 0; bool _execute = false; std::queue _taskQueue; - }; explicit Impl(const Config& config, engine* engine) : _config{config}, - _engine(engine), _streams([this] { return std::make_shared(this); - }) { + }), + _engine(engine) { for (auto streamId = 0; streamId < _config._threads; ++streamId) { _threads.emplace_back([this, streamId] { for (bool stopped = false; !stopped;) { Task task; { std::unique_lock lock(_mutex); - _queueCondVar.wait(lock, [&] { + _queueCondVar.wait(lock, [&] { return !_taskQueue.empty() || (stopped = _isStopped); }); if (!_taskQueue.empty()) { @@ -81,7 +77,8 @@ struct IVSRThreadExecutor::Impl { } if (task) { #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "Thread " << std::this_thread::get_id() << " get task and execute it" << std::endl; + std::cout << "[Trace]: " + << "Thread " << std::this_thread::get_id() << " get task and execute it" << std::endl; #endif Execute(task, *(_streams.local())); } @@ -94,22 +91,29 @@ struct IVSRThreadExecutor::Impl { { std::lock_guard lock(_mutex); _taskQueue.emplace(task); - _startTime = std::min(Time::now(), _startTime); + _startTime = std::min(Time::now(), _startTime); } _queueCondVar.notify_one(); #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "Enqueue Task into queue and notify 1 / " << _taskQueue.size() << std::endl; -#endif + std::cout << "[Trace]: " + << "Enqueue Task into queue and notify 1 / " << _taskQueue.size() << std::endl; +#endif } void Execute(const Task& task, Stream& stream) 
{ _engine->run(task); } - Task CreateTask(char* inBuf, char* outBuf, InferFlag flag) { - Task task = std::make_shared(inBuf, outBuf, std::bind(&IVSRThread::IVSRThreadExecutor::Impl::competition_call_back, this), flag); + Task CreateTask(char* inBuf, char* outBuf, InferFlag flag, ivsr_cb_t* cb) { + Task task = std::make_shared( + inBuf, + outBuf, + std::bind(&IVSRThread::IVSRThreadExecutor::Impl::competition_call_back, this, std::placeholders::_1), + flag, + cb); return task; } + void Defer(Task task) { auto& stream = *(_streams.local()); stream._taskQueue.push(std::move(task)); @@ -127,11 +131,10 @@ struct IVSRThreadExecutor::Impl { } void sync(int size) { - std::unique_lock lock(_mutex); - _taskCondVar.wait(lock,[&] { - return (_cb_counter == size); + std::unique_lock lock(_mutex); + _taskCondVar.wait(lock, [&] { + return (_cb_counter == size); }); - } void reset() { @@ -139,13 +142,16 @@ struct IVSRThreadExecutor::Impl { _cb_counter = 0; } - void competition_call_back() { + void competition_call_back(Task task) { std::unique_lock lock(_mutex); - _cb_counter++; - _endTime = std::max(Time::now(), _endTime); + _cb_counter++; + _endTime = std::max(Time::now(), _endTime); + if (task->cb) { + task->cb->ivsr_cb(task->cb->args); + } _taskCondVar.notify_one(); } - + double get_duration_in_milliseconds() { return std::chrono::duration_cast(_endTime - _startTime).count() * 0.000001; } @@ -161,14 +167,14 @@ struct IVSRThreadExecutor::Impl { int _cb_counter = 0; std::queue _taskQueue; bool _isStopped = false; - ThreadLocal> _streams; + ThreadLocal> _streams; engine* _engine; Time::time_point _startTime = Time::time_point::max(); - Time::time_point _endTime = Time::time_point::min(); + Time::time_point _endTime = Time::time_point::min(); }; - -IVSRThreadExecutor::IVSRThreadExecutor(const Config& config, engine* engine) : _impl{new Impl{config, engine}} {} +IVSRThreadExecutor::IVSRThreadExecutor(const Config& config, engine* engine) + : _impl{new Impl{config, engine}} {} 
IVSRThreadExecutor::~IVSRThreadExecutor() { { @@ -195,8 +201,8 @@ void IVSRThreadExecutor::Enqueue(Task task) { } } -Task IVSRThreadExecutor::CreateTask(char* inBuf, char* outBuf, InferFlag flag) { - Task task = _impl->CreateTask(inBuf, outBuf, flag); +Task IVSRThreadExecutor::CreateTask(char* inBuf, char* outBuf, InferFlag flag, ivsr_cb_t *cb) { + Task task = _impl->CreateTask(inBuf, outBuf, flag, cb); return task; } @@ -206,6 +212,6 @@ void IVSRThreadExecutor::wait_all(int patchSize) { } double IVSRThreadExecutor::get_duration_in_milliseconds() { - return _impl->get_duration_in_milliseconds(); + return _impl->get_duration_in_milliseconds(); } } // namespace IVSRThread diff --git a/ivsr_sdk/src/ov_engine.cpp b/ivsr_sdk/src/ov_engine.cpp index 405f76e..284d6cc 100644 --- a/ivsr_sdk/src/ov_engine.cpp +++ b/ivsr_sdk/src/ov_engine.cpp @@ -1,42 +1,83 @@ /******************************************************************************** -* INTEL CONFIDENTIAL -* Copyright (C) 2023 Intel Corporation -* -* This software and the related documents are Intel copyrighted materials, -* and your use of them is governed by the express license under -* which they were provided to you ("License").Unless the License -* provides otherwise, you may not use, modify, copy, publish, distribute, disclose or -* transmit this software or the related documents without Intel's prior written permission. -* -* This software and the related documents are provided as is, -* with no express or implied warranties, other than those that are expressly stated in the License. 
-*******************************************************************************/ + * INTEL CONFIDENTIAL + * Copyright (C) 2023 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, + * and your use of them is governed by the express license under + * which they were provided to you ("License").Unless the License + * provides otherwise, you may not use, modify, copy, publish, distribute, disclose or + * transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, + * with no express or implied warranties, other than those that are expressly stated in the License. + *******************************************************************************/ /** * @file openvino_engine.cpp * openvino backend inference implementation * it is the wrapper of backend inference API. */ -#include -#include -#include #include "ov_engine.hpp" -#include "utils.hpp" -#include "omp.h" +#include + +#include +#include #include +#include "omp.h" +#include "utils.hpp" + typedef std::chrono::high_resolution_clock Time; -IBasicVSRStatus ov_engine::init_impl() -{ +const std::map precision_string_to_ov = { + {"fp32", ov::element::f32}, + {"f32", ov::element::f32}, + {"fp16", ov::element::f16}, + {"f16", ov::element::f16}, + {"i8", ov::element::i8}, + {"i16", ov::element::i16}, + {"i32", ov::element::i32}, + {"u8", ov::element::u8}, + {"u16", ov::element::u16}, +}; + +const std::map color_format_string_to_ov = { + {"BGR", ov::preprocess::ColorFormat::BGR}, + {"RGB", ov::preprocess::ColorFormat::RGB}, + {"I420_Single_Plane", ov::preprocess::ColorFormat::I420_SINGLE_PLANE}, + {"I420_Three_Planes", ov::preprocess::ColorFormat::I420_THREE_PLANES}, +}; + +/* + * IN: output + * OUT: layout + * +*/ +static IVSRStatus get_default_layout(const ov::Output& output, ov::Layout& layout) { + size_t shape_size = output.get_partial_shape().size(); + switch (shape_size) { + 
case 4: + layout = ov::Layout("NCHW"); + break; + case 5: + layout = ov::Layout("NFCHW"); + break; + default: + std::cout << "not supported model input/output shape size\n"; + return GENERAL_ERROR; + } + return OK; +} + +IVSRStatus ov_engine::init_impl() { if (custom_lib_ != "") instance_.add_extension(custom_lib_); - //set property for ov instance + // set property for ov instance for (auto&& item : configs_) { instance_.set_property(item.first, item.second); } - //read model + // read model std::shared_ptr model; try { model = instance_.read_model(model_path_); @@ -44,43 +85,61 @@ IBasicVSRStatus ov_engine::init_impl() model = irguard::load_model(instance_, model_path_); } - input_ = model->inputs()[0]; - output_ = model->outputs()[0]; - bool multiple_inputs = false; if (model->inputs().size() == 5 && model->outputs().size() == 5) multiple_inputs = true; if (!reshape_settings_.empty()) { - // get_shape() only can be called by static shape, openvino will check the shape size during reshape operation -//ov::Shape tensor_shape = input_.get_shape(); -//assert(tensor_shape.size() == reshape_settings_.size()); - - ov::Shape tensor_shape = reshape_settings_; + //get model input shape + ov::PartialShape input_shape = model->inputs()[0].get_partial_shape(); +#ifdef ENABLE_LOG + std::cout << "input tensor shape is " << (input_shape.is_static() ?
"static: " : "dynamic: ") + << input_shape << std::endl; +#endif size_t batch_index, channels_index, h_index, w_index; - if (multiple_inputs) { - const ov::Layout model_layout{"NCHW"}; - batch_index = ov::layout::batch_idx(model_layout); - channels_index = ov::layout::channels_idx(model_layout); - h_index = ov::layout::height_idx(model_layout); - w_index = ov::layout::width_idx(model_layout); + //get model input tensor layout + ov::Layout input_layout = ov::layout::get_layout(model->inputs()[0]); + if (input_layout.empty()) { + get_default_layout(model->inputs()[0], input_layout); + //ov::layout::set_layout(model->inputs()[0], input_layout); + } + batch_index = ov::layout::batch_idx(input_layout); + channels_index = ov::layout::channels_idx(input_layout); + h_index = ov::layout::height_idx(input_layout); + w_index = ov::layout::width_idx(input_layout); + + // Assume the input reshape_settings_'s layout is NHW. + // update input layer tensor batch/width/height with the value from reshape_settings_; + input_shape[batch_index] = reshape_settings_[ov::layout::batch_idx(ov::Layout("NHW"))]; + input_shape[w_index] = reshape_settings_[ov::layout::width_idx(ov::Layout("NHW"))]; + input_shape[h_index] = reshape_settings_[ov::layout::height_idx(ov::Layout("NHW"))]; + //input_shape should be static now. + assert(input_shape.is_static()); + + //TODO: is this check for BasicVSR? Is it required anymore??
+ if (input_shape.size() == 5) { + if (input_shape[w_index].get_length() % 32 != 0) { + std::cout << "[Error]: " << "Current model requires input widths to be divisible by 32" << std::endl; + return UNSUPPORTED_SHAPE; + } } #ifdef ENABLE_LOG - std::cout << "Reshape network to the image size = [" << reshape_settings_[reshape_settings_.size() - 2] << "x" - << reshape_settings_[reshape_settings_.size() - 1] << "] " << std::endl; + std::cout << "Reshape network to size = [" << input_shape[w_index].get_length() + << "x" << input_shape[h_index].get_length() << "] " << std::endl; #endif - model->reshape({{model->inputs()[0].get_any_name(), tensor_shape}}); + // reshape the model with "static" shape. + model->reshape({{model->inputs()[0].get_any_name(), input_shape.to_shape()}}); if (multiple_inputs) { - ov::Shape hidden_tensor_shape = reshape_settings_; - hidden_tensor_shape[batch_index] = tensor_shape[batch_index]; + ov::Shape hidden_tensor_shape = input_shape.to_shape(); + hidden_tensor_shape[batch_index] = input_shape[batch_index].get_length(); hidden_tensor_shape[channels_index] = 64; - hidden_tensor_shape[h_index] = tensor_shape[h_index] / 4; - hidden_tensor_shape[w_index] = tensor_shape[w_index] / 4; + hidden_tensor_shape[h_index] = input_shape[h_index].get_length() / 4; + hidden_tensor_shape[w_index] = input_shape[w_index].get_length() / 4; - for (int i = 1; i < model->inputs().size(); ++i) + for (auto i = 1u; i < model->inputs().size(); ++i) model->reshape({{model->inputs()[i].get_any_name(), hidden_tensor_shape}}); } } @@ -91,7 +150,7 @@ IBasicVSRStatus ov_engine::init_impl() std::map tensor_names; const auto& inputs = model->inputs(); const auto& outputs = model->outputs(); - for (int i = 1; i < inputs.size(); ++i) { + for (auto i = 1u; i < inputs.size(); ++i) { std::string hidden_inp_name = inputs[i].get_any_name(); std::string hidden_out_name = outputs[i].get_any_name(); tensor_names[hidden_inp_name] = hidden_out_name; @@ -105,54 +164,117 @@ IBasicVSRStatus 
ov_engine::init_impl() // std::cout << "The exec time of making stateful model is " << execTime.count() * 0.000001 << "ms\n"; } - //compile model + // PPP + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + ov::preprocess::InputInfo& input_info = ppp.input(); + ov::preprocess::OutputInfo& output_info = ppp.output(); + + // layout is NCHW by default if can not get layout information + ov::Layout layout; + if (ov::layout::get_layout(model->inputs()[0]).empty()) { + get_default_layout(model->inputs()[0], layout); + input_info.model().set_layout(layout); + } + //after calling ppp::InputInfo.model().set_layout(); model->input_::layout is not set though. + //std::cout << "model layout is " << ov::layout::get_layout(model->inputs()[0]).to_string() << endl; + if (ov::layout::get_layout(model->outputs()[0]).empty()) { + get_default_layout(model->outputs()[0], layout); + output_info.model().set_layout(layout); + } + if (input_tensor_desc_.precision != nullptr) { + input_info.tensor().set_element_type(precision_string_to_ov.at(std::string(input_tensor_desc_.precision))); + } + if (input_tensor_desc_.layout != nullptr) { + const ov::Layout input_tensor_layout{input_tensor_desc_.layout}; + input_info.tensor().set_layout(input_tensor_layout); + } + if (output_tensor_desc_.precision != nullptr) { + output_info.tensor().set_element_type(precision_string_to_ov.at(std::string(output_tensor_desc_.precision))); + } + if (output_tensor_desc_.layout != nullptr) { + const ov::Layout output_tensor_layout{output_tensor_desc_.layout}; + output_info.tensor().set_layout(output_tensor_layout); + } + // convert color tensor_color_format->model_color_format + if (strcmp(input_tensor_desc_.tensor_color_format, input_tensor_desc_.model_color_format) != 0) { + input_info.tensor().set_color_format( + color_format_string_to_ov.at(std::string(input_tensor_desc_.tensor_color_format))); + input_info.preprocess().convert_color( + 
color_format_string_to_ov.at(std::string(input_tensor_desc_.model_color_format))); + } + // ov 24.0 support convert color model_color_format->tensor_color_format + // if (strcmp(input_tensor_desc_.tensor_color_format, input_tensor_desc_.model_color_format) != 0) { + // output_info.tensor().set_color_format( + // color_format_string_to_ov.at(std::string(input_tensor_desc_.tensor_color_format))); + // output_info.preprocess().convert_color( + // color_format_string_to_ov.at(std::string(input_tensor_desc_.model_color_format))); + // } + if ((input_tensor_desc_.scale - 1.0f) > 1e-6f) { + // the input tensor precision should not be float + assert(std::string(input_tensor_desc_.precision) == std::string("u8") || + std::string(input_tensor_desc_.precision) == std::string("u16")); + input_info.preprocess().convert_element_type(ov::element::f32); + input_info.preprocess().scale(input_tensor_desc_.scale); + // PPP doesn't support un-scale, so + // the precision of output tensor need to be float if the scale != 0 or 1 + output_info.tensor().set_element_type(ov::element::f32); + } + + model = ppp.build(); + + input_ = model->inputs()[0]; + output_ = model->outputs()[0]; + // compile model compiled_model_ = instance_.compile_model(model, device_); #ifdef ENABLE_LOG std::cout << "[Trace]: " << "ov_engine init successfully" << std::endl; #endif - return SUCCESS; + return OK; } -IBasicVSRStatus ov_engine::run_impl(InferTask::Ptr task) { - //construct the input tensor - if(task->inputPtr_ == nullptr || task->outputPtr_ == nullptr) { - std::cout << "[Error]: " << "invalid input buffer pointer" << std::endl; - return ERROR; +IVSRStatus ov_engine::run_impl(InferTask::Ptr task) { + // construct the input tensor + if (task->inputPtr_ == nullptr || task->outputPtr_ == nullptr) { + std::cout << "[Error]: " + << "invalid input buffer pointer" << std::endl; + return GENERAL_ERROR; } auto inferReq = get_idle_request(); - inferReq->set_callback([wp = 
std::weak_ptr(inferReq),task](std::exception_ptr ex) { + inferReq->set_callback([wp = std::weak_ptr(inferReq), task](std::exception_ptr ex) { auto request = wp.lock(); #ifdef ENABLE_PERF - request->end_time(); + request->end_time(); auto latency = request->get_execution_time_in_milliseconds(); - size_t frame_num = (request->get_input_tensor()).get_shape()[(request->get_input_tensor().get_shape()).size()-4]; - std::cout << "[PERF] " << "Inference Latency: " << latency << "ms, Throughput: " << double_to_string(frame_num * 1000.0 / latency) << "FPS" << std::endl; + std::cout << "[PERF] Inference Latency: " << latency << "ms" << std::endl; #endif - if(ex) { + if (ex) { #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "Exception in infer request callback " << std::endl; + std::cout << "[Trace]: " + << "Exception in infer request callback " << std::endl; #endif - try{ + try { // std::rethrow_exception(ex); throw ex; - } catch(const std::exception& e) { + } catch (const std::exception& e) { std::cout << "Caught exception \"" << e.what() << "\"\n"; } } auto cbTask = task; request->call_back(); - //call application callback function - cbTask->_callbackQueue(); + // call application callback function + cbTask->_callbackFunction(cbTask); }); #ifdef ENABLE_LOG - std::cout << "[Trace]: " << "input: " << input_.get_element_type().get_type_name() << " " << input_.get_shape() << std::endl; - std::cout << "[Trace]: " << "output: " << output_.get_element_type().get_type_name() << " " << output_.get_shape() << std::endl; + std::cout << "[Trace]: " + << "input: " << input_.get_element_type().get_type_name() << " " << input_.get_shape() << std::endl; + std::cout << "[Trace]: " + << "output: " << output_.get_element_type().get_type_name() << " " << output_.get_shape() << std::endl; #endif ov::Tensor input_tensor(input_.get_element_type(), input_.get_shape(), task->inputPtr_); @@ -163,24 +285,90 @@ IBasicVSRStatus ov_engine::run_impl(InferTask::Ptr task) { inferReq->start_async(); #ifdef 
ENABLE_LOG - std::cout << "[Trace]: " << "ov_engine run: start task inference" << std::endl; + std::cout << "[Trace]: " + << "ov_engine run: start task inference" << std::endl; #endif - return SUCCESS; + return OK; } -IBasicVSRStatus ov_engine::create_infer_requests_impl(size_t requests_num) { +IVSRStatus ov_engine::process_impl(void* input_data, void* output_data, void* cb) { + // Check for valid input and output data pointers + if (input_data == nullptr || output_data == nullptr) { + std::cout << "[Error]: invalid input or output buffer pointer" << std::endl; + return GENERAL_ERROR; + } + + auto inferReq = get_idle_request(); + + // Set callback for inference request + inferReq->set_callback([wp = std::weak_ptr(inferReq), cb](std::exception_ptr ex) { + auto request = wp.lock(); +#ifdef ENABLE_PERF + request->end_time(); + auto latency = request->get_execution_time_in_milliseconds(); + std::cout << "[PERF] Inference Latency: " << latency << "ms" << std::endl; +#endif + + if (ex) { +#ifdef ENABLE_LOG + std::cout << "[Trace]: Exception in infer request callback " << std::endl; +#endif + try { + // std::rethrow_exception(ex); + throw ex; + } catch (const std::exception& e) { + std::cout << "Caught exception \"" << e.what() << "\"\n"; + } + } + + request->call_back(); + + // Check if the callback structure and function are valid, then call the function + if (cb) { + ivsr_cb_t* ivsr_cb = static_cast(cb); + if (ivsr_cb->ivsr_cb) { + ivsr_cb->ivsr_cb(ivsr_cb->args); + } + } + }); + +#ifdef ENABLE_LOG + std::cout << "[Trace]: input: " << input_.get_element_type().get_type_name() << " " << input_.get_shape() << std::endl; + std::cout << "[Trace]: output: " << output_.get_element_type().get_type_name() << " " << output_.get_shape() << std::endl; +#endif + + // Construct input and output tensors + ov::Tensor input_tensor(input_.get_element_type(), input_.get_shape(), input_data); + inferReq->set_input_tensor(input_tensor); + + ov::Tensor 
output_tensor(output_.get_element_type(), output_.get_shape(), output_data); + inferReq->set_output_tensor(output_tensor); + + // Start asynchronous inference + inferReq->start_async(); + +#ifdef ENABLE_LOG + std::cout << "[Trace]: ov_engine run: start task inference" << std::endl; +#endif + + return OK; +} + +IVSRStatus ov_engine::create_infer_requests_impl(size_t requests_num) { if (requests_num < requests_.size()) { - std::cout << "[ERROR]: " << "please pass correct requests num.\n"; - return ERROR; + std::cout << "[ERROR]: " + << "please pass correct requests num.\n"; + return GENERAL_ERROR; } - for (int id = requests_.size(); id < requests_num; ++id) { - requests_.push_back(std::make_shared(compiled_model_ ,id,std::bind(&ov_engine::put_idle_request, - this, - std::placeholders::_1))); + for (auto id = requests_.size(); id < requests_num; ++id) { + requests_.push_back( + std::make_shared(compiled_model_, + id, + std::bind(&ov_engine::put_idle_request, this, std::placeholders::_1))); idleIds_.push(id); } - return SUCCESS; -} \ No newline at end of file + return OK; +} diff --git a/ivsr_sdk/src/smart_patch.cpp b/ivsr_sdk/src/smart_patch.cpp index d058594..5bc433e 100644 --- a/ivsr_sdk/src/smart_patch.cpp +++ b/ivsr_sdk/src/smart_patch.cpp @@ -51,7 +51,7 @@ float* fill_patch(std::vector patchCorners, float* inputBuf, std::vector> patchCorners, char* imgBuf, \ std::vector patchDims, std::vector imgDims, std::vector patchList){ int pB = patchDims[0], pN = patchDims[1], pC = patchDims[2], pH = patchDims[3], pW = patchDims[4]; int iB = imgDims[0], iN = imgDims[1], iC = imgDims[2], iH = imgDims[3], iW = imgDims[4]; // imgDims? 
- int patch_sW = 1; + //int patch_sW = 1; int patch_sH = pW; int patch_sC = pH * pW; int patch_sN = pC * pH * pW; int patch_sB = pN * pC * pH * pW; - int img_sW = 1; + //int img_sW = 1; int img_sH = iW; int img_sC = iH * iW; int img_sN = iC * iH * iW; @@ -107,7 +107,7 @@ void fill_image(std::vector> patchCorners, char* imgBuf, \ float * img_ptr =(float *)imgBuf; // for each patch - for (int idx = 0; idx < patchCorners.size(); ++idx){ + for (auto idx = 0u; idx < patchCorners.size(); ++idx){ float* patchPtr = (float*)patchList[idx]; auto patchCorner = patchCorners[idx]; @@ -140,7 +140,7 @@ void fill_image(std::vector> patchCorners, char* imgBuf, \ } // average each pixel - for(int id = 0; id < outputpixels; id++){ + for(size_t id = 0; id < outputpixels; id++){ *(img_ptr + id) /= *(pixelCounter + id); } @@ -150,8 +150,8 @@ void fill_image(std::vector> patchCorners, char* imgBuf, \ SmartPatch::SmartPatch(PatchConfig config, char* inBuf, char* outBuf, std::vector inputShape, bool flag) :_inputPtr(inBuf), _outputPtr(outBuf), - _config(config), _inputShape(inputShape), + _config(config), flag(flag) { if (flag){ diff --git a/ivsr_setupvar.sh b/ivsr_setupvar.sh index e8bdaa7..92a89cc 100755 --- a/ivsr_setupvar.sh +++ b/ivsr_setupvar.sh @@ -5,16 +5,16 @@ while [ $# -gt 0 ]; do case "$1" in --ov_version) shift - if [ "$1" = "2022.3" ] || [ "$1" = "2023.2" ]; then + if [ "$1" = "2022.3" ] || [ "$1" = "2023.2" ] || [ "$1" = "2024.5" ]; then OV_VERSION=$1 else - echo "Usage: $0 --ov_version [2022.3|2023.2]" + echo "Usage: $0 --ov_version [2022.3|2023.2|2024.5]" exit 1 fi shift ;; *) - echo "Usage: $0 --ov_version [2022.3|2023.2]" + echo "Usage: $0 --ov_version [2022.3|2023.2|2024.5]" exit 1 ;; esac