diff --git a/csrc/mmdeploy/net/openvino/openvino_net.cpp b/csrc/mmdeploy/net/openvino/openvino_net.cpp
index a294a65c5c..2fed37c32c 100644
--- a/csrc/mmdeploy/net/openvino/openvino_net.cpp
+++ b/csrc/mmdeploy/net/openvino/openvino_net.cpp
@@ -77,30 +77,18 @@ Result<void> OpenVINONet::Init(const Value& args) {
   auto model = context["model"].get<Model>();
   OUTCOME_TRY(auto config, model.GetModelConfig(name));
 
-  // TODO: read network with stream
-  // save xml and bin to temp file
-  auto tmp_dir = fs::temp_directory_path();
-  std::string tmp_xml = (tmp_dir / fs::path("tmp.xml")).string();
-  std::string tmp_bin = (tmp_dir / fs::path("tmp.bin")).string();
   OUTCOME_TRY(auto raw_xml, model.ReadFile(config.net));
   OUTCOME_TRY(auto raw_bin, model.ReadFile(config.weights));
-
-  try {
-    std::ofstream xml_out(tmp_xml, std::ios::binary);
-    xml_out << raw_xml;
-    xml_out.close();
-    std::ofstream bin_out(tmp_bin, std::ios::binary);
-    bin_out << raw_bin;
-    bin_out.close();
-  } catch (const std::exception& e) {
-    MMDEPLOY_ERROR("unhandled exception when creating tmp xml/bin: {}", e.what());
-    return Status(eFail);
-  }
+  auto ov_tensor = InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {raw_bin.size()},
+                                               InferenceEngine::Layout::C);
+  auto ov_blob = InferenceEngine::make_shared_blob<uint8_t>(ov_tensor);
+  ov_blob->allocate();
+  memcpy(ov_blob->buffer(), raw_bin.data(), ov_blob->byteSize());
 
   try {
     // create cnnnetwork
     core_ = InferenceEngine::Core();
-    network_ = core_.ReadNetwork(tmp_xml, tmp_bin);
+    network_ = core_.ReadNetwork(raw_xml, std::move(ov_blob));
 
     // set input tensor
     InferenceEngine::InputsDataMap input_info = network_.getInputsInfo();
diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile
index 7cf6d36f43..bafe94331d 100644
--- a/docker/CPU/Dockerfile
+++ b/docker/CPU/Dockerfile
@@ -1,4 +1,4 @@
-FROM openvino/ubuntu18_dev:2021.4.2
+FROM openvino/ubuntu20_dev:2022.3.0
 ARG PYTHON_VERSION=3.8
 ARG TORCH_VERSION=1.10.0
 ARG TORCHVISION_VERSION=0.11.0
@@ -57,7 +57,7 @@ RUN if [ ${USE_SRC_INSIDE} == true ] ; \
 
 RUN /opt/conda/bin/pip install torch==${TORCH_VERSION}+cpu torchvision==${TORCHVISION_VERSION}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html \
     && /opt/conda/bin/pip install --no-cache-dir openmim
-RUN /opt/conda/bin/mim install --no-cache-dir "mmcv"${MMCV_VERSION} onnxruntime==${ONNXRUNTIME_VERSION} openvino-dev mmengine${MMENGINE_VERSION}
+RUN /opt/conda/bin/mim install --no-cache-dir "mmcv"${MMCV_VERSION} onnxruntime==${ONNXRUNTIME_VERSION} openvino-dev==2022.3.0 mmengine${MMENGINE_VERSION}
 ENV PATH /opt/conda/bin:$PATH
 
 WORKDIR /root/workspace
@@ -100,14 +100,14 @@ RUN git clone -b main https://github.com/open-mmlab/mmdeploy.git &&\
     /opt/conda/bin/mim install -e .
 
 ### build SDK
-ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:/opt/intel/openvino/deployment_tools/ngraph/lib:/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64:${LD_LIBRARY_PATH}"
+ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
 RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \
     -DMMDEPLOY_BUILD_SDK=ON \
     -DMMDEPLOY_BUILD_EXAMPLES=ON \
-    -DCMAKE_CXX_COMPILER=g++-7 \
+    -DCMAKE_CXX_COMPILER=g++-9 \
     -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \
     -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn \
-    -DInferenceEngine_DIR=/opt/intel/openvino/deployment_tools/inference_engine/share \
+    -DInferenceEngine_DIR=/opt/intel/openvino/runtime/cmake \
     -DMMDEPLOY_TARGET_DEVICES=cpu \
     -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
     -DMMDEPLOY_TARGET_BACKENDS="ort;ncnn;openvino" \
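For reference, a minimal standalone sketch of the in-memory loading pattern the C++ hunk switches to, using the legacy `InferenceEngine` API that OpenVINO 2022.3 still ships. The `ReadBytes` helper and the `model.xml`/`model.bin` paths are illustrative only; in mmdeploy the two buffers come from `model.ReadFile()` instead:

```cpp
#include <inference_engine.hpp>

#include <cstring>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>

// Helper for this sketch only: slurp a whole file into memory.
static std::vector<uint8_t> ReadBytes(const std::string& path) {
  std::ifstream in(path, std::ios::binary);
  return {std::istreambuf_iterator<char>(in), std::istreambuf_iterator<char>()};
}

int main() {
  // Stand-ins for the buffers mmdeploy obtains from model.ReadFile().
  auto xml_bytes = ReadBytes("model.xml");  // hypothetical paths
  auto bin_bytes = ReadBytes("model.bin");
  std::string xml(xml_bytes.begin(), xml_bytes.end());

  // Wrap the raw weights in a 1-D U8 blob so ReadNetwork can consume
  // them straight from memory, with no temp files on disk.
  InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8, {bin_bytes.size()},
                                   InferenceEngine::Layout::C);
  auto weights = InferenceEngine::make_shared_blob<uint8_t>(desc);
  weights->allocate();
  std::memcpy(weights->buffer().as<uint8_t*>(), bin_bytes.data(), weights->byteSize());

  InferenceEngine::Core core;
  auto network = core.ReadNetwork(xml, weights);       // xml string + weights blob
  auto executable = core.LoadNetwork(network, "CPU");  // compile for the CPU plugin
  auto request = executable.CreateInferRequest();
  (void)request;  // real code would set input blobs and call request.Infer()
  return 0;
}
```

Building the weights blob in memory keeps `ReadNetwork` from ever touching the filesystem, so initialization also works on read-only deployments and no longer risks collisions on the fixed `tmp.xml`/`tmp.bin` names when several processes initialize at once.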