Skip to content

Commit

Permalink
[Backend] Support Intel GPU with OpenVINO (PaddlePaddle#472)
Browse files Browse the repository at this point in the history
* Update ov_backend.cc

* Update ov_backend.cc

* support set openvino device
  • Loading branch information
jiangjiajun authored and felixhjh committed Nov 25, 2022
1 parent 4c4397a commit ff02ff0
Show file tree
Hide file tree
Showing 7 changed files with 24 additions and 3 deletions.
6 changes: 4 additions & 2 deletions fastdeploy/backends/openvino/ov_backend.cc
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,8 @@ bool OpenVINOBackend::InitFromPaddle(const std::string& model_file,
} else if (option_.ov_num_streams > 0) {
properties["NUM_STREAMS"] = option_.ov_num_streams;
}
compiled_model_ = core_.compile_model(model, "CPU", properties);
FDINFO << "Compile OpenVINO model on device_name:" << option.device << "." << std::endl;
compiled_model_ = core_.compile_model(model, option.device, properties);

request_ = compiled_model_.create_infer_request();
initialized_ = true;
Expand Down Expand Up @@ -255,7 +256,8 @@ bool OpenVINOBackend::InitFromOnnx(const std::string& model_file,
} else if (option_.ov_num_streams > 0) {
properties["NUM_STREAMS"] = option_.ov_num_streams;
}
compiled_model_ = core_.compile_model(model, "CPU", properties);
FDINFO << "Compile OpenVINO model on device_name:" << option.device << "." << std::endl;
compiled_model_ = core_.compile_model(model, option.device, properties);

request_ = compiled_model_.create_infer_request();

Expand Down
1 change: 1 addition & 0 deletions fastdeploy/backends/openvino/ov_backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
namespace fastdeploy {

struct OpenVINOBackendOption {
std::string device = "CPU";
int cpu_thread_num = -1;
int ov_num_streams = 1;
std::map<std::string, std::vector<int64_t>> shape_infos;
Expand Down
1 change: 1 addition & 0 deletions fastdeploy/pybind/runtime.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ void BindRuntime(pybind11::module& m) {
.def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend)
.def("use_lite_backend", &RuntimeOption::UseLiteBackend)
.def("set_paddle_mkldnn", &RuntimeOption::SetPaddleMKLDNN)
.def("set_openvino_device", &RuntimeOption::SetOpenVINODevice)
.def("enable_paddle_log_info", &RuntimeOption::EnablePaddleLogInfo)
.def("disable_paddle_log_info", &RuntimeOption::DisablePaddleLogInfo)
.def("set_paddle_mkldnn_cache_size",
Expand Down
5 changes: 5 additions & 0 deletions fastdeploy/runtime.cc
Original file line number Diff line number Diff line change
Expand Up @@ -332,6 +332,10 @@ void RuntimeOption::SetPaddleMKLDNNCacheSize(int size) {
pd_mkldnn_cache_size = size;
}

// Set the device OpenVINO runs inference on (e.g. "CPU", "AUTO", "GPU", "GPU.1").
// Only records the choice in `openvino_device`; the value is copied into
// OpenVINOBackendOption::device when the OpenVINO backend is created and is
// ultimately passed as the device_name argument of ov::Core::compile_model.
void RuntimeOption::SetOpenVINODevice(const std::string& name) {
openvino_device = name;
}

// Enable FP16 (half precision) inference for the Paddle Lite backend.
// NOTE(review): this only sets the `lite_enable_fp16` flag; presumably the
// Lite backend reads it at creation time -- confirm against the backend setup.
void RuntimeOption::EnableLiteFP16() {
lite_enable_fp16 = true;
}
Expand Down Expand Up @@ -641,6 +645,7 @@ void Runtime::CreateOpenVINOBackend() {
#ifdef ENABLE_OPENVINO_BACKEND
auto ov_option = OpenVINOBackendOption();
ov_option.cpu_thread_num = option.cpu_thread_num;
ov_option.device = option.openvino_device;
ov_option.ov_num_streams = option.ov_num_streams;
FDASSERT(option.model_format == ModelFormat::PADDLE ||
option.model_format == ModelFormat::ONNX,
Expand Down
8 changes: 8 additions & 0 deletions fastdeploy/runtime.h
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,11 @@ struct FASTDEPLOY_DECL RuntimeOption {
*/
void SetPaddleMKLDNNCacheSize(int size);

/**
* @brief Set device name for OpenVINO, default 'CPU'; can also be 'AUTO', 'GPU', 'GPU.1', etc.
*/
void SetOpenVINODevice(const std::string& name = "CPU");

/**
* @brief Set optimized model dir for Paddle Lite backend.
*/
Expand Down Expand Up @@ -344,6 +349,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
size_t trt_max_batch_size = 32;
size_t trt_max_workspace_size = 1 << 30;

// ======Only for OpenVINO Backend======
std::string openvino_device = "CPU";

// ======Only for Poros Backend=======
bool is_dynamic = false;
bool long_to_int = true;
Expand Down
1 change: 0 additions & 1 deletion fastdeploy/vision/common/processors/transform.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ namespace fastdeploy {
namespace vision {

void FuseTransforms(std::vector<std::shared_ptr<Processor>>* processors);

// Fuse Normalize + Cast(Float) to Normalize
void FuseNormalizeCast(std::vector<std::shared_ptr<Processor>>* processors);
// Fuse Normalize + HWC2CHW to NormalizeAndPermute
Expand Down
5 changes: 5 additions & 0 deletions python/fastdeploy/runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,6 +269,11 @@ def set_paddle_mkldnn(self, use_mkldnn=True):
"""
return self._option.set_paddle_mkldnn(use_mkldnn)

def set_openvino_device(self, name="CPU"):
    """Set the device name used by the OpenVINO backend.

    :param name: (str) Device name, default 'CPU'; can also be 'AUTO',
                 'GPU', 'GPU.1', etc. Forwarded unchanged to the C++
                 ``RuntimeOption`` through the ``set_openvino_device``
                 pybind binding.
    """
    return self._option.set_openvino_device(name)

def enable_paddle_log_info(self):
"""Enable print out the debug log information while using Paddle Inference backend, the log information is disabled by default.
"""
Expand Down

0 comments on commit ff02ff0

Please sign in to comment.