[Bug] Fix build error (#2112)
Fix the paddle2onnx build error.
jiangjiajun committed Jul 17, 2023
1 parent 681ccc4 commit f413e02
Showing 32 changed files with 597 additions and 176 deletions.
19 changes: 15 additions & 4 deletions paddle2onnx/command.py
@@ -129,6 +129,12 @@ def arg_parser():
         type=ast.literal_eval,
         default=False,
         help="Whether export FP16 model for ORT-GPU, default False")
+    parser.add_argument(
+        "--custom_ops",
+        type=_text_type,
+        default="{}",
+        help="Ops that needs to be converted to custom op, e.g --custom_ops '{\"paddle_op\":\"onnx_op\"}', default {}"
+    )
     return parser


@@ -144,12 +150,14 @@ def c_paddle_to_onnx(model_file,
                      deploy_backend="onnxruntime",
                      calibration_file="",
                      external_file="",
-                     export_fp16_model=False):
+                     export_fp16_model=False,
+                     custom_ops={}):
     import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o
     onnx_model_str = c_p2o.export(
         model_file, params_file, opset_version, auto_upgrade_opset, verbose,
-        enable_onnx_checker, enable_experimental_op, enable_optimize, {},
-        deploy_backend, calibration_file, external_file, export_fp16_model)
+        enable_onnx_checker, enable_experimental_op, enable_optimize,
+        custom_ops, deploy_backend, calibration_file, external_file,
+        export_fp16_model)
     if save_file is not None:
         with open(save_file, "wb") as f:
             f.write(onnx_model_str)
@@ -235,6 +243,8 @@ def main():
             os.mkdir(base_path)
         external_file = os.path.join(base_path, args.external_filename)

+    custom_ops_dict = eval(args.custom_ops)
+
    calibration_file = args.save_calibration_file
    c_paddle_to_onnx(
        model_file=model_file,
@@ -249,7 +259,8 @@ def main():
         deploy_backend=args.deploy_backend,
         calibration_file=calibration_file,
         external_file=external_file,
-        export_fp16_model=args.export_fp16_model)
+        export_fp16_model=args.export_fp16_model,
+        custom_ops=custom_ops_dict)
     logging.info("===============Make PaddlePaddle Better!================")
     logging.info("A little survey: https://iwenjuan.baidu.com/?code=r8hu2s")
     return
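
A note on the round trip the new flag makes: --custom_ops carries a Python dict literal in string form, mapping a Paddle op type to the ONNX custom op it should export as; main() turns it into a dict and forwards it to c_paddle_to_onnx, which hands it to the C++ exporter. A minimal sketch of the parsing step, with ast.literal_eval standing in for the eval() the commit actually uses (that substitution is an editorial assumption, shown because literal_eval accepts the same dict literal while refusing to execute arbitrary code):

import ast

# The value as it arrives from the command line, per the --help text above:
#   --custom_ops '{"paddle_op": "onnx_op"}'
raw = '{"paddle_op": "onnx_op"}'

# main() runs: custom_ops_dict = eval(args.custom_ops)
# ast.literal_eval parses the same literal without evaluating code.
custom_ops_dict = ast.literal_eval(raw)
assert custom_ops_dict == {"paddle_op": "onnx_op"}

# The dict is then forwarded as c_paddle_to_onnx(..., custom_ops=custom_ops_dict).
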
29 changes: 24 additions & 5 deletions paddle2onnx/converter.cc
@@ -137,7 +137,8 @@ PADDLE2ONNX_DECL bool Export(
     bool enable_onnx_checker, bool enable_experimental_op, bool enable_optimize,
     CustomOp* ops, int op_count, const char* deploy_backend,
     char** calibration_cache, int* calibration_size, const char* external_file,
-    bool* save_external, bool export_fp16_model) {
+    bool* save_external, bool export_fp16_model, char** disable_fp16_op_types,
+    int disable_fp16_op_types_count) {
   auto parser = PaddleParser();
   P2OLogger(verbose) << "Start to parsing Paddle model..." << std::endl;
   if (!parser.Init(model, params)) {
@@ -158,12 +159,20 @@ PADDLE2ONNX_DECL bool Export(
       me.custom_ops[op_name] = export_op_name;
     }
   }
-
+  // Add disabled fp16 op information
+  std::vector<std::string> disable_op_types;
+  if (disable_fp16_op_types != nullptr && disable_fp16_op_types_count > 0) {
+    for (int i = 0; i < disable_fp16_op_types_count; ++i) {
+      std::string disable_op_type(disable_fp16_op_types[i],
+                                  strlen(disable_fp16_op_types[i]));
+      disable_op_types.push_back(disable_op_type);
+    }
+  }
   std::string calibration_str;
   std::string result = me.Run(
       parser, opset_version, auto_upgrade_opset, verbose, enable_onnx_checker,
       enable_experimental_op, enable_optimize, deploy_backend, &calibration_str,
-      external_file, save_external, export_fp16_model);
+      external_file, save_external, export_fp16_model, disable_op_types);
   if (result.empty()) {
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
@@ -193,7 +202,8 @@ PADDLE2ONNX_DECL bool Export(
     bool enable_experimental_op, bool enable_optimize, CustomOp* ops,
     int op_count, const char* deploy_backend, char** calibration_cache,
     int* calibration_size, const char* external_file, bool* save_external,
-    bool export_fp16_model) {
+    bool export_fp16_model, char** disable_fp16_op_types,
+    int disable_fp16_op_types_count) {
   auto parser = PaddleParser();
   P2OLogger(verbose) << "Start to parsing Paddle model..." << std::endl;
   if (!parser.Init(model_buffer, model_size, params_buffer, params_size)) {
@@ -214,11 +224,20 @@ PADDLE2ONNX_DECL bool Export(
       me.custom_ops[op_name] = export_op_name;
     }
   }
+  // Add disabled fp16 op information
+  std::vector<std::string> disable_op_types;
+  if (disable_fp16_op_types != nullptr && disable_fp16_op_types_count > 0) {
+    for (int i = 0; i < disable_fp16_op_types_count; ++i) {
+      std::string disable_op_type(disable_fp16_op_types[i],
+                                  strlen(disable_fp16_op_types[i]));
+      disable_op_types.push_back(disable_op_type);
+    }
+  }
   std::string calibration_str;
   std::string result = me.Run(
       parser, opset_version, auto_upgrade_opset, verbose, enable_onnx_checker,
       enable_experimental_op, enable_optimize, deploy_backend, &calibration_str,
-      external_file, save_external, export_fp16_model);
+      external_file, save_external, export_fp16_model, disable_op_types);
   if (result.empty()) {
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
6 changes: 4 additions & 2 deletions paddle2onnx/converter.h
100755 → 100644
@@ -57,7 +57,8 @@ PADDLE2ONNX_DECL bool Export(
     const char* deploy_backend = "onnxruntime",
     char** calibration_cache = nullptr, int* calibration_size = 0,
     const char* external_file = "", bool* save_external = nullptr,
-    bool export_fp16_model = false);
+    bool export_fp16_model = false, char** disable_fp16_op_types = nullptr,
+    int disable_fp16_op_types_count = 0);

 PADDLE2ONNX_DECL bool Export(
     const void* model_buffer, int64_t model_size, const void* params_buffer,
@@ -68,7 +69,8 @@ PADDLE2ONNX_DECL bool Export(
     const char* deploy_backend = "onnxruntime",
     char** calibration_cache = nullptr, int* calibration_size = 0,
     const char* external_file = "", bool* save_external = nullptr,
-    bool export_fp16_model = false);
+    bool export_fp16_model = false, char** disable_fp16_op_types = nullptr,
+    int disable_fp16_op_types_count = 0);

 // Following are inside usage, will remove it maybe
 struct PADDLE2ONNX_DECL ModelTensorInfo {
114 changes: 67 additions & 47 deletions paddle2onnx/mapper/activation.cc
@@ -186,7 +186,7 @@ void SwishMapper::Opset7() {
   auto output_info = GetOutput("Out");

   std::string beta_node =
-      helper_->Constant({1}, GetOnnxDtype(input_info[0].dtype), beta_);
+      helper_->Constant({}, GetOnnxDtype(input_info[0].dtype), beta_);
   // TODO(jiangjiajun) eliminate multiply with a constant of value 1
   // TODO(jiangjiajun) eliminate add with a constant of value 0
   auto beta_x_node = helper_->MakeNode("Mul", {input_info[0].name, beta_node});
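
The repeated Constant({1}, ...) to Constant({}, ...) change in this file swaps shape-[1] constants for rank-0 (scalar) constants. The values are identical, but broadcasting an op over a rank-0 input and a shape-[1] constant promotes the result to rank 1, which would leave scalar inputs with a spurious dimension. A numpy sketch of the difference (numpy follows the same broadcasting rules as ONNX here; the beta value 1.702 is illustrative only, not taken from the commit):

import numpy as np

x = np.float32(2.0)                       # rank-0 input tensor
beta_vec = np.array([1.702], np.float32)  # old: Constant({1}, ...) has shape (1,)
beta_scalar = np.float32(1.702)           # new: Constant({}, ...) has shape ()

print((x * beta_vec).shape)     # (1,)  the scalar input is promoted to rank 1
print((x * beta_scalar).shape)  # ()    the input's rank is preserved
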
@@ -200,9 +200,9 @@ void HardSwishMapper::Opset7() {
   auto output_info = GetOutput("Out");

   std::string scale_node =
-      helper_->Constant({1}, GetOnnxDtype(input_info[0].dtype), scale_);
+      helper_->Constant({}, GetOnnxDtype(input_info[0].dtype), scale_);
   std::string offset_node =
-      helper_->Constant({1}, GetOnnxDtype(input_info[0].dtype), offset_);
+      helper_->Constant({}, GetOnnxDtype(input_info[0].dtype), offset_);

   auto add_node = helper_->MakeNode("Add", {input_info[0].name, offset_node});
   auto clip_node =
@@ -239,11 +239,11 @@ void GeluMapper::Opset9() {
   double scale_value = 0.5;
   double const_1_value = 1.0;
   auto sqrt_2 =
-      helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::FLOAT, sqrt_2_value);
+      helper_->Constant({}, ONNX_NAMESPACE::TensorProto::FLOAT, sqrt_2_value);
   auto scale =
-      helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::FLOAT, scale_value);
+      helper_->Constant({}, ONNX_NAMESPACE::TensorProto::FLOAT, scale_value);
   auto const_1 =
-      helper_->Constant({1}, ONNX_NAMESPACE::TensorProto::FLOAT, const_1_value);
+      helper_->Constant({}, ONNX_NAMESPACE::TensorProto::FLOAT, const_1_value);

   auto input_name = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
                                       P2ODataType::FP32);
@@ -268,26 +268,34 @@
 void SoftMaxMapper::Opset7() {
   auto input_info = GetInput("X");
   auto output_info = GetOutput("Out");
-  if (axis_ < 0) {
-    axis_ = axis_ + output_info[0].Rank();
-  }
-  if (axis_ == output_info[0].Rank() - 1) {
-    auto node = helper_->MakeNode("Softmax", {input_info[0].name},
-                                  {output_info[0].name});
-    AddAttribute(node, "axis", axis_);
+  if (input_info[0].Rank() == 0) {
+    auto unsqueeze = helper_->Unsqueeze(input_info[0].name, {0});
+    auto node = helper_->MakeNode("Softmax", {unsqueeze});
+    AddAttribute(node, "axis", static_cast<int64_t>(0));
+    helper_->Squeeze(node->output(0), output_info[0].name, {0});
   } else {
-    std::vector<int64_t> perm = Arange(0, output_info[0].Rank());
-    perm[output_info[0].Rank() - 1] = axis_;
-    perm[axis_] = output_info[0].Rank() - 1;
-    auto transpose_node = helper_->MakeNode("Transpose", {input_info[0].name});
-    AddAttribute(transpose_node, "perm", perm);
-    auto softmax_node =
-        helper_->MakeNode("Softmax", {transpose_node->output(0)});
-    int64_t axis_last = -1;
-    AddAttribute(softmax_node, "axis", axis_last);
-    auto transpose_node_last = helper_->MakeNode(
-        "Transpose", {softmax_node->output(0)}, {output_info[0].name});
-    AddAttribute(transpose_node_last, "perm", perm);
+    if (axis_ < 0) {
+      axis_ = axis_ + output_info[0].Rank();
+    }
+    if (axis_ == output_info[0].Rank() - 1) {
+      auto node = helper_->MakeNode("Softmax", {input_info[0].name},
+                                    {output_info[0].name});
+      AddAttribute(node, "axis", axis_);
+    } else {
+      std::vector<int64_t> perm = Arange(0, output_info[0].Rank());
+      perm[output_info[0].Rank() - 1] = axis_;
+      perm[axis_] = output_info[0].Rank() - 1;
+      auto transpose_node =
+          helper_->MakeNode("Transpose", {input_info[0].name});
+      AddAttribute(transpose_node, "perm", perm);
+      auto softmax_node =
+          helper_->MakeNode("Softmax", {transpose_node->output(0)});
+      int64_t axis_last = -1;
+      AddAttribute(softmax_node, "axis", axis_last);
+      auto transpose_node_last = helper_->MakeNode(
+          "Transpose", {softmax_node->output(0)}, {output_info[0].name});
+      AddAttribute(transpose_node_last, "perm", perm);
+    }
   }
 }
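
Both Softmax mappers, and LogSoftmaxMapper further down, gain the same rank-0 path: ONNX Softmax expects an axis to normalize over, so a 0-d input is unsqueezed to shape [1], softmaxed along axis 0, then squeezed back to a scalar. A numpy sketch of what the exported Unsqueeze -> Softmax -> Squeeze subgraph computes (illustration only; over a single element the softmax is always 1.0):

import numpy as np

def softmax_rank0(x):
    v = np.reshape(x, (1,))     # Unsqueeze: rank 0 -> rank 1
    e = np.exp(v - v.max())
    out = e / e.sum()           # Softmax over axis 0 gives [1.0]
    return np.reshape(out, ())  # Squeeze: back to rank 0

print(softmax_rank0(np.float32(3.5)))  # 1.0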

@@ -296,9 +304,16 @@ void SoftMaxMapper::Opset13() {
   GetAttr("axis", &axis);
   auto input_info = GetInput("X");
   auto output_info = GetOutput("Out");
-  auto node =
-      helper_->MakeNode("Softmax", {input_info[0].name}, {output_info[0].name});
-  AddAttribute(node, "axis", axis);
+  if (input_info[0].Rank() == 0) {
+    auto unsqueeze = helper_->Unsqueeze(input_info[0].name, {0});
+    auto node = helper_->MakeNode("Softmax", {unsqueeze});
+    AddAttribute(node, "axis", static_cast<int64_t>(0));
+    helper_->Squeeze(node->output(0), output_info[0].name, {0});
+  } else {
+    auto node = helper_->MakeNode("Softmax", {input_info[0].name},
+                                  {output_info[0].name});
+    AddAttribute(node, "axis", axis);
+  }
 }

 void BReluMapper::Opset7() {
@@ -357,7 +372,6 @@ void SizeMapper::Opset7() {
   auto out_info = GetOutput("Out");
   auto output =
       helper_->MakeNode("Size", {GetInput("Input")[0].name})->output(0);
-  output = helper_->Reshape(output, {-1});
   output = helper_->AutoCast(output, out_info[0].name, P2ODataType::INT64,
                              out_info[0].dtype);
 }
@@ -382,21 +396,28 @@ void LogSigmoidMapper::Opset7() {
 void LogSoftmaxMapper::Opset7() {
   auto input_info = GetInput("X");
   auto axis = axis_;
-  if (axis < 0) {
-    axis += input_info[0].Rank();
-  }
-  if (axis == input_info[0].Rank() - 1) {
-    auto node = helper_->MakeNode("LogSoftmax", {input_info[0].name},
-                                  {GetOutput("Out")[0].name});
-    AddAttribute(node, "axis", axis);
+  if (input_info[0].Rank() == 0) {
+    auto unsqueeze = helper_->Unsqueeze(input_info[0].name, {0});
+    auto node = helper_->MakeNode("LogSoftmax", {unsqueeze});
+    AddAttribute(node, "axis", static_cast<int64_t>(0));
+    helper_->Squeeze(node->output(0), GetOutput("Out")[0].name, {0});
   } else {
-    auto perm = Arange(0, input_info[0].Rank());
-    perm[input_info[0].Rank() - 1] = axis;
-    perm[axis] = input_info[0].Rank() - 1;
-    auto output = helper_->Transpose(input_info[0].name, perm);
-    auto node = helper_->MakeNode("LogSoftmax", {output});
-    AddAttribute(node, "axis", int64_t(-1));
-    helper_->Transpose(node->output(0), GetOutput("Out")[0].name, perm);
+    if (axis < 0) {
+      axis += input_info[0].Rank();
+    }
+    if (axis == input_info[0].Rank() - 1) {
+      auto node = helper_->MakeNode("LogSoftmax", {input_info[0].name},
+                                    {GetOutput("Out")[0].name});
+      AddAttribute(node, "axis", axis);
+    } else {
+      auto perm = Arange(0, input_info[0].Rank());
+      perm[input_info[0].Rank() - 1] = axis;
+      perm[axis] = input_info[0].Rank() - 1;
+      auto output = helper_->Transpose(input_info[0].name, perm);
+      auto node = helper_->MakeNode("LogSoftmax", {output});
+      AddAttribute(node, "axis", int64_t(-1));
+      helper_->Transpose(node->output(0), GetOutput("Out")[0].name, perm);
+    }
   }
 }

@@ -420,7 +441,7 @@ void ThresholdedReluMapper::Opset10() {
 void Log1PMapper::Opset7() {
   auto x_info = GetInput("X");
   auto out_info = GetOutput("Out");
-  auto one = helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), float(1.0));
+  auto one = helper_->Constant({}, GetOnnxDtype(x_info[0].dtype), float(1.0));
   auto input = helper_->MakeNode("Add", {x_info[0].name, one})->output(0);
   helper_->MakeNode("Log", {input}, {out_info[0].name});
 }
@@ -429,7 +450,7 @@ void Log2Mapper::Opset7() {
   auto x_info = GetInput("X");
   auto out_info = GetOutput("Out");
   double ln2 = 0.693147180559945309;
-  auto ln2_tensor = helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), ln2);
+  auto ln2_tensor = helper_->Constant({}, GetOnnxDtype(x_info[0].dtype), ln2);
   auto output = helper_->MakeNode("Log", {x_info[0].name})->output(0);
   helper_->MakeNode("Div", {output, ln2_tensor}, {out_info[0].name});
 }
@@ -438,8 +459,7 @@ void Log10Mapper::Opset7() {
   auto x_info = GetInput("X");
   auto out_info = GetOutput("Out");
   double ln10 = 2.30258509299404568401;
-  auto ln10_tensor =
-      helper_->Constant({1}, GetOnnxDtype(x_info[0].dtype), ln10);
+  auto ln10_tensor = helper_->Constant({}, GetOnnxDtype(x_info[0].dtype), ln10);
   auto output = helper_->MakeNode("Log", {x_info[0].name})->output(0);
   helper_->MakeNode("Div", {output, ln10_tensor}, {out_info[0].name});
 }
3 changes: 1 addition & 2 deletions paddle2onnx/mapper/elementwise.cc
100644 → 100755
@@ -108,9 +108,8 @@ void ElementWiseModMapper::Opset10() {
   auto abs_y_node = helper_->MakeNode("Abs", {input_y_info[0].name});

   auto dtype = input_y_info[0].dtype;
-  std::vector<float> val_0 = {0.0};

-  std::string zero_node = helper_->Constant(GetOnnxDtype(dtype), val_0);
+  std::string zero_node = helper_->Constant({}, GetOnnxDtype(dtype), 0.0);

   auto mod_node =
       helper_->MakeNode("Mod", {abs_x_node->output(0), abs_y_node->output(0)});
(Diffs for the remaining 27 changed files are not shown.)
