Use ONNXRuntime for ParticleNet inference #30599

Merged: 3 commits, Jul 20, 2020
2 changes: 2 additions & 0 deletions PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h
@@ -32,12 +32,14 @@ namespace cms::Ort {
// Run inference and get outputs
// input_names: list of the names of the input nodes.
// input_values: list of input arrays for each input node. The order of `input_values` must match `input_names`.
+// input_shapes: list of `int64_t` arrays specifying the shape of each input node. Can be left empty if the model does not have dynamic axes.
// output_names: names of the output nodes to get outputs from. Empty list means all output nodes.
// batch_size: number of samples in the batch. Each array in `input_values` must have a shape layout of (batch_size, ...).
// Returns: a std::vector<std::vector<float>>, with the order matched to `output_names`.
// When `output_names` is empty, will return all outputs ordered as in `getOutputNames()`.
FloatArrays run(const std::vector<std::string>& input_names,
FloatArrays& input_values,
+const std::vector<std::vector<int64_t>>& input_shapes = {},
const std::vector<std::string>& output_names = {},
int64_t batch_size = 1) const;

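For orientation (not part of this PR), a minimal sketch of how a caller could use the new `input_shapes` argument; the model path and the node names "points" and "softmax" are placeholders rather than the actual ParticleNet configuration:

#include <vector>
#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h"

void runWithDynamicAxes() {
  using namespace cms::Ort;

  // hypothetical model path and node names, for illustration only
  ONNXRuntime rt("particlenet.onnx");

  // one input node, flattened into a single float vector
  FloatArrays input_values{std::vector<float>(1 * 3 * 50, 0.f)};

  // the shape is passed explicitly because the model has dynamic axes;
  // the batch size is then taken from input_shapes, not from the trailing batch_size argument
  FloatArrays outputs = rt.run({"points"}, input_values, {{1, 3, 50}}, {"softmax"});
}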
18 changes: 12 additions & 6 deletions PhysicsTools/ONNXRuntime/src/ONNXRuntime.cc
@@ -50,9 +50,6 @@ namespace cms::Ort {
size_t num_dims = tensor_info.GetDimensionsCount();
input_node_dims_[input_name].resize(num_dims);
tensor_info.GetDimensions(input_node_dims_[input_name].data(), num_dims);

-// set the batch size to 1 by default
-input_node_dims_[input_name].at(0) = 1;
}

size_t num_output_nodes = session_->GetOutputCount();
@@ -82,9 +79,11 @@ namespace cms::Ort {

FloatArrays ONNXRuntime::run(const std::vector<std::string>& input_names,
FloatArrays& input_values,
+const std::vector<std::vector<int64_t>>& input_shapes,
const std::vector<std::string>& output_names,
int64_t batch_size) const {
assert(input_names.size() == input_values.size());
+assert(input_shapes.empty() || input_names.size() == input_shapes.size());
assert(batch_size > 0);

// create input tensor objects from data values
@@ -95,9 +94,16 @@
if (iter == input_names.end()) {
throw cms::Exception("RuntimeError") << "Input " << name << " is not provided!";
}
-auto value = input_values.begin() + (iter - input_names.begin());
-auto input_dims = input_node_dims_.at(name);
-input_dims[0] = batch_size;
+auto input_pos = iter - input_names.begin();
+auto value = input_values.begin() + input_pos;
+std::vector<int64_t> input_dims;
+if (input_shapes.empty()) {
+  input_dims = input_node_dims_.at(name);
+  input_dims[0] = batch_size;
+} else {
+  input_dims = input_shapes[input_pos];
+  // rely on the given input_shapes to set the batch size
+}
auto expected_len = std::accumulate(input_dims.begin(), input_dims.end(), 1, std::multiplies<int64_t>());
if (expected_len != (int64_t)value->size()) {
throw cms::Exception("RuntimeError")
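A standalone sketch of the two pieces of logic added above, simplified and outside the class (an illustration, not code from this PR): caller-provided shapes take precedence, otherwise the shape recorded from the model is reused with its first axis set to the batch size, and each flattened input must contain exactly the product of its dimensions:

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// resolve the shape of one input (assumes model_dims is non-empty and its
// first axis is the batch dimension)
std::vector<int64_t> resolveShape(const std::vector<int64_t>& model_dims,
                                  const std::vector<std::vector<int64_t>>& input_shapes,
                                  size_t input_pos,
                                  int64_t batch_size) {
  if (input_shapes.empty()) {
    std::vector<int64_t> dims = model_dims;
    dims[0] = batch_size;  // overwrite the batch axis
    return dims;
  }
  return input_shapes[input_pos];  // batch size comes from the given shape
}

// expected number of floats for a given shape, as enforced by the expected_len check above
int64_t expectedLength(const std::vector<int64_t>& dims) {
  return std::accumulate(dims.begin(), dims.end(), int64_t{1}, std::multiplies<int64_t>());
}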
2 changes: 1 addition & 1 deletion PhysicsTools/ONNXRuntime/test/testONNXRuntime.cc
@@ -27,7 +27,7 @@ void testONNXRuntime::checkAll() {
std::vector<float>(batch_size * 2, 1),
};
FloatArrays outputs;
CPPUNIT_ASSERT_NO_THROW(outputs = rt.run({"X"}, input_values, {"Y"}, batch_size));
CPPUNIT_ASSERT_NO_THROW(outputs = rt.run({"X"}, input_values, {}, {"Y"}, batch_size));
CPPUNIT_ASSERT(outputs.size() == 1);
CPPUNIT_ASSERT(outputs[0].size() == batch_size);
for (const auto &v : outputs[0]) {
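For comparison (an illustrative variant, not part of the diff), the same inference could pass the shape of input "X" explicitly; the batch size is then taken from `input_shapes` rather than from the trailing argument:

#include <cstdint>
#include <vector>
#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h"

// hypothetical helper mirroring the test above, with an explicit (batch_size, 2) shape
cms::Ort::FloatArrays runWithExplicitShape(const cms::Ort::ONNXRuntime& rt, int64_t batch_size) {
  cms::Ort::FloatArrays input_values{std::vector<float>(batch_size * 2, 1)};
  return rt.run({"X"}, input_values, {{batch_size, 2}}, {"Y"});
}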
2 changes: 1 addition & 1 deletion PhysicsTools/PatAlgos/python/recoLayer0/bTagging_cff.py
@@ -244,7 +244,7 @@

# -----------------------------------
# setup ParticleNet
-from RecoBTag.MXNet.pfParticleNet_cff import _pfParticleNetJetTagsProbs, _pfParticleNetJetTagsMetaDiscrs, \
+from RecoBTag.ONNXRuntime.pfParticleNet_cff import _pfParticleNetJetTagsProbs, _pfParticleNetJetTagsMetaDiscrs, \
_pfMassDecorrelatedParticleNetJetTagsProbs, _pfMassDecorrelatedParticleNetJetTagsMetaDiscrs
# update supportedBtagDiscr
for disc in _pfParticleNetJetTagsProbs + _pfMassDecorrelatedParticleNetJetTagsProbs:
@@ -44,7 +44,7 @@ def applyDeepBtagging( process, postfix="" ) :

from RecoBTag.ONNXRuntime.pfDeepBoostedJet_cff import _pfDeepBoostedJetTagsAll as pfDeepBoostedJetTagsAll
from RecoBTag.ONNXRuntime.pfHiggsInteractionNet_cff import _pfHiggsInteractionNetTagsProbs as pfHiggsInteractionNetTagsProbs
-from RecoBTag.MXNet.pfParticleNet_cff import _pfParticleNetJetTagsAll as pfParticleNetJetTagsAll
+from RecoBTag.ONNXRuntime.pfParticleNet_cff import _pfParticleNetJetTagsAll as pfParticleNetJetTagsAll

# update slimmed jets to include particle-based deep taggers (keep same name)
# make clone for DeepTags-less slimmed AK8 jets, so output name is preserved
2 changes: 1 addition & 1 deletion RecoBTag/Configuration/python/RecoBTag_cff.py
@@ -11,7 +11,7 @@
from RecoBTag.ONNXRuntime.pfDeepDoubleX_cff import *
from RecoBTag.ONNXRuntime.pfDeepBoostedJet_cff import *
from RecoBTag.ONNXRuntime.pfHiggsInteractionNet_cff import *
-from RecoBTag.MXNet.pfParticleNet_cff import *
+from RecoBTag.ONNXRuntime.pfParticleNet_cff import *
from RecoVertex.AdaptiveVertexFinder.inclusiveVertexing_cff import *
from RecoBTag.PixelCluster.pixelClusterTagInfos_cfi import *

10 changes: 6 additions & 4 deletions RecoBTag/FeatureTools/interface/deep_helpers.h
@@ -79,21 +79,23 @@ namespace btagbtvdeep {
struct PreprocessParams {
struct VarInfo {
VarInfo() {}
-VarInfo(float median, float norm_factor, float replace_inf_value, float lower_bound, float upper_bound)
+VarInfo(float median, float norm_factor, float replace_inf_value, float lower_bound, float upper_bound, float pad)
: center(median),
norm_factor(norm_factor),
replace_inf_value(replace_inf_value),
lower_bound(lower_bound),
-upper_bound(upper_bound) {}
+upper_bound(upper_bound),
+pad(pad) {}
float center = 0;
float norm_factor = 1;
float replace_inf_value = 0;
float lower_bound = -5;
float upper_bound = 5;
+float pad = 0;
};

-unsigned var_length = 0;
-float pad = 0;
+unsigned min_length = 0;
+unsigned max_length = 0;
std::vector<std::string> var_names;
std::unordered_map<std::string, VarInfo> var_info_map;

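A minimal sketch of how the new per-variable pad value and the min_length/max_length pair might be consumed when building a fixed-size input from a variable-length candidate list (an assumption about usage, not code from this PR):

#include <vector>

// clamp a variable-length list of preprocessed values into [min_length, max_length],
// filling missing entries with the per-variable pad value
std::vector<float> padAndTruncate(std::vector<float> values,
                                  unsigned min_length,
                                  unsigned max_length,
                                  float pad) {
  if (values.size() > max_length)
    values.resize(max_length);      // drop entries beyond max_length
  if (values.size() < min_length)
    values.resize(min_length, pad); // pad short lists with the variable's pad value
  return values;
}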