4 changes: 3 additions & 1 deletion Configuration/ProcessModifiers/python/allSonicTriton_cff.py
@@ -5,6 +5,8 @@
from Configuration.ProcessModifiers.particleNetPTSonicTriton_cff import particleNetPTSonicTriton
from Configuration.ProcessModifiers.deepMETSonicTriton_cff import deepMETSonicTriton
from Configuration.ProcessModifiers.deepTauSonicTriton_cff import deepTauSonicTriton
from Configuration.ProcessModifiers.particleTransformerAK4SonicTriton_cff import particleTransformerAK4SonicTriton

# collect all SonicTriton-related process modifiers here
allSonicTriton = cms.ModifierChain(enableSonicTriton,deepMETSonicTriton,particleNetSonicTriton,deepTauSonicTriton)
allSonicTriton = cms.ModifierChain(enableSonicTriton,deepMETSonicTriton,particleNetSonicTriton,particleNetPTSonicTriton,deepTauSonicTriton,particleTransformerAK4SonicTriton)

3 changes: 3 additions & 0 deletions Configuration/ProcessModifiers/python/particleTransformerAK4SonicTriton_cff.py
@@ -0,0 +1,3 @@
import FWCore.ParameterSet.Config as cms

particleTransformerAK4SonicTriton = cms.Modifier()
1 change: 1 addition & 0 deletions RecoBTag/ONNXRuntime/BuildFile.xml
@@ -1,3 +1,4 @@
<use name="PhysicsTools/ONNXRuntime"/>
<use name="DataFormats/BTauReco"/>
<export>
<lib name="1"/>
45 changes: 45 additions & 0 deletions RecoBTag/ONNXRuntime/interface/tensor_configs.h
@@ -1,6 +1,7 @@
#ifndef RecoBTag_ONNXRuntime_tensor_configs_h
#define RecoBTag_ONNXRuntime_tensor_configs_h

#include <map>
namespace deepflavour {

constexpr unsigned n_features_global = 15;
@@ -28,4 +29,48 @@ namespace deepvertex {

} // namespace deepvertex

namespace parT {

enum InputFeatures {
kChargedCandidates=0,
kNeutralCandidates=1,
kVertices=2,
kChargedCandidates4Vec=3,
kNeutralCandidates4Vec=4,
kVertices4Vec=5
};

const std::map<unsigned int, InputFeatures> InputIndexes{

Reviewer comment: as a method to loop through all the enum values, this is somewhat redundant (it requires adding the same information in two places: the enum definition and this map). I checked with core software and they recommend something like the following:

  enum InputFeatures {
    kBegin=0,
    kChargedCandidates=kBegin,
    kNeutralCandidates=1,
    kVertices=2,
    kChargedCandidates4Vec=3,
    kNeutralCandidates4Vec=4,
    kVertices4Vec=5,
    kEnd=6
  };

then you can just loop from kBegin to kEnd.
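
For illustration, a minimal sketch of that loop (this assumes the enum in namespace parT gains the kBegin/kEnd sentinels shown above; it is not code from this PR):

  // Visit every input group without maintaining a parallel index map.
  for (unsigned int i = parT::kBegin; i != parT::kEnd; ++i) {
    const auto ifeature = static_cast<parT::InputFeatures>(i);
    // e.g. look up the per-group feature count directly from the enum value
    const unsigned int n_features = parT::N_InputFeatures.at(ifeature);
    // ... size buffers / fill tensors for this input group ...
  }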

Author reply: okay that will help...

{0, kChargedCandidates},
{1, kNeutralCandidates},
{2, kVertices},
{3, kChargedCandidates4Vec},
{4, kNeutralCandidates4Vec},
{5, kVertices4Vec}
};

constexpr unsigned n_cpf_accept = 25;
constexpr unsigned n_npf_accept = 25;
constexpr unsigned n_sv_accept = 5;

const std::map<InputFeatures, unsigned int> N_InputFeatures{
{kChargedCandidates, 16},
{kNeutralCandidates, 8},
{kVertices, 14},
{kChargedCandidates4Vec, 4},
{kNeutralCandidates4Vec, 4},
{kVertices4Vec, 4}
};

const std::map<InputFeatures, unsigned int> N_AcceptedFeatures{
{kChargedCandidates, n_cpf_accept},
{kNeutralCandidates, n_npf_accept},
{kVertices, n_sv_accept},
{kChargedCandidates4Vec, n_cpf_accept},
{kNeutralCandidates4Vec, n_npf_accept},
{kVertices4Vec, n_sv_accept}
};

} // namespace parT

#endif
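
As a usage note (a sketch on the assumption that callers derive tensor sizes only from the enum and these two maps, not code from this PR): the flattened length of an input group follows directly from N_AcceptedFeatures and N_InputFeatures, e.g.

  #include "RecoBTag/ONNXRuntime/interface/tensor_configs.h"

  // Maximum flattened size of the charged-candidate block:
  // 25 accepted candidates * 16 features each = 400 floats.
  const unsigned int max_cpf_floats =
      parT::n_cpf_accept * parT::N_InputFeatures.at(parT::kChargedCandidates);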
38 changes: 38 additions & 0 deletions RecoBTag/ONNXRuntime/interface/tensor_fillers.h
@@ -2,6 +2,9 @@
#define RecoBTag_ONNXRuntime_tensor_fillers_h

#include "DataFormats/BTauReco/interface/DeepFlavourTagInfo.h"
#include "DataFormats/BTauReco/interface/ParticleTransformerAK4Features.h"
#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h"
#include "RecoBTag/ONNXRuntime/interface/tensor_configs.h"

namespace btagbtvdeep {

@@ -18,6 +21,41 @@ namespace btagbtvdeep {
void seedTrack_tensor_filler(float*& ptr, const btagbtvdeep::SeedingTrackFeatures& seed_features);

void neighbourTrack_tensor_filler(float*& ptr, const btagbtvdeep::TrackPairFeatures& neighbourTrack_features);

std::vector<float> inputs_parT(const btagbtvdeep::ChargedCandidateFeatures& c_pf_features, parT::InputFeatures ifeature);

std::vector<float> inputs_parT(const btagbtvdeep::NeutralCandidateFeatures& n_pf_features, parT::InputFeatures ifeature);

std::vector<float> inputs_parT(const btagbtvdeep::SecondaryVertexFeatures& sv_features, parT::InputFeatures ifeature);

template<class parT_features>
void parT_tensor_filler(cms::Ort::FloatArrays& data, const parT::InputFeatures ifeature, const std::vector<parT_features>& features, const unsigned int max_n, const float*& start, unsigned offset) {
float* ptr = nullptr;
for (std::size_t n = 0; n < max_n; n++) {
const auto& f = features.at(n);
ptr = &data[ifeature][offset + n * parT::N_InputFeatures.at(ifeature)];
start = ptr;
const std::vector<float>& inputs = inputs_parT(f, ifeature);
for (unsigned int i = 0; i < inputs.size(); i++) {
*ptr = inputs[i];
++ptr;
}
if (inputs.size() > 0) --ptr;
assert(start + parT::N_InputFeatures.at(ifeature) - 1 == ptr);
}
}

template<class parT_features>
void parT_tensor_filler(std::vector<float>& vdata, const parT::InputFeatures ifeature, const std::vector<parT_features>& features, const unsigned int target_n) {
unsigned int n = std::clamp((unsigned int)features.size(), (unsigned int)0, (unsigned int)parT::N_AcceptedFeatures.at(ifeature));
for (unsigned int count = 0; count < n; count++) {
const std::vector<float>& inputs = inputs_parT(features.at(count), ifeature);
vdata.insert(vdata.end(), inputs.begin(), inputs.end());
}
unsigned int n_features = parT::N_InputFeatures.at(ifeature);
if (n < target_n)
vdata.insert(vdata.end(), (target_n - n) * n_features, 0); // Add 0 to unfilled part as padding value
}

} // namespace btagbtvdeep
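
To show how the new std::vector<float> overload might be used, here is a minimal, hypothetical caller (the function name flatten_cpf and the choice of target length are assumptions for illustration, not part of this PR):

  #include <vector>
  #include "RecoBTag/ONNXRuntime/interface/tensor_fillers.h"

  // Flatten up to n_cpf_accept charged candidates into one contiguous buffer,
  // zero-padded to a fixed length, as a flat inference input would need.
  std::vector<float> flatten_cpf(const std::vector<btagbtvdeep::ChargedCandidateFeatures>& c_pf_features) {
    std::vector<float> vdata;
    btagbtvdeep::parT_tensor_filler(vdata, parT::kChargedCandidates, c_pf_features, parT::n_cpf_accept);
    // with the zero padding above, vdata.size() ends up as
    // n_cpf_accept * N_InputFeatures.at(kChargedCandidates) = 25 * 16
    return vdata;
  }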

RecoBTag/ONNXRuntime/plugins/ParticleTransformerAK4ONNXJetTagsProducer.cc
@@ -16,6 +16,9 @@

#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h"

#include "RecoBTag/ONNXRuntime/interface/tensor_fillers.h"
#include "RecoBTag/ONNXRuntime/interface/tensor_configs.h"

using namespace cms::Ort;

class ParticleTransformerAK4ONNXJetTagsProducer : public edm::stream::EDProducer<edm::GlobalCache<ONNXRuntime>> {
@@ -27,7 +30,7 @@ class ParticleTransformerAK4ONNXJetTagsProducer : public edm::stream::EDProducer

static std::unique_ptr<ONNXRuntime> initializeGlobalCache(const edm::ParameterSet&);
static void globalEndJob(const ONNXRuntime*);

private:
typedef std::vector<reco::ParticleTransformerAK4TagInfo> TagInfoCollection;
typedef reco::JetTagCollection JetTagCollection;
@@ -41,24 +44,9 @@ class ParticleTransformerAK4ONNXJetTagsProducer : public edm::stream::EDProducer
std::vector<std::string> flav_names_;
std::vector<std::string> input_names_;
std::vector<std::string> output_names_;

enum InputIndexes {
kChargedCandidates = 0,
kNeutralCandidates = 1,
kVertices = 2,
kChargedCandidates4Vec = 3,
kNeutralCandidates4Vec = 4,
kVertices4Vec = 5
};
unsigned n_cpf_;
constexpr static unsigned n_features_cpf_ = 16;
constexpr static unsigned n_pairwise_features_cpf_ = 4;
unsigned n_npf_;
constexpr static unsigned n_features_npf_ = 8;
constexpr static unsigned n_pairwise_features_npf_ = 4;
unsigned n_sv_;
constexpr static unsigned n_features_sv_ = 14;
constexpr static unsigned n_pairwise_features_sv_ = 4;
unsigned int n_cpf_;
unsigned int n_npf_;
unsigned int n_sv_;
std::vector<unsigned> input_sizes_;
std::vector<std::vector<int64_t>> input_shapes_; // shapes of each input group (-1 for dynamic axis)

@@ -84,7 +72,7 @@ void ParticleTransformerAK4ONNXJetTagsProducer::fillDescriptions(edm::Configurat
desc.add<edm::InputTag>("src", edm::InputTag("pfParticleTransformerAK4TagInfos"));
desc.add<std::vector<std::string>>("input_names", {"input_1", "input_2", "input_3", "input_4", "input_5", "input_6"});
desc.add<edm::FileInPath>("model_path",
edm::FileInPath("RecoBTag/Combined/data/RobustParTAK4/PUPPI/V00/RobustParTAK4.onnx"));
edm::FileInPath("RecoBTag/Combined/data/RobustParTAK4/PUPPI/V00/modelfile/model.onnx"));
desc.add<std::vector<std::string>>("output_names", {"softmax"});
desc.add<std::vector<std::string>>(
"flav_names", std::vector<std::string>{"probb", "probbb", "problepb", "probc", "probuds", "probg"});
@@ -124,12 +112,12 @@ void ParticleTransformerAK4ONNXJetTagsProducer::produce(edm::Event& iEvent, cons
get_input_sizes(taginfo);

// run prediction with dynamic batch size per event
input_shapes_ = {{(int64_t)1, (int64_t)n_cpf_, (int64_t)n_features_cpf_},
{(int64_t)1, (int64_t)n_npf_, (int64_t)n_features_npf_},
{(int64_t)1, (int64_t)n_sv_, (int64_t)n_features_sv_},
{(int64_t)1, (int64_t)n_cpf_, (int64_t)n_pairwise_features_cpf_},
{(int64_t)1, (int64_t)n_npf_, (int64_t)n_pairwise_features_npf_},
{(int64_t)1, (int64_t)n_sv_, (int64_t)n_pairwise_features_sv_}};
input_shapes_ = {{(int64_t)1, (int64_t)n_cpf_, (int64_t)parT::N_InputFeatures.at(parT::kChargedCandidates)},
{(int64_t)1, (int64_t)n_npf_, (int64_t)parT::N_InputFeatures.at(parT::kNeutralCandidates)},
{(int64_t)1, (int64_t)n_sv_, (int64_t)parT::N_InputFeatures.at(parT::kVertices)},
{(int64_t)1, (int64_t)n_cpf_, (int64_t)parT::N_InputFeatures.at(parT::kChargedCandidates4Vec)},
{(int64_t)1, (int64_t)n_npf_, (int64_t)parT::N_InputFeatures.at(parT::kNeutralCandidates4Vec)},
{(int64_t)1, (int64_t)n_sv_, (int64_t)parT::N_InputFeatures.at(parT::kVertices4Vec)}};

outputs = globalCache()->run(input_names_, data_, input_shapes_, output_names_, 1)[0];
assert(outputs.size() == flav_names_.size());
@@ -151,24 +139,17 @@ void ParticleTransformerAK4ONNXJetTagsProducer::get_input_sizes(
const reco::FeaturesTagInfo<btagbtvdeep::ParticleTransformerAK4Features> taginfo) {
const auto& features = taginfo.features();

unsigned int n_cpf = features.c_pf_features.size();
unsigned int n_npf = features.n_pf_features.size();
unsigned int n_vtx = features.sv_features.size();

n_cpf_ = std::max((unsigned int)1, n_cpf);
n_npf_ = std::max((unsigned int)1, n_npf);
n_sv_ = std::max((unsigned int)1, n_vtx);
n_cpf_ = std::clamp((unsigned int)features.c_pf_features.size(), (unsigned int)1, (unsigned int)parT::n_cpf_accept);
n_npf_ = std::clamp((unsigned int)features.n_pf_features.size(), (unsigned int)1, (unsigned int)parT::n_npf_accept);
n_sv_ = std::clamp((unsigned int)features.sv_features.size(), (unsigned int)1, (unsigned int)parT::n_sv_accept);

n_cpf_ = std::min((unsigned int)25, n_cpf_);
n_npf_ = std::min((unsigned int)25, n_npf_);
n_sv_ = std::min((unsigned int)5, n_sv_);
input_sizes_ = {
n_cpf_ * n_features_cpf_,
n_npf_ * n_features_npf_,
n_sv_ * n_features_sv_,
n_cpf_ * n_pairwise_features_cpf_,
n_npf_ * n_pairwise_features_npf_,
n_sv_ * n_pairwise_features_sv_,
n_cpf_ * parT::N_InputFeatures.at(parT::kChargedCandidates),
n_npf_ * parT::N_InputFeatures.at(parT::kNeutralCandidates),
n_sv_ * parT::N_InputFeatures.at(parT::kVertices),
n_cpf_ * parT::N_InputFeatures.at(parT::kChargedCandidates4Vec),
n_npf_ * parT::N_InputFeatures.at(parT::kNeutralCandidates4Vec),
n_sv_ * parT::N_InputFeatures.at(parT::kVertices4Vec),
};
// init data storage
data_.clear();
@@ -180,116 +161,26 @@ void ParticleTransformerAK4ONNXJetTagsProducer::get_input_sizes(
}

void ParticleTransformerAK4ONNXJetTagsProducer::make_inputs(btagbtvdeep::ParticleTransformerAK4Features features) {
float* ptr = nullptr;
//float* ptr = nullptr;

Reviewer comment: delete commented-out code

const float* start = nullptr;
unsigned offset = 0;

// c_pf candidates
auto max_c_pf_n = std::min(features.c_pf_features.size(), (std::size_t)n_cpf_);
for (std::size_t c_pf_n = 0; c_pf_n < max_c_pf_n; c_pf_n++) {
const auto& c_pf_features = features.c_pf_features.at(c_pf_n);
ptr = &data_[kChargedCandidates][offset + c_pf_n * n_features_cpf_];
start = ptr;
*ptr = c_pf_features.btagPf_trackEtaRel;
*(++ptr) = c_pf_features.btagPf_trackPtRel;
*(++ptr) = c_pf_features.btagPf_trackPPar;
*(++ptr) = c_pf_features.btagPf_trackDeltaR;
*(++ptr) = c_pf_features.btagPf_trackPParRatio;
*(++ptr) = c_pf_features.btagPf_trackSip2dVal;
*(++ptr) = c_pf_features.btagPf_trackSip2dSig;
*(++ptr) = c_pf_features.btagPf_trackSip3dVal;
*(++ptr) = c_pf_features.btagPf_trackSip3dSig;
*(++ptr) = c_pf_features.btagPf_trackJetDistVal;
*(++ptr) = c_pf_features.ptrel;
*(++ptr) = c_pf_features.drminsv;
*(++ptr) = c_pf_features.vtx_ass;
*(++ptr) = c_pf_features.puppiw;
*(++ptr) = c_pf_features.chi2;
*(++ptr) = c_pf_features.quality;
assert(start + n_features_cpf_ - 1 == ptr);
}

// n_pf candidates
auto max_n_pf_n = std::min(features.n_pf_features.size(), (std::size_t)n_npf_);
for (std::size_t n_pf_n = 0; n_pf_n < max_n_pf_n; n_pf_n++) {
const auto& n_pf_features = features.n_pf_features.at(n_pf_n);
ptr = &data_[kNeutralCandidates][offset + n_pf_n * n_features_npf_];
start = ptr;
*ptr = n_pf_features.ptrel;
*(++ptr) = n_pf_features.etarel;
*(++ptr) = n_pf_features.phirel;
*(++ptr) = n_pf_features.deltaR;
*(++ptr) = n_pf_features.isGamma;
*(++ptr) = n_pf_features.hadFrac;
*(++ptr) = n_pf_features.drminsv;
*(++ptr) = n_pf_features.puppiw;
assert(start + n_features_npf_ - 1 == ptr);
}

// sv candidates
auto max_sv_n = std::min(features.sv_features.size(), (std::size_t)n_sv_);
for (std::size_t sv_n = 0; sv_n < max_sv_n; sv_n++) {
const auto& sv_features = features.sv_features.at(sv_n);
ptr = &data_[kVertices][offset + sv_n * n_features_sv_];
start = ptr;
*ptr = sv_features.pt;
*(++ptr) = sv_features.deltaR;
*(++ptr) = sv_features.mass;
*(++ptr) = sv_features.etarel;
*(++ptr) = sv_features.phirel;
*(++ptr) = sv_features.ntracks;
*(++ptr) = sv_features.chi2;
*(++ptr) = sv_features.normchi2;
*(++ptr) = sv_features.dxy;
*(++ptr) = sv_features.dxysig;
*(++ptr) = sv_features.d3d;
*(++ptr) = sv_features.d3dsig;
*(++ptr) = sv_features.costhetasvpv;
*(++ptr) = sv_features.enratio;
assert(start + n_features_sv_ - 1 == ptr);
}

// c_pf candidates
parT_tensor_filler(data_, parT::kChargedCandidates, features.c_pf_features, max_c_pf_n, start, offset);
// n_pf candidates
parT_tensor_filler(data_, parT::kNeutralCandidates, features.n_pf_features, max_n_pf_n, start, offset);
// sv candidates
parT_tensor_filler(data_, parT::kVertices, features.sv_features, max_sv_n, start, offset);
// cpf pairwise features (4-vectors)
auto max_cpf_n = std::min(features.c_pf_features.size(), (std::size_t)n_cpf_);
for (std::size_t cpf_n = 0; cpf_n < max_cpf_n; cpf_n++) {
const auto& cpf_pairwise_features = features.c_pf_features.at(cpf_n);
ptr = &data_[kChargedCandidates4Vec][offset + cpf_n * n_pairwise_features_cpf_];
start = ptr;
*ptr = cpf_pairwise_features.px;
*(++ptr) = cpf_pairwise_features.py;
*(++ptr) = cpf_pairwise_features.pz;
*(++ptr) = cpf_pairwise_features.e;

assert(start + n_pairwise_features_cpf_ - 1 == ptr);
}

parT_tensor_filler(data_, parT::kChargedCandidates4Vec, features.c_pf_features, max_c_pf_n, start, offset);
// npf pairwise features (4-vectors)
auto max_npf_n = std::min(features.n_pf_features.size(), (std::size_t)n_npf_);
for (std::size_t npf_n = 0; npf_n < max_npf_n; npf_n++) {
const auto& npf_pairwise_features = features.n_pf_features.at(npf_n);
ptr = &data_[kNeutralCandidates4Vec][offset + npf_n * n_pairwise_features_npf_];
start = ptr;
*ptr = npf_pairwise_features.px;
*(++ptr) = npf_pairwise_features.py;
*(++ptr) = npf_pairwise_features.pz;
*(++ptr) = npf_pairwise_features.e;

assert(start + n_pairwise_features_npf_ - 1 == ptr);
}

parT_tensor_filler(data_, parT::kNeutralCandidates4Vec, features.n_pf_features, max_n_pf_n, start, offset);
// sv pairwise features (4-vectors)
auto max_sv_N = std::min(features.sv_features.size(), (std::size_t)n_sv_);
for (std::size_t sv_N = 0; sv_N < max_sv_N; sv_N++) {
const auto& sv_pairwise_features = features.sv_features.at(sv_N);
ptr = &data_[kVertices4Vec][offset + sv_N * n_pairwise_features_sv_];
start = ptr;
*ptr = sv_pairwise_features.px;
*(++ptr) = sv_pairwise_features.py;
*(++ptr) = sv_pairwise_features.pz;
*(++ptr) = sv_pairwise_features.e;

assert(start + n_pairwise_features_sv_ - 1 == ptr);
}
parT_tensor_filler(data_, parT::kVertices4Vec, features.sv_features, max_sv_n, start, offset);
}

//define this as a plug-in