Skip to content

Commit eb7c9c4

Browse files
authored
[TMVA][SOFIE] Add Conv operator (#8800)
* Add Conv operator * Add tests for Conv * Small fixes * Add missing default constructor * Fix typos
1 parent 9c65dcc commit eb7c9c4

20 files changed

+790
-15
lines changed
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
11
#include "TMVA/ROperator_Transpose.hxx"
22
#include "TMVA/ROperator_Gemm.hxx"
33
#include "TMVA/ROperator_Relu.hxx"
4+
#include "TMVA/ROperator_Conv.hxx"

tmva/sofie/inc/TMVA/RModel.hxx

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,8 @@ private:
3434
std::string fFileName; //file name of original model file for identification
3535
std::string fParseTime; //UTC date and time string at parsing
3636

37-
3837
std::string fGC; //generated code
39-
bool fNeedGemm = true;
38+
std::set<std::string> fNeededBlasRoutines = {};
4039

4140
const std::vector<std::string> fAllowedStdLib = {"algorithm"};
4241
std::set<std::string> fNeededStdLib = {"vector"};
@@ -66,6 +65,11 @@ public:
6665
void AddOperator(std::unique_ptr<ROperator> op, int order_execution = -1);
6766
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data);
6867
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape);
68+
void AddBlasRoutines(std::vector<std::string> routines) {
69+
for (auto &routine : routines) {
70+
fNeededBlasRoutines.insert(routine);
71+
}
72+
}
6973
void AddNeededStdLib(std::string libname){
7074
for (auto& i: fAllowedStdLib){
7175
if ( i == libname) fNeededStdLib.insert(libname);

tmva/sofie/inc/TMVA/ROperator_Conv.hxx

Lines changed: 374 additions & 0 deletions
Large diffs are not rendered by default.

tmva/sofie/src/RModel.cxx

Lines changed: 22 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -12,27 +12,29 @@ namespace SOFIE{
1212
RModel::RModel(RModel&& other){
1313
fInputTensorInfos = std::move(other.fInputTensorInfos);
1414
fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
15+
fOutputTensorNames = other.fOutputTensorNames;
1516
fOperators = std::move(other.fOperators);
1617
fInitializedTensors = std::move(other.fInitializedTensors);
1718
fName = other.fName;
1819
fFileName = other.fFileName;
1920
fParseTime = other.fParseTime;
2021
fGC = other.fGC;
22+
fNeededBlasRoutines = other.fNeededBlasRoutines;
2123
fNeededStdLib = other.fNeededStdLib;
22-
fOutputTensorNames = other.fOutputTensorNames;
2324
}
2425

2526
RModel& RModel::operator=(RModel&& other){
2627
fInputTensorInfos = std::move(other.fInputTensorInfos);
2728
fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
29+
fOutputTensorNames = other.fOutputTensorNames;
2830
fOperators = std::move(other.fOperators);
2931
fInitializedTensors = std::move(other.fInitializedTensors);
3032
fName = other.fName;
3133
fFileName = other.fFileName;
3234
fParseTime = other.fParseTime;
3335
fGC = other.fGC;
36+
fNeededBlasRoutines = other.fNeededBlasRoutines;
3437
fNeededStdLib = other.fNeededStdLib;
35-
fOutputTensorNames = other.fOutputTensorNames;
3638
return *this;
3739
}
3840

@@ -169,17 +171,27 @@ namespace SOFIE{
169171
void RModel::Generate(){
170172
Initialize();
171173
fGC += ("//Code generated automatically by TMVA for Inference of Model file [" + fFileName + "] at [" + fParseTime.substr(0, fParseTime.length()-1) +"] \n");
172-
for (auto& i: fNeededStdLib){
174+
for (auto& i: fNeededStdLib) {
173175
fGC += "#include<" + i + ">\n";
174176
}
175177
fGC += ("namespace TMVA_SOFIE_" + fName + "{\n");
176-
if (fNeedGemm){
177-
fGC += ("namespace BLAS{\n"
178-
"\textern \"C\" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,\n"
179-
"\t const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,\n"
180-
"\t const float * beta, float * C, const int * ldc);\n"
181-
"}//BLAS\n");
182-
178+
if (!fNeededBlasRoutines.empty()) {
179+
fGC += ("namespace BLAS{\n");
180+
for (auto &routine : fNeededBlasRoutines) {
181+
if (routine == "Gemm") {
182+
fGC += ("\textern \"C\" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,\n"
183+
"\t const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,\n"
184+
"\t const float * beta, float * C, const int * ldc);\n");
185+
} else if (routine == "Gemv") {
186+
fGC += ("\textern \"C\" void sgemv_(const char * trans, const int * m, const int * n, const float * alpha, const float * A,\n"
187+
"\t const int * lda, const float * X, const int * incx, const float * beta, const float * Y, const int * incy);\n");
188+
} else if (routine == "Axpy") {
189+
fGC += ("\textern \"C\" void saxpy_(const int * n, const float * alpha, const float * x,\n"
190+
"\t const int * incx, float * y, const int * incy);\n");
191+
}
192+
}
193+
fGC += ("}//BLAS\n");
194+
}
183195
for (auto& i: fInitializedTensors){
184196
if (i.second.fType == ETensorType::FLOAT){
185197
size_t length = 1;
@@ -242,7 +254,6 @@ namespace SOFIE{
242254
fGC += "\treturn ret;\n";
243255
}
244256
fGC += "}\n";
245-
}
246257
fGC += ("} //TMVA_SOFIE_" + fName + "\n");
247258
}
248259

tmva/sofie/test/TestCustomModelsFromONNX.cxx

Lines changed: 146 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
#include <numeric>
2+
13
#include "Linear_16_FromONNX.hxx"
24
#include "input_models/references/Linear_16.ref.hxx"
35

@@ -7,6 +9,24 @@
79
#include "Linear_64_FromONNX.hxx"
810
#include "input_models/references/Linear_64.ref.hxx"
911

12+
#include "ConvWithPadding_FromONNX.hxx"
13+
#include "input_models/references/ConvWithPadding.ref.hxx"
14+
15+
#include "ConvWithoutPadding_FromONNX.hxx"
16+
#include "input_models/references/ConvWithoutPadding.ref.hxx"
17+
18+
#include "ConvWithAutopadSameLower_FromONNX.hxx"
19+
#include "input_models/references/ConvWithAutopadSameLower.ref.hxx"
20+
21+
#include "ConvWithStridesPadding_FromONNX.hxx"
22+
#include "input_models/references/ConvWithStridesPadding.ref.hxx"
23+
24+
#include "ConvWithStridesNoPadding_FromONNX.hxx"
25+
#include "input_models/references/ConvWithStridesNoPadding.ref.hxx"
26+
27+
#include "ConvWithAsymmetricPadding_FromONNX.hxx"
28+
#include "input_models/references/ConvWithAsymmetricPadding.ref.hxx"
29+
1030
#include "gtest/gtest.h"
1131

1232
constexpr float DEFAULT_TOLERANCE = 1e-6f;
@@ -72,3 +92,129 @@ TEST(ONNX, Linear64)
7292
EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
7393
}
7494
}
95+
96+
97+
TEST(ONNX, ConvWithPadding)
98+
{
99+
constexpr float TOLERANCE = DEFAULT_TOLERANCE;
100+
101+
// Preparing the input: sequential values 0,1,2,... via iota (note: reference array is named all_ones — presumably refers to the weights, not this input)
102+
std::vector<float> input(25);
103+
std::iota(input.begin(), input.end(), 0.0f);
104+
std::vector<float> output = TMVA_SOFIE_ConvWithPadding::infer(input.data());
105+
106+
// Checking output size
107+
EXPECT_EQ(output.size(), sizeof(ConvWithPadding_ExpectedOutput::all_ones) / sizeof(float));
108+
109+
float *correct = ConvWithPadding_ExpectedOutput::all_ones;
110+
111+
// Checking every output value, one by one
112+
for (size_t i = 0; i < output.size(); ++i) {
113+
EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
114+
}
115+
}
116+
117+
118+
TEST(ONNX, ConvWithoutPadding)
119+
{
120+
constexpr float TOLERANCE = DEFAULT_TOLERANCE;
121+
122+
// Preparing the input: sequential values 0,1,2,... via iota (note: reference array is named all_ones — presumably refers to the weights, not this input)
123+
std::vector<float> input(25);
124+
std::iota(input.begin(), input.end(), 0.0f);
125+
std::vector<float> output = TMVA_SOFIE_ConvWithoutPadding::infer(input.data());
126+
127+
// Checking output size
128+
EXPECT_EQ(output.size(), sizeof(ConvWithoutPadding_ExpectedOutput::all_ones) / sizeof(float));
129+
130+
float *correct = ConvWithoutPadding_ExpectedOutput::all_ones;
131+
132+
// Checking every output value, one by one
133+
for (size_t i = 0; i < output.size(); ++i) {
134+
EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
135+
}
136+
}
137+
138+
139+
TEST(ONNX, ConvWithAutopadSameLower)
140+
{
141+
constexpr float TOLERANCE = DEFAULT_TOLERANCE;
142+
143+
// Preparing the input: sequential values 0,1,2,... via iota (note: reference array is named all_ones — presumably refers to the weights, not this input)
144+
std::vector<float> input(25);
145+
std::iota(input.begin(), input.end(), 0.0f);
146+
std::vector<float> output = TMVA_SOFIE_ConvWithAutopadSameLower::infer(input.data());
147+
148+
// Checking output size
149+
EXPECT_EQ(output.size(), sizeof(ConvWithAutopadSameLower_ExpectedOutput::all_ones) / sizeof(float));
150+
151+
float *correct = ConvWithAutopadSameLower_ExpectedOutput::all_ones;
152+
153+
// Checking every output value, one by one
154+
for (size_t i = 0; i < output.size(); ++i) {
155+
EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
156+
}
157+
}
158+
159+
160+
TEST(ONNX, ConvWithStridesPadding)
161+
{
162+
constexpr float TOLERANCE = DEFAULT_TOLERANCE;
163+
164+
// Preparing the input: sequential values 0,1,2,... via iota (note: reference array is named all_ones — presumably refers to the weights, not this input)
165+
std::vector<float> input(35);
166+
std::iota(input.begin(), input.end(), 0.0f);
167+
std::vector<float> output = TMVA_SOFIE_ConvWithStridesPadding::infer(input.data());
168+
169+
// Checking output size
170+
EXPECT_EQ(output.size(), sizeof(ConvWithStridesPadding_ExpectedOutput::all_ones) / sizeof(float));
171+
172+
float *correct = ConvWithStridesPadding_ExpectedOutput::all_ones;
173+
174+
// Checking every output value, one by one
175+
for (size_t i = 0; i < output.size(); ++i) {
176+
EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
177+
}
178+
}
179+
180+
181+
TEST(ONNX, ConvWithStridesNoPadding)
182+
{
183+
constexpr float TOLERANCE = DEFAULT_TOLERANCE;
184+
185+
// Preparing the input: sequential values 0,1,2,... via iota (note: reference array is named all_ones — presumably refers to the weights, not this input)
186+
std::vector<float> input(35);
187+
std::iota(input.begin(), input.end(), 0.0f);
188+
std::vector<float> output = TMVA_SOFIE_ConvWithStridesNoPadding::infer(input.data());
189+
190+
// Checking output size
191+
EXPECT_EQ(output.size(), sizeof(ConvWithStridesNoPadding_ExpectedOutput::all_ones) / sizeof(float));
192+
193+
float *correct = ConvWithStridesNoPadding_ExpectedOutput::all_ones;
194+
195+
// Checking every output value, one by one
196+
for (size_t i = 0; i < output.size(); ++i) {
197+
EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
198+
}
199+
}
200+
201+
202+
TEST(ONNX, ConvWithAsymmetricPadding)
203+
{
204+
constexpr float TOLERANCE = DEFAULT_TOLERANCE;
205+
206+
// Preparing the input: sequential values 0,1,2,... via iota (note: reference array is named all_ones — presumably refers to the weights, not this input)
207+
std::vector<float> input(35);
208+
std::iota(input.begin(), input.end(), 0.0f);
209+
std::vector<float> output = TMVA_SOFIE_ConvWithAsymmetricPadding::infer(input.data());
210+
211+
// Checking output size
212+
EXPECT_EQ(output.size(), sizeof(ConvWithAsymmetricPadding_ExpectedOutput::all_ones) / sizeof(float));
213+
214+
float *correct = ConvWithAsymmetricPadding_ExpectedOutput::all_ones;
215+
216+
// Checking every output value, one by one
217+
for (size_t i = 0; i < output.size(); ++i) {
218+
EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE);
219+
}
220+
}

0 commit comments

Comments
 (0)