@@ -128,23 +128,35 @@ class NgraphCustomOp: public ngraph::op::Op {
         return true;
     }

-private:
     std::map<std::string, InferenceEngine::Parameter> params;
 };


 class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl
 {
 public:
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    explicit InfEngineNgraphCustomLayer(const std::shared_ptr<ngraph::Node>& _node)
+    {
+        node = std::dynamic_pointer_cast<NgraphCustomOp>(_node);
+        CV_Assert(node);
+        std::string implStr = node->params["impl"];
+        std::istringstream iss(implStr);
+#else
     explicit InfEngineNgraphCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer)
     {
         std::istringstream iss(layer.GetParamAsString("impl"));
+#endif
         size_t ptr;
         iss >> ptr;
         cvLayer = (Layer*)ptr;

         std::vector<std::vector<size_t> > shapes;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+        strToShapes(node->params["internals"], shapes);
+#else
         strToShapes(layer.GetParamAsString("internals"), shapes);
+#endif
         internals.resize(shapes.size());
         for (int i = 0; i < shapes.size(); ++i)
             internals[i].create(std::vector<int>(shapes[i].begin(), shapes[i].end()), CV_32F);
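Note: both constructor branches recover the OpenCV dnn::Layer pointer by parsing a decimal size_t out of the "impl" parameter (iss >> ptr; cvLayer = (Layer*)ptr;). A minimal sketch of the round trip this decoding assumes on the producing side; the actual parameter setup lives elsewhere in ie_ngraph.cpp and may differ, so the helper names here are purely illustrative:

// Sketch only: a pointer serialized as a decimal string, matching the
// istringstream decoding used by InfEngineNgraphCustomLayer above.
#include <cstddef>
#include <sstream>
#include <string>

static std::string encodeLayerPtr(const void* layer)
{
    // Produces e.g. "140245893..." which the constructor reads back with iss >> ptr.
    return std::to_string(reinterpret_cast<size_t>(layer));
}

static void* decodeLayerPtr(const std::string& s)
{
    std::istringstream iss(s);
    size_t ptr = 0;
    iss >> ptr;
    return reinterpret_cast<void*>(ptr);
}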
@@ -180,6 +192,29 @@ class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl
     {
         std::vector<InferenceEngine::DataConfig> inDataConfig;
         std::vector<InferenceEngine::DataConfig> outDataConfig;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+        InferenceEngine::SizeVector order;
+        size_t offset = std::numeric_limits<size_t>::max();
+        for (int i = 0; i < node->get_input_size(); ++i)
+        {
+            InferenceEngine::DataConfig conf;
+            auto shape = node->input_value(i).get_shape();
+            order.resize(shape.size());
+            std::iota(order.begin(), order.end(), 0);
+            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
+            inDataConfig.push_back(conf);
+        }
+
+        for (int i = 0; i < node->get_output_size(); ++i)
+        {
+            InferenceEngine::DataConfig conf;
+            auto shape = node->output(i).get_shape();
+            order.resize(shape.size());
+            std::iota(order.begin(), order.end(), 0);
+            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
+            outDataConfig.push_back(conf);
+        }
+#else
         for (auto& it : cnnLayer.insData)
         {
             InferenceEngine::DataConfig conf;
@@ -193,6 +228,7 @@ class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl
             conf.desc = it->getTensorDesc();
             outDataConfig.push_back(conf);
         }
+#endif

         InferenceEngine::LayerConfig layerConfig;
         layerConfig.inConfs = inDataConfig;
@@ -209,12 +245,16 @@ class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl
     }

 private:
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    std::shared_ptr<NgraphCustomOp> node;
+#else
     InferenceEngine::CNNLayer cnnLayer;
+#endif
     dnn::Layer* cvLayer;
     std::vector<Mat> internals;
 };

-
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
 class InfEngineNgraphCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
 public:
     explicit InfEngineNgraphCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer)
@@ -233,17 +273,29 @@ class InfEngineNgraphCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
 private:
     InferenceEngine::CNNLayer cnnLayer;
 };
+#endif


 class InfEngineNgraphExtension : public InferenceEngine::IExtension
 {
 public:
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
+    void Unload() noexcept override {}
+    void Release() noexcept override { delete this; }
+    void GetVersion(const InferenceEngine::Version*&) const noexcept override {}
+
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override {
+        return {"CPU"};
+    }
+
+    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override {
+        if (std::dynamic_pointer_cast<NgraphCustomOp>(node) && implType == "CPU") {
+            return std::make_shared<InfEngineNgraphCustomLayer>(node);
+        }
+        return nullptr;
+    }
+#else
     virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
-#endif
-    virtual void Unload() noexcept {}
-    virtual void Release() noexcept {}
-    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}

     virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
                                                           InferenceEngine::ResponseDesc*) noexcept
@@ -260,6 +312,7 @@ class InfEngineNgraphExtension : public InferenceEngine::IExtension
         factory = new InfEngineNgraphCustomLayerFactory(cnnLayer);
         return InferenceEngine::StatusCode::OK;
     }
+#endif
 };

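Note: for IE releases >= 2020.2 the extension exposes custom layers through getImplTypes()/getImplementation() instead of the legacy factory path. A minimal sketch of how such an extension is typically handed to the Inference Engine Core; the exact call site in OpenCV's nGraph backend is not part of this diff, so the helper function here is an assumption:

// Sketch only: registering InfEngineNgraphExtension with the IE Core so that
// NgraphCustomOp nodes resolve to InfEngineNgraphCustomLayer on CPU.
#include <memory>
#include <inference_engine.hpp>

static void registerNgraphExtension(InferenceEngine::Core& ie)
{
    // AddExtension takes a shared_ptr<IExtension> plus the target device;
    // the Core then queries getImplTypes()/getImplementation() whenever it
    // encounters an operation it does not know.
    ie.AddExtension(std::make_shared<InfEngineNgraphExtension>(), "CPU");
}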