Skip to content

Commit

Permalink
Merge pull request #1 from itikhono/itikhono/pdpd/support_mul_outputs
Browse files Browse the repository at this point in the history
Add support for multiple outputs
  • Loading branch information
nosovmik authored Apr 14, 2021
2 parents 08c728c + 47cae2a commit 9174c87
Show file tree
Hide file tree
Showing 8 changed files with 139 additions and 16 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ namespace frontend {

class NGRAPH_API FrontEndPDPD : public FrontEnd
{
std::shared_ptr<Function> convert_model(std::shared_ptr<InputModelPDPD> model) const;
std::shared_ptr<Function> convert_model(const std::shared_ptr<InputModelPDPD>& model) const;
std::shared_ptr<opset6::Constant> read_tensor(std::shared_ptr<VarPlacePDPD> place,
std::shared_ptr<InputModelPDPD> model) const;
public:
Expand Down
24 changes: 11 additions & 13 deletions ngraph/frontend/paddlepaddle/src/frontend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,16 +45,16 @@ namespace frontend {
namespace pdpd {

std::shared_ptr<ngraph::Node>
make_ng_node(std::map<std::string, std::shared_ptr<ngraph::Node>> &nodes,
std::shared_ptr<OpPlacePDPD> place,
make_ng_node(std::map<std::string, Output<Node>> &nodes,
const std::shared_ptr<OpPlacePDPD>& place,
const std::map<std::string, CreatorFunction>& CREATORS_MAP) {
auto op = (paddle::framework::proto::OpDesc*)place->op;
std::cout << "Making node: " << op->type() << std::endl;

MY_ASSERT(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(), "No creator found");
std::map<std::string, std::vector<std::shared_ptr<ngraph::Node>>> inputs_preproc;
std::map<std::string, OutputVector> inputs_preproc;
for (const auto &item : place->inputs) {
inputs_preproc[item.first] = std::vector<std::shared_ptr<ngraph::Node>>();
inputs_preproc[item.first] = OutputVector();
for (auto& var_place : item.second) {
// TODO: refactor to not search every time
auto var = (paddle::framework::proto::VarDesc*)var_place.lock()->var;
Expand All @@ -68,12 +68,11 @@ make_ng_node(std::map<std::string, std::shared_ptr<ngraph::Node>> &nodes,
NamedInputs named_inputs;
for(const auto& input: inputs_preproc)
{
for(auto node: input.second)
for(const auto& node: input.second)
named_inputs[input.first].push_back(node);
}

OutputVector outputs = CREATORS_MAP.at(op->type())(NodeContext(*op, named_inputs));
MY_ASSERT(outputs.size() == 1);
return outputs[0].get_node_shared_ptr();
}

Expand Down Expand Up @@ -103,11 +102,11 @@ std::shared_ptr<opset6::Constant> FrontEndPDPD::read_tensor(std::shared_ptr<VarP
}

std::shared_ptr<Function>
FrontEndPDPD::convert_model(std::shared_ptr<InputModelPDPD> model) const
FrontEndPDPD::convert_model(const std::shared_ptr<InputModelPDPD>& model) const
{
std::cout << "Convert Model Start" << std::endl;

std::map<std::string, std::shared_ptr<Node>> nodes_dict;
std::map<std::string, Output<Node>> nodes_dict;
ParameterVector parameter_nodes;
ResultVector result_nodes;

Expand Down Expand Up @@ -165,15 +164,14 @@ std::shared_ptr<Function>
auto node = pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP);
// set layer name by the name of first output var
auto& first_output_var_place = op_place->outputs.begin()->second[0];
auto var = (paddle::framework::proto::VarDesc*)first_output_var_place.lock()->var;
const auto& var = static_cast<const paddle::framework::proto::VarDesc*>(first_output_var_place.lock()->var);
node->set_friendly_name(var->name());

std::cerr << "Named with " << node->get_friendly_name() << "\n";
for (const auto &item : op_place->outputs) {
MY_ASSERT(item.second.size() <= 1);
if (item.second.size() == 1) {
auto var = (paddle::framework::proto::VarDesc*)item.second[0].lock()->var;
nodes_dict[var->name()] = node;
for (size_t idx = 0; idx < item.second.size(); ++idx) {
auto var = (paddle::framework::proto::VarDesc *)item.second[idx].lock()->var;
nodes_dict[var->name()] = node->output(idx);
}
}
}
Expand Down
40 changes: 40 additions & 0 deletions ngraph/frontend/paddlepaddle/src/op/split.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <ngraph/opsets/opset6.hpp>
#include "split.h"
#include "utility.hpp"

namespace ngraph {
namespace frontend {
namespace pdpd {
namespace op {
OutputVector split(const NodeContext& node) {
    // Note: we are already inside namespace ngraph::frontend::pdpd::op,
    // so only the opset alias is needed (the original also pulled in
    // `namespace ngraph` redundantly and then fully qualified Split anyway).
    using namespace opset6;

    // Input tensor to be split along 'axis'.
    const auto& data = node.get_ng_input("X");

    // PDPD 'split' carries the axis as an attribute; nGraph Split expects it
    // as a scalar Constant input, so we wrap it below.
    auto dim = node.get_attribute<int32_t>("axis");

    // todo: 'num' can be list of values, in this case we should create VariadicSplit
    // todo: support VariadicSplit
    auto num_or_sections = node.get_attribute<int32_t>("num");
    auto axis = std::make_shared<Constant>(ngraph::element::i32, Shape{}, dim);

    // Split yields 'num_or_sections' equal chunks; forward all outputs so the
    // frontend can map each PDPD output var to its own Output<Node>.
    return std::make_shared<Split>(data, axis, num_or_sections)->outputs();
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph
30 changes: 30 additions & 0 deletions ngraph/frontend/paddlepaddle/src/op/split.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once
#include "node_context.hpp"

namespace ngraph {
namespace frontend {
namespace pdpd {
namespace op {

// Converter for the PaddlePaddle 'split' operator.
// Produces one Output<Node> per split chunk, enabling ops with multiple
// outputs to be mapped back to their PDPD output variables.
OutputVector split(const NodeContext& node);

} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph
4 changes: 3 additions & 1 deletion ngraph/frontend/paddlepaddle/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
#include "op/concat.hpp"
#include "op/cast.hpp"
#include "op/softmax.hpp"
#include "op/split.h"

#include "op_table.hpp"

Expand Down Expand Up @@ -59,7 +60,8 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"nearest_interp_v2", op::nearest_interp_v2},
{"concat", op::concat},
{"cast", op::cast},
{"softmax", op::softmax}
{"softmax", op::softmax},
{"split", op::split}
};
};

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import paddle
from paddle import fluid
import numpy as np

# it's better to use PYTHON_PATH
# import sys
# sys.path.append('/home/itikhonov/OpenVINO/openvino/bin/intel64/Debug/lib/python_api/python3.6/')
# from openvino.inference_engine import IECore


def create_multi_output_model():
    """Build, run and save a PDPD model whose 'split' layer has multiple
    consumed outputs, producing the 'multi_output_split' frontend test model.

    The scraped original had all indentation stripped (syntactically invalid
    Python); structure is restored here without changing any statement.
    """
    paddle.enable_static()

    # PDPD model creation and inference
    num_splits_1 = 10
    inp_blob_1 = np.random.randn(2, num_splits_1, 4, 4).astype(np.float32)

    x = fluid.data(name='x', shape=[2, num_splits_1, 4, 4], dtype='float32')
    test_layer = fluid.layers.split(x, num_or_sections=10, dim=1)

    # Pairwise-add consecutive split outputs so several of the op's outputs
    # are actually consumed downstream (the multi-output case under test).
    var = []
    for i in range(num_splits_1 // 2):
        add = fluid.layers.elementwise_add(test_layer[2 * i], test_layer[2 * i + 1])
        var.append(add)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    inp_dict = {'x': inp_blob_1}
    res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)

    fluid.io.save_inference_model("../models/multi_output_split",
                                  list(inp_dict.keys()), var, exe,
                                  model_filename="multi_output_split.pdmodel",
                                  params_filename="multi_output_split.pdiparams")

    # IE inference (kept commented for manual cross-checking vs PDPD results)
    # ie = IECore()
    # path_to_ie_model = "../models/multi_output_split/multi_output_split"
    # net = ie.read_network(model=path_to_ie_model + ".xml", weights=path_to_ie_model + ".bin")
    # exec_net = ie.load_network(net, "CPU")
    # res = exec_net.infer({'x': inp_blob_1})
    #
    # # compare results: IE vs PDPD
    # idx = 0
    # for key in res:
    #     comp = np.all(np.isclose(res_pdpd[idx], res[key], rtol=1e-05, atol=1e-08, equal_nan=False))
    #     assert comp, "PDPD and IE results are different"
    #     idx = idx + 1


create_multi_output_model()

Binary file not shown.
3 changes: 2 additions & 1 deletion ngraph/test/frontend/paddlepaddle/basic_api.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,12 @@ static const std::vector<std::string> models {
std::string("conv2d_s/conv2d.pdmodel"),
std::string("conv2d_relu/conv2d_relu.pdmodel"),
std::string("2in_2out/2in_2out.pdmodel"),
std::string("multi_output_split/multi_output_split.pdmodel"),
};

INSTANTIATE_TEST_CASE_P(PDPDBasicTest, FrontEndBasicTest,
::testing::Combine(
::testing::Values(PDPD),
::testing::Values(PATH_TO_MODELS),
::testing::ValuesIn(models)),
FrontEndBasicTest::getTestCaseName);
FrontEndBasicTest::getTestCaseName);

0 comments on commit 9174c87

Please sign in to comment.