Merged. Changes shown are from 12 of 21 commits.
31 changes: 31 additions & 0 deletions src/frontends/paddle/src/op/one_hot_v2.cpp
@@ -0,0 +1,31 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs one_hot_v2(const NodeContext& node) {
    auto data = node.get_input("X");
    Output<Node> depth;
    if (node.has_input("depth_tensor")) {
        auto depth_value = node.get_input("depth_tensor");
        depth = std::make_shared<default_opset::Squeeze>(depth_value);
    } else {
        auto depth_value = node.get_attribute<int>("depth");
Contributor:

Hi, can we leave it as const, and also set a default value for the attribute?

Contributor Author:

1. const has been added.
2. An input without num_classes is not permitted in Paddle, so setting a default value is presumably not needed. If one were necessary, the default number of classes could be set to one greater than the largest class value in the input tensor, following the PyTorch implementation.
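A one-line sketch of that PyTorch-style fallback (illustrative only; not part of this PR):

# Hypothetical default: infer depth as the largest class index + 1, as PyTorch does
num_classes = int(x.max()) + 1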

        depth = default_opset::Constant::create(element::i32, Shape{}, {depth_value});
    }
    auto on_value = default_opset::Constant::create(element::f32, Shape{}, {1});
    auto off_value = default_opset::Constant::create(element::f32, Shape{}, {0});
    const auto indices_axis = 1;
    auto result = std::make_shared<default_opset::OneHot>(data, depth, on_value, off_value, indices_axis);
    return node.default_single_output_mapping({result}, {"Out"});
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
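For reference, a minimal NumPy sketch (not part of the PR) of the semantics this converter maps to: for the rank-1 indices exercised by the tests, OneHot with axis = 1, on_value = 1.0, and off_value = 0.0 appends a class dimension of size depth.

import numpy as np

def one_hot_reference(x, depth):
    # off_value (0.0) fills the output; on_value (1.0) marks each row's class index
    out = np.zeros((x.size, depth), dtype=np.float32)
    out[np.arange(x.size), x] = 1.0
    return out

print(one_hot_reference(np.array([1, 1, 3, 0]), 4))
# [[0. 1. 0. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 0. 1.]
#  [1. 0. 0. 0.]]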
2 changes: 2 additions & 0 deletions src/frontends/paddle/src/op_table.cpp
@@ -69,6 +69,7 @@ OP_CONVERTER(matrix_nms);
OP_CONVERTER(meshgrid);
OP_CONVERTER(multiclass_nms);
OP_CONVERTER(nearest_interp_v2);
OP_CONVERTER(one_hot_v2);
OP_CONVERTER(p_norm);
OP_CONVERTER(pad3d);
OP_CONVERTER(pow);
@@ -185,6 +186,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"nearest_interp_v2", op::nearest_interp_v2},
{"nearest_interp", op::nearest_interp_v2},
{"not_equal", op::elementwise_not_equal},
{"one_hot_v2", op::one_hot_v2},
{"p_norm", op::p_norm},
{"pad3d", op::pad3d},
{"pow", op::pow},
2 changes: 2 additions & 0 deletions src/frontends/paddle/tests/op_fuzzy.cpp
@@ -362,6 +362,8 @@ static const std::vector<std::string> models{
std::string("not_equal_float32"),
std::string("not_equal_int32"),
std::string("not_equal_int64"),
std::string("one_hot_v2_1"),
std::string("one_hot_v2_2"),
std::string("p_norm1"),
std::string("p_norm2"),
std::string("p_norm3"),
(new file: one_hot_v2 test model generator)
@@ -0,0 +1,52 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#
# one_hot_v2 paddle model generator
#
import paddle
import numpy as np
from save_model import saveModel
import sys


def one_hot_v2_1(name: str, x, num_classes):
Contributor:

Since there is some duplicated code, can we combine these two functions? You can refer to top_k_v2.

Contributor Author:

Done

    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        x_node = paddle.static.data(name="x", shape=x.shape, dtype=x.dtype)
        out = paddle.nn.functional.one_hot(x_node, num_classes=num_classes)
        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        outs = exe.run(feed={"x": x}, fetch_list=[out])
        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]

def one_hot_v2_2(name: str, x, num_classes):
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        x_node = paddle.static.data(name="x", shape=x.shape, dtype=x.dtype)
        depth_node = paddle.static.data(name="depth_tensor", shape=num_classes.shape, dtype=num_classes.dtype)
        out = paddle.nn.functional.one_hot(x_node, num_classes=depth_node)
        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        outs = exe.run(feed={"x": x, "depth_tensor": num_classes}, fetch_list=[out])
        saveModel(name, exe, feedkeys=["x", "depth_tensor"], fetchlist=[out], inputs=[x, num_classes], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]

def main():
    # int32
    data = np.array([1, 1, 3, 0]).astype("int32")
    num_classes = 4
    one_hot_v2_1("one_hot_v2_1", data, num_classes)
    # int64
    data = np.array([4, 1, 3, 3]).astype("int64")
    num_classes = np.array([5]).astype("int32")
    one_hot_v2_2("one_hot_v2_2", data, num_classes)


if __name__ == "__main__":
    main()
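Per the review above, the two generator functions were later merged. A possible combined form, as an illustrative sketch only (the actual merged version in later commits may differ; the is_tensor flag is a name introduced here, not from the PR):

def one_hot_v2(name: str, x, num_classes, is_tensor: bool):
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        x_node = paddle.static.data(name="x", shape=x.shape, dtype=x.dtype)
        if is_tensor:
            # num_classes arrives as a 1-element tensor fed at runtime
            depth = paddle.static.data(name="depth_tensor", shape=num_classes.shape, dtype=num_classes.dtype)
            feed = {"x": x, "depth_tensor": num_classes}
            feedkeys, inputs = ["x", "depth_tensor"], [x, num_classes]
        else:
            # num_classes is a plain int baked into the graph as an attribute
            depth = num_classes
            feed = {"x": x}
            feedkeys, inputs = ["x"], [x]
        out = paddle.nn.functional.one_hot(x_node, num_classes=depth)
        exe = paddle.static.Executor(paddle.CPUPlace())
        outs = exe.run(feed=feed, fetch_list=[out])
        saveModel(name, exe, feedkeys=feedkeys, fetchlist=[out], inputs=inputs, outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]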