Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
107 changes: 90 additions & 17 deletions runtime/onert/core/src/exec/Execution.test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -29,10 +29,10 @@ namespace

using namespace onert::ir;

class CompiledMockUpModel
class MockUpModel
{
public:
CompiledMockUpModel()
MockUpModel()
{
// Model: two elementwise add operation
// model input: lhs, rhs1
Expand Down Expand Up @@ -76,12 +76,17 @@ class CompiledMockUpModel
graph->addOutput(operand_result2);
graph->verify();

// Compile
// Initialize compile option
coptions = onert::compiler::CompilerOptions::fromGlobalConfig();
}

void compile()
{
auto model = std::make_shared<onert::ir::Model>();
model->push(onert::ir::SubgraphIndex{0}, graph);
coptions = onert::compiler::CompilerOptions::fromGlobalConfig();
auto compiler = onert::compiler::CompilerFactory::get().create(std::make_unique<NNPkg>(model),
coptions.get());
// Compile copied nnpkg to handle multiple compilation
auto compiler = onert::compiler::CompilerFactory::get().create(
std::make_unique<onert::ir::NNPkg>(model), coptions.get());
artifact = compiler->compile();
}

Expand Down Expand Up @@ -301,8 +306,8 @@ class CompiledMockUpQuantModelToFloat

TEST(ExecInstance, simple)
{
auto mockup = CompiledMockUpModel();
auto graph = mockup.graph;
auto mockup = MockUpModel();
mockup.compile();
auto executors = mockup.artifact->_executors;

auto input1 = IOIndex{0};
Expand All @@ -327,10 +332,75 @@ TEST(ExecInstance, simple)
}
}

// Dynamic shape inference: resize both inputs from the model's static
// shape to {2, 2, 2, 1}, run, and check that the output shape follows
// the new input shape and the computed values are correct.
TEST(ExecInstance, shapeinf)
{
  auto mockup = MockUpModel();
  mockup.compile();
  auto executors = mockup.artifact->_executors;

  const auto lhs_index = IOIndex{0};
  const auto rhs_index = IOIndex{1};
  const auto out_index = IOIndex{0};

  const onert::ir::Shape resized_shape{2, 2, 2, 1};
  const float lhs_data[8] = {1, 0, -1, -2, 1, 2, 0, -1};
  const float rhs_data[8] = {1, -3, 2, -4, 4, -2, 3, 1};
  const float expected[8] = {5, -2, 0, -1, 8, 1, 2, 5};
  float actual[8] = {};

  onert::exec::Execution execution{executors};

  // Apply the new shape to both inputs, then bind buffers and run.
  execution.changeInputShape(lhs_index, resized_shape);
  execution.changeInputShape(rhs_index, resized_shape);
  execution.setInput(lhs_index, lhs_data, sizeof(lhs_data));
  execution.setInput(rhs_index, rhs_data, sizeof(rhs_data));
  execution.setOutput(out_index, actual, sizeof(actual));
  execution.execute();

  // Output shape must have been re-inferred to match the resized inputs.
  EXPECT_EQ(execution.outputInfo(0).shape(), resized_shape);
  for (size_t i = 0; i < 8; ++i)
  {
    EXPECT_EQ(actual[i], expected[i]);
  }
}

// Dynamic shape inference with an internally-allocated output buffer
// (internal_output_alloc = true): no setOutput() is called; after
// execute(), the runtime-owned output buffer is fetched via
// executors->outputBuffer() and checked against the expected values.
TEST(ExecInstance, internaloutput_shapeinf)
{
  auto mockup = MockUpModel();
  mockup.coptions->internal_output_alloc = true;
  mockup.compile();
  auto executors = mockup.artifact->_executors;

  auto input1 = IOIndex{0};
  auto input2 = IOIndex{1};
  auto output = IOIndex{0};

  const onert::ir::Shape new_shape{2, 2, 2, 1};
  const float input1_buffer[8] = {1, 0, -1, -2, 1, 2, 0, -1};
  const float input2_buffer[8] = {1, -3, 2, -4, 4, -2, 3, 1};
  const float output_expected[8] = {5, -2, 0, -1, 8, 1, 2, 5};

  onert::exec::Execution execution{executors};

  execution.changeInputShape(input1, new_shape);
  execution.changeInputShape(input2, new_shape);
  // Use sizeof instead of a hard-coded byte count (was 32) so the size
  // stays in sync with the buffer definitions, consistent with the
  // sibling tests; the casts to const void* were redundant (implicit
  // pointer conversion) and are dropped.
  execution.setInput(input1, input1_buffer, sizeof(input1_buffer));
  execution.setInput(input2, input2_buffer, sizeof(input2_buffer));
  // No setOutput(): the output buffer is allocated by the runtime.
  execution.execute();

  const float *output_buffer = reinterpret_cast<const float *>(executors->outputBuffer(output));
  ASSERT_NE(output_buffer, nullptr);
  EXPECT_EQ(execution.outputInfo(0).shape(), new_shape);
  for (auto i = 0; i < 8; i++)
  {
    EXPECT_EQ(output_buffer[i], output_expected[i]);
  }
}

TEST(ExecInstance, neg_small_outputbuffer)
{
auto mockup = CompiledMockUpModel();
auto graph = mockup.graph;
auto mockup = MockUpModel();
mockup.compile();
auto executors = mockup.artifact->_executors;

auto input1 = IOIndex{0};
Expand All @@ -351,8 +421,8 @@ TEST(ExecInstance, neg_small_outputbuffer)

TEST(ExecInstance, neg_small_inoutsize)
{
auto mockup = CompiledMockUpModel();
auto graph = mockup.graph;
auto mockup = MockUpModel();
mockup.compile();
auto executors = mockup.artifact->_executors;

auto input1 = IOIndex{0};
Expand Down Expand Up @@ -385,7 +455,8 @@ TEST(ExecInstance, neg_small_inoutsize)

TEST(ExecInstance, twoCompile)
{
auto mockup = CompiledMockUpModel();
auto mockup = MockUpModel();
mockup.compile();
auto graph = mockup.graph;
auto executors1 = mockup.artifact->_executors;
onert::exec::Execution execution1{executors1};
Expand Down Expand Up @@ -434,7 +505,8 @@ TEST(ExecInstance, twoCompile)
// Support two initialized execution instance then ordered execution
TEST(ExecInstance, twoExecution)
{
auto mockup = CompiledMockUpModel();
auto mockup = MockUpModel();
mockup.compile();
auto executors = mockup.artifact->_executors;
auto input1 = IOIndex{0};
auto input2 = IOIndex{1};
Expand Down Expand Up @@ -533,7 +605,8 @@ class Inference
// Support multi-thread execution
TEST(ExecInstance, twoThreads)
{
auto mockup = CompiledMockUpModel();
auto mockup = MockUpModel();
mockup.compile();
auto executors = mockup.artifact->_executors;

const float exe1_input1_buffer[4] = {1, 0, -1, -2};
Expand Down Expand Up @@ -566,8 +639,8 @@ TEST(ExecInstance, twoThreads)
// Support asynchronous execution
TEST(ExecInstance, async)
{
auto mockup = CompiledMockUpModel();
auto graph = mockup.graph;
auto mockup = MockUpModel();
mockup.compile();
auto executors = mockup.artifact->_executors;

auto input1 = IOIndex{0};
Expand Down
12 changes: 4 additions & 8 deletions runtime/onert/core/src/exec/SingleModelExecutors.cc
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,8 @@ void SingleModelExecutors::execute(ExecutionContext &ctx)
throw std::runtime_error{"Output tensor must be IOTensor"};
bool skip_set_output = output_io_tensor->hasBackendTensor();

// Output is optional if buffer is nullptr, and optional output's size is 0
// If buffer is nullptr, output is optional or internally allocated buffer,
// and optional output's size is 0
if (desc.buffer == nullptr && (desc.size != 0 || desc.info.total_size() != 0) &&
!skip_set_output)
throw std::runtime_error{"Output " + std::to_string(i) + "'s buffer is not set."};
Expand All @@ -110,13 +111,8 @@ void SingleModelExecutors::execute(ExecutionContext &ctx)
// Get dynamic shape inference result
for (uint32_t i = 0; i < outputs.size(); i++)
{
if (ctx.desc.outputs[i].buffer == nullptr)
{
// Output is optional if buffer is nullptr
continue;
}

ctx.desc.outputs[i].info.shape(outputs[i]->getShape());
const auto output_io_tensor = outputTensor(ir::IOIndex{i});
ctx.desc.outputs[i].info.shape(output_io_tensor->get_info().shape());
}
}

Expand Down