Skip to content

Commit e86800f

Browse files
committed
[onert] Add shape inference test after compile
This commit adds a shape inference test to the Execution tests to verify shape inference after compilation. It tests two cases: external output buffer and internal output buffer allocation. ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh <[email protected]>
1 parent ea01780 commit e86800f

File tree

2 files changed

+93
-25
lines changed

2 files changed

+93
-25
lines changed

runtime/onert/core/src/exec/Execution.test.cc

Lines changed: 89 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,10 @@ namespace
2929

3030
using namespace onert::ir;
3131

32-
class CompiledMockUpModel
32+
class MockUpModel
3333
{
3434
public:
35-
CompiledMockUpModel()
35+
MockUpModel()
3636
{
3737
// Model: two elementwise add operation
3838
// model input: lhs, rhs1
@@ -76,12 +76,17 @@ class CompiledMockUpModel
7676
graph->addOutput(operand_result2);
7777
graph->verify();
7878

79-
// Compile
79+
// Initialize compile option
80+
coptions = onert::compiler::CompilerOptions::fromGlobalConfig();
81+
}
82+
83+
void compile()
84+
{
8085
auto model = std::make_shared<onert::ir::Model>();
8186
model->push(onert::ir::SubgraphIndex{0}, graph);
82-
coptions = onert::compiler::CompilerOptions::fromGlobalConfig();
83-
auto compiler = onert::compiler::CompilerFactory::get().create(std::make_unique<NNPkg>(model),
84-
coptions.get());
87+
// Compile copied nnpkg to handle multiple compilation
88+
auto compiler = onert::compiler::CompilerFactory::get().create(
89+
std::make_unique<onert::ir::NNPkg>(model), coptions.get());
8590
artifact = compiler->compile();
8691
}
8792

@@ -301,8 +306,8 @@ class CompiledMockUpQuantModelToFloat
301306

302307
TEST(ExecInstance, simple)
303308
{
304-
auto mockup = CompiledMockUpModel();
305-
auto graph = mockup.graph;
309+
auto mockup = MockUpModel();
310+
mockup.compile();
306311
auto executors = mockup.artifact->_executors;
307312

308313
auto input1 = IOIndex{0};
@@ -327,10 +332,74 @@ TEST(ExecInstance, simple)
327332
}
328333
}
329334

335+
TEST(ExecInstance, shapeinf)
336+
{
337+
auto mockup = MockUpModel();
338+
mockup.compile();
339+
auto executors = mockup.artifact->_executors;
340+
341+
auto input1 = IOIndex{0};
342+
auto input2 = IOIndex{1};
343+
auto output = IOIndex{0};
344+
345+
const onert::ir::Shape new_shape{2, 2, 2, 1};
346+
const float input1_buffer[8] = {1, 0, -1, -2, 1, 2, 0, -1};
347+
const float input2_buffer[8] = {1, -3, 2, -4, 4, -2, 3, 1};
348+
float output_buffer[8] = {};
349+
const float output_expected[8] = {5, -2, 0, -1, 8, 1, 2, 5};
350+
351+
onert::exec::Execution execution{executors};
352+
353+
execution.changeInputShape(input1, new_shape);
354+
execution.changeInputShape(input2, new_shape);
355+
execution.setInput(input1, reinterpret_cast<const void *>(input1_buffer), 32);
356+
execution.setInput(input2, reinterpret_cast<const void *>(input2_buffer), 32);
357+
execution.setOutput(output, reinterpret_cast<void *>(output_buffer), 32);
358+
execution.execute();
359+
360+
EXPECT_EQ(execution.outputInfo(0).shape(), new_shape);
361+
for (auto i = 0; i < 8; i++)
362+
{
363+
EXPECT_EQ(output_buffer[i], output_expected[i]);
364+
}
365+
}
366+
367+
TEST(ExecInstance, internaloutput_shapeinf)
368+
{
369+
auto mockup = MockUpModel();
370+
mockup.coptions->internal_output_alloc = true;
371+
mockup.compile();
372+
auto executors = mockup.artifact->_executors;
373+
374+
auto input1 = IOIndex{0};
375+
auto input2 = IOIndex{1};
376+
auto output = IOIndex{0};
377+
378+
const onert::ir::Shape new_shape{2, 2, 2, 1};
379+
const float input1_buffer[8] = {1, 0, -1, -2, 1, 2, 0, -1};
380+
const float input2_buffer[8] = {1, -3, 2, -4, 4, -2, 3, 1};
381+
const float output_expected[8] = {5, -2, 0, -1, 8, 1, 2, 5};
382+
383+
onert::exec::Execution execution{executors};
384+
385+
execution.changeInputShape(input1, new_shape);
386+
execution.changeInputShape(input2, new_shape);
387+
execution.setInput(input1, reinterpret_cast<const void *>(input1_buffer), 32);
388+
execution.setInput(input2, reinterpret_cast<const void *>(input2_buffer), 32);
389+
execution.execute();
390+
391+
const float *output_buffer = reinterpret_cast<const float *>(executors->outputBuffer(output));
392+
EXPECT_EQ(execution.outputInfo(0).shape(), new_shape);
393+
for (auto i = 0; i < 8; i++)
394+
{
395+
EXPECT_EQ(output_buffer[i], output_expected[i]);
396+
}
397+
}
398+
330399
TEST(ExecInstance, neg_small_outputbuffer)
331400
{
332-
auto mockup = CompiledMockUpModel();
333-
auto graph = mockup.graph;
401+
auto mockup = MockUpModel();
402+
mockup.compile();
334403
auto executors = mockup.artifact->_executors;
335404

336405
auto input1 = IOIndex{0};
@@ -351,8 +420,8 @@ TEST(ExecInstance, neg_small_outputbuffer)
351420

352421
TEST(ExecInstance, neg_small_inoutsize)
353422
{
354-
auto mockup = CompiledMockUpModel();
355-
auto graph = mockup.graph;
423+
auto mockup = MockUpModel();
424+
mockup.compile();
356425
auto executors = mockup.artifact->_executors;
357426

358427
auto input1 = IOIndex{0};
@@ -385,7 +454,8 @@ TEST(ExecInstance, neg_small_inoutsize)
385454

386455
TEST(ExecInstance, twoCompile)
387456
{
388-
auto mockup = CompiledMockUpModel();
457+
auto mockup = MockUpModel();
458+
mockup.compile();
389459
auto graph = mockup.graph;
390460
auto executors1 = mockup.artifact->_executors;
391461
onert::exec::Execution execution1{executors1};
@@ -434,7 +504,8 @@ TEST(ExecInstance, twoCompile)
434504
// Support two initialized execution instance then ordered execution
435505
TEST(ExecInstance, twoExecution)
436506
{
437-
auto mockup = CompiledMockUpModel();
507+
auto mockup = MockUpModel();
508+
mockup.compile();
438509
auto executors = mockup.artifact->_executors;
439510
auto input1 = IOIndex{0};
440511
auto input2 = IOIndex{1};
@@ -533,7 +604,8 @@ class Inference
533604
// Support multi-thread execution
534605
TEST(ExecInstance, twoThreads)
535606
{
536-
auto mockup = CompiledMockUpModel();
607+
auto mockup = MockUpModel();
608+
mockup.compile();
537609
auto executors = mockup.artifact->_executors;
538610

539611
const float exe1_input1_buffer[4] = {1, 0, -1, -2};
@@ -566,8 +638,8 @@ TEST(ExecInstance, twoThreads)
566638
// Support asynchronous execution
567639
TEST(ExecInstance, async)
568640
{
569-
auto mockup = CompiledMockUpModel();
570-
auto graph = mockup.graph;
641+
auto mockup = MockUpModel();
642+
mockup.compile();
571643
auto executors = mockup.artifact->_executors;
572644

573645
auto input1 = IOIndex{0};

runtime/onert/core/src/exec/SingleModelExecutors.cc

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,8 @@ void SingleModelExecutors::execute(ExecutionContext &ctx)
9494
throw std::runtime_error{"Output tensor must be IOTensor"};
9595
bool skip_set_output = output_io_tensor->hasBackendTensor();
9696

97-
// Output is optional if buffer is nullptr, and optional output's size is 0
97+
// If buffer is nullptr, output is optional or internally allocated buffer,
98+
// and optional output's size is 0
9899
if (desc.buffer == nullptr && (desc.size != 0 || desc.info.total_size() != 0) &&
99100
!skip_set_output)
100101
throw std::runtime_error{"Output " + std::to_string(i) + "'s buffer is not set."};
@@ -110,13 +111,8 @@ void SingleModelExecutors::execute(ExecutionContext &ctx)
110111
// Get dynamic shape inference result
111112
for (uint32_t i = 0; i < outputs.size(); i++)
112113
{
113-
if (ctx.desc.outputs[i].buffer == nullptr)
114-
{
115-
// Output is optional if buffer is nullptr
116-
continue;
117-
}
118-
119-
ctx.desc.outputs[i].info.shape(outputs[i]->getShape());
114+
const auto output_io_tensor = outputTensor(ir::IOIndex{i});
115+
ctx.desc.outputs[i].info.shape(output_io_tensor->get_info().shape());
120116
}
121117
}
122118

0 commit comments

Comments (0)