@@ -32,13 +32,11 @@ Execution::Execution(const std::shared_ptr<IExecutors> &executors) : _executors{
  assert(executors->entryExecutor() != nullptr);

  // Initialize I/O description
-  _ctx.desc.inputs.resize(_executors->inputSize());
  for (uint32_t i = 0; i < _executors->inputSize(); ++i)
-    _ctx.desc.inputs.at(i) = std::make_unique<InputDesc>(_executors->inputInfo(ir::IOIndex(i)));
+    _ctx.desc.inputs.emplace_back(_executors->inputInfo(ir::IOIndex(i)));

-  _ctx.desc.outputs.resize(_executors->outputSize());
  for (uint32_t i = 0; i < _executors->outputSize(); ++i)
-    _ctx.desc.outputs.at(i) = std::make_unique<OutputDesc>(_executors->outputInfo(ir::IOIndex(i)));
+    _ctx.desc.outputs.emplace_back(_executors->outputInfo(ir::IOIndex(i)));
  _ctx.shape_updated = false;

  _is_internal_output_tensor.resize(_executors->outputSize());
@@ -62,9 +60,9 @@ void Execution::changeInputShape(const ir::IOIndex &index, const ir::Shape &new_
  // Note that 'compiled' model will not be updated with new_shape
  // but new_shape will change model input shape while 'running' the model
  auto &input_desc = _ctx.desc.inputs.at(index.value());
-  if (new_shape != input_desc->info.shape())
+  if (new_shape != input_desc.info.shape())
  {
-    input_desc->info.shape(new_shape);
+    input_desc.info.shape(new_shape);
    _ctx.shape_updated = true;

    VERBOSE(Execution) << "Model input shape will be changed at the start of execute()"
@@ -77,15 +75,8 @@ void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t le
{
  // Length validation in execute(): datatype can be changed by API call
  auto &input_desc = _ctx.desc.inputs.at(index.value());
-  input_desc->buffer = buffer;
-  input_desc->size = length;
-}
-
-void Execution::setInput(const ir::IOIndex &index, const ir::Shape &shape, const void *buffer,
-                         size_t length)
-{
-  changeInputShape(index, shape);
-  setInput(index, buffer, length);
+  input_desc.buffer = buffer;
+  input_desc.size = length;
}

void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length)
@@ -94,17 +85,8 @@ void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length)
  // - datatype can be changed by API call
  // - shape can be changed by dynamic shape inference
  auto &output_desc = _ctx.desc.outputs.at(index.value());
-  output_desc->buffer = buffer;
-  output_desc->size = length;
-}
-
-void Execution::setOutput(const ir::IOIndex &index, const ir::Shape &shape, void *buffer,
-                          size_t length)
-{
-  auto &output_desc = _ctx.desc.outputs.at(index.value());
-  output_desc->info.shape(shape);
-
-  setOutput(index, buffer, length);
+  output_desc.buffer = buffer;
+  output_desc.size = length;
}

void Execution::execute()
@@ -114,7 +96,7 @@ void Execution::execute()
  // Input length validation check
  for (const auto &input : _ctx.desc.inputs)
  {
-    if (input->info.total_size() > input->size)
+    if (input.info.total_size() > input.size)
      throw std::runtime_error{"Too small input buffer length"};
  }
@@ -126,9 +108,9 @@ void Execution::execute()
  {
    const bool is_managed_internally = _is_internal_output_tensor.at(i);
    const auto &output = _ctx.desc.outputs.at(i);
-    if (!is_managed_internally && output->info.total_size() > output->size)
+    if (!is_managed_internally && output.info.total_size() > output.size)
      throw std::runtime_error{"Too small output buffer length"};
-    if (is_managed_internally && output->buffer != nullptr)
+    if (is_managed_internally && output.buffer != nullptr)
      VERBOSE(Execution) << "Warning: Output buffer was set from API even though the output "
                            "tensor was allocated internally"
                         << std::endl;
@@ -193,41 +175,4 @@ void Execution::iterateTrainableTensors(
  execs->iterateTrainableTensors(fn);
}

-ir::Shape Execution::getInputShape(ir::IOIndex ind) const
-{
-  return _ctx.desc.inputs.at(ind.value())->info.shape();
-}
-
-// NNAPI return fail if ANeuralNetworksExecution_getOutputOperandRank or
-// ANeuralNetworksExecution_getOutputOperandDimensions is called before execution.
-// On the other hand, NNFW API return static shape inference result if nnfw_output_tensorinfo is
-// called before execution.
-// To handle both case, this method retun static shape inference result and fail will be handled on
-// NNAPI frontend.
-ir::Shape Execution::getOutputShape(ir::IOIndex ind) const
-{
-  return _ctx.desc.outputs.at(ind.value())->info.shape();
-}
-
-size_t Execution::getInputTotalSize(ir::IOIndex ind) const
-{
-  // TODO Support dynamic shape
-  return _ctx.desc.inputs.at(ind.value())->info.total_size();
-}
-
-size_t Execution::getOutputTotalSize(ir::IOIndex ind) const
-{
-  return _ctx.desc.outputs.at(ind.value())->info.total_size();
-}
-
-const void *Execution::getInputBuffer(ir::IOIndex ind) const
-{
-  return _ctx.desc.inputs.at(ind.value())->buffer;
-}
-
-void *Execution::getOutputBuffer(ir::IOIndex ind)
-{
-  return _ctx.desc.outputs.at(ind.value())->buffer;
-}
-
} // namespace onert::exec
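Since the shape-taking setInput/setOutput overloads are removed in this diff, a caller that previously bound a buffer together with a new shape would presumably now issue the shape change and the buffer binding as two explicit calls. A minimal caller-side sketch, assuming `execution` is an already-constructed onert::exec::Execution and that new_shape, in_buffer/in_length, out_buffer/out_length come from the application (construction of the Execution from a compiled model is omitted):

// Hypothetical usage sketch, not part of this diff: the removed overload
// simply called changeInputShape() followed by setInput(), so the caller
// now performs the two steps itself.
execution.changeInputShape(ir::IOIndex{0}, new_shape);       // marks _ctx.shape_updated
execution.setInput(ir::IOIndex{0}, in_buffer, in_length);    // length is validated in execute()
execution.setOutput(ir::IOIndex{0}, out_buffer, out_length);
execution.execute();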