Skip to content

Commit f2e5fa8

Browse files
yamtlum1n0us
authored and committed
wasi_nn_openvino.c: remove pre/postprocessing and layout assumptions (bytecodealliance#4361)
as wasi-nn doesn't have these concepts, the best we can do without risking breaking certain applications here is to pass through tensors as they are. this matches wasmtime's behavior. tested with: * wasmtime classification-example (with this change, this example fails on tensor size mismatch instead of implicitly resizing it.) * license-plate-recognition-barrier-0007, a converted version with non-fp32 output. [1] (with this change, this model outputs integers as expected.) [1] https://github.com/openvinotoolkit/open_model_zoo/tree/cd7ebe313b69372763e76b82e5d24935308fece4/models/public/license-plate-recognition-barrier-0007
1 parent d5a7f01 commit f2e5fa8

File tree

1 file changed

+2
-81
lines changed

1 file changed

+2
-81
lines changed

core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.c

Lines changed: 2 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,6 @@ typedef struct {
3232
void *weight_data;
3333
ov_tensor_t *weights_tensor;
3434
ov_model_t *model;
35-
/* add prepostprocess */
36-
ov_model_t *new_model;
3735
ov_compiled_model_t *compiled_model;
3836
ov_infer_request_t *infer_request;
3937
ov_tensor_t *input_tensor;
@@ -284,16 +282,6 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
284282
ov_shape_t input_shape = { 0 };
285283
int64_t *ov_dims = NULL;
286284

287-
ov_preprocess_prepostprocessor_t *ppp = NULL;
288-
ov_preprocess_input_info_t *input_info = NULL;
289-
ov_preprocess_input_tensor_info_t *input_tensor_info = NULL;
290-
ov_layout_t *input_layout = NULL;
291-
ov_preprocess_preprocess_steps_t *input_process = NULL;
292-
ov_preprocess_input_model_info_t *p_input_model = NULL;
293-
ov_layout_t *model_layout = NULL;
294-
ov_preprocess_output_info_t *output_info = NULL;
295-
ov_preprocess_output_tensor_info_t *output_tensor_info = NULL;
296-
297285
/* wasi_nn_tensor -> ov_tensor */
298286
{
299287
ret = uint32_array_to_int64_array(wasi_nn_tensor->dimensions->size,
@@ -322,57 +310,8 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
322310
ret);
323311
}
324312

325-
/* set preprocess based on wasi_nn_tensor */
326-
{
327-
CHECK_OV_STATUS(
328-
ov_preprocess_prepostprocessor_create(ov_ctx->model, &ppp), ret);
329-
330-
/* reuse user' created tensor's info */
331-
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_input_info_by_index(
332-
ppp, index, &input_info),
333-
ret);
334-
CHECK_OV_STATUS(ov_preprocess_input_info_get_tensor_info(
335-
input_info, &input_tensor_info),
336-
ret);
337-
CHECK_OV_STATUS(ov_preprocess_input_tensor_info_set_from(
338-
input_tensor_info, ov_ctx->input_tensor),
339-
ret);
340-
341-
/* add RESIZE */
342-
CHECK_OV_STATUS(ov_preprocess_input_info_get_preprocess_steps(
343-
input_info, &input_process),
344-
ret);
345-
CHECK_OV_STATUS(
346-
ov_preprocess_preprocess_steps_resize(input_process, RESIZE_LINEAR),
347-
ret);
348-
349-
/* input model */
350-
CHECK_OV_STATUS(
351-
ov_preprocess_input_info_get_model_info(input_info, &p_input_model),
352-
ret);
353-
// TODO: what if not?
354-
CHECK_OV_STATUS(ov_layout_create("NCHW", &model_layout), ret);
355-
CHECK_OV_STATUS(ov_preprocess_input_model_info_set_layout(p_input_model,
356-
model_layout),
357-
ret);
358-
359-
/* output -> F32(possibility) */
360-
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_output_info_by_index(
361-
ppp, index, &output_info),
362-
ret);
363-
CHECK_OV_STATUS(ov_preprocess_output_info_get_tensor_info(
364-
output_info, &output_tensor_info),
365-
ret);
366-
CHECK_OV_STATUS(
367-
ov_preprocess_output_set_element_type(output_tensor_info, F32),
368-
ret);
369-
370-
CHECK_OV_STATUS(
371-
ov_preprocess_prepostprocessor_build(ppp, &ov_ctx->new_model), ret);
372-
}
373-
374-
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, ov_ctx->new_model,
375-
"CPU", 0, &ov_ctx->compiled_model),
313+
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, ov_ctx->model, "CPU", 0,
314+
&ov_ctx->compiled_model),
376315
ret);
377316

378317
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
@@ -389,24 +328,6 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
389328
if (ov_dims)
390329
os_free(ov_dims);
391330
ov_shape_free(&input_shape);
392-
if (ppp)
393-
ov_preprocess_prepostprocessor_free(ppp);
394-
if (input_info)
395-
ov_preprocess_input_info_free(input_info);
396-
if (input_tensor_info)
397-
ov_preprocess_input_tensor_info_free(input_tensor_info);
398-
if (input_layout)
399-
ov_layout_free(input_layout);
400-
if (input_process)
401-
ov_preprocess_preprocess_steps_free(input_process);
402-
if (p_input_model)
403-
ov_preprocess_input_model_info_free(p_input_model);
404-
if (model_layout)
405-
ov_layout_free(model_layout);
406-
if (output_info)
407-
ov_preprocess_output_info_free(output_info);
408-
if (output_tensor_info)
409-
ov_preprocess_output_tensor_info_free(output_tensor_info);
410331

411332
return ret;
412333
}

0 commit comments

Comments (0)