#include <stdint.h>
#include "wasi_nn_types.h"

+#define WASI_NN_IMPORT(name) \
+    __attribute__((import_module("wasi_nn"), import_name(name)))
+
/**
 * @brief Load an opaque sequence of bytes to use for inference.
 *
 */
wasi_nn_error
load(graph_builder_array *builder, graph_encoding encoding,
-     execution_target target, graph *g)
-    __attribute__((import_module("wasi_nn")));
+     execution_target target, graph *g) WASI_NN_IMPORT("load");

wasi_nn_error
load_by_name(const char *name, uint32_t name_len, graph *g)
-    __attribute__((import_module("wasi_nn")));
+    WASI_NN_IMPORT("load_by_name");

/**
 * INFERENCE
@@ -47,7 +49,7 @@ load_by_name(const char *name, uint32_t name_len, graph *g)
 */
wasi_nn_error
init_execution_context(graph g, graph_execution_context *ctx)
-    __attribute__((import_module("wasi_nn")));
+    WASI_NN_IMPORT("init_execution_context");

/**
 * @brief Define the inputs to use for inference.
@@ -59,7 +61,7 @@ init_execution_context(graph g, graph_execution_context *ctx)
 */
wasi_nn_error
set_input(graph_execution_context ctx, uint32_t index, tensor *tensor)
-    __attribute__((import_module("wasi_nn")));
+    WASI_NN_IMPORT("set_input");

/**
 * @brief Compute the inference on the given inputs.
@@ -68,7 +70,7 @@ set_input(graph_execution_context ctx, uint32_t index, tensor *tensor)
 * @return wasi_nn_error Execution status.
 */
wasi_nn_error
-compute(graph_execution_context ctx) __attribute__((import_module("wasi_nn")));
+compute(graph_execution_context ctx) WASI_NN_IMPORT("compute");

/**
 * @brief Extract the outputs after inference.
@@ -85,6 +87,6 @@ compute(graph_execution_context ctx) __attribute__((import_module("wasi_nn")));
wasi_nn_error
get_output(graph_execution_context ctx, uint32_t index,
           tensor_data output_tensor, uint32_t *output_tensor_size)
-    __attribute__((import_module("wasi_nn")));
+    WASI_NN_IMPORT("get_output");

#endif
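
For context, a minimal guest-side sketch of how these imports chain together: load a model, create an execution context, bind an input tensor, run compute, and read back the output. It assumes the usual wasi-nn definitions from wasi_nn_types.h (a graph_builder/graph_builder_array pair with buf/size fields, tensor_data as a byte pointer, and enum values such as success, cpu, and tensorflowlite); none of those names appear in this diff, so treat them as illustrative rather than confirmed.

#include <stdint.h>
#include "wasi_nn.h"

/* Hypothetical helper: runs one inference pass over a raw model blob.
 * Field and enum names (buf, size, success, tensorflowlite, cpu) are
 * assumed from wasi_nn_types.h and may differ in the actual header. */
static wasi_nn_error
run_inference(uint8_t *model, uint32_t model_size,
              tensor *input, tensor_data output, uint32_t *output_size)
{
    graph g;
    graph_execution_context ctx;
    wasi_nn_error err;

    /* Wrap the raw model bytes as a single-element builder array. */
    graph_builder builder = { .buf = model, .size = model_size };
    graph_builder_array builders = { .buf = &builder, .size = 1 };

    if ((err = load(&builders, tensorflowlite, cpu, &g)) != success)
        return err;
    if ((err = init_execution_context(g, &ctx)) != success)
        return err;
    if ((err = set_input(ctx, 0, input)) != success)
        return err;
    if ((err = compute(ctx)) != success)
        return err;

    /* output_size is in/out: buffer capacity on entry, bytes written on return. */
    return get_output(ctx, 0, output, output_size);
}

With the WASI_NN_IMPORT macro above, each of these calls resolves to an import from the "wasi_nn" module whose import name matches the C function name, rather than relying on the default symbol-name mapping.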