@@ -51,7 +51,7 @@ struct btf {
5151
5252 /* type ID to `struct btf_type *` lookup index */
5353 __u32 * type_offs ;
54- __u32 type_offs_cap ;
54+ size_t type_offs_cap ;
5555 __u32 nr_types ;
5656
5757 /* BTF object FD, if loaded into kernel */
@@ -66,31 +66,60 @@ static inline __u64 ptr_to_u64(const void *ptr)
6666 return (__u64 ) (unsigned long ) ptr ;
6767}
6868
/* Ensure given dynamically allocated memory region pointed to by *data* with
 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
 * are already used. At most *max_cnt* elements can be ever allocated.
 * If necessary, memory is reallocated and all existing data is copied over,
 * new pointer to the memory region is stored at *data, new memory region
 * capacity (in number of elements) is stored in *cap.
 * On success, memory pointer to the beginning of unused memory is returned.
 * On error, NULL is returned.
 */
void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		  size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	/* fast path: enough capacity already; written in subtraction form so
	 * that cur_cnt + add_cnt can't wrap around SIZE_MAX and pass bogusly
	 */
	if (add_cnt <= *cap_cnt && cur_cnt <= *cap_cnt - add_cnt)
		return *data + cur_cnt * elem_sz;

	/* requested more than the set limit; this form also rejects inputs
	 * where cur_cnt + add_cnt would overflow size_t
	 */
	if (add_cnt > max_cnt || cur_cnt > max_cnt - add_cnt)
		return NULL;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	/* reallocarray checks new_cnt * elem_sz for multiplication overflow;
	 * on failure the original *data region is left intact
	 */
	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}
91112
92- btf -> type_offs [btf -> nr_types + 1 ] = type_off ;
113+ static int btf_add_type_idx_entry (struct btf * btf , __u32 type_off )
114+ {
115+ __u32 * p ;
116+
117+ p = btf_add_mem ((void * * )& btf -> type_offs , & btf -> type_offs_cap , sizeof (__u32 ),
118+ btf -> nr_types + 1 , BTF_MAX_NR_TYPES , 1 );
119+ if (!p )
120+ return - ENOMEM ;
93121
122+ * p = type_off ;
94123 return 0 ;
95124}
96125
@@ -203,11 +232,17 @@ static int btf_parse_type_sec(struct btf *btf)
203232 struct btf_header * hdr = btf -> hdr ;
204233 void * next_type = btf -> types_data ;
205234 void * end_type = next_type + hdr -> type_len ;
235+ int err , type_size ;
206236
207- while (next_type < end_type ) {
208- int type_size ;
209- int err ;
237+ /* VOID (type_id == 0) is specially handled by btf__get_type_by_id(),
238+ * so ensure we can never properly use its offset from index by
239+ * setting it to a large value
240+ */
241+ err = btf_add_type_idx_entry (btf , UINT_MAX );
242+ if (err )
243+ return err ;
210244
245+ while (next_type < end_type ) {
211246 err = btf_add_type_idx_entry (btf , next_type - btf -> types_data );
212247 if (err )
213248 return err ;
0 commit comments