@@ -249,7 +249,18 @@ static void htab_free_prealloced_fields(struct bpf_htab *htab)
 		struct htab_elem *elem;
 
 		elem = get_htab_elem(htab, i);
-		bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
+		if (htab_is_percpu(htab)) {
+			void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
+			int cpu;
+
+			for_each_possible_cpu(cpu) {
+				bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
+				cond_resched();
+			}
+		} else {
+			bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
+			cond_resched();
+		}
 		cond_resched();
 	}
 }
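Note on the hunk above: in a percpu hash map the slot after the (8-byte-aligned) key holds a pointer to per-CPU storage rather than the value itself, which is why freeing special fields (kptrs etc.) now has to visit every possible CPU's copy. Below is a minimal userspace sketch of that layout and walk; NR_CPUS, the flat per-CPU array, and all helper names are illustrative assumptions, not kernel internals.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

static size_t round_up8(size_t n) { return (n + 7) & ~(size_t)7; }

/* Stand-in for bpf_obj_free_fields(): release special fields of one value. */
static void free_fields(void *value)
{
	printf("freeing special fields at %p\n", value);
}

/* Element layout: key bytes, then (rounded up to 8) either the value
 * itself or, for percpu maps, a pointer to NR_CPUS value copies. */
static void free_elem_fields(char *elem, size_t key_size, size_t value_size,
			     int percpu)
{
	char *slot = elem + round_up8(key_size);

	if (percpu) {
		char *pptr;

		memcpy(&pptr, slot, sizeof(pptr));      /* htab_elem_get_ptr() analog */
		for (int cpu = 0; cpu < NR_CPUS; cpu++) /* for_each_possible_cpu() analog */
			free_fields(pptr + cpu * round_up8(value_size));
	} else {
		free_fields(slot);
	}
}

int main(void)
{
	size_t key_size = 4, value_size = 16;
	char *pcpu = calloc(NR_CPUS, round_up8(value_size));
	char *elem = calloc(1, round_up8(key_size) + sizeof(char *));

	memcpy(elem + round_up8(key_size), &pcpu, sizeof(pcpu));
	free_elem_fields(elem, key_size, value_size, 1);
	free(pcpu);
	free(elem);
	return 0;
}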
@@ -759,9 +770,17 @@ static int htab_lru_map_gen_lookup(struct bpf_map *map,
 static void check_and_free_fields(struct bpf_htab *htab,
 				   struct htab_elem *elem)
 {
-	void *map_value = elem->key + round_up(htab->map.key_size, 8);
+	if (htab_is_percpu(htab)) {
+		void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
+		int cpu;
 
-	bpf_obj_free_fields(htab->map.record, map_value);
+		for_each_possible_cpu(cpu)
+			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
+	} else {
+		void *map_value = elem->key + round_up(htab->map.key_size, 8);
+
+		bpf_obj_free_fields(htab->map.record, map_value);
+	}
 }
 
 /* It is called from the bpf_lru_list when the LRU needs to delete
@@ -858,9 +877,9 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 
 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 {
+	check_and_free_fields(htab, l);
 	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 		bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
-	check_and_free_fields(htab, l);
 	bpf_mem_cache_free(&htab->ma, l);
 }
 
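The reorder above is load-bearing: with the new check_and_free_fields(), cleanup of a percpu element dereferences the per-CPU pointer stored in l->ptr_to_pptr, so it must run before bpf_mem_cache_free() returns that storage. A small userspace sketch of the required ordering (all names illustrative, malloc/free standing in for the bpf memory allocator):

#include <stdio.h>
#include <stdlib.h>

struct elem {
	void *ptr_to_pptr;	/* per-CPU storage, as in struct htab_elem */
};

static void cleanup_fields(struct elem *e)
{
	/* For a percpu map this reads e->ptr_to_pptr, so the per-CPU
	 * storage must still be alive when we get here. */
	printf("cleaning fields via %p\n", e->ptr_to_pptr);
}

static void elem_free(struct elem *e, int percpu)
{
	cleanup_fields(e);		/* first: may touch per-CPU storage */
	if (percpu)
		free(e->ptr_to_pptr);	/* only now release that storage */
	free(e);
}

int main(void)
{
	struct elem *e = malloc(sizeof(*e));

	e->ptr_to_pptr = malloc(64);
	elem_free(e, 1);
	return 0;
}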
@@ -918,14 +937,13 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 {
 	if (!onallcpus) {
 		/* copy true value_size bytes */
-		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
+		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
 	} else {
 		u32 size = round_up(htab->map.value_size, 8);
 		int off = 0, cpu;
 
 		for_each_possible_cpu(cpu) {
-			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
-					value + off, size);
+			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
 			off += size;
 		}
 	}
@@ -940,16 +958,14 @@ static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
 	 * (onallcpus=false always when coming from bpf prog).
 	 */
 	if (!onallcpus) {
-		u32 size = round_up(htab->map.value_size, 8);
 		int current_cpu = raw_smp_processor_id();
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
 			if (cpu == current_cpu)
-				bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
-						size);
-			else
-				memset(per_cpu_ptr(pptr, cpu), 0, size);
+				copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
+			else /* Since elem is preallocated, we cannot touch special fields */
+				zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
 		}
 	} else {
 		pcpu_copy_value(htab, pptr, value, onallcpus);
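The two hunks above replace raw memcpy()/memset() with copy_map_value()/copy_map_value_long() and zero_map_value() so bulk copies never clobber special fields (kptrs, bpf_timer, etc.) in the destination; those helpers copy or zero around the field regions recorded in the map's btf_record. A simplified userspace sketch with a single special field at a fixed offset (the real helpers iterate a list of fields):

#include <stdio.h>
#include <string.h>

struct fake_map {
	size_t value_size;
	size_t spec_off;	/* offset of the one special field in this sketch */
	size_t spec_sz;		/* its size */
};

/* copy_map_value() analog: copy everything except the special field,
 * leaving whatever the destination already holds there untouched. */
static void sketch_copy_map_value(const struct fake_map *m, void *dst,
				  const void *src)
{
	size_t tail = m->spec_off + m->spec_sz;

	memcpy(dst, src, m->spec_off);
	memcpy((char *)dst + tail, (const char *)src + tail,
	       m->value_size - tail);
}

/* zero_map_value() analog: same shape, zeroing instead of copying. */
static void sketch_zero_map_value(const struct fake_map *m, void *dst)
{
	size_t tail = m->spec_off + m->spec_sz;

	memset(dst, 0, m->spec_off);
	memset((char *)dst + tail, 0, m->value_size - tail);
}

int main(void)
{
	struct fake_map m = { .value_size = 24, .spec_off = 8, .spec_sz = 8 };
	char src[24], dst[24];

	memset(src, 0xab, sizeof(src));
	memset(dst, 0xcd, sizeof(dst));
	sketch_copy_map_value(&m, dst, src);
	printf("special byte kept: %#x, plain byte copied: %#x\n",
	       dst[8] & 0xff, dst[0] & 0xff);
	sketch_zero_map_value(&m, dst);
	return 0;
}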
@@ -1575,9 +1591,8 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 
 		pptr = htab_elem_get_ptr(l, key_size);
 		for_each_possible_cpu(cpu) {
-			bpf_long_memcpy(value + off,
-					per_cpu_ptr(pptr, cpu),
-					roundup_value_size);
+			copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
+			check_and_init_map_value(&htab->map, value + off);
 			off += roundup_value_size;
 		}
 	} else {
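This copy-out pattern, copy each CPU's rounded-up value and then re-initialize the special-field area of the copy so raw kernel pointers never reach the user buffer, repeats in the batch, iterator, and bpf_percpu_hash_copy() hunks below. A hedged userspace sketch, where plain zeroing stands in for check_and_init_map_value() (the real helper knows each field's proper init state) and all names are illustrative:

#include <string.h>

#define NR_CPUS 4

struct fake_map {
	size_t value_size;
	size_t spec_off;	/* special-field offset, illustrative */
	size_t spec_sz;
};

static size_t round_up8(size_t n) { return (n + 7) & ~(size_t)7; }

/* Copy every CPU's value into the output buffer, then scrub the
 * special fields of each copy before userspace can see them. */
static void sketch_copy_out(const struct fake_map *m, char *out,
			    const char *pcpu_values)
{
	size_t size = round_up8(m->value_size);
	size_t off = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* copy_map_value_long() analog */
		memcpy(out + off, pcpu_values + cpu * size, size);
		/* check_and_init_map_value() analog (zeroing as a stand-in) */
		memset(out + off + m->spec_off, 0, m->spec_sz);
		off += size;
	}
}

int main(void)
{
	struct fake_map m = { .value_size = 16, .spec_off = 0, .spec_sz = 8 };
	char pcpu[NR_CPUS * 16], out[NR_CPUS * 16];

	memset(pcpu, 0xee, sizeof(pcpu));
	sketch_copy_out(&m, out, pcpu);
	return 0;
}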
@@ -1772,8 +1787,8 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 
 			pptr = htab_elem_get_ptr(l, map->key_size);
 			for_each_possible_cpu(cpu) {
-				bpf_long_memcpy(dst_val + off,
-						per_cpu_ptr(pptr, cpu), size);
+				copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
+				check_and_init_map_value(&htab->map, dst_val + off);
 				off += size;
 			}
 		} else {
@@ -2046,9 +2061,9 @@ static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
 		roundup_value_size = round_up(map->value_size, 8);
 		pptr = htab_elem_get_ptr(elem, map->key_size);
 		for_each_possible_cpu(cpu) {
-			bpf_long_memcpy(info->percpu_value_buf + off,
-					per_cpu_ptr(pptr, cpu),
-					roundup_value_size);
+			copy_map_value_long(map, info->percpu_value_buf + off,
+					    per_cpu_ptr(pptr, cpu));
+			check_and_init_map_value(map, info->percpu_value_buf + off);
 			off += roundup_value_size;
 		}
 		ctx.value = info->percpu_value_buf;
@@ -2292,8 +2307,8 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	 */
 	pptr = htab_elem_get_ptr(l, map->key_size);
 	for_each_possible_cpu(cpu) {
-		bpf_long_memcpy(value + off,
-				per_cpu_ptr(pptr, cpu), size);
+		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
+		check_and_init_map_value(map, value + off);
 		off += size;
 	}
 	ret = 0;