1 file changed: +20 -1 lines changed
@@ -161,9 +161,24 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
 		return -EBUSY;
 	}
 
+	/*
+	 * The lock may be taken in both NMI and non-NMI contexts.
+	 * With lockdep enabled, this triggers a false "inconsistent
+	 * lock state" warning. The potential deadlock only happens
+	 * when the lock is contended from the same CPU, and the
+	 * map_locked counter already rejects concurrent access to
+	 * the same bucket from the same CPU. When the lock is
+	 * contended from a remote CPU, we want the remote CPU to
+	 * spin and wait instead of giving up immediately, as this
+	 * gives better throughput. Replacing raw_spin_lock_irqsave()
+	 * with a trylock would sacrifice that gain, so the atomic
+	 * map_locked check stays and lockdep_off() is invoked
+	 * temporarily to suppress the false warning.
+	 */
+	lockdep_off();
 	raw_spin_lock_irqsave(&b->raw_lock, flags);
-	*pflags = flags;
+	lockdep_on();
 
+	*pflags = flags;
 	return 0;
 }
 
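For reference, this is roughly how the whole of htab_lock_bucket() reads once the hunk above is applied — a sketch reconstructed from the context lines plus the upstream kernel/bpf/hashtab.c of that era. The preempt_disable()/map_locked prologue is not visible in this diff, so treat its exact shape as an assumption:

static inline int htab_lock_bucket(const struct bpf_htab *htab,
				   struct bucket *b, u32 hash,
				   unsigned long *pflags)
{
	unsigned long flags;

	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);

	preempt_disable();
	/* Reject reentry on the same CPU (e.g. from an NMI handler):
	 * spinning here would deadlock on a lock this CPU already holds. */
	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
		__this_cpu_dec(*(htab->map_locked[hash]));
		preempt_enable();
		return -EBUSY;
	}

	/* Remote CPUs are allowed to spin; lockdep is silenced around the
	 * acquisition because it cannot see the map_locked guarantee. */
	lockdep_off();
	raw_spin_lock_irqsave(&b->raw_lock, flags);
	lockdep_on();

	*pflags = flags;
	return 0;
}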
@@ -172,7 +187,11 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
 				  unsigned long flags)
 {
 	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+
+	lockdep_off();
 	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
+	lockdep_on();
+
 	__this_cpu_dec(*(htab->map_locked[hash]));
 	preempt_enable();
 }
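And a hypothetical caller showing the intended pairing — the function name and body are illustrative only, not part of the patch. What matters is that the -EBUSY from same-CPU contention propagates to the map operation instead of deadlocking:

/* Illustrative only: update one bucket under the per-bucket lock. */
static int htab_example_update(struct bpf_htab *htab, struct bucket *b,
			       u32 hash)
{
	unsigned long flags;
	int ret;

	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;	/* -EBUSY: bucket already locked on this CPU */

	/* ... modify the bucket's hlist here ... */

	htab_unlock_bucket(htab, b, hash, flags);
	return 0;
}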