--- x/kernel/bpf/queue_stack_maps.c
+++ y/kernel/bpf/queue_stack_maps.c
@@ -124,7 +124,7 @@
 out:
 	return err;
 }
-
+static DEFINE_PER_CPU(int, map_depth);
 static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 {
 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
@@ -132,12 +132,18 @@ static long __stack_map_get(struct bpf_m
 	int err = 0;
 	void *ptr;
 	u32 index;
+	int dummy;
+	int *depth = &dummy;
 
 	if (in_nmi()) {
 		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
 			return -EBUSY;
 	} else {
-		raw_spin_lock_irqsave(&qs->lock, flags);
+		preempt_disable_notrace();
+		depth = this_cpu_ptr(&map_depth);
+		*depth += 1;
+		raw_spin_lock_irqsave_nested(&qs->lock, flags, *depth);
+		preempt_enable_notrace();
 	}
 
 	if (queue_stack_map_is_empty(qs)) {
@@ -157,6 +163,7 @@ static long __stack_map_get(struct bpf_m
 	qs->head = index;
 
 out:
+	*depth -= 1;
 	raw_spin_unlock_irqrestore(&qs->lock, flags);
 	return err;
 }
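
For clarity, here is a minimal sketch of the locking pattern the hunks above introduce, factored out into stand-alone helpers. The helper names nested_map_lock()/nested_map_unlock() and the per-CPU variable example_map_depth are illustrative only, not functions from the kernel tree; the counter plays the same role as map_depth in the patch: each nesting level on a CPU is passed to lockdep as a separate subclass via raw_spin_lock_irqsave_nested(), so a re-entrant acquisition of the same lock class is not flagged as a self-deadlock.

/*
 * Illustrative sketch of the per-CPU nesting-depth pattern used in the
 * hunks above.  nested_map_lock()/nested_map_unlock() are made-up helper
 * names, not kernel APIs.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(int, example_map_depth);

/* Acquire @lock, telling lockdep how deeply it nests on this CPU. */
static void nested_map_lock(raw_spinlock_t *lock, unsigned long *flags,
			    int **depth)
{
	/* Pin the CPU so the counter we bump is this CPU's counter. */
	preempt_disable_notrace();
	*depth = this_cpu_ptr(&example_map_depth);
	(**depth)++;
	/*
	 * Each nesting level is reported as a distinct lockdep subclass,
	 * so a re-entrant acquisition (e.g. a tracing BPF program running
	 * while the lock is already held on this CPU) is not reported as
	 * a deadlock.  Lockdep only tracks a small, fixed number of
	 * subclasses, so the nesting depth must stay bounded.
	 */
	raw_spin_lock_irqsave_nested(lock, *flags, **depth);
	preempt_enable_notrace();
}

/* Release @lock and drop one nesting level. */
static void nested_map_unlock(raw_spinlock_t *lock, unsigned long flags,
			      int *depth)
{
	/* The saved @depth pointer pairs this with the increment above. */
	(*depth)--;
	raw_spin_unlock_irqrestore(lock, flags);
}

In __stack_map_get() the same steps are simply open-coded around qs->lock; the in_nmi() path keeps using the trylock and leaves the counter untouched, which is why depth initially points at a local dummy.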