--- x/kernel/bpf/ringbuf.c
+++ y/kernel/bpf/ringbuf.c
@@ -402,10 +402,14 @@ bpf_ringbuf_restore_from_rec(struct bpf_
 	return (void*)((addr & PAGE_MASK) - off);
 }
 
+static DEFINE_PER_CPU(int, bpf_ringbuf_lock_subclass); /* per-CPU rb->spinlock nesting depth, fed to lockdep as the subclass */
+
 static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 {
 	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
 	u32 len, pg_off;
+	int dummy = 0;		/* fallback counter so the NMI/trylock path can share the unlock-side decrement */
+	int *sbc = &dummy;
 	struct bpf_ringbuf_hdr *hdr;
 
 	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
@@ -421,7 +425,10 @@ static void *__bpf_ringbuf_reserve(struc
 		if (!spin_trylock_irqsave(&rb->spinlock, flags))
 			return NULL;
 	} else {
-		spin_lock_irqsave(&rb->spinlock, flags);
+		sbc = get_cpu_ptr(&bpf_ringbuf_lock_subclass);
+		*sbc += 1;	/* NOTE(review): lockdep only accepts subclasses < MAX_LOCKDEP_SUBCLASSES (8) — confirm nesting depth is bounded */
+		spin_lock_irqsave_nested(&rb->spinlock, flags, *sbc);
+		put_cpu_ptr(&bpf_ringbuf_lock_subclass);	/* sbc stays valid after this: irqs are off while the lock is held, pinning us to this CPU */
 	}
 
 	prod_pos = rb->producer_pos;
@@ -431,6 +438,7 @@ static void *__bpf_ringbuf_reserve(struc
 	 * doesn't advance more than (ringbuf_size - 1) ahead
 	 */
 	if (new_prod_pos - cons_pos > rb->mask) {
+		*sbc -= 1;	/* drop nesting depth before release; still same CPU (lock held, irqs off) */
 		spin_unlock_irqrestore(&rb->spinlock, flags);
 		return NULL;
 	}
@@ -443,6 +451,7 @@ static void *__bpf_ringbuf_reserve(struc
 	/* pairs with consumer's smp_load_acquire() */
 	smp_store_release(&rb->producer_pos, new_prod_pos);
 
+	*sbc -= 1;	/* drop nesting depth before release; still same CPU (lock held, irqs off) */
 	spin_unlock_irqrestore(&rb->spinlock, flags);
 	return (void *)hdr + BPF_RINGBUF_HDR_SZ;