======================================================
WARNING: possible circular locking dependency detected
6.6.97-syzkaller #0 Not tainted
------------------------------------------------------
syz-executor/22185 is trying to acquire lock:
ffff8880467c6a38 (&trie->lock){..-.}-{2:2}, at: trie_delete_elem+0x96/0x6a0 kernel/bpf/lpm_trie.c:467

but task is already holding lock:
ffff888019a4e498 (&n->list_lock){-.-.}-{2:2}, at: get_partial_node+0x36/0x540 mm/slub.c:2296

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #1 (&n->list_lock){-.-.}-{2:2}:
       __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
       _raw_spin_lock_irqsave+0xa8/0xf0 kernel/locking/spinlock.c:162
       get_partial_node+0x36/0x540 mm/slub.c:2296
       get_partial mm/slub.c:2411 [inline]
       ___slab_alloc+0x9cd/0x12f0 mm/slub.c:3225
       __slab_alloc mm/slub.c:3329 [inline]
       __slab_alloc_node mm/slub.c:3382 [inline]
       slab_alloc_node mm/slub.c:3475 [inline]
       __kmem_cache_alloc_node+0x1a2/0x260 mm/slub.c:3524
       __do_kmalloc_node mm/slab_common.c:1006 [inline]
       __kmalloc_node+0xa4/0x230 mm/slab_common.c:1014
       kmalloc_node include/linux/slab.h:620 [inline]
       bpf_map_kmalloc_node+0xbc/0x1b0 kernel/bpf/syscall.c:422
       lpm_trie_node_alloc kernel/bpf/lpm_trie.c:291 [inline]
       trie_update_elem+0xa9e/0xea0 kernel/bpf/lpm_trie.c:419
       bpf_map_update_value+0x67c/0x740 kernel/bpf/syscall.c:201
       generic_map_update_batch+0x5e0/0x810 kernel/bpf/syscall.c:1794
       bpf_map_do_batch+0x3d7/0x610 kernel/bpf/syscall.c:5001
       __sys_bpf+0x31b/0x800 kernel/bpf/syscall.c:-1
       __do_sys_bpf kernel/bpf/syscall.c:5571 [inline]
       __se_sys_bpf kernel/bpf/syscall.c:5569 [inline]
       __x64_sys_bpf+0x7c/0x90 kernel/bpf/syscall.c:5569
       do_syscall_x64 arch/x86/entry/common.c:51 [inline]
       do_syscall_64+0x55/0xb0 arch/x86/entry/common.c:81
       entry_SYSCALL_64_after_hwframe+0x68/0xd2

-> #0 (&trie->lock){..-.}-{2:2}:
       check_prev_add kernel/locking/lockdep.c:3134 [inline]
       check_prevs_add kernel/locking/lockdep.c:3253 [inline]
       validate_chain kernel/locking/lockdep.c:3869 [inline]
       __lock_acquire+0x2ddb/0x7c80 kernel/locking/lockdep.c:5137
       lock_acquire+0x197/0x410 kernel/locking/lockdep.c:5754
       __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
       _raw_spin_lock_irqsave+0xa8/0xf0 kernel/locking/spinlock.c:162
       trie_delete_elem+0x96/0x6a0 kernel/bpf/lpm_trie.c:467
       bpf_prog_2c29ac5cdc6b1842+0x42/0x46
       bpf_dispatcher_nop_func include/linux/bpf.h:1213 [inline]
       __bpf_prog_run include/linux/filter.h:612 [inline]
       bpf_prog_run include/linux/filter.h:619 [inline]
       __bpf_trace_run kernel/trace/bpf_trace.c:2322 [inline]
       bpf_trace_run2+0x1d1/0x3c0 kernel/trace/bpf_trace.c:2361
       __bpf_trace_contention_end+0xdd/0x130 include/trace/events/lock.h:122
       trace_contention_end+0xe6/0x110 include/trace/events/lock.h:122
       __pv_queued_spin_lock_slowpath+0x7ec/0x9d0 kernel/locking/qspinlock.c:560
       pv_queued_spin_lock_slowpath arch/x86/include/asm/paravirt.h:586 [inline]
       queued_spin_lock_slowpath arch/x86/include/asm/qspinlock.h:51 [inline]
       queued_spin_lock include/asm-generic/qspinlock.h:114 [inline]
       do_raw_spin_lock+0x24e/0x2c0 kernel/locking/spinlock_debug.c:115
       __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:111 [inline]
       _raw_spin_lock_irqsave+0xb4/0xf0 kernel/locking/spinlock.c:162
       get_partial_node+0x36/0x540 mm/slub.c:2296
       get_partial mm/slub.c:2411 [inline]
       ___slab_alloc+0x9cd/0x12f0 mm/slub.c:3225
       __slab_alloc mm/slub.c:3329 [inline]
       __slab_alloc_node mm/slub.c:3382 [inline]
       slab_alloc_node mm/slub.c:3475 [inline]
       slab_alloc mm/slub.c:3493 [inline]
       __kmem_cache_alloc_lru mm/slub.c:3500 [inline]
       kmem_cache_alloc+0x1b7/0x2e0 mm/slub.c:3509
       getname_flags+0xbb/0x500 fs/namei.c:140
       do_sys_openat2+0xcb/0x1c0 fs/open.c:1413
       do_sys_open fs/open.c:1434 [inline]
       __do_sys_openat fs/open.c:1450 [inline]
       __se_sys_openat fs/open.c:1445 [inline]
       __x64_sys_openat+0x139/0x160 fs/open.c:1445
       do_syscall_x64 arch/x86/entry/common.c:51 [inline]
       do_syscall_64+0x55/0xb0 arch/x86/entry/common.c:81
       entry_SYSCALL_64_after_hwframe+0x68/0xd2

other info that might help us debug this:

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&n->list_lock);
                               lock(&trie->lock);
                               lock(&n->list_lock);
  lock(&trie->lock);

 *** DEADLOCK ***

2 locks held by syz-executor/22185:
 #0: ffff888019a4e498 (&n->list_lock){-.-.}-{2:2}, at: get_partial_node+0x36/0x540 mm/slub.c:2296
 #1: ffffffff8cd2fa60 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:334 [inline]
 #1: ffffffff8cd2fa60 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:786 [inline]
 #1: ffffffff8cd2fa60 (rcu_read_lock){....}-{1:2}, at: __bpf_trace_run kernel/trace/bpf_trace.c:2321 [inline]
 #1: ffffffff8cd2fa60 (rcu_read_lock){....}-{1:2}, at: bpf_trace_run2+0xde/0x3c0 kernel/trace/bpf_trace.c:2361

stack backtrace:
CPU: 1 PID: 22185 Comm: syz-executor Not tainted 6.6.97-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/07/2025
Call Trace:
 dump_stack_lvl+0x16c/0x230 lib/dump_stack.c:106
 check_noncircular+0x2bd/0x3c0 kernel/locking/lockdep.c:2187
 check_prev_add kernel/locking/lockdep.c:3134 [inline]
 check_prevs_add kernel/locking/lockdep.c:3253 [inline]
 validate_chain kernel/locking/lockdep.c:3869 [inline]
 __lock_acquire+0x2ddb/0x7c80 kernel/locking/lockdep.c:5137
 lock_acquire+0x197/0x410 kernel/locking/lockdep.c:5754
 __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
 _raw_spin_lock_irqsave+0xa8/0xf0 kernel/locking/spinlock.c:162
 trie_delete_elem+0x96/0x6a0 kernel/bpf/lpm_trie.c:467
 bpf_prog_2c29ac5cdc6b1842+0x42/0x46
 bpf_dispatcher_nop_func include/linux/bpf.h:1213 [inline]
 __bpf_prog_run include/linux/filter.h:612 [inline]
 bpf_prog_run include/linux/filter.h:619 [inline]
 __bpf_trace_run kernel/trace/bpf_trace.c:2322 [inline]
 bpf_trace_run2+0x1d1/0x3c0 kernel/trace/bpf_trace.c:2361
 __bpf_trace_contention_end+0xdd/0x130 include/trace/events/lock.h:122
 trace_contention_end+0xe6/0x110 include/trace/events/lock.h:122
 __pv_queued_spin_lock_slowpath+0x7ec/0x9d0 kernel/locking/qspinlock.c:560
 pv_queued_spin_lock_slowpath arch/x86/include/asm/paravirt.h:586 [inline]
 queued_spin_lock_slowpath arch/x86/include/asm/qspinlock.h:51 [inline]
 queued_spin_lock include/asm-generic/qspinlock.h:114 [inline]
 do_raw_spin_lock+0x24e/0x2c0 kernel/locking/spinlock_debug.c:115
 __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:111 [inline]
 _raw_spin_lock_irqsave+0xb4/0xf0 kernel/locking/spinlock.c:162
 get_partial_node+0x36/0x540 mm/slub.c:2296
 get_partial mm/slub.c:2411 [inline]
 ___slab_alloc+0x9cd/0x12f0 mm/slub.c:3225
 __slab_alloc mm/slub.c:3329 [inline]
 __slab_alloc_node mm/slub.c:3382 [inline]
 slab_alloc_node mm/slub.c:3475 [inline]
 slab_alloc mm/slub.c:3493 [inline]
 __kmem_cache_alloc_lru mm/slub.c:3500 [inline]
 kmem_cache_alloc+0x1b7/0x2e0 mm/slub.c:3509
 getname_flags+0xbb/0x500 fs/namei.c:140
 do_sys_openat2+0xcb/0x1c0 fs/open.c:1413
 do_sys_open fs/open.c:1434 [inline]
 __do_sys_openat fs/open.c:1450 [inline]
 __se_sys_openat fs/open.c:1445 [inline]
 __x64_sys_openat+0x139/0x160 fs/open.c:1445
 do_syscall_x64 arch/x86/entry/common.c:51 [inline]
 do_syscall_64+0x55/0xb0 arch/x86/entry/common.c:81
 entry_SYSCALL_64_after_hwframe+0x68/0xd2
RIP: 0033:0x7fd97af8d290
Code: 48 89 44 24 20 75 93 44 89 54 24 0c e8 49 94 02 00 44 8b 54 24 0c 89 da 48 89 ee 41 89 c0 bf 9c ff ff ff b8 01 01 00 00 0f 05 <48> 3d 00 f0 ff ff 77 38 44 89 c7 89 44 24 0c e8 9c 94 02 00 8b 44
RSP: 002b:00007ffc3a514f70 EFLAGS: 00000293 ORIG_RAX: 0000000000000101
RAX: ffffffffffffffda RBX: 0000000000080001 RCX: 00007fd97af8d290
RDX: 0000000000080001 RSI: 00007fd97b01236c RDI: 00000000ffffff9c
RBP: 00007fd97b01236c R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000293 R12: 0000000000000004
R13: 00007ffc3a515010 R14: 00000000001028af R15: 00007ffc3a515580
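
For context, the cycle lockdep reports above is the classic AB-BA inversion. Chain #1: trie_update_elem() takes &trie->lock and then allocates a node, and the slab allocator takes &n->list_lock inside get_partial_node(). Chain #0: get_partial_node() on another CPU holds &n->list_lock when the contention_end tracepoint fires and runs an attached BPF program, whose trie_delete_elem() call takes &trie->lock. A minimal userspace sketch of the same two-lock shape, with hypothetical lock_a/lock_b standing in for &trie->lock and &n->list_lock (illustration only, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins: lock_a plays the role of &trie->lock,
 * lock_b the role of &n->list_lock from the report above. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors chain #1: trie_update_elem() takes &trie->lock, then the
 * allocation it performs takes &n->list_lock in get_partial_node(). */
static void *updater(void *arg)
{
	pthread_mutex_lock(&lock_a);	/* &trie->lock */
	pthread_mutex_lock(&lock_b);	/* &n->list_lock, via the allocator */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

/* Mirrors chain #0: get_partial_node() holds &n->list_lock when the
 * contention_end tracepoint runs a BPF program that calls
 * trie_delete_elem(), which takes &trie->lock. */
static void *tracer(void *arg)
{
	pthread_mutex_lock(&lock_b);	/* &n->list_lock */
	pthread_mutex_lock(&lock_a);	/* &trie->lock: deadlocks if the
					 * updater already holds lock_a */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, updater, NULL);
	pthread_create(&t1, NULL, tracer, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	/* With an unlucky interleaving both threads block on each
	 * other's lock forever and this line is never reached. */
	puts("no deadlock on this interleaving");
	return 0;
}

The generic ways out of this shape are to impose a single acquisition order for the two locks or to stop taking the second lock while the first is held, e.g. by moving the allocation out of the &trie->lock critical section; which remedy upstream chose for this particular report is not shown here.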