======================================================
WARNING: possible circular locking dependency detected
5.15.162-syzkaller #0 Not tainted
------------------------------------------------------
syz.0.2001/9531 is trying to acquire lock:
ffff8880b9b35bb8 (lock#9){+.+.}-{2:2}, at: local_lock_acquire+0xd/0x170 include/linux/local_lock_internal.h:28

but task is already holding lock:
ffff8880b9b3a358 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x26/0x140 kernel/sched/core.c:475

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #2 (&rq->__lock){-.-.}-{2:2}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5623
       _raw_spin_lock_nested+0x2d/0x40 kernel/locking/spinlock.c:368
       raw_spin_rq_lock_nested+0x26/0x140 kernel/sched/core.c:475
       raw_spin_rq_lock kernel/sched/sched.h:1326 [inline]
       rq_lock kernel/sched/sched.h:1621 [inline]
       task_fork_fair+0x5d/0x350 kernel/sched/fair.c:11495
       sched_cgroup_fork+0x2d3/0x330 kernel/sched/core.c:4466
       copy_process+0x224a/0x3ef0 kernel/fork.c:2320
       kernel_clone+0x210/0x960 kernel/fork.c:2604
       kernel_thread+0x168/0x1e0 kernel/fork.c:2656
       rest_init+0x21/0x330 init/main.c:706
       start_kernel+0x48c/0x540 init/main.c:1140
       secondary_startup_64_no_verify+0xb1/0xbb

-> #1 (&p->pi_lock){-.-.}-{2:2}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5623
       __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
       _raw_spin_lock_irqsave+0xd1/0x120 kernel/locking/spinlock.c:162
       try_to_wake_up+0xae/0x1300 kernel/sched/core.c:4030
       rcu_read_unlock_special+0x3aa/0x520 kernel/rcu/tree_plugin.h:650
       __rcu_read_unlock+0x92/0x100 kernel/rcu/tree_plugin.h:422
       rcu_read_unlock include/linux/rcupdate.h:771 [inline]
       put_memcg_path_buf+0xde/0x100 mm/mmap_lock.c:153
       __mmap_lock_do_trace_acquire_returned+0x12f/0x340 mm/mmap_lock.c:237
       __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
       mmap_read_trylock include/linux/mmap_lock.h:137 [inline]
       do_user_addr_fault arch/x86/mm/fault.c:1298 [inline]
       handle_page_fault arch/x86/mm/fault.c:1445 [inline]
       exc_page_fault+0x564/0x700 arch/x86/mm/fault.c:1501
       asm_exc_page_fault+0x22/0x30 arch/x86/include/asm/idtentry.h:568
       copy_user_enhanced_fast_string+0xe/0x40 arch/x86/lib/copy_user_64.S:205
       copy_user_generic arch/x86/include/asm/uaccess_64.h:37 [inline]
       raw_copy_from_user arch/x86/include/asm/uaccess_64.h:52 [inline]
       copyin lib/iov_iter.c:168 [inline]
       copy_page_from_iter_iovec lib/iov_iter.c:312 [inline]
       copy_page_from_iter+0x3fe/0x750 lib/iov_iter.c:911
       pipe_write+0x99a/0x1b90 fs/pipe.c:536
       call_write_iter include/linux/fs.h:2172 [inline]
       new_sync_write fs/read_write.c:507 [inline]
       vfs_write+0xacf/0xe50 fs/read_write.c:594
       ksys_write+0x1a2/0x2c0 fs/read_write.c:647
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3b/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x66/0xd0

-> #0 (lock#9){+.+.}-{2:2}:
       check_prev_add kernel/locking/lockdep.c:3053 [inline]
       check_prevs_add kernel/locking/lockdep.c:3172 [inline]
       validate_chain+0x1649/0x5930 kernel/locking/lockdep.c:3788
       __lock_acquire+0x1295/0x1ff0 kernel/locking/lockdep.c:5012
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5623
       local_lock_acquire+0x29/0x170 include/linux/local_lock_internal.h:29
       __mmap_lock_do_trace_acquire_returned+0x7c/0x340 mm/mmap_lock.c:237
       __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
       mmap_read_trylock include/linux/mmap_lock.h:137 [inline]
       stack_map_get_build_id_offset+0x612/0x930 kernel/bpf/stackmap.c:185
       __bpf_get_stack+0x495/0x570 kernel/bpf/stackmap.c:496
       ____bpf_get_stack_raw_tp kernel/trace/bpf_trace.c:1490 [inline]
       bpf_get_stack_raw_tp+0x1b2/0x220 kernel/trace/bpf_trace.c:1480
       bpf_prog_ec3b2eefa702d8d3+0x3a/0xb70
       bpf_dispatcher_nop_func include/linux/bpf.h:790 [inline]
       __bpf_prog_run include/linux/filter.h:628 [inline]
       bpf_prog_run include/linux/filter.h:635 [inline]
       __bpf_trace_run kernel/trace/bpf_trace.c:1880 [inline]
       bpf_trace_run2+0x19e/0x340 kernel/trace/bpf_trace.c:1917
       trace_tlb_flush+0xed/0x110 include/trace/events/tlb.h:38
       switch_mm_irqs_off+0x748/0xa30
       context_switch kernel/sched/core.c:5016 [inline]
       __schedule+0x1167/0x45b0 kernel/sched/core.c:6376
       schedule+0x11b/0x1f0 kernel/sched/core.c:6459
       freezable_schedule include/linux/freezer.h:172 [inline]
       futex_wait_queue_me+0x25b/0x480 kernel/futex/core.c:2863
       futex_wait+0x2f8/0x740 kernel/futex/core.c:2964
       do_futex+0x1414/0x1810 kernel/futex/core.c:3982
       __do_sys_futex kernel/futex/core.c:4059 [inline]
       __se_sys_futex+0x407/0x490 kernel/futex/core.c:4040
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3b/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x66/0xd0

other info that might help us debug this:

Chain exists of:
  lock#9 --> &p->pi_lock --> &rq->__lock

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&rq->__lock);
                               lock(&p->pi_lock);
                               lock(&rq->__lock);
  lock(lock#9);

 *** DEADLOCK ***

3 locks held by syz.0.2001/9531:
 #0: ffff8880b9b3a358 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x26/0x140 kernel/sched/core.c:475
 #1: ffffffff8c91fb20 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire+0x5/0x30 include/linux/rcupdate.h:311
 #2: ffff88807ae1a428 (&mm->mmap_lock){++++}-{3:3}, at: mmap_read_trylock include/linux/mmap_lock.h:136 [inline]
 #2: ffff88807ae1a428 (&mm->mmap_lock){++++}-{3:3}, at: stack_map_get_build_id_offset+0x23e/0x930 kernel/bpf/stackmap.c:185

stack backtrace:
CPU: 1 PID: 9531 Comm: syz.0.2001 Not tainted 5.15.162-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 06/07/2024
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x1e3/0x2d0 lib/dump_stack.c:106
 check_noncircular+0x2f8/0x3b0 kernel/locking/lockdep.c:2133
 check_prev_add kernel/locking/lockdep.c:3053 [inline]
 check_prevs_add kernel/locking/lockdep.c:3172 [inline]
 validate_chain+0x1649/0x5930 kernel/locking/lockdep.c:3788
 __lock_acquire+0x1295/0x1ff0 kernel/locking/lockdep.c:5012
 lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5623
 local_lock_acquire+0x29/0x170 include/linux/local_lock_internal.h:29
 __mmap_lock_do_trace_acquire_returned+0x7c/0x340 mm/mmap_lock.c:237
 __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
 mmap_read_trylock include/linux/mmap_lock.h:137 [inline]
 stack_map_get_build_id_offset+0x612/0x930 kernel/bpf/stackmap.c:185
 __bpf_get_stack+0x495/0x570 kernel/bpf/stackmap.c:496
 ____bpf_get_stack_raw_tp kernel/trace/bpf_trace.c:1490 [inline]
 bpf_get_stack_raw_tp+0x1b2/0x220 kernel/trace/bpf_trace.c:1480
 bpf_prog_ec3b2eefa702d8d3+0x3a/0xb70
 bpf_dispatcher_nop_func include/linux/bpf.h:790 [inline]
 __bpf_prog_run include/linux/filter.h:628 [inline]
 bpf_prog_run include/linux/filter.h:635 [inline]
 __bpf_trace_run kernel/trace/bpf_trace.c:1880 [inline]
 bpf_trace_run2+0x19e/0x340 kernel/trace/bpf_trace.c:1917
 trace_tlb_flush+0xed/0x110 include/trace/events/tlb.h:38
 switch_mm_irqs_off+0x748/0xa30
 context_switch kernel/sched/core.c:5016 [inline]
 __schedule+0x1167/0x45b0 kernel/sched/core.c:6376
 schedule+0x11b/0x1f0 kernel/sched/core.c:6459
 freezable_schedule include/linux/freezer.h:172 [inline]
 futex_wait_queue_me+0x25b/0x480 kernel/futex/core.c:2863
 futex_wait+0x2f8/0x740 kernel/futex/core.c:2964
 do_futex+0x1414/0x1810 kernel/futex/core.c:3982
 __do_sys_futex kernel/futex/core.c:4059 [inline]
 __se_sys_futex+0x407/0x490 kernel/futex/core.c:4040
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x3b/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x66/0xd0
RIP: 0033:0x7f09d5c46bd9
Code: Unable to access opcode bytes at RIP 0x7f09d5c46baf.
RSP: 002b:00007ffcc6b8e068 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
RAX: ffffffffffffffda RBX: 0000000000041cc2 RCX: 00007f09d5c46bd9
RDX: 0000000000000000 RSI: 0000000000000080 RDI: 00007f09d5dd4f6c
RBP: 0000000000041c90 R08: 0000000000000010 R09: 00000008c6b8e38f
R10: 00007ffcc6b8e150 R11: 0000000000000246 R12: 00007f09d5dd4f6c
R13: 0000000000000032 R14: 00007ffcc6b8e170 R15: 00007ffcc6b8e150
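
For context on the #0 chain: the tlb_flush tracepoint fires from switch_mm_irqs_off() during a context switch, so the attached BPF program runs with rq->__lock already held; its bpf_get_stack() call with build-ID resolution then takes mmap_read_trylock() and the mmap_lock tracing local lock (lock#9), closing the cycle. Below is a minimal, hypothetical sketch of the kind of program that exercises this path; it is not the syzkaller reproducer, and the program name, section spelling, and buffer size are illustrative only.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch: a raw tracepoint program on tlb_flush that
 * requests a user stack with build-ID translation, i.e. the
 * bpf_get_stack_raw_tp -> stack_map_get_build_id_offset ->
 * mmap_read_trylock path seen in the report above. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("raw_tracepoint/tlb_flush")
int on_tlb_flush(struct bpf_raw_tracepoint_args *ctx)
{
	/* BPF_F_USER_BUILD_ID makes the helper fill bpf_stack_build_id
	 * entries, which requires walking current->mm under
	 * mmap_read_trylock(). */
	struct bpf_stack_build_id stack[8];

	bpf_get_stack(ctx, stack, sizeof(stack),
		      BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	return 0;
}

Loading such a program (e.g. with libbpf's bpf_program__attach_raw_tracepoint()) and letting tasks context-switch would, under this assumption, reach the same helper path the report shows.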