======================================================
WARNING: possible circular locking dependency detected
6.10.0-syzkaller-11323-g7846b618e0a4 #0 Not tainted
------------------------------------------------------
syz.3.3227/14941 is trying to acquire lock:
ffff88806b038aa0 (lock#11){+.+.}-{2:2}, at: local_lock_acquire include/linux/local_lock_internal.h:29 [inline]
ffff88806b038aa0 (lock#11){+.+.}-{2:2}, at: __mmap_lock_do_trace_acquire_returned+0x7f/0x790 mm/mmap_lock.c:237

but task is already holding lock:
ffff88806b03ec98 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested kernel/sched/core.c:568 [inline]
ffff88806b03ec98 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x7e/0x130 kernel/sched/core.c:553

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #2 (&rq->__lock){-.-.}-{2:2}:
       _raw_spin_lock_nested+0x31/0x40 kernel/locking/spinlock.c:378
       raw_spin_rq_lock_nested+0x29/0x130 kernel/sched/core.c:560
       raw_spin_rq_lock kernel/sched/sched.h:1415 [inline]
       rq_lock kernel/sched/sched.h:1714 [inline]
       task_fork_fair+0x73/0x250 kernel/sched/fair.c:12710
       sched_cgroup_fork+0x3cf/0x510 kernel/sched/core.c:4633
       copy_process+0x43a1/0x8de0 kernel/fork.c:2482
       kernel_clone+0xfd/0x980 kernel/fork.c:2780
       user_mode_thread+0xb4/0xf0 kernel/fork.c:2858
       rest_init+0x23/0x2b0 init/main.c:712
       start_kernel+0x3df/0x4c0 init/main.c:1103
       x86_64_start_reservations+0x18/0x30 arch/x86/kernel/head64.c:507
       x86_64_start_kernel+0xb2/0xc0 arch/x86/kernel/head64.c:488
       common_startup_64+0x13e/0x148

-> #1 (&p->pi_lock){-.-.}-{2:2}:
       __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
       _raw_spin_lock_irqsave+0x3a/0x60 kernel/locking/spinlock.c:162
       class_raw_spinlock_irqsave_constructor include/linux/spinlock.h:551 [inline]
       try_to_wake_up+0x9a/0x13e0 kernel/sched/core.c:4051
       rcu_read_unlock_special kernel/rcu/tree_plugin.h:665 [inline]
       __rcu_read_unlock+0x24c/0x580 kernel/rcu/tree_plugin.h:436
       rcu_read_unlock include/linux/rcupdate.h:872 [inline]
       put_memcg_path_buf mm/mmap_lock.c:153 [inline]
       __mmap_lock_do_trace_acquire_returned+0x262/0x790 mm/mmap_lock.c:237
       __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
       mmap_read_trylock include/linux/mmap_lock.h:164 [inline]
       get_mmap_lock_carefully mm/memory.c:5716 [inline]
       lock_mm_and_find_vma+0xeb/0x6a0 mm/memory.c:5776
       do_user_addr_fault+0x2b5/0x13f0 arch/x86/mm/fault.c:1361
       handle_page_fault arch/x86/mm/fault.c:1481 [inline]
       exc_page_fault+0x5c/0xc0 arch/x86/mm/fault.c:1539
       asm_exc_page_fault+0x26/0x30 arch/x86/include/asm/idtentry.h:623
       copy_user_generic arch/x86/include/asm/uaccess_64.h:110 [inline]
       raw_copy_to_user arch/x86/include/asm/uaccess_64.h:131 [inline]
       copy_to_user_iter lib/iov_iter.c:25 [inline]
       iterate_iovec include/linux/iov_iter.h:51 [inline]
       iterate_and_advance2 include/linux/iov_iter.h:247 [inline]
       iterate_and_advance include/linux/iov_iter.h:271 [inline]
       _copy_to_iter+0x4cd/0x1140 lib/iov_iter.c:185
       copy_page_to_iter lib/iov_iter.c:362 [inline]
       copy_page_to_iter+0xf1/0x180 lib/iov_iter.c:349
       process_vm_rw_pages mm/process_vm_access.c:45 [inline]
       process_vm_rw_single_vec mm/process_vm_access.c:118 [inline]
       process_vm_rw_core.constprop.0+0x5c9/0xa10 mm/process_vm_access.c:216
       process_vm_rw+0x301/0x360 mm/process_vm_access.c:284
       __do_sys_process_vm_readv mm/process_vm_access.c:296 [inline]
       __se_sys_process_vm_readv mm/process_vm_access.c:292 [inline]
       __x64_sys_process_vm_readv+0xe2/0x1c0 mm/process_vm_access.c:292
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcd/0x250 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #0 (lock#11){+.+.}-{2:2}:
       check_prev_add kernel/locking/lockdep.c:3133 [inline]
       check_prevs_add kernel/locking/lockdep.c:3252 [inline]
       validate_chain kernel/locking/lockdep.c:3868 [inline]
       __lock_acquire+0x24ed/0x3cb0 kernel/locking/lockdep.c:5142
       lock_acquire kernel/locking/lockdep.c:5759 [inline]
       lock_acquire+0x1b1/0x560 kernel/locking/lockdep.c:5724
       local_lock_acquire include/linux/local_lock_internal.h:29 [inline]
       __mmap_lock_do_trace_acquire_returned+0x97/0x790 mm/mmap_lock.c:237
       __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
       mmap_read_trylock include/linux/mmap_lock.h:164 [inline]
       stack_map_get_build_id_offset+0x5d9/0x7c0 kernel/bpf/stackmap.c:141
       __bpf_get_stack+0x6bf/0x700 kernel/bpf/stackmap.c:449
       ____bpf_get_stack_raw_tp kernel/trace/bpf_trace.c:1997 [inline]
       bpf_get_stack_raw_tp+0x124/0x160 kernel/trace/bpf_trace.c:1987
       ___bpf_prog_run+0x3e51/0xabd0 kernel/bpf/core.c:2010
       __bpf_prog_run32+0xc1/0x100 kernel/bpf/core.c:2251
       bpf_dispatcher_nop_func include/linux/bpf.h:1243 [inline]
       __bpf_prog_run include/linux/filter.h:691 [inline]
       bpf_prog_run include/linux/filter.h:698 [inline]
       __bpf_trace_run kernel/trace/bpf_trace.c:2406 [inline]
       bpf_trace_run2+0x231/0x590 kernel/trace/bpf_trace.c:2447
       __bpf_trace_tlb_flush+0xd2/0x110 include/trace/events/tlb.h:38
       trace_tlb_flush+0xf3/0x170 include/trace/events/tlb.h:38
       switch_mm_irqs_off+0x697/0xbb0 arch/x86/mm/tlb.c:642
       context_switch kernel/sched/core.c:5172 [inline]
       __schedule+0xc4d/0x5490 kernel/sched/core.c:6529
       __schedule_loop kernel/sched/core.c:6606 [inline]
       schedule+0xe7/0x350 kernel/sched/core.c:6621
       futex_wait_queue+0xfc/0x1f0 kernel/futex/waitwake.c:370
       __futex_wait+0x291/0x3c0 kernel/futex/waitwake.c:669
       futex_wait+0xe9/0x380 kernel/futex/waitwake.c:697
       do_futex+0x22b/0x350 kernel/futex/syscalls.c:102
       __do_sys_futex kernel/futex/syscalls.c:179 [inline]
       __se_sys_futex kernel/futex/syscalls.c:160 [inline]
       __x64_sys_futex+0x1e1/0x4c0 kernel/futex/syscalls.c:160
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcd/0x250 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

other info that might help us debug this:

Chain exists of:
  lock#11 --> &p->pi_lock --> &rq->__lock

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&rq->__lock);
                               lock(&p->pi_lock);
                               lock(&rq->__lock);
  lock(lock#11);

 *** DEADLOCK ***

3 locks held by syz.3.3227/14941:
 #0: ffff88806b03ec98 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested kernel/sched/core.c:568 [inline]
 #0: ffff88806b03ec98 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x7e/0x130 kernel/sched/core.c:553
 #1: ffffffff8dbb49e0 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:327 [inline]
 #1: ffffffff8dbb49e0 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:839 [inline]
 #1: ffffffff8dbb49e0 (rcu_read_lock){....}-{1:2}, at: __bpf_trace_run kernel/trace/bpf_trace.c:2405 [inline]
 #1: ffffffff8dbb49e0 (rcu_read_lock){....}-{1:2}, at: bpf_trace_run2+0x1c2/0x590 kernel/trace/bpf_trace.c:2447
 #2: ffff88802f667398 (&mm->mmap_lock){++++}-{3:3}, at: mmap_read_trylock include/linux/mmap_lock.h:163 [inline]
 #2: ffff88802f667398 (&mm->mmap_lock){++++}-{3:3}, at: stack_map_get_build_id_offset+0x1e8/0x7c0 kernel/bpf/stackmap.c:141

stack backtrace:
CPU: 0 PID: 14941 Comm: syz.3.3227 Not tainted 6.10.0-syzkaller-11323-g7846b618e0a4 #0
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x116/0x1f0 lib/dump_stack.c:114
 check_noncircular+0x31a/0x400 kernel/locking/lockdep.c:2186
 check_prev_add kernel/locking/lockdep.c:3133 [inline]
 check_prevs_add kernel/locking/lockdep.c:3252 [inline]
 validate_chain kernel/locking/lockdep.c:3868 [inline]
 __lock_acquire+0x24ed/0x3cb0 kernel/locking/lockdep.c:5142
 lock_acquire kernel/locking/lockdep.c:5759 [inline]
 lock_acquire+0x1b1/0x560 kernel/locking/lockdep.c:5724
 local_lock_acquire include/linux/local_lock_internal.h:29 [inline]
 __mmap_lock_do_trace_acquire_returned+0x97/0x790 mm/mmap_lock.c:237
 __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
 mmap_read_trylock include/linux/mmap_lock.h:164 [inline]
 stack_map_get_build_id_offset+0x5d9/0x7c0 kernel/bpf/stackmap.c:141
 __bpf_get_stack+0x6bf/0x700 kernel/bpf/stackmap.c:449
 ____bpf_get_stack_raw_tp kernel/trace/bpf_trace.c:1997 [inline]
 bpf_get_stack_raw_tp+0x124/0x160 kernel/trace/bpf_trace.c:1987
 ___bpf_prog_run+0x3e51/0xabd0 kernel/bpf/core.c:2010
 __bpf_prog_run32+0xc1/0x100 kernel/bpf/core.c:2251
 bpf_dispatcher_nop_func include/linux/bpf.h:1243 [inline]
 __bpf_prog_run include/linux/filter.h:691 [inline]
 bpf_prog_run include/linux/filter.h:698 [inline]
 __bpf_trace_run kernel/trace/bpf_trace.c:2406 [inline]
 bpf_trace_run2+0x231/0x590 kernel/trace/bpf_trace.c:2447
 __bpf_trace_tlb_flush+0xd2/0x110 include/trace/events/tlb.h:38
 trace_tlb_flush+0xf3/0x170 include/trace/events/tlb.h:38
 switch_mm_irqs_off+0x697/0xbb0 arch/x86/mm/tlb.c:642
 context_switch kernel/sched/core.c:5172 [inline]
 __schedule+0xc4d/0x5490 kernel/sched/core.c:6529
 __schedule_loop kernel/sched/core.c:6606 [inline]
 schedule+0xe7/0x350 kernel/sched/core.c:6621
 futex_wait_queue+0xfc/0x1f0 kernel/futex/waitwake.c:370
 __futex_wait+0x291/0x3c0 kernel/futex/waitwake.c:669
 futex_wait+0xe9/0x380 kernel/futex/waitwake.c:697
 do_futex+0x22b/0x350 kernel/futex/syscalls.c:102
 __do_sys_futex kernel/futex/syscalls.c:179 [inline]
 __se_sys_futex kernel/futex/syscalls.c:160 [inline]
 __x64_sys_futex+0x1e1/0x4c0 kernel/futex/syscalls.c:160
 do_syscall_x64 arch/x86/entry/common.c:52 [inline]
 do_syscall_64+0xcd/0x250 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f2754975b59
Code: Unable to access opcode bytes at 0x7f2754975b2f.
RSP: 002b:00007f27557040f8 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
RAX: ffffffffffffffda RBX: 00007f2754b05f68 RCX: 00007f2754975b59
RDX: 0000000000000000 RSI: 0000000000000080 RDI: 00007f2754b05f68
RBP: 00007f2754b05f60 R08: 00007f27557046c0 R09: 00007f27557046c0
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f2754b05f6c
R13: 000000000000000b R14: 00007ffd7d591ce0 R15: 00007ffd7d591dc8
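
For context, the new edge (#0) comes from a BPF program running off the tlb_flush tracepoint, which fires under rq->__lock during context_switch(); the program calls bpf_get_stack(), and the build-id lookup in stack_map_get_build_id_offset() takes the mmap lock, whose CONFIG_MEMCG tracing path acquires the per-CPU local lock reported as lock#11. Below is a minimal sketch of the kind of program that drives this leg; the tracepoint name and helper flags are taken from the trace above, but the program name and buffer size are illustrative assumptions, and this is not the actual syzkaller reproducer.

/* Hypothetical libbpf-style sketch, not the syzkaller reproducer. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Illustrative frame count; keeps the buffer within the 512-byte BPF stack. */
#define MAX_FRAMES 8

SEC("raw_tracepoint/tlb_flush")
int on_tlb_flush(struct bpf_raw_tracepoint_args *ctx)
{
	struct bpf_stack_build_id buf[MAX_FRAMES];

	/* BPF_F_USER_BUILD_ID routes __bpf_get_stack() through
	 * stack_map_get_build_id_offset(), whose mmap_read_trylock()
	 * is the lock#11 acquisition flagged in the report. */
	bpf_get_stack(ctx, buf, sizeof(buf),
		      BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Attached system-wide, any task that context-switches with a TLB flush while holding rq->__lock can re-enter the mmap-lock tracing local_lock, which is exactly the inversion lockdep reports against the #1 path (mmap-lock tracing taken first, scheduler locks taken inside it via the RCU unlock wakeup).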