======================================================
WARNING: possible circular locking dependency detected
6.9.0-rc3-syzkaller-00399-g72374d71c315 #0 Not tainted
------------------------------------------------------
syz-executor.2/10675 is trying to acquire lock:
ffff88802a9def18 (&sighand->siglock){-.-.}-{2:2}, at: __lock_task_sighand+0xc2/0x340 kernel/signal.c:1414

but task is already holding lock:
ffff88806b538a80 (lock#12){+.+.}-{2:2}, at: local_lock_acquire include/linux/local_lock_internal.h:29 [inline]
ffff88806b538a80 (lock#12){+.+.}-{2:2}, at: __mmap_lock_do_trace_acquire_returned+0x7f/0x790 mm/mmap_lock.c:237

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #3 (lock#12){+.+.}-{2:2}:
       local_lock_acquire include/linux/local_lock_internal.h:29 [inline]
       __mmap_lock_do_trace_acquire_returned+0x97/0x790 mm/mmap_lock.c:237
       __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
       mmap_read_trylock include/linux/mmap_lock.h:166 [inline]
       stack_map_get_build_id_offset+0x5df/0x7d0 kernel/bpf/stackmap.c:141
       __bpf_get_stack+0x6bf/0x700 kernel/bpf/stackmap.c:449
       ____bpf_get_stack_raw_tp kernel/trace/bpf_trace.c:1985 [inline]
       bpf_get_stack_raw_tp+0x124/0x160 kernel/trace/bpf_trace.c:1975
       ___bpf_prog_run+0x3e51/0xabd0 kernel/bpf/core.c:1997
       __bpf_prog_run32+0xc1/0x100 kernel/bpf/core.c:2236
       bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
       __bpf_prog_run include/linux/filter.h:657 [inline]
       bpf_prog_run include/linux/filter.h:664 [inline]
       __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
       bpf_trace_run3+0x167/0x440 kernel/trace/bpf_trace.c:2421
       __bpf_trace_workqueue_queue_work+0x101/0x140 include/trace/events/workqueue.h:23
       trace_workqueue_queue_work include/trace/events/workqueue.h:23 [inline]
       __queue_work+0x627/0x1020 kernel/workqueue.c:2382
       queue_work_on+0xf4/0x120 kernel/workqueue.c:2435
       bpf_prog_load+0x19bb/0x2660 kernel/bpf/syscall.c:2944
       __sys_bpf+0x9b4/0x4b40 kernel/bpf/syscall.c:5660
       __do_sys_bpf kernel/bpf/syscall.c:5767 [inline]
       __se_sys_bpf kernel/bpf/syscall.c:5765 [inline]
       __x64_sys_bpf+0x78/0xc0 kernel/bpf/syscall.c:5765
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcf/0x260 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #2 (&pool->lock){-.-.}-{2:2}:
       __raw_spin_lock include/linux/spinlock_api_smp.h:133 [inline]
       _raw_spin_lock+0x2e/0x40 kernel/locking/spinlock.c:154
       __queue_work+0x39e/0x1020 kernel/workqueue.c:2360
       queue_work_on+0xf4/0x120 kernel/workqueue.c:2435
       queue_work include/linux/workqueue.h:605 [inline]
       schedule_work include/linux/workqueue.h:666 [inline]
       aio_poll_wake+0x1cb/0xcc0 fs/aio.c:1845
       __wake_up_common+0x131/0x1e0 kernel/sched/wait.c:89
       __wake_up_common_lock kernel/sched/wait.c:106 [inline]
       __wake_up+0x31/0x60 kernel/sched/wait.c:127
       do_signalfd4+0x200/0x3d0 fs/signalfd.c:295
       __do_sys_signalfd fs/signalfd.c:323 [inline]
       __se_sys_signalfd fs/signalfd.c:314 [inline]
       __x64_sys_signalfd+0x121/0x1a0 fs/signalfd.c:314
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcf/0x260 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #1 (&sighand->signalfd_wqh){..-.}-{2:2}:
       __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
       _raw_spin_lock_irqsave+0x3a/0x60 kernel/locking/spinlock.c:162
       __wake_up_common_lock kernel/sched/wait.c:105 [inline]
       __wake_up+0x1c/0x60 kernel/sched/wait.c:127
       signalfd_notify include/linux/signalfd.h:22 [inline]
       __send_signal_locked+0x951/0x11c0 kernel/signal.c:1168
       do_notify_parent+0xeb4/0x1040 kernel/signal.c:2143
       exit_notify kernel/exit.c:754 [inline]
       do_exit+0x1369/0x2c10 kernel/exit.c:898
       do_group_exit+0xd3/0x2a0 kernel/exit.c:1027
       __do_sys_exit_group kernel/exit.c:1038 [inline]
       __se_sys_exit_group kernel/exit.c:1036 [inline]
       __x64_sys_exit_group+0x3e/0x50 kernel/exit.c:1036
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcf/0x260 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #0 (&sighand->siglock){-.-.}-{2:2}:
       check_prev_add kernel/locking/lockdep.c:3134 [inline]
       check_prevs_add kernel/locking/lockdep.c:3253 [inline]
       validate_chain kernel/locking/lockdep.c:3869 [inline]
       __lock_acquire+0x2478/0x3b30 kernel/locking/lockdep.c:5137
       lock_acquire kernel/locking/lockdep.c:5754 [inline]
       lock_acquire+0x1b1/0x560 kernel/locking/lockdep.c:5719
       __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
       _raw_spin_lock_irqsave+0x3a/0x60 kernel/locking/spinlock.c:162
       __lock_task_sighand+0xc2/0x340 kernel/signal.c:1414
       lock_task_sighand include/linux/sched/signal.h:746 [inline]
       do_send_sig_info kernel/signal.c:1300 [inline]
       group_send_sig_info+0x290/0x300 kernel/signal.c:1453
       bpf_send_signal_common+0x2e8/0x3a0 kernel/trace/bpf_trace.c:881
       ____bpf_send_signal_thread kernel/trace/bpf_trace.c:898 [inline]
       bpf_send_signal_thread+0x16/0x20 kernel/trace/bpf_trace.c:896
       ___bpf_prog_run+0x3e51/0xabd0 kernel/bpf/core.c:1997
       __bpf_prog_run32+0xc1/0x100 kernel/bpf/core.c:2236
       bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
       __bpf_prog_run include/linux/filter.h:657 [inline]
       bpf_prog_run include/linux/filter.h:664 [inline]
       __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
       bpf_trace_run4+0x176/0x460 kernel/trace/bpf_trace.c:2422
       __bpf_trace_mmap_lock_acquire_returned+0x134/0x180 include/trace/events/mmap_lock.h:52
       trace_mmap_lock_acquire_returned include/trace/events/mmap_lock.h:52 [inline]
       __mmap_lock_do_trace_acquire_returned+0x456/0x790 mm/mmap_lock.c:237
       __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
       mmap_read_trylock include/linux/mmap_lock.h:166 [inline]
       get_mmap_lock_carefully mm/memory.c:5633 [inline]
       lock_mm_and_find_vma+0xeb/0x580 mm/memory.c:5693
       do_user_addr_fault+0x29c/0x1080 arch/x86/mm/fault.c:1385
       handle_page_fault arch/x86/mm/fault.c:1505 [inline]
       exc_page_fault+0x5c/0xc0 arch/x86/mm/fault.c:1563
       asm_exc_page_fault+0x26/0x30 arch/x86/include/asm/idtentry.h:623
       copy_user_generic arch/x86/include/asm/uaccess_64.h:110 [inline]
       raw_copy_to_user arch/x86/include/asm/uaccess_64.h:131 [inline]
       copy_to_user_iter lib/iov_iter.c:25 [inline]
       iterate_ubuf include/linux/iov_iter.h:29 [inline]
       iterate_and_advance2 include/linux/iov_iter.h:245 [inline]
       iterate_and_advance include/linux/iov_iter.h:271 [inline]
       _copy_to_iter+0x379/0x1110 lib/iov_iter.c:185
       copy_page_to_iter lib/iov_iter.c:362 [inline]
       copy_page_to_iter+0xf1/0x180 lib/iov_iter.c:349
       pipe_read+0x543/0x1400 fs/pipe.c:327
       call_read_iter include/linux/fs.h:2104 [inline]
       new_sync_read fs/read_write.c:395 [inline]
       vfs_read+0x9fd/0xb80 fs/read_write.c:476
       ksys_read+0x1f8/0x260 fs/read_write.c:619
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcf/0x260 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

other info that might help us debug this:

Chain exists of:
  &sighand->siglock --> &pool->lock --> lock#12

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(lock#12);
                               lock(&pool->lock);
                               lock(lock#12);
  lock(&sighand->siglock);

 *** DEADLOCK ***

6 locks held by syz-executor.2/10675:
 #0: ffff88801d6c9068 (&pipe->mutex){+.+.}-{3:3}, at: pipe_read+0x141/0x1400 fs/pipe.c:264
 #1: ffff8880240e9e20 (&mm->mmap_lock){++++}-{3:3}, at: mmap_read_trylock include/linux/mmap_lock.h:165 [inline]
 #1: ffff8880240e9e20 (&mm->mmap_lock){++++}-{3:3}, at: get_mmap_lock_carefully mm/memory.c:5633 [inline]
 #1: ffff8880240e9e20 (&mm->mmap_lock){++++}-{3:3}, at: lock_mm_and_find_vma+0x35/0x580 mm/memory.c:5693
 #2: ffff88806b538a80 (lock#12){+.+.}-{2:2}, at: local_lock_acquire include/linux/local_lock_internal.h:29 [inline]
 #2: ffff88806b538a80 (lock#12){+.+.}-{2:2}, at: __mmap_lock_do_trace_acquire_returned+0x7f/0x790 mm/mmap_lock.c:237
 #3: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:329 [inline]
 #3: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:781 [inline]
 #3: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: get_memcg_path_buf mm/mmap_lock.c:139 [inline]
 #3: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: get_mm_memcg_path+0xb1/0x6f0 mm/mmap_lock.c:209
 #4: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:329 [inline]
 #4: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:781 [inline]
 #4: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: __bpf_trace_run kernel/trace/bpf_trace.c:2380 [inline]
 #4: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: bpf_trace_run4+0x107/0x460 kernel/trace/bpf_trace.c:2422
 #5: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:329 [inline]
 #5: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:781 [inline]
 #5: ffffffff8d7b0e20 (rcu_read_lock){....}-{1:2}, at: __lock_task_sighand+0x3f/0x340 kernel/signal.c:1397

stack backtrace:
CPU: 3 PID: 10675 Comm: syz-executor.2 Not tainted 6.9.0-rc3-syzkaller-00399-g72374d71c315 #0
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x116/0x1f0 lib/dump_stack.c:114
 check_noncircular+0x31a/0x400 kernel/locking/lockdep.c:2187
 check_prev_add kernel/locking/lockdep.c:3134 [inline]
 check_prevs_add kernel/locking/lockdep.c:3253 [inline]
 validate_chain kernel/locking/lockdep.c:3869 [inline]
 __lock_acquire+0x2478/0x3b30 kernel/locking/lockdep.c:5137
 lock_acquire kernel/locking/lockdep.c:5754 [inline]
 lock_acquire+0x1b1/0x560 kernel/locking/lockdep.c:5719
 __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
 _raw_spin_lock_irqsave+0x3a/0x60 kernel/locking/spinlock.c:162
 __lock_task_sighand+0xc2/0x340 kernel/signal.c:1414
 lock_task_sighand include/linux/sched/signal.h:746 [inline]
 do_send_sig_info kernel/signal.c:1300 [inline]
 group_send_sig_info+0x290/0x300 kernel/signal.c:1453
 bpf_send_signal_common+0x2e8/0x3a0 kernel/trace/bpf_trace.c:881
 ____bpf_send_signal_thread kernel/trace/bpf_trace.c:898 [inline]
 bpf_send_signal_thread+0x16/0x20 kernel/trace/bpf_trace.c:896
 ___bpf_prog_run+0x3e51/0xabd0 kernel/bpf/core.c:1997
 __bpf_prog_run32+0xc1/0x100 kernel/bpf/core.c:2236
 bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
 __bpf_prog_run include/linux/filter.h:657 [inline]
 bpf_prog_run include/linux/filter.h:664 [inline]
 __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
 bpf_trace_run4+0x176/0x460 kernel/trace/bpf_trace.c:2422
 __bpf_trace_mmap_lock_acquire_returned+0x134/0x180 include/trace/events/mmap_lock.h:52
 trace_mmap_lock_acquire_returned include/trace/events/mmap_lock.h:52 [inline]
 __mmap_lock_do_trace_acquire_returned+0x456/0x790 mm/mmap_lock.c:237
 __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
 mmap_read_trylock include/linux/mmap_lock.h:166 [inline]
 get_mmap_lock_carefully mm/memory.c:5633 [inline]
 lock_mm_and_find_vma+0xeb/0x580 mm/memory.c:5693
 do_user_addr_fault+0x29c/0x1080 arch/x86/mm/fault.c:1385
 handle_page_fault arch/x86/mm/fault.c:1505 [inline]
 exc_page_fault+0x5c/0xc0 arch/x86/mm/fault.c:1563
 asm_exc_page_fault+0x26/0x30 arch/x86/include/asm/idtentry.h:623
RIP: 0010:copy_user_generic arch/x86/include/asm/uaccess_64.h:110 [inline]
RIP: 0010:raw_copy_to_user arch/x86/include/asm/uaccess_64.h:131 [inline]
RIP: 0010:copy_to_user_iter lib/iov_iter.c:25 [inline]
RIP: 0010:iterate_ubuf include/linux/iov_iter.h:29 [inline]
RIP: 0010:iterate_and_advance2 include/linux/iov_iter.h:245 [inline]
RIP: 0010:iterate_and_advance include/linux/iov_iter.h:271 [inline]
RIP: 0010:_copy_to_iter+0x379/0x1110 lib/iov_iter.c:185
Code: fd 4d 85 ff 0f 85 59 ff ff ff e8 f2 4a 10 fd 4c 8b 74 24 18 89 de 4c 89 f7 e8 33 75 6b fd 0f 01 cb 48 89 d9 48 89 ef 4c 89 f6 <f3> a4 0f 1f 00 48 89 cd 0f 01 ca 49 89 df 49 29 cf e9 29 ff ff ff
RSP: 0018:ffffc900033cfa40 EFLAGS: 00050246
RAX: 0000000000000001 RBX: 0000000000000040 RCX: 0000000000000040
RDX: ffffed1008d67c08 RSI: ffff888046b3e000 RDI: 00007f9825fa8020
RBP: 00007f9825fa8020 R08: 0000000000000000 R09: ffffed1008d67c07
R10: ffff888046b3e03f R11: 0000000000000001 R12: ffffc900033cfda0
R13: 00007f9825fa8060 R14: ffff888046b3e000 R15: 0000000000000000
 copy_page_to_iter lib/iov_iter.c:362 [inline]
 copy_page_to_iter+0xf1/0x180 lib/iov_iter.c:349
 pipe_read+0x543/0x1400 fs/pipe.c:327
 call_read_iter include/linux/fs.h:2104 [inline]
 new_sync_read fs/read_write.c:395 [inline]
 vfs_read+0x9fd/0xb80 fs/read_write.c:476
 ksys_read+0x1f8/0x260 fs/read_write.c:619
 do_syscall_x64 arch/x86/entry/common.c:52 [inline]
 do_syscall_64+0xcf/0x260 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f9825e7cb0c
Code: ec 28 48 89 54 24 18 48 89 74 24 10 89 7c 24 08 e8 59 81 02 00 48 8b 54 24 18 48 8b 74 24 10 41 89 c0 8b 7c 24 08 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 34 44 89 c7 48 89 44 24 08 e8 af 81 02 00 48
RSP: 002b:00007ffeaf0d0a10 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007f9825e7cb0c
RDX: 0000000000000040 RSI: 00007f9825fa8020 RDI: 00000000000000f9
RBP: 00007ffeaf0d0a9c R08: 0000000000000000 R09: 0079746972756365
R10: 00007ffeaf0d03d0 R11: 0000000000000246 R12: 0000000000000032
R13: 0000000000031ece R14: 0000000000031e65 R15: 0000000000000013
----------------
Code disassembly (best guess):
   0:	fd                   	std
   1:	4d 85 ff             	test   %r15,%r15
   4:	0f 85 59 ff ff ff    	jne    0xffffff63
   a:	e8 f2 4a 10 fd       	call   0xfd104b01
   f:	4c 8b 74 24 18       	mov    0x18(%rsp),%r14
  14:	89 de                	mov    %ebx,%esi
  16:	4c 89 f7             	mov    %r14,%rdi
  19:	e8 33 75 6b fd       	call   0xfd6b7551
  1e:	0f 01 cb             	stac
  21:	48 89 d9             	mov    %rbx,%rcx
  24:	48 89 ef             	mov    %rbp,%rdi
  27:	4c 89 f6             	mov    %r14,%rsi
* 2a:	f3 a4                	rep movsb %ds:(%rsi),%es:(%rdi) <-- trapping instruction
  2c:	0f 1f 00             	nopl   (%rax)
  2f:	48 89 cd             	mov    %rcx,%rbp
  32:	0f 01 ca             	clac
  35:	49 89 df             	mov    %rbx,%r15
  38:	49 29 cf             	sub    %rcx,%r15
  3b:	e9 29 ff ff ff       	jmp    0xffffff69
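
The -> #0 chain shows a BPF program attached to the mmap_lock_acquire_returned tracepoint calling bpf_send_signal_thread(), so &sighand->siglock is acquired while the mmap_lock tracing local_lock (lock#12) is still held. Below is a minimal sketch of a program with that shape; it is only an illustration reconstructed from the trace, not the syzkaller reproducer, and the section name, license string and choice of SIGUSR1 are assumptions.

/* Sketch only: a raw tracepoint program on mmap_lock_acquire_returned
 * that sends a signal from inside the tracepoint, as in the -> #0 chain
 * above. Assumes a vmlinux.h dump and libbpf headers; SIGUSR1 (10) is an
 * arbitrary illustrative choice. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("raw_tracepoint/mmap_lock_acquire_returned")
int send_sig_on_mmap_lock_trace(struct bpf_raw_tracepoint_args *ctx)
{
	/* Runs from __mmap_lock_do_trace_acquire_returned() with lock#12
	 * held; the helper then takes &sighand->siglock, the new edge that
	 * completes the lock#12 --> ... --> &sighand->siglock cycle. */
	bpf_send_signal_thread(10 /* SIGUSR1 */);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

The other half of the cycle comes from the -> #3 chain: a second BPF program on the workqueue_queue_work tracepoint calls the bpf_get_stack() helper against a build-id stack map, which does mmap_read_trylock() and therefore takes lock#12 while __queue_work() is still holding &pool->lock.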