=====================================================
WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected
5.2.0+ #69 Not tainted
-----------------------------------------------------
syz-executor.0/29303 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire:
0000000056499967 (&fiq->waitq){+.+.}, at: spin_lock /./include/linux/spinlock.h:338 [inline]
0000000056499967 (&fiq->waitq){+.+.}, at: aio_poll /fs/aio.c:1753 [inline]
0000000056499967 (&fiq->waitq){+.+.}, at: __io_submit_one /fs/aio.c:1827 [inline]
0000000056499967 (&fiq->waitq){+.+.}, at: io_submit_one+0xefa/0x2ef0 /fs/aio.c:1864

and this task is already holding:
00000000557b9697 (&(&ctx->ctx_lock)->rlock){..-.}, at: spin_lock_irq /./include/linux/spinlock.h:363 [inline]
00000000557b9697 (&(&ctx->ctx_lock)->rlock){..-.}, at: aio_poll /fs/aio.c:1751 [inline]
00000000557b9697 (&(&ctx->ctx_lock)->rlock){..-.}, at: __io_submit_one /fs/aio.c:1827 [inline]
00000000557b9697 (&(&ctx->ctx_lock)->rlock){..-.}, at: io_submit_one+0xeb5/0x2ef0 /fs/aio.c:1864
which would create a new lock dependency:
 (&(&ctx->ctx_lock)->rlock){..-.} -> (&fiq->waitq){+.+.}

but this new dependency connects a SOFTIRQ-irq-safe lock:
 (&(&ctx->ctx_lock)->rlock){..-.}

... which became SOFTIRQ-irq-safe at:
  lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
  __raw_spin_lock_irq /./include/linux/spinlock_api_smp.h:128 [inline]
  _raw_spin_lock_irq+0x60/0x80 /kernel/locking/spinlock.c:167
  spin_lock_irq /./include/linux/spinlock.h:363 [inline]
  free_ioctx_users+0x2d/0x490 /fs/aio.c:620
  percpu_ref_put_many /./include/linux/percpu-refcount.h:293 [inline]
  percpu_ref_put /./include/linux/percpu-refcount.h:309 [inline]
  percpu_ref_call_confirm_rcu /lib/percpu-refcount.c:130 [inline]
  percpu_ref_switch_to_atomic_rcu+0x4c0/0x570 /lib/percpu-refcount.c:165
  __rcu_reclaim /kernel/rcu/rcu.h:222 [inline]
  rcu_do_batch /kernel/rcu/tree.c:2114 [inline]
  rcu_core+0x67f/0x1580 /kernel/rcu/tree.c:2314
  rcu_core_si+0x9/0x10 /kernel/rcu/tree.c:2323
  __do_softirq+0x262/0x98c /kernel/softirq.c:292
  invoke_softirq /kernel/softirq.c:373 [inline]
  irq_exit+0x19b/0x1e0 /kernel/softirq.c:413
  exiting_irq /./arch/x86/include/asm/apic.h:537 [inline]
  smp_apic_timer_interrupt+0x1a3/0x610 /arch/x86/kernel/apic/apic.c:1095
  apic_timer_interrupt+0xf/0x20 /arch/x86/entry/entry_64.S:828
  __sanitizer_cov_trace_pc+0x1/0x50 /kernel/kcov.c:95
  rcu_lockdep_current_cpu_online /kernel/rcu/tree.c:942 [inline]
  rcu_lockdep_current_cpu_online+0x39/0x130 /kernel/rcu/tree.c:933
  rcu_read_lock_sched_held+0x97/0x130 /kernel/rcu/update.c:102
  trace_workqueue_execute_end /./include/trace/events/workqueue.h:114 [inline]
  process_one_work+0x12aa/0x1740 /kernel/workqueue.c:2274
  worker_thread+0x98/0xe40 /kernel/workqueue.c:2415
  kthread+0x361/0x430 /kernel/kthread.c:255
  ret_from_fork+0x24/0x30 /arch/x86/entry/entry_64.S:352

to a SOFTIRQ-irq-unsafe lock:
 (&fiq->waitq){+.+.}

... which became SOFTIRQ-irq-unsafe at:
...
  lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
  __raw_spin_lock /./include/linux/spinlock_api_smp.h:142 [inline]
  _raw_spin_lock+0x2f/0x40 /kernel/locking/spinlock.c:151
  spin_lock /./include/linux/spinlock.h:338 [inline]
  flush_bg_queue+0x1f3/0x3c0 /fs/fuse/dev.c:415
  fuse_request_queue_background+0x2f8/0x5a0 /fs/fuse/dev.c:676
  fuse_request_send_background+0x58/0x110 /fs/fuse/dev.c:687
  fuse_send_init /fs/fuse/inode.c:986 [inline]
  fuse_fill_super+0x13cd/0x1730 /fs/fuse/inode.c:1211
  mount_nodev+0x66/0x110 /fs/super.c:1392
  fuse_mount+0x2d/0x40 /fs/fuse/inode.c:1236
  legacy_get_tree+0x108/0x220 /fs/fs_context.c:661
  vfs_get_tree+0x8e/0x390 /fs/super.c:1476
  do_new_mount /fs/namespace.c:2792 [inline]
  do_mount+0x138c/0x1c00 /fs/namespace.c:3112
  ksys_mount+0xdb/0x150 /fs/namespace.c:3321
  __do_sys_mount /fs/namespace.c:3335 [inline]
  __se_sys_mount /fs/namespace.c:3332 [inline]
  __x64_sys_mount+0xbe/0x150 /fs/namespace.c:3332
  do_syscall_64+0xfd/0x6a0 /arch/x86/entry/common.c:296
  entry_SYSCALL_64_after_hwframe+0x49/0xbe

other info that might help us debug this:

 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&fiq->waitq);
                               local_irq_disable();
                               lock(&(&ctx->ctx_lock)->rlock);
                               lock(&fiq->waitq);
  <Interrupt>
    lock(&(&ctx->ctx_lock)->rlock);

 *** DEADLOCK ***

1 lock held by syz-executor.0/29303:
 #0: 00000000557b9697 (&(&ctx->ctx_lock)->rlock){..-.}, at: spin_lock_irq /./include/linux/spinlock.h:363 [inline]
 #0: 00000000557b9697 (&(&ctx->ctx_lock)->rlock){..-.}, at: aio_poll /fs/aio.c:1751 [inline]
 #0: 00000000557b9697 (&(&ctx->ctx_lock)->rlock){..-.}, at: __io_submit_one /fs/aio.c:1827 [inline]
 #0: 00000000557b9697 (&(&ctx->ctx_lock)->rlock){..-.}, at: io_submit_one+0xeb5/0x2ef0 /fs/aio.c:1864

the dependencies between SOFTIRQ-irq-safe lock and the holding lock:
-> (&(&ctx->ctx_lock)->rlock){..-.} {
   IN-SOFTIRQ-W at:
     lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
     __raw_spin_lock_irq /./include/linux/spinlock_api_smp.h:128 [inline]
     _raw_spin_lock_irq+0x60/0x80 /kernel/locking/spinlock.c:167
     spin_lock_irq /./include/linux/spinlock.h:363 [inline]
     free_ioctx_users+0x2d/0x490 /fs/aio.c:620
     percpu_ref_put_many /./include/linux/percpu-refcount.h:293 [inline]
     percpu_ref_put /./include/linux/percpu-refcount.h:309 [inline]
     percpu_ref_call_confirm_rcu /lib/percpu-refcount.c:130 [inline]
     percpu_ref_switch_to_atomic_rcu+0x4c0/0x570 /lib/percpu-refcount.c:165
     __rcu_reclaim /kernel/rcu/rcu.h:222 [inline]
     rcu_do_batch /kernel/rcu/tree.c:2114 [inline]
     rcu_core+0x67f/0x1580 /kernel/rcu/tree.c:2314
     rcu_core_si+0x9/0x10 /kernel/rcu/tree.c:2323
     __do_softirq+0x262/0x98c /kernel/softirq.c:292
     invoke_softirq /kernel/softirq.c:373 [inline]
     irq_exit+0x19b/0x1e0 /kernel/softirq.c:413
     exiting_irq /./arch/x86/include/asm/apic.h:537 [inline]
     smp_apic_timer_interrupt+0x1a3/0x610 /arch/x86/kernel/apic/apic.c:1095
     apic_timer_interrupt+0xf/0x20 /arch/x86/entry/entry_64.S:828
     __sanitizer_cov_trace_pc+0x1/0x50 /kernel/kcov.c:95
     rcu_lockdep_current_cpu_online /kernel/rcu/tree.c:942 [inline]
     rcu_lockdep_current_cpu_online+0x39/0x130 /kernel/rcu/tree.c:933
     rcu_read_lock_sched_held+0x97/0x130 /kernel/rcu/update.c:102
     trace_workqueue_execute_end /./include/trace/events/workqueue.h:114 [inline]
     process_one_work+0x12aa/0x1740 /kernel/workqueue.c:2274
     worker_thread+0x98/0xe40 /kernel/workqueue.c:2415
     kthread+0x361/0x430 /kernel/kthread.c:255
     ret_from_fork+0x24/0x30 /arch/x86/entry/entry_64.S:352
   INITIAL USE at:
     lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
     __raw_spin_lock_irq /./include/linux/spinlock_api_smp.h:128 [inline]
     _raw_spin_lock_irq+0x60/0x80 /kernel/locking/spinlock.c:167
     spin_lock_irq /./include/linux/spinlock.h:363 [inline]
     free_ioctx_users+0x2d/0x490 /fs/aio.c:620
     percpu_ref_put_many /./include/linux/percpu-refcount.h:293 [inline]
     percpu_ref_put /./include/linux/percpu-refcount.h:309 [inline]
     percpu_ref_call_confirm_rcu /lib/percpu-refcount.c:130 [inline]
     percpu_ref_switch_to_atomic_rcu+0x4c0/0x570 /lib/percpu-refcount.c:165
     __rcu_reclaim /kernel/rcu/rcu.h:222 [inline]
     rcu_do_batch /kernel/rcu/tree.c:2114 [inline]
     rcu_core+0x67f/0x1580 /kernel/rcu/tree.c:2314
     rcu_core_si+0x9/0x10 /kernel/rcu/tree.c:2323
     __do_softirq+0x262/0x98c /kernel/softirq.c:292
     invoke_softirq /kernel/softirq.c:373 [inline]
     irq_exit+0x19b/0x1e0 /kernel/softirq.c:413
     exiting_irq /./arch/x86/include/asm/apic.h:537 [inline]
     smp_apic_timer_interrupt+0x1a3/0x610 /arch/x86/kernel/apic/apic.c:1095
     apic_timer_interrupt+0xf/0x20 /arch/x86/entry/entry_64.S:828
     __sanitizer_cov_trace_pc+0x1/0x50 /kernel/kcov.c:95
     rcu_lockdep_current_cpu_online /kernel/rcu/tree.c:942 [inline]
     rcu_lockdep_current_cpu_online+0x39/0x130 /kernel/rcu/tree.c:933
     rcu_read_lock_sched_held+0x97/0x130 /kernel/rcu/update.c:102
     trace_workqueue_execute_end /./include/trace/events/workqueue.h:114 [inline]
     process_one_work+0x12aa/0x1740 /kernel/workqueue.c:2274
     worker_thread+0x98/0xe40 /kernel/workqueue.c:2415
     kthread+0x361/0x430 /kernel/kthread.c:255
     ret_from_fork+0x24/0x30 /arch/x86/entry/entry_64.S:352
 }
 ... key at: [] __key.53780+0x0/0x40
 ... acquired at:
   lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
   __raw_spin_lock /./include/linux/spinlock_api_smp.h:142 [inline]
   _raw_spin_lock+0x2f/0x40 /kernel/locking/spinlock.c:151
   spin_lock /./include/linux/spinlock.h:338 [inline]
   aio_poll /fs/aio.c:1753 [inline]
   __io_submit_one /fs/aio.c:1827 [inline]
   io_submit_one+0xefa/0x2ef0 /fs/aio.c:1864
   __do_sys_io_submit /fs/aio.c:1923 [inline]
   __se_sys_io_submit /fs/aio.c:1893 [inline]
   __x64_sys_io_submit+0x1bd/0x570 /fs/aio.c:1893
   do_syscall_64+0xfd/0x6a0 /arch/x86/entry/common.c:296
   entry_SYSCALL_64_after_hwframe+0x49/0xbe

the dependencies between the lock to be acquired and SOFTIRQ-irq-unsafe lock:
-> (&fiq->waitq){+.+.} {
   HARDIRQ-ON-W at:
     lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
     __raw_spin_lock /./include/linux/spinlock_api_smp.h:142 [inline]
     _raw_spin_lock+0x2f/0x40 /kernel/locking/spinlock.c:151
     spin_lock /./include/linux/spinlock.h:338 [inline]
     flush_bg_queue+0x1f3/0x3c0 /fs/fuse/dev.c:415
     fuse_request_queue_background+0x2f8/0x5a0 /fs/fuse/dev.c:676
     fuse_request_send_background+0x58/0x110 /fs/fuse/dev.c:687
     fuse_send_init /fs/fuse/inode.c:986 [inline]
     fuse_fill_super+0x13cd/0x1730 /fs/fuse/inode.c:1211
     mount_nodev+0x66/0x110 /fs/super.c:1392
     fuse_mount+0x2d/0x40 /fs/fuse/inode.c:1236
     legacy_get_tree+0x108/0x220 /fs/fs_context.c:661
     vfs_get_tree+0x8e/0x390 /fs/super.c:1476
     do_new_mount /fs/namespace.c:2792 [inline]
     do_mount+0x138c/0x1c00 /fs/namespace.c:3112
     ksys_mount+0xdb/0x150 /fs/namespace.c:3321
     __do_sys_mount /fs/namespace.c:3335 [inline]
     __se_sys_mount /fs/namespace.c:3332 [inline]
     __x64_sys_mount+0xbe/0x150 /fs/namespace.c:3332
     do_syscall_64+0xfd/0x6a0 /arch/x86/entry/common.c:296
     entry_SYSCALL_64_after_hwframe+0x49/0xbe
   SOFTIRQ-ON-W at:
     lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
     __raw_spin_lock /./include/linux/spinlock_api_smp.h:142 [inline]
     _raw_spin_lock+0x2f/0x40 /kernel/locking/spinlock.c:151
     spin_lock /./include/linux/spinlock.h:338 [inline]
     flush_bg_queue+0x1f3/0x3c0 /fs/fuse/dev.c:415
     fuse_request_queue_background+0x2f8/0x5a0 /fs/fuse/dev.c:676
     fuse_request_send_background+0x58/0x110 /fs/fuse/dev.c:687
     fuse_send_init /fs/fuse/inode.c:986 [inline]
     fuse_fill_super+0x13cd/0x1730 /fs/fuse/inode.c:1211
     mount_nodev+0x66/0x110 /fs/super.c:1392
     fuse_mount+0x2d/0x40 /fs/fuse/inode.c:1236
     legacy_get_tree+0x108/0x220 /fs/fs_context.c:661
     vfs_get_tree+0x8e/0x390 /fs/super.c:1476
     do_new_mount /fs/namespace.c:2792 [inline]
     do_mount+0x138c/0x1c00 /fs/namespace.c:3112
     ksys_mount+0xdb/0x150 /fs/namespace.c:3321
     __do_sys_mount /fs/namespace.c:3335 [inline]
     __se_sys_mount /fs/namespace.c:3332 [inline]
     __x64_sys_mount+0xbe/0x150 /fs/namespace.c:3332
     do_syscall_64+0xfd/0x6a0 /arch/x86/entry/common.c:296
     entry_SYSCALL_64_after_hwframe+0x49/0xbe
   INITIAL USE at:
     lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
     __raw_spin_lock /./include/linux/spinlock_api_smp.h:142 [inline]
     _raw_spin_lock+0x2f/0x40 /kernel/locking/spinlock.c:151
     spin_lock /./include/linux/spinlock.h:338 [inline]
     flush_bg_queue+0x1f3/0x3c0 /fs/fuse/dev.c:415
     fuse_request_queue_background+0x2f8/0x5a0 /fs/fuse/dev.c:676
     fuse_request_send_background+0x58/0x110 /fs/fuse/dev.c:687
     fuse_send_init /fs/fuse/inode.c:986 [inline]
     fuse_fill_super+0x13cd/0x1730 /fs/fuse/inode.c:1211
     mount_nodev+0x66/0x110 /fs/super.c:1392
     fuse_mount+0x2d/0x40 /fs/fuse/inode.c:1236
     legacy_get_tree+0x108/0x220 /fs/fs_context.c:661
     vfs_get_tree+0x8e/0x390 /fs/super.c:1476
     do_new_mount /fs/namespace.c:2792 [inline]
     do_mount+0x138c/0x1c00 /fs/namespace.c:3112
     ksys_mount+0xdb/0x150 /fs/namespace.c:3321
     __do_sys_mount /fs/namespace.c:3335 [inline]
     __se_sys_mount /fs/namespace.c:3332 [inline]
     __x64_sys_mount+0xbe/0x150 /fs/namespace.c:3332
     do_syscall_64+0xfd/0x6a0 /arch/x86/entry/common.c:296
     entry_SYSCALL_64_after_hwframe+0x49/0xbe
 }
 ... key at: [] __key.44353+0x0/0x40
 ... acquired at:
   lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
   __raw_spin_lock /./include/linux/spinlock_api_smp.h:142 [inline]
   _raw_spin_lock+0x2f/0x40 /kernel/locking/spinlock.c:151
   spin_lock /./include/linux/spinlock.h:338 [inline]
   aio_poll /fs/aio.c:1753 [inline]
   __io_submit_one /fs/aio.c:1827 [inline]
   io_submit_one+0xefa/0x2ef0 /fs/aio.c:1864
   __do_sys_io_submit /fs/aio.c:1923 [inline]
   __se_sys_io_submit /fs/aio.c:1893 [inline]
   __x64_sys_io_submit+0x1bd/0x570 /fs/aio.c:1893
   do_syscall_64+0xfd/0x6a0 /arch/x86/entry/common.c:296
   entry_SYSCALL_64_after_hwframe+0x49/0xbe

stack backtrace:
CPU: 0 PID: 29303 Comm: syz-executor.0 Not tainted 5.2.0+ #69
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack /lib/dump_stack.c:77 [inline]
 dump_stack+0x172/0x1f0 /lib/dump_stack.c:113
 print_bad_irq_dependency /kernel/locking/lockdep.c:2025 [inline]
 check_irq_usage.cold+0x5b4/0x72e /kernel/locking/lockdep.c:2223
 check_prev_add /kernel/locking/lockdep.c:2409 [inline]
 check_prevs_add /kernel/locking/lockdep.c:2507 [inline]
 validate_chain /kernel/locking/lockdep.c:2897 [inline]
 __lock_acquire+0x25c3/0x4c30 /kernel/locking/lockdep.c:3880
 lock_acquire+0x190/0x410 /kernel/locking/lockdep.c:4413
 __raw_spin_lock /./include/linux/spinlock_api_smp.h:142 [inline]
 _raw_spin_lock+0x2f/0x40 /kernel/locking/spinlock.c:151
 spin_lock /./include/linux/spinlock.h:338 [inline]
 aio_poll /fs/aio.c:1753 [inline]
 __io_submit_one /fs/aio.c:1827 [inline]
 io_submit_one+0xefa/0x2ef0 /fs/aio.c:1864
 __do_sys_io_submit /fs/aio.c:1923 [inline]
 __se_sys_io_submit /fs/aio.c:1893 [inline]
 __x64_sys_io_submit+0x1bd/0x570 /fs/aio.c:1893
 do_syscall_64+0xfd/0x6a0 /arch/x86/entry/common.c:296
 entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x459819
Code: fd b7 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 cb b7 fb ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007f2004d8bc78 EFLAGS: 00000246 ORIG_RAX: 00000000000000d1
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000000459819
RDX: 0000000020000200 RSI: 0000000000000001 RDI: 00007f2004d6b000
RBP: 000000000075bf20 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f2004d8c6d4
R13: 00000000004c0c01 R14: 00000000004d39e8 R15: 00000000ffffffff
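For reference, here is a minimal, hypothetical C sketch of the usage pattern the report flags. It is not the actual fs/aio.c or fs/fuse/dev.c code; the demo_* names are invented for illustration. The pattern: ctx->ctx_lock is taken from an RCU softirq callback (free_ioctx_users) and is therefore SOFTIRQ-safe, fiq->waitq.lock is taken with softirqs enabled (flush_bg_queue) and is therefore SOFTIRQ-unsafe, and aio_poll() creates a dependency from the safe lock to the unsafe one by nesting them.

/*
 * Hypothetical sketch of the lock ordering lockdep complains about.
 * demo_ctx_lock stands in for ctx->ctx_lock, demo_fiq_lock for
 * fiq->waitq.lock; the functions mirror the three code paths in the
 * report, not the real implementations.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_ctx_lock);   /* like ctx->ctx_lock   */
static DEFINE_SPINLOCK(demo_fiq_lock);   /* like fiq->waitq.lock */

/* Mirrors aio_poll(): nests the SOFTIRQ-unsafe lock under the safe one. */
static void demo_submit_path(void)
{
	spin_lock_irq(&demo_ctx_lock);   /* irqs off, SOFTIRQ-safe lock held   */
	spin_lock(&demo_fiq_lock);       /* new dependency ctx_lock -> waitq   */
	spin_unlock(&demo_fiq_lock);
	spin_unlock_irq(&demo_ctx_lock);
}

/* Mirrors free_ioctx_users() running from an RCU softirq callback:
 * this acquisition is what marks the lock SOFTIRQ-safe. */
static void demo_softirq_path(void)
{
	spin_lock_irq(&demo_ctx_lock);
	spin_unlock_irq(&demo_ctx_lock);
}

/* Mirrors flush_bg_queue(): plain spin_lock with softirqs enabled,
 * which marks the lock SOFTIRQ-unsafe. */
static void demo_fuse_path(void)
{
	spin_lock(&demo_fiq_lock);
	spin_unlock(&demo_fiq_lock);
}

The usual ways out of this class of report are either to make every acquisition of the inner lock interrupt-safe (spin_lock_irq/spin_lock_irqsave or a bh-disabling variant) or to avoid nesting it under an irq-safe lock; which remedy was ultimately applied for this bug is not shown in this log.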