========================================================
WARNING: possible irq lock inversion dependency detected
5.3.0+ #0 Not tainted
--------------------------------------------------------
ksoftirqd/1/16 just changed the state of lock:
ffff8880a055fb58 (&(&ctx->ctx_lock)->rlock){..-.}, at: spin_lock_irq include/linux/spinlock.h:363 [inline]
ffff8880a055fb58 (&(&ctx->ctx_lock)->rlock){..-.}, at: free_ioctx_users+0x2d/0x490 fs/aio.c:618
but this lock took another, SOFTIRQ-unsafe lock in the past:
 (&fiq->waitq){+.+.}

and interrupts could create inverse lock ordering between them.

other info that might help us debug this:
 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&fiq->waitq);
                               local_irq_disable();
                               lock(&(&ctx->ctx_lock)->rlock);
                               lock(&fiq->waitq);
  <Interrupt>
    lock(&(&ctx->ctx_lock)->rlock);

 *** DEADLOCK ***

2 locks held by ksoftirqd/1/16:
 #0: ffffffff88fab1c0 (rcu_callback){....}, at: __rcu_reclaim kernel/rcu/rcu.h:210 [inline]
 #0: ffffffff88fab1c0 (rcu_callback){....}, at: rcu_do_batch kernel/rcu/tree.c:2157 [inline]
 #0: ffffffff88fab1c0 (rcu_callback){....}, at: rcu_core+0x60e/0x1560 kernel/rcu/tree.c:2377
 #1: ffffffff88fab200 (rcu_read_lock_sched){....}, at: percpu_ref_call_confirm_rcu lib/percpu-refcount.c:126 [inline]
 #1: ffffffff88fab200 (rcu_read_lock_sched){....}, at: percpu_ref_switch_to_atomic_rcu+0x20e/0x570 lib/percpu-refcount.c:165

the shortest dependencies between 2nd lock and 1st lock:
 -> (&fiq->waitq){+.+.} {
    HARDIRQ-ON-W at:
                      lock_acquire+0x190/0x410 kernel/locking/lockdep.c:4487
                      __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
                      _raw_spin_lock+0x2f/0x40 kernel/locking/spinlock.c:151
                      spin_lock include/linux/spinlock.h:338 [inline]
                      flush_bg_queue+0x1f3/0x3c0 fs/fuse/dev.c:415
                      fuse_request_queue_background+0x2f8/0x5a0 fs/fuse/dev.c:676
                      fuse_request_send_background+0x58/0x110 fs/fuse/dev.c:687
                      cuse_send_init fs/fuse/cuse.c:459 [inline]
                      cuse_channel_open+0x5ba/0x830 fs/fuse/cuse.c:519
                      misc_open+0x395/0x4c0 drivers/char/misc.c:141
                      chrdev_open+0x245/0x6b0 fs/char_dev.c:414
                      do_dentry_open+0x4df/0x1250 fs/open.c:797
                      vfs_open+0xa0/0xd0 fs/open.c:906
                      do_last fs/namei.c:3408 [inline]
                      path_openat+0x10e9/0x4630 fs/namei.c:3525
                      do_filp_open+0x1a1/0x280 fs/namei.c:3555
                      do_sys_open+0x3fe/0x5d0 fs/open.c:1089
                      __do_sys_openat fs/open.c:1116 [inline]
                      __se_sys_openat fs/open.c:1110 [inline]
                      __x64_sys_openat+0x9d/0x100 fs/open.c:1110
                      do_syscall_64+0xfa/0x760 arch/x86/entry/common.c:290
                      entry_SYSCALL_64_after_hwframe+0x49/0xbe
    SOFTIRQ-ON-W at:
                      lock_acquire+0x190/0x410 kernel/locking/lockdep.c:4487
                      __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
                      _raw_spin_lock+0x2f/0x40 kernel/locking/spinlock.c:151
                      spin_lock include/linux/spinlock.h:338 [inline]
                      flush_bg_queue+0x1f3/0x3c0 fs/fuse/dev.c:415
                      fuse_request_queue_background+0x2f8/0x5a0 fs/fuse/dev.c:676
                      fuse_request_send_background+0x58/0x110 fs/fuse/dev.c:687
                      cuse_send_init fs/fuse/cuse.c:459 [inline]
                      cuse_channel_open+0x5ba/0x830 fs/fuse/cuse.c:519
                      misc_open+0x395/0x4c0 drivers/char/misc.c:141
                      chrdev_open+0x245/0x6b0 fs/char_dev.c:414
                      do_dentry_open+0x4df/0x1250 fs/open.c:797
                      vfs_open+0xa0/0xd0 fs/open.c:906
                      do_last fs/namei.c:3408 [inline]
                      path_openat+0x10e9/0x4630 fs/namei.c:3525
                      do_filp_open+0x1a1/0x280 fs/namei.c:3555
                      do_sys_open+0x3fe/0x5d0 fs/open.c:1089
                      __do_sys_openat fs/open.c:1116 [inline]
                      __se_sys_openat fs/open.c:1110 [inline]
                      __x64_sys_openat+0x9d/0x100 fs/open.c:1110
                      do_syscall_64+0xfa/0x760 arch/x86/entry/common.c:290
                      entry_SYSCALL_64_after_hwframe+0x49/0xbe
    INITIAL USE at:
                      lock_acquire+0x190/0x410 kernel/locking/lockdep.c:4487
                      __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
                      _raw_spin_lock+0x2f/0x40 kernel/locking/spinlock.c:151
                      spin_lock include/linux/spinlock.h:338 [inline]
                      flush_bg_queue+0x1f3/0x3c0 fs/fuse/dev.c:415
                      fuse_request_queue_background+0x2f8/0x5a0 fs/fuse/dev.c:676
                      fuse_request_send_background+0x58/0x110 fs/fuse/dev.c:687
                      cuse_send_init fs/fuse/cuse.c:459 [inline]
                      cuse_channel_open+0x5ba/0x830 fs/fuse/cuse.c:519
                      misc_open+0x395/0x4c0 drivers/char/misc.c:141
                      chrdev_open+0x245/0x6b0 fs/char_dev.c:414
                      do_dentry_open+0x4df/0x1250 fs/open.c:797
                      vfs_open+0xa0/0xd0 fs/open.c:906
                      do_last fs/namei.c:3408 [inline]
                      path_openat+0x10e9/0x4630 fs/namei.c:3525
                      do_filp_open+0x1a1/0x280 fs/namei.c:3555
                      do_sys_open+0x3fe/0x5d0 fs/open.c:1089
                      __do_sys_openat fs/open.c:1116 [inline]
                      __se_sys_openat fs/open.c:1110 [inline]
                      __x64_sys_openat+0x9d/0x100 fs/open.c:1110
                      do_syscall_64+0xfa/0x760 arch/x86/entry/common.c:290
                      entry_SYSCALL_64_after_hwframe+0x49/0xbe
  }
  ... key at: [] __key.44699+0x0/0x40
  ... acquired at:
    __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
    _raw_spin_lock+0x2f/0x40 kernel/locking/spinlock.c:151
    spin_lock include/linux/spinlock.h:338 [inline]
    aio_poll fs/aio.c:1751 [inline]
    __io_submit_one fs/aio.c:1825 [inline]
    io_submit_one+0xefa/0x2ef0 fs/aio.c:1862
    __do_sys_io_submit fs/aio.c:1921 [inline]
    __se_sys_io_submit fs/aio.c:1891 [inline]
    __x64_sys_io_submit+0x1bd/0x570 fs/aio.c:1891
    do_syscall_64+0xfa/0x760 arch/x86/entry/common.c:290
    entry_SYSCALL_64_after_hwframe+0x49/0xbe

 -> (&(&ctx->ctx_lock)->rlock){..-.} {
    IN-SOFTIRQ-W at:
                      lock_acquire+0x190/0x410 kernel/locking/lockdep.c:4487
                      __raw_spin_lock_irq include/linux/spinlock_api_smp.h:128 [inline]
                      _raw_spin_lock_irq+0x60/0x80 kernel/locking/spinlock.c:167
                      spin_lock_irq include/linux/spinlock.h:363 [inline]
                      free_ioctx_users+0x2d/0x490 fs/aio.c:618
                      percpu_ref_put_many include/linux/percpu-refcount.h:293 [inline]
                      percpu_ref_put include/linux/percpu-refcount.h:309 [inline]
                      percpu_ref_call_confirm_rcu lib/percpu-refcount.c:130 [inline]
                      percpu_ref_switch_to_atomic_rcu+0x4c0/0x570 lib/percpu-refcount.c:165
                      __rcu_reclaim kernel/rcu/rcu.h:222 [inline]
                      rcu_do_batch kernel/rcu/tree.c:2157 [inline]
                      rcu_core+0x581/0x1560 kernel/rcu/tree.c:2377
                      rcu_core_si+0x9/0x10 kernel/rcu/tree.c:2386
                      __do_softirq+0x262/0x98c kernel/softirq.c:292
                      run_ksoftirqd kernel/softirq.c:603 [inline]
                      run_ksoftirqd+0x8e/0x110 kernel/softirq.c:595
                      smpboot_thread_fn+0x6a3/0xa40 kernel/smpboot.c:165
                      kthread+0x361/0x430 kernel/kthread.c:255
                      ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352
    INITIAL USE at:
                      lock_acquire+0x190/0x410 kernel/locking/lockdep.c:4487
                      __raw_spin_lock_irq include/linux/spinlock_api_smp.h:128 [inline]
                      _raw_spin_lock_irq+0x60/0x80 kernel/locking/spinlock.c:167
                      spin_lock_irq include/linux/spinlock.h:363 [inline]
                      aio_poll fs/aio.c:1749 [inline]
                      __io_submit_one fs/aio.c:1825 [inline]
                      io_submit_one+0xeb5/0x2ef0 fs/aio.c:1862
                      __do_sys_io_submit fs/aio.c:1921 [inline]
                      __se_sys_io_submit fs/aio.c:1891 [inline]
                      __x64_sys_io_submit+0x1bd/0x570 fs/aio.c:1891
                      do_syscall_64+0xfa/0x760 arch/x86/entry/common.c:290
                      entry_SYSCALL_64_after_hwframe+0x49/0xbe
  }
  ... key at: [] __key.54326+0x0/0x40
  ... acquired at:
    mark_lock_irq kernel/locking/lockdep.c:3317 [inline]
    mark_lock+0x517/0x1220 kernel/locking/lockdep.c:3666
    mark_usage kernel/locking/lockdep.c:3566 [inline]
    __lock_acquire+0x1eaf/0x4e70 kernel/locking/lockdep.c:3909
    lock_acquire+0x190/0x410 kernel/locking/lockdep.c:4487
    __raw_spin_lock_irq include/linux/spinlock_api_smp.h:128 [inline]
    _raw_spin_lock_irq+0x60/0x80 kernel/locking/spinlock.c:167
    spin_lock_irq include/linux/spinlock.h:363 [inline]
    free_ioctx_users+0x2d/0x490 fs/aio.c:618
    percpu_ref_put_many include/linux/percpu-refcount.h:293 [inline]
    percpu_ref_put include/linux/percpu-refcount.h:309 [inline]
    percpu_ref_call_confirm_rcu lib/percpu-refcount.c:130 [inline]
    percpu_ref_switch_to_atomic_rcu+0x4c0/0x570 lib/percpu-refcount.c:165
    __rcu_reclaim kernel/rcu/rcu.h:222 [inline]
    rcu_do_batch kernel/rcu/tree.c:2157 [inline]
    rcu_core+0x581/0x1560 kernel/rcu/tree.c:2377
    rcu_core_si+0x9/0x10 kernel/rcu/tree.c:2386
    __do_softirq+0x262/0x98c kernel/softirq.c:292
    run_ksoftirqd kernel/softirq.c:603 [inline]
    run_ksoftirqd+0x8e/0x110 kernel/softirq.c:595
    smpboot_thread_fn+0x6a3/0xa40 kernel/smpboot.c:165
    kthread+0x361/0x430 kernel/kthread.c:255
    ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352

stack backtrace:
CPU: 1 PID: 16 Comm: ksoftirqd/1 Not tainted 5.3.0+ #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:77 [inline]
 dump_stack+0x172/0x1f0 lib/dump_stack.c:113
 print_irq_inversion_bug.part.0+0x2e4/0x2f1 kernel/locking/lockdep.c:3179
 print_irq_inversion_bug kernel/locking/lockdep.c:3180 [inline]
 check_usage_forwards.cold+0x20/0x29 kernel/locking/lockdep.c:3204
 mark_lock_irq kernel/locking/lockdep.c:3317 [inline]
 mark_lock+0x517/0x1220 kernel/locking/lockdep.c:3666
 mark_usage kernel/locking/lockdep.c:3566 [inline]
 __lock_acquire+0x1eaf/0x4e70 kernel/locking/lockdep.c:3909
 lock_acquire+0x190/0x410 kernel/locking/lockdep.c:4487
 __raw_spin_lock_irq include/linux/spinlock_api_smp.h:128 [inline]
 _raw_spin_lock_irq+0x60/0x80 kernel/locking/spinlock.c:167
 spin_lock_irq include/linux/spinlock.h:363 [inline]
 free_ioctx_users+0x2d/0x490 fs/aio.c:618
 percpu_ref_put_many include/linux/percpu-refcount.h:293 [inline]
 percpu_ref_put include/linux/percpu-refcount.h:309 [inline]
 percpu_ref_call_confirm_rcu lib/percpu-refcount.c:130 [inline]
 percpu_ref_switch_to_atomic_rcu+0x4c0/0x570 lib/percpu-refcount.c:165
 __rcu_reclaim kernel/rcu/rcu.h:222 [inline]
 rcu_do_batch kernel/rcu/tree.c:2157 [inline]
 rcu_core+0x581/0x1560 kernel/rcu/tree.c:2377
 rcu_core_si+0x9/0x10 kernel/rcu/tree.c:2386
 __do_softirq+0x262/0x98c kernel/softirq.c:292
 run_ksoftirqd kernel/softirq.c:603 [inline]
 run_ksoftirqd+0x8e/0x110 kernel/softirq.c:595
 smpboot_thread_fn+0x6a3/0xa40 kernel/smpboot.c:165
 kthread+0x361/0x430 kernel/kthread.c:255
 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352
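
For illustration only, below is a minimal, self-contained kernel-module-style sketch of the lock pattern lockdep is flagging above. It is not the fuse/aio code itself; the names demo_lock_a, demo_lock_b, demo_timer_fn, demo_submit and demo_background are hypothetical stand-ins (demo_lock_a for ctx->ctx_lock, which is also taken from softirq context, and demo_lock_b for fiq->waitq's lock, which is only ever taken with a plain, softirq-unsafe spin_lock).

/*
 * Sketch of the irq lock inversion reported above (assumed names).
 * lock_a is used from softirq context; lock_b is softirq-unsafe and
 * is acquired while lock_a is held, creating the ordering a -> b.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static DEFINE_SPINLOCK(demo_lock_a);	/* stands in for ctx->ctx_lock   */
static DEFINE_SPINLOCK(demo_lock_b);	/* stands in for fiq->waitq.lock */

/* Timer callback: runs in softirq context, like free_ioctx_users()
 * reached from an RCU callback. This marks lock_a IN-SOFTIRQ-W. */
static void demo_timer_fn(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock_a, flags);
	/* ... */
	spin_unlock_irqrestore(&demo_lock_a, flags);
}
static DEFINE_TIMER(demo_timer, demo_timer_fn);

/* Process context, like aio_poll(): lock_b is nested under lock_a,
 * so lockdep records the dependency lock_a -> lock_b. */
static void demo_submit(void)
{
	spin_lock_irq(&demo_lock_a);
	spin_lock(&demo_lock_b);	/* softirq-unsafe lock taken under lock_a */
	/* ... */
	spin_unlock(&demo_lock_b);
	spin_unlock_irq(&demo_lock_a);
}

/* Process context, like flush_bg_queue(): lock_b taken with plain
 * spin_lock() and softirqs enabled. If the timer softirq fires here
 * and needs lock_a while another CPU holds lock_a in demo_submit()
 * and spins on lock_b, the CPUs deadlock -- the scenario shown in
 * the CPU0/CPU1 diagram of the report. */
static void demo_background(void)
{
	spin_lock(&demo_lock_b);
	/* ... */
	spin_unlock(&demo_lock_b);
}

static int __init demo_init(void)
{
	mod_timer(&demo_timer, jiffies + HZ);
	demo_submit();
	demo_background();
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");

Built as a module on a CONFIG_LOCKDEP=y kernel, exercising these three paths should produce the same class of "possible irq lock inversion dependency detected" warning, since the softirq-unsafe demo_lock_b ends up ordered after the softirq-taken demo_lock_a.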