=====================================================
WARNING: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected
6.8.0-syzkaller-08951-gfe46a7dd189e #0 Not tainted
-----------------------------------------------------
kvm-nx-lpage-re/7210 [HC0[0]:SC0[2]:HE0:SE0] is trying to acquire:
ffff88807682da00 (&stab->lock){+...}-{2:2}, at: spin_lock_bh include/linux/spinlock.h:356 [inline]
ffff88807682da00 (&stab->lock){+...}-{2:2}, at: __sock_map_delete net/core/sock_map.c:414 [inline]
ffff88807682da00 (&stab->lock){+...}-{2:2}, at: sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446

and this task is already holding:
ffff888016ebf020 ((worker)->lock){-...}-{2:2}, at: kthread_queue_work+0x27/0x180 kernel/kthread.c:1019

which would create a new lock dependency:
 ((worker)->lock){-...}-{2:2} -> (&stab->lock){+...}-{2:2}

but this new dependency connects a HARDIRQ-irq-safe lock:
 ((worker)->lock){-...}-{2:2}

... which became HARDIRQ-irq-safe at:
  lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
  __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
  _raw_spin_lock_irqsave+0xd5/0x120 kernel/locking/spinlock.c:162
  kthread_queue_work+0x27/0x180 kernel/kthread.c:1019
  pit_timer_fn+0xa5/0x180 arch/x86/kvm/i8254.c:276
  __run_hrtimer kernel/time/hrtimer.c:1692 [inline]
  __hrtimer_run_queues+0x595/0xd00 kernel/time/hrtimer.c:1756
  hrtimer_interrupt+0x396/0x990 kernel/time/hrtimer.c:1818
  local_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1032 [inline]
  __sysvec_apic_timer_interrupt+0x107/0x3a0 arch/x86/kernel/apic/apic.c:1049
  instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1043 [inline]
  sysvec_apic_timer_interrupt+0xa1/0xc0 arch/x86/kernel/apic/apic.c:1043
  asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:702
  lock_release+0x0/0x9d0
  rcu_lock_release include/linux/rcupdate.h:308 [inline]
  rcu_read_unlock include/linux/rcupdate.h:783 [inline]
  percpu_ref_tryget_many include/linux/percpu-refcount.h:250 [inline]
  percpu_ref_tryget+0xfc/0x180 include/linux/percpu-refcount.h:266
  css_tryget include/linux/cgroup_refcnt.h:45 [inline]
  get_mem_cgroup_from_mm+0x103/0x2a0 mm/memcontrol.c:1111
  __mem_cgroup_charge+0x16/0x80 mm/memcontrol.c:7290
  mem_cgroup_charge include/linux/memcontrol.h:690 [inline]
  shmem_alloc_and_add_folio+0x394/0xdf0 mm/shmem.c:1683
  shmem_get_folio_gfp+0x82d/0x1f50 mm/shmem.c:2061
  shmem_get_folio mm/shmem.c:2166 [inline]
  shmem_write_begin+0x170/0x4d0 mm/shmem.c:2750
  generic_perform_write+0x322/0x640 mm/filemap.c:3930
  shmem_file_write_iter+0xfc/0x120 mm/shmem.c:2926
  call_write_iter include/linux/fs.h:2108 [inline]
  new_sync_write fs/read_write.c:497 [inline]
  vfs_write+0xa84/0xcb0 fs/read_write.c:590
  ksys_write+0x1a0/0x2c0 fs/read_write.c:643
  do_syscall_64+0xfb/0x240
  entry_SYSCALL_64_after_hwframe+0x6d/0x75

to a HARDIRQ-irq-unsafe lock:
 (&stab->lock){+...}-{2:2}

... which became HARDIRQ-irq-unsafe at:
...
  lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
  __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
  _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
  spin_lock_bh include/linux/spinlock.h:356 [inline]
  sock_map_update_common+0x1b6/0x5b0 net/core/sock_map.c:490
  sock_map_update_elem_sys+0x55f/0x910 net/core/sock_map.c:579
  map_update_elem+0x53a/0x6f0 kernel/bpf/syscall.c:1641
  __sys_bpf+0x76f/0x810 kernel/bpf/syscall.c:5619
  __do_sys_bpf kernel/bpf/syscall.c:5738 [inline]
  __se_sys_bpf kernel/bpf/syscall.c:5736 [inline]
  __x64_sys_bpf+0x7c/0x90 kernel/bpf/syscall.c:5736
  do_syscall_64+0xfb/0x240
  entry_SYSCALL_64_after_hwframe+0x6d/0x75

other info that might help us debug this:

 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&stab->lock);
                               local_irq_disable();
                               lock((worker)->lock);
                               lock(&stab->lock);
  <Interrupt>
    lock((worker)->lock);

 *** DEADLOCK ***

6 locks held by kvm-nx-lpage-re/7210:
 #0: ffffffff8e15f048 (cgroup_mutex){+.+.}-{3:3}, at: cgroup_lock include/linux/cgroup.h:368 [inline]
 #0: ffffffff8e15f048 (cgroup_mutex){+.+.}-{3:3}, at: cgroup_attach_task_all+0x27/0xe0 kernel/cgroup/cgroup-v1.c:61
 #1: ffffffff8dfccb90 (cpu_hotplug_lock){++++}-{0:0}, at: cgroup_attach_lock+0x11/0x40 kernel/cgroup/cgroup.c:2413
 #2: ffffffff8e15f230 (cgroup_threadgroup_rwsem){++++}-{0:0}, at: cgroup_attach_task_all+0x31/0xe0 kernel/cgroup/cgroup-v1.c:62
 #3: ffffffff8e1373b8 (rcu_state.exp_mutex){+.+.}-{3:3}, at: exp_funnel_lock kernel/rcu/tree_exp.h:291 [inline]
 #3: ffffffff8e1373b8 (rcu_state.exp_mutex){+.+.}-{3:3}, at: synchronize_rcu_expedited+0x39a/0x820 kernel/rcu/tree_exp.h:939
 #4: ffff888016ebf020 ((worker)->lock){-...}-{2:2}, at: kthread_queue_work+0x27/0x180 kernel/kthread.c:1019
 #5: ffffffff8e132020 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:298 [inline]
 #5: ffffffff8e132020 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:750 [inline]
 #5: ffffffff8e132020 (rcu_read_lock){....}-{1:2}, at: __bpf_trace_run kernel/trace/bpf_trace.c:2380 [inline]
 #5: ffffffff8e132020 (rcu_read_lock){....}-{1:2}, at: bpf_trace_run2+0x114/0x420 kernel/trace/bpf_trace.c:2420

the dependencies between HARDIRQ-irq-safe lock and the holding lock:
-> ((worker)->lock){-...}-{2:2} {
   IN-HARDIRQ-W at:
    lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
    __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
    _raw_spin_lock_irqsave+0xd5/0x120 kernel/locking/spinlock.c:162
    kthread_queue_work+0x27/0x180 kernel/kthread.c:1019
    pit_timer_fn+0xa5/0x180 arch/x86/kvm/i8254.c:276
    __run_hrtimer kernel/time/hrtimer.c:1692 [inline]
    __hrtimer_run_queues+0x595/0xd00 kernel/time/hrtimer.c:1756
    hrtimer_interrupt+0x396/0x990 kernel/time/hrtimer.c:1818
    local_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1032 [inline]
    __sysvec_apic_timer_interrupt+0x107/0x3a0 arch/x86/kernel/apic/apic.c:1049
    instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1043 [inline]
    sysvec_apic_timer_interrupt+0xa1/0xc0 arch/x86/kernel/apic/apic.c:1043
    asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:702
    lock_release+0x0/0x9d0
    rcu_lock_release include/linux/rcupdate.h:308 [inline]
    rcu_read_unlock include/linux/rcupdate.h:783 [inline]
    percpu_ref_tryget_many include/linux/percpu-refcount.h:250 [inline]
    percpu_ref_tryget+0xfc/0x180 include/linux/percpu-refcount.h:266
    css_tryget include/linux/cgroup_refcnt.h:45 [inline]
    get_mem_cgroup_from_mm+0x103/0x2a0 mm/memcontrol.c:1111
    __mem_cgroup_charge+0x16/0x80 mm/memcontrol.c:7290
    mem_cgroup_charge include/linux/memcontrol.h:690 [inline]
    shmem_alloc_and_add_folio+0x394/0xdf0 mm/shmem.c:1683
    shmem_get_folio_gfp+0x82d/0x1f50 mm/shmem.c:2061
    shmem_get_folio mm/shmem.c:2166 [inline]
    shmem_write_begin+0x170/0x4d0 mm/shmem.c:2750
    generic_perform_write+0x322/0x640 mm/filemap.c:3930
    shmem_file_write_iter+0xfc/0x120 mm/shmem.c:2926
    call_write_iter include/linux/fs.h:2108 [inline]
    new_sync_write fs/read_write.c:497 [inline]
    vfs_write+0xa84/0xcb0 fs/read_write.c:590
    ksys_write+0x1a0/0x2c0 fs/read_write.c:643
    do_syscall_64+0xfb/0x240
    entry_SYSCALL_64_after_hwframe+0x6d/0x75
   INITIAL USE at:
    lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
    __raw_spin_lock_irq include/linux/spinlock_api_smp.h:119 [inline]
    _raw_spin_lock_irq+0xd3/0x120 kernel/locking/spinlock.c:170
    kthread_worker_fn+0x236/0xab0 kernel/kthread.c:828
    kthread+0x2f0/0x390 kernel/kthread.c:388
    ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
    ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243
 }
 ... key at: [] __kthread_create_worker.__key+0x0/0x20

the dependencies between the lock to be acquired and HARDIRQ-irq-unsafe lock:
-> (&stab->lock){+...}-{2:2} {
   HARDIRQ-ON-W at:
    lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
    __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
    _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
    spin_lock_bh include/linux/spinlock.h:356 [inline]
    sock_map_update_common+0x1b6/0x5b0 net/core/sock_map.c:490
    sock_map_update_elem_sys+0x55f/0x910 net/core/sock_map.c:579
    map_update_elem+0x53a/0x6f0 kernel/bpf/syscall.c:1641
    __sys_bpf+0x76f/0x810 kernel/bpf/syscall.c:5619
    __do_sys_bpf kernel/bpf/syscall.c:5738 [inline]
    __se_sys_bpf kernel/bpf/syscall.c:5736 [inline]
    __x64_sys_bpf+0x7c/0x90 kernel/bpf/syscall.c:5736
    do_syscall_64+0xfb/0x240
    entry_SYSCALL_64_after_hwframe+0x6d/0x75
   INITIAL USE at:
    lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
    __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
    _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
    spin_lock_bh include/linux/spinlock.h:356 [inline]
    sock_map_update_common+0x1b6/0x5b0 net/core/sock_map.c:490
    sock_map_update_elem_sys+0x55f/0x910 net/core/sock_map.c:579
    map_update_elem+0x53a/0x6f0 kernel/bpf/syscall.c:1641
    __sys_bpf+0x76f/0x810 kernel/bpf/syscall.c:5619
    __do_sys_bpf kernel/bpf/syscall.c:5738 [inline]
    __se_sys_bpf kernel/bpf/syscall.c:5736 [inline]
    __x64_sys_bpf+0x7c/0x90 kernel/bpf/syscall.c:5736
    do_syscall_64+0xfb/0x240
    entry_SYSCALL_64_after_hwframe+0x6d/0x75
 }
 ... key at: [] sock_map_alloc.__key+0x0/0x20
 ... acquired at:
   lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
   __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
   _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
   spin_lock_bh include/linux/spinlock.h:356 [inline]
   __sock_map_delete net/core/sock_map.c:414 [inline]
   sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446
   bpf_prog_de673fc90bd1569b+0x6c/0x70
   bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
   __bpf_prog_run include/linux/filter.h:657 [inline]
   bpf_prog_run include/linux/filter.h:664 [inline]
   __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
   bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
   trace_sched_kthread_work_queue_work include/trace/events/sched.h:64 [inline]
   kthread_insert_work+0x3f4/0x460 kernel/kthread.c:993
   kthread_queue_work+0xff/0x180 kernel/kthread.c:1021
   synchronize_rcu_expedited_queue_work kernel/rcu/tree_exp.h:469 [inline]
   synchronize_rcu_expedited+0x593/0x820 kernel/rcu/tree_exp.h:949
   synchronize_rcu+0x136/0x3e0 kernel/rcu/tree.c:3611
   rcu_sync_enter+0x1fa/0x350 kernel/rcu/sync.c:133
   percpu_down_write+0x63/0x320 kernel/locking/percpu-rwsem.c:232
   cgroup_attach_task_all+0x31/0xe0 kernel/cgroup/cgroup-v1.c:62
   kvm_vm_worker_thread+0xd5/0x580 arch/x86/kvm/../../../virt/kvm/kvm_main.c:6548
   kthread+0x2f0/0x390 kernel/kthread.c:388
   ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
   ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243

stack backtrace:
CPU: 1 PID: 7210 Comm: kvm-nx-lpage-re Not tainted 6.8.0-syzkaller-08951-gfe46a7dd189e #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x241/0x360 lib/dump_stack.c:114
 print_bad_irq_dependency kernel/locking/lockdep.c:2626 [inline]
 check_irq_usage kernel/locking/lockdep.c:2865 [inline]
 check_prev_add kernel/locking/lockdep.c:3138 [inline]
 check_prevs_add kernel/locking/lockdep.c:3253 [inline]
 validate_chain+0x4dc7/0x58e0 kernel/locking/lockdep.c:3869
 __lock_acquire+0x1346/0x1fd0 kernel/locking/lockdep.c:5137
 lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
 __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
 _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
 spin_lock_bh include/linux/spinlock.h:356 [inline]
 __sock_map_delete net/core/sock_map.c:414 [inline]
 sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446
 bpf_prog_de673fc90bd1569b+0x6c/0x70
 bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
 __bpf_prog_run include/linux/filter.h:657 [inline]
 bpf_prog_run include/linux/filter.h:664 [inline]
 __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
 bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
 trace_sched_kthread_work_queue_work include/trace/events/sched.h:64 [inline]
 kthread_insert_work+0x3f4/0x460 kernel/kthread.c:993
 kthread_queue_work+0xff/0x180 kernel/kthread.c:1021
 synchronize_rcu_expedited_queue_work kernel/rcu/tree_exp.h:469 [inline]
 synchronize_rcu_expedited+0x593/0x820 kernel/rcu/tree_exp.h:949
 synchronize_rcu+0x136/0x3e0 kernel/rcu/tree.c:3611
 rcu_sync_enter+0x1fa/0x350 kernel/rcu/sync.c:133
 percpu_down_write+0x63/0x320 kernel/locking/percpu-rwsem.c:232
 cgroup_attach_task_all+0x31/0xe0 kernel/cgroup/cgroup-v1.c:62
 kvm_vm_worker_thread+0xd5/0x580 arch/x86/kvm/../../../virt/kvm/kvm_main.c:6548
 kthread+0x2f0/0x390 kernel/kthread.c:388
 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243
------------[ cut here ]------------
raw_local_irq_restore() called with IRQs enabled
WARNING: CPU: 1 PID: 7210 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x29/0x40 kernel/locking/irqflag-debug.c:10
Modules linked in:
CPU: 1 PID: 7210 Comm: kvm-nx-lpage-re Not tainted 6.8.0-syzkaller-08951-gfe46a7dd189e #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
RIP: 0010:warn_bogus_irq_restore+0x29/0x40 kernel/locking/irqflag-debug.c:10
Code: 90 f3 0f 1e fa 90 80 3d bd 16 05 04 00 74 06 90 c3 cc cc cc cc c6 05 ae 16 05 04 01 90 48 c7 c7 e0 b9 aa 8b e8 88 34 ec f5 90 <0f> 0b 90 90 90 c3 cc cc cc cc 66 2e 0f 1f 84 00 00 00 00 00 0f 1f
RSP: 0018:ffffc9000acef7f8 EFLAGS: 00010246
RAX: 78a79117332c5800 RBX: 1ffff9200159df04 RCX: ffff888011695a00
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: ffffc9000acef890 R08: ffffffff8157cb22 R09: 1ffffffff1f0c1bd
R10: dffffc0000000000 R11: fffffbfff1f0c1be R12: dffffc0000000000
R13: 1ffff9200159df00 R14: ffffc9000acef820 R15: 0000000000000246
FS: 0000000000000000(0000) GS:ffff8880b9500000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000001b33b21000 CR3: 000000005997e000 CR4: 00000000003526f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:151 [inline]
 _raw_spin_unlock_irqrestore+0x120/0x140 kernel/locking/spinlock.c:194
 kthread_queue_work+0x110/0x180 kernel/kthread.c:1024
 synchronize_rcu_expedited_queue_work kernel/rcu/tree_exp.h:469 [inline]
 synchronize_rcu_expedited+0x593/0x820 kernel/rcu/tree_exp.h:949
 synchronize_rcu+0x136/0x3e0 kernel/rcu/tree.c:3611
 rcu_sync_enter+0x1fa/0x350 kernel/rcu/sync.c:133
 percpu_down_write+0x63/0x320 kernel/locking/percpu-rwsem.c:232
 cgroup_attach_task_all+0x31/0xe0 kernel/cgroup/cgroup-v1.c:62
 kvm_vm_worker_thread+0xd5/0x580 arch/x86/kvm/../../../virt/kvm/kvm_main.c:6548
 kthread+0x2f0/0x390 kernel/kthread.c:388
 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243
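
The inversion reads straight off the traces: KVM's PIT hrtimer queues kthread work from hardirq context (pit_timer_fn -> kthread_queue_work), which marks (worker)->lock HARDIRQ-safe, while sockmap's bucket lock is only ever taken with spin_lock_bh() and so stays HARDIRQ-unsafe. kthread_insert_work() fires the sched_kthread_work_queue_work tracepoint while holding (worker)->lock with IRQs off, so a BPF program attached there pulls &stab->lock in underneath a HARDIRQ-safe lock. Below is a minimal sketch of the program shape the "acquired at" path implies; the map sizing and the names sock_map / on_kthread_queue_work are illustrative guesses, not taken from the report, and the actual syzkaller reproducer may differ:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Hypothetical sockmap; any BPF_MAP_TYPE_SOCKMAP instance would do. */
  struct {
          __uint(type, BPF_MAP_TYPE_SOCKMAP);
          __uint(max_entries, 2);
          __type(key, __u32);
          __type(value, __u64);
  } sock_map SEC(".maps");

  /* Attached to the tracepoint that kthread_insert_work() fires while
   * holding (worker)->lock with IRQs disabled. The helper call lands in
   * sock_map_delete_elem() -> spin_lock_bh(&stab->lock), creating the
   * (worker)->lock -> &stab->lock dependency lockdep reports above. */
  SEC("tracepoint/sched/sched_kthread_work_queue_work")
  int on_kthread_queue_work(void *ctx)
  {
          __u32 key = 0;

          bpf_map_delete_elem(&sock_map, &key);
          return 0;
  }

  char _license[] SEC("license") = "GPL";

The trailing warn_bogus_irq_restore splat is consistent with the same picture: the spin_unlock_bh() in the map operation goes through the local_bh_enable() path, which (with IRQ tracing enabled) can leave interrupts re-enabled inside kthread_queue_work()'s irqsave section, so the later spin_unlock_irqrestore() finds IRQs already on.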