=====================================================
WARNING: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected
6.8.0-syzkaller-08951-gfe46a7dd189e #0 Not tainted
-----------------------------------------------------
kvm-nx-lpage-re/8378 [HC0[0]:SC0[2]:HE0:SE0] is trying to acquire:
ffff88804d643200 (&stab->lock){+...}-{2:2}, at: spin_lock_bh include/linux/spinlock.h:356 [inline]
ffff88804d643200 (&stab->lock){+...}-{2:2}, at: __sock_map_delete net/core/sock_map.c:414 [inline]
ffff88804d643200 (&stab->lock){+...}-{2:2}, at: sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446

and this task is already holding:
ffff888016ef7020 ((worker)->lock){-...}-{2:2}, at: kthread_queue_work+0x27/0x180 kernel/kthread.c:1019
which would create a new lock dependency:
 ((worker)->lock){-...}-{2:2} -> (&stab->lock){+...}-{2:2}

but this new dependency connects a HARDIRQ-irq-safe lock:
 ((worker)->lock){-...}-{2:2}

... which became HARDIRQ-irq-safe at:
  lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
  __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
  _raw_spin_lock_irqsave+0xd5/0x120 kernel/locking/spinlock.c:162
  kthread_queue_work+0x27/0x180 kernel/kthread.c:1019
  pit_timer_fn+0xa5/0x180 arch/x86/kvm/i8254.c:276
  __run_hrtimer kernel/time/hrtimer.c:1692 [inline]
  __hrtimer_run_queues+0x595/0xd00 kernel/time/hrtimer.c:1756
  hrtimer_interrupt+0x396/0x990 kernel/time/hrtimer.c:1818
  local_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1032 [inline]
  __sysvec_apic_timer_interrupt+0x107/0x3a0 arch/x86/kernel/apic/apic.c:1049
  instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1043 [inline]
  sysvec_apic_timer_interrupt+0xa1/0xc0 arch/x86/kernel/apic/apic.c:1043
  asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:702
  lock_acquire+0x25b/0x530 kernel/locking/lockdep.c:5758
  rcu_lock_acquire include/linux/rcupdate.h:298 [inline]
  rcu_read_lock include/linux/rcupdate.h:750 [inline]
  percpu_ref_get_many+0x36/0x140 include/linux/percpu-refcount.h:202
  percpu_ref_get include/linux/percpu-refcount.h:222 [inline]
  obj_cgroup_get include/linux/memcontrol.h:811 [inline]
  __memcg_kmem_charge_page+0x13c/0x250 mm/memcontrol.c:3329
  __alloc_pages+0x28c/0x680 mm/page_alloc.c:4586
  alloc_pages_mpol+0x3de/0x650 mm/mempolicy.c:2133
  __get_free_pages+0xc/0x30 mm/page_alloc.c:4616
  mmu_memory_cache_alloc_obj arch/x86/kvm/../../../virt/kvm/kvm_main.c:409 [inline]
  __kvm_mmu_topup_memory_cache+0x1bc/0x4b0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:436
  mmu_topup_memory_caches arch/x86/kvm/mmu/mmu.c:688 [inline]
  kvm_mmu_load+0x153/0x2680 arch/x86/kvm/mmu/mmu.c:5592
  kvm_mmu_reload arch/x86/kvm/mmu.h:132 [inline]
  vcpu_enter_guest arch/x86/kvm/x86.c:10892 [inline]
  vcpu_run+0x607d/0x8790 arch/x86/kvm/x86.c:11184
  kvm_arch_vcpu_ioctl_run+0xa7e/0x1920 arch/x86/kvm/x86.c:11410
  kvm_vcpu_ioctl+0x7f5/0xd00 arch/x86/kvm/../../../virt/kvm/kvm_main.c:4447
  vfs_ioctl fs/ioctl.c:51 [inline]
  __do_sys_ioctl fs/ioctl.c:904 [inline]
  __se_sys_ioctl+0xfc/0x170 fs/ioctl.c:890
  do_syscall_64+0xfb/0x240
  entry_SYSCALL_64_after_hwframe+0x6d/0x75

to a HARDIRQ-irq-unsafe lock:
 (&stab->lock){+...}-{2:2}

... which became HARDIRQ-irq-unsafe at:
...
  lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
  __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
  _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
  spin_lock_bh include/linux/spinlock.h:356 [inline]
  __sock_map_delete net/core/sock_map.c:414 [inline]
  sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446
  0xffffffffa0001be0
  bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
  __bpf_prog_run include/linux/filter.h:657 [inline]
  bpf_prog_run include/linux/filter.h:664 [inline]
  __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
  bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
  trace_kfree include/trace/events/kmem.h:94 [inline]
  kfree+0x291/0x380 mm/slub.c:4377
  vfree+0x24c/0x2e0 mm/vmalloc.c:2918
  copy_entries_to_user net/ipv4/netfilter/ip_tables.c:866 [inline]
  get_entries net/ipv4/netfilter/ip_tables.c:1022 [inline]
  do_ipt_get_ctl+0x11df/0x1810 net/ipv4/netfilter/ip_tables.c:1660
  nf_getsockopt+0x299/0x2c0 net/netfilter/nf_sockopt.c:116
  ip_getsockopt+0x222/0x2e0 net/ipv4/ip_sockglue.c:1777
  tcp_getsockopt+0x163/0x1c0 net/ipv4/tcp.c:4373
  do_sock_getsockopt+0x373/0x850 net/socket.c:2373
  __sys_getsockopt+0x271/0x330 net/socket.c:2402
  __do_sys_getsockopt net/socket.c:2412 [inline]
  __se_sys_getsockopt net/socket.c:2409 [inline]
  __x64_sys_getsockopt+0xb5/0xd0 net/socket.c:2409
  do_syscall_64+0xfb/0x240
  entry_SYSCALL_64_after_hwframe+0x6d/0x75

other info that might help us debug this:

 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&stab->lock);
                               local_irq_disable();
                               lock((worker)->lock);
                               lock(&stab->lock);
  lock((worker)->lock);

 *** DEADLOCK ***

6 locks held by kvm-nx-lpage-re/8378:
 #0: ffffffff8e15f048 (cgroup_mutex){+.+.}-{3:3}, at: cgroup_lock include/linux/cgroup.h:368 [inline]
 #0: ffffffff8e15f048 (cgroup_mutex){+.+.}-{3:3}, at: cgroup_attach_task_all+0x27/0xe0 kernel/cgroup/cgroup-v1.c:61
 #1: ffffffff8dfccb90 (cpu_hotplug_lock){++++}-{0:0}, at: cgroup_attach_lock+0x11/0x40 kernel/cgroup/cgroup.c:2413
 #2: ffffffff8e15f230 (cgroup_threadgroup_rwsem){++++}-{0:0}, at: cgroup_attach_task_all+0x31/0xe0 kernel/cgroup/cgroup-v1.c:62
 #3: ffffffff8e1373b8 (rcu_state.exp_mutex){+.+.}-{3:3}, at: exp_funnel_lock kernel/rcu/tree_exp.h:291 [inline]
 #3: ffffffff8e1373b8 (rcu_state.exp_mutex){+.+.}-{3:3}, at: synchronize_rcu_expedited+0x39a/0x820 kernel/rcu/tree_exp.h:939
 #4: ffff888016ef7020 ((worker)->lock){-...}-{2:2}, at: kthread_queue_work+0x27/0x180 kernel/kthread.c:1019
 #5: ffffffff8e132020 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:298 [inline]
 #5: ffffffff8e132020 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:750 [inline]
 #5: ffffffff8e132020 (rcu_read_lock){....}-{1:2}, at: __bpf_trace_run kernel/trace/bpf_trace.c:2380 [inline]
 #5: ffffffff8e132020 (rcu_read_lock){....}-{1:2}, at: bpf_trace_run2+0x114/0x420 kernel/trace/bpf_trace.c:2420

the dependencies between HARDIRQ-irq-safe lock and the holding lock:
-> ((worker)->lock){-...}-{2:2} {
   IN-HARDIRQ-W at:
     lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
     __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
     _raw_spin_lock_irqsave+0xd5/0x120 kernel/locking/spinlock.c:162
     kthread_queue_work+0x27/0x180 kernel/kthread.c:1019
     pit_timer_fn+0xa5/0x180 arch/x86/kvm/i8254.c:276
     __run_hrtimer kernel/time/hrtimer.c:1692 [inline]
     __hrtimer_run_queues+0x595/0xd00 kernel/time/hrtimer.c:1756
     hrtimer_interrupt+0x396/0x990 kernel/time/hrtimer.c:1818
     local_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1032 [inline]
     __sysvec_apic_timer_interrupt+0x107/0x3a0 arch/x86/kernel/apic/apic.c:1049
     instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1043 [inline]
     sysvec_apic_timer_interrupt+0xa1/0xc0 arch/x86/kernel/apic/apic.c:1043
     asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:702
     lock_acquire+0x25b/0x530 kernel/locking/lockdep.c:5758
     rcu_lock_acquire include/linux/rcupdate.h:298 [inline]
     rcu_read_lock include/linux/rcupdate.h:750 [inline]
     percpu_ref_get_many+0x36/0x140 include/linux/percpu-refcount.h:202
     percpu_ref_get include/linux/percpu-refcount.h:222 [inline]
     obj_cgroup_get include/linux/memcontrol.h:811 [inline]
     __memcg_kmem_charge_page+0x13c/0x250 mm/memcontrol.c:3329
     __alloc_pages+0x28c/0x680 mm/page_alloc.c:4586
     alloc_pages_mpol+0x3de/0x650 mm/mempolicy.c:2133
     __get_free_pages+0xc/0x30 mm/page_alloc.c:4616
     mmu_memory_cache_alloc_obj arch/x86/kvm/../../../virt/kvm/kvm_main.c:409 [inline]
     __kvm_mmu_topup_memory_cache+0x1bc/0x4b0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:436
     mmu_topup_memory_caches arch/x86/kvm/mmu/mmu.c:688 [inline]
     kvm_mmu_load+0x153/0x2680 arch/x86/kvm/mmu/mmu.c:5592
     kvm_mmu_reload arch/x86/kvm/mmu.h:132 [inline]
     vcpu_enter_guest arch/x86/kvm/x86.c:10892 [inline]
     vcpu_run+0x607d/0x8790 arch/x86/kvm/x86.c:11184
     kvm_arch_vcpu_ioctl_run+0xa7e/0x1920 arch/x86/kvm/x86.c:11410
     kvm_vcpu_ioctl+0x7f5/0xd00 arch/x86/kvm/../../../virt/kvm/kvm_main.c:4447
     vfs_ioctl fs/ioctl.c:51 [inline]
     __do_sys_ioctl fs/ioctl.c:904 [inline]
     __se_sys_ioctl+0xfc/0x170 fs/ioctl.c:890
     do_syscall_64+0xfb/0x240
     entry_SYSCALL_64_after_hwframe+0x6d/0x75
   INITIAL USE at:
     lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
     __raw_spin_lock_irq include/linux/spinlock_api_smp.h:119 [inline]
     _raw_spin_lock_irq+0xd3/0x120 kernel/locking/spinlock.c:170
     kthread_worker_fn+0x236/0xab0 kernel/kthread.c:828
     kthread+0x2f0/0x390 kernel/kthread.c:388
     ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
     ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243
 }
 ... key at: [] __kthread_create_worker.__key+0x0/0x20

the dependencies between the lock to be acquired and HARDIRQ-irq-unsafe lock:
-> (&stab->lock){+...}-{2:2} {
   HARDIRQ-ON-W at:
     lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
     __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
     _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
     spin_lock_bh include/linux/spinlock.h:356 [inline]
     __sock_map_delete net/core/sock_map.c:414 [inline]
     sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446
     0xffffffffa0001be0
     bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
     __bpf_prog_run include/linux/filter.h:657 [inline]
     bpf_prog_run include/linux/filter.h:664 [inline]
     __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
     bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
     trace_kfree include/trace/events/kmem.h:94 [inline]
     kfree+0x291/0x380 mm/slub.c:4377
     vfree+0x24c/0x2e0 mm/vmalloc.c:2918
     copy_entries_to_user net/ipv4/netfilter/ip_tables.c:866 [inline]
     get_entries net/ipv4/netfilter/ip_tables.c:1022 [inline]
     do_ipt_get_ctl+0x11df/0x1810 net/ipv4/netfilter/ip_tables.c:1660
     nf_getsockopt+0x299/0x2c0 net/netfilter/nf_sockopt.c:116
     ip_getsockopt+0x222/0x2e0 net/ipv4/ip_sockglue.c:1777
     tcp_getsockopt+0x163/0x1c0 net/ipv4/tcp.c:4373
     do_sock_getsockopt+0x373/0x850 net/socket.c:2373
     __sys_getsockopt+0x271/0x330 net/socket.c:2402
     __do_sys_getsockopt net/socket.c:2412 [inline]
     __se_sys_getsockopt net/socket.c:2409 [inline]
     __x64_sys_getsockopt+0xb5/0xd0 net/socket.c:2409
     do_syscall_64+0xfb/0x240
     entry_SYSCALL_64_after_hwframe+0x6d/0x75
   INITIAL USE at:
     lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
     __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
     _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
     spin_lock_bh include/linux/spinlock.h:356 [inline]
     __sock_map_delete net/core/sock_map.c:414 [inline]
     sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446
     0xffffffffa0001be0
     bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
     __bpf_prog_run include/linux/filter.h:657 [inline]
     bpf_prog_run include/linux/filter.h:664 [inline]
     __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
     bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
     trace_kfree include/trace/events/kmem.h:94 [inline]
     kfree+0x291/0x380 mm/slub.c:4377
     vfree+0x24c/0x2e0 mm/vmalloc.c:2918
     copy_entries_to_user net/ipv4/netfilter/ip_tables.c:866 [inline]
     get_entries net/ipv4/netfilter/ip_tables.c:1022 [inline]
     do_ipt_get_ctl+0x11df/0x1810 net/ipv4/netfilter/ip_tables.c:1660
     nf_getsockopt+0x299/0x2c0 net/netfilter/nf_sockopt.c:116
     ip_getsockopt+0x222/0x2e0 net/ipv4/ip_sockglue.c:1777
     tcp_getsockopt+0x163/0x1c0 net/ipv4/tcp.c:4373
     do_sock_getsockopt+0x373/0x850 net/socket.c:2373
     __sys_getsockopt+0x271/0x330 net/socket.c:2402
     __do_sys_getsockopt net/socket.c:2412 [inline]
     __se_sys_getsockopt net/socket.c:2409 [inline]
     __x64_sys_getsockopt+0xb5/0xd0 net/socket.c:2409
     do_syscall_64+0xfb/0x240
     entry_SYSCALL_64_after_hwframe+0x6d/0x75
 }
 ... key at: [] sock_map_alloc.__key+0x0/0x20
 ... acquired at:
   lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
   __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
   _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
   spin_lock_bh include/linux/spinlock.h:356 [inline]
   __sock_map_delete net/core/sock_map.c:414 [inline]
   sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446
   bpf_prog_bc20a984d57ef3f1+0x66/0x6a
   bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
   __bpf_prog_run include/linux/filter.h:657 [inline]
   bpf_prog_run include/linux/filter.h:664 [inline]
   __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
   bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
   trace_sched_kthread_work_queue_work include/trace/events/sched.h:64 [inline]
   kthread_insert_work+0x3f4/0x460 kernel/kthread.c:993
   kthread_queue_work+0xff/0x180 kernel/kthread.c:1021
   synchronize_rcu_expedited_queue_work kernel/rcu/tree_exp.h:469 [inline]
   synchronize_rcu_expedited+0x593/0x820 kernel/rcu/tree_exp.h:949
   synchronize_rcu+0x136/0x3e0 kernel/rcu/tree.c:3611
   rcu_sync_enter+0x1fa/0x350 kernel/rcu/sync.c:133
   percpu_down_write+0x63/0x320 kernel/locking/percpu-rwsem.c:232
   cgroup_attach_task_all+0x31/0xe0 kernel/cgroup/cgroup-v1.c:62
   kvm_vm_worker_thread+0x39b/0x580 arch/x86/kvm/../../../virt/kvm/kvm_main.c:6587
   kthread+0x2f0/0x390 kernel/kthread.c:388
   ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
   ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243

stack backtrace:
CPU: 1 PID: 8378 Comm: kvm-nx-lpage-re Not tainted 6.8.0-syzkaller-08951-gfe46a7dd189e #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x241/0x360 lib/dump_stack.c:114
 print_bad_irq_dependency kernel/locking/lockdep.c:2626 [inline]
 check_irq_usage kernel/locking/lockdep.c:2865 [inline]
 check_prev_add kernel/locking/lockdep.c:3138 [inline]
 check_prevs_add kernel/locking/lockdep.c:3253 [inline]
 validate_chain+0x4dc7/0x58e0 kernel/locking/lockdep.c:3869
 __lock_acquire+0x1346/0x1fd0 kernel/locking/lockdep.c:5137
 lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
 __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
 _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
 spin_lock_bh include/linux/spinlock.h:356 [inline]
 __sock_map_delete net/core/sock_map.c:414 [inline]
 sock_map_delete_elem+0x97/0x140 net/core/sock_map.c:446
 bpf_prog_bc20a984d57ef3f1+0x66/0x6a
 bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
 __bpf_prog_run include/linux/filter.h:657 [inline]
 bpf_prog_run include/linux/filter.h:664 [inline]
 __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
 bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
 trace_sched_kthread_work_queue_work include/trace/events/sched.h:64 [inline]
 kthread_insert_work+0x3f4/0x460 kernel/kthread.c:993
 kthread_queue_work+0xff/0x180 kernel/kthread.c:1021
 synchronize_rcu_expedited_queue_work kernel/rcu/tree_exp.h:469 [inline]
 synchronize_rcu_expedited+0x593/0x820 kernel/rcu/tree_exp.h:949
 synchronize_rcu+0x136/0x3e0 kernel/rcu/tree.c:3611
 rcu_sync_enter+0x1fa/0x350 kernel/rcu/sync.c:133
 percpu_down_write+0x63/0x320 kernel/locking/percpu-rwsem.c:232
 cgroup_attach_task_all+0x31/0xe0 kernel/cgroup/cgroup-v1.c:62
 kvm_vm_worker_thread+0x39b/0x580 arch/x86/kvm/../../../virt/kvm/kvm_main.c:6587
 kthread+0x2f0/0x390 kernel/kthread.c:388
 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243
------------[ cut here ]------------
raw_local_irq_restore() called with IRQs enabled
WARNING: CPU: 1 PID: 8378 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x29/0x40 kernel/locking/irqflag-debug.c:10
Modules linked in:
CPU: 1 PID: 8378 Comm: kvm-nx-lpage-re Not tainted 6.8.0-syzkaller-08951-gfe46a7dd189e #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
RIP: 0010:warn_bogus_irq_restore+0x29/0x40 kernel/locking/irqflag-debug.c:10
Code: 90 f3 0f 1e fa 90 80 3d bd 16 05 04 00 74 06 90 c3 cc cc cc cc c6 05 ae 16 05 04 01 90 48 c7 c7 e0 b9 aa 8b e8 88 34 ec f5 90 <0f> 0b 90 90 90 c3 cc cc cc cc 66 2e 0f 1f 84 00 00 00 00 00 0f 1f
RSP: 0018:ffffc90009c2f7f8 EFLAGS: 00010246
RAX: 805f4110a702ce00 RBX: 1ffff92001385f04 RCX: ffff88805dac3c00
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: ffffc90009c2f890 R08: ffffffff8157cb22 R09: fffffbfff1bf9650
R10: dffffc0000000000 R11: fffffbfff1bf9650 R12: dffffc0000000000
R13: 1ffff92001385f00 R14: ffffc90009c2f820 R15: 0000000000000246
FS: 0000000000000000(0000) GS:ffff8880b9500000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f4ad3bf6f78 CR3: 00000000670be000 CR4: 00000000003526f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:151 [inline]
 _raw_spin_unlock_irqrestore+0x120/0x140 kernel/locking/spinlock.c:194
 kthread_queue_work+0x110/0x180 kernel/kthread.c:1024
 synchronize_rcu_expedited_queue_work kernel/rcu/tree_exp.h:469 [inline]
 synchronize_rcu_expedited+0x593/0x820 kernel/rcu/tree_exp.h:949
 synchronize_rcu+0x136/0x3e0 kernel/rcu/tree.c:3611
 rcu_sync_enter+0x1fa/0x350 kernel/rcu/sync.c:133
 percpu_down_write+0x63/0x320 kernel/locking/percpu-rwsem.c:232
 cgroup_attach_task_all+0x31/0xe0 kernel/cgroup/cgroup-v1.c:62
 kvm_vm_worker_thread+0x39b/0x580 arch/x86/kvm/../../../virt/kvm/kvm_main.c:6587
 kthread+0x2f0/0x390 kernel/kthread.c:388
 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243
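
For context, the inversion lockdep is reporting reduces to this pattern: (worker)->lock is HARDIRQ-safe (it is taken from hard interrupt context via pit_timer_fn -> kthread_queue_work), while &stab->lock is only taken with spin_lock_bh and is therefore HARDIRQ-unsafe; the BPF program attached to the sched_kthread_work_queue_work tracepoint acquires &stab->lock while (worker)->lock is held, creating the (worker)->lock -> &stab->lock dependency. Below is a minimal kernel-style sketch of that pattern; the lock names and functions are illustrative stand-ins inferred from the traces above, not the actual kernel code paths.

/*
 * Illustrative sketch only: hypothetical locks standing in for
 * (worker)->lock and &stab->lock, not the real kernel code.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(worker_lock);	/* HARDIRQ-safe, cf. (worker)->lock */
static DEFINE_SPINLOCK(stab_lock);	/* HARDIRQ-unsafe, cf. &stab->lock  */

/* Hardirq path (cf. pit_timer_fn -> kthread_queue_work): makes
 * worker_lock HARDIRQ-safe in lockdep's eyes. */
static void timer_irq_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&worker_lock, flags);
	/* queue kthread work */
	spin_unlock_irqrestore(&worker_lock, flags);
}

/* Process-context path (cf. kthread_queue_work -> tracepoint -> BPF prog
 * -> sock_map_delete_elem): takes the BH-only lock while worker_lock is
 * held, creating the worker_lock -> stab_lock dependency. */
static void queue_work_with_tracepoint(void)
{
	unsigned long flags;

	spin_lock_irqsave(&worker_lock, flags);
	spin_lock_bh(&stab_lock);	/* new, HARDIRQ-unsafe dependency */
	spin_unlock_bh(&stab_lock);
	spin_unlock_irqrestore(&worker_lock, flags);
}

/* Elsewhere (cf. sock_map_delete_elem from the kfree tracepoint):
 * stab_lock is held with interrupts enabled.  If the timer interrupt
 * fires here while another CPU is inside queue_work_with_tracepoint(),
 * the two CPUs deadlock exactly as in the scenario printed above. */
static void sockmap_delete_path(void)
{
	spin_lock_bh(&stab_lock);
	/* hardirq may arrive here and spin on worker_lock */
	spin_unlock_bh(&stab_lock);
}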