=====================================================
WARNING: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected
5.12.0-rc5-syzkaller #0 Not tainted
-----------------------------------------------------
syz-executor.1/16056 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire:
ffffc9000998a230 (&kvm->arch.pvclock_gtod_sync_lock){+.+.}-{2:2}, at: spin_lock include/linux/spinlock.h:354 [inline]
ffffc9000998a230 (&kvm->arch.pvclock_gtod_sync_lock){+.+.}-{2:2}, at: get_kvmclock_ns+0x25/0x390 arch/x86/kvm/x86.c:2587

and this task is already holding:
ffff8880b9c35198 (&rq->lock){-.-.}-{2:2}, at: rq_lock kernel/sched/sched.h:1321 [inline]
ffff8880b9c35198 (&rq->lock){-.-.}-{2:2}, at: __schedule+0x21c/0x21b0 kernel/sched/core.c:4990
which would create a new lock dependency:
 (&rq->lock){-.-.}-{2:2} -> (&kvm->arch.pvclock_gtod_sync_lock){+.+.}-{2:2}

but this new dependency connects a HARDIRQ-irq-safe lock:
 (&rq->lock){-.-.}-{2:2}

... which became HARDIRQ-irq-safe at:
  lock_acquire kernel/locking/lockdep.c:5510 [inline]
  lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
  __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
  _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
  rq_lock kernel/sched/sched.h:1321 [inline]
  scheduler_tick+0xa4/0x4b0 kernel/sched/core.c:4538
  update_process_times+0x191/0x200 kernel/time/timer.c:1801
  tick_periodic+0x79/0x230 kernel/time/tick-common.c:100
  tick_handle_periodic+0x41/0x120 kernel/time/tick-common.c:112
  timer_interrupt+0x3f/0x60 arch/x86/kernel/time.c:57
  __handle_irq_event_percpu+0x303/0x8f0 kernel/irq/handle.c:156
  handle_irq_event_percpu kernel/irq/handle.c:196 [inline]
  handle_irq_event+0x102/0x290 kernel/irq/handle.c:213
  handle_level_irq+0x256/0x6e0 kernel/irq/chip.c:650
  generic_handle_irq_desc include/linux/irqdesc.h:158 [inline]
  handle_irq arch/x86/kernel/irq.c:231 [inline]
  __common_interrupt+0x9e/0x200 arch/x86/kernel/irq.c:250
  common_interrupt+0x9f/0xd0 arch/x86/kernel/irq.c:240
  asm_common_interrupt+0x1e/0x40 arch/x86/include/asm/idtentry.h:623
  __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:161 [inline]
  _raw_spin_unlock_irqrestore+0x38/0x70 kernel/locking/spinlock.c:191
  __setup_irq+0xc72/0x1ce0 kernel/irq/manage.c:1741
  request_threaded_irq+0x28a/0x3b0 kernel/irq/manage.c:2131
  request_irq include/linux/interrupt.h:160 [inline]
  setup_default_timer_irq arch/x86/kernel/time.c:70 [inline]
  hpet_time_init+0x28/0x42 arch/x86/kernel/time.c:82
  x86_late_time_init+0x58/0x94 arch/x86/kernel/time.c:94
  start_kernel+0x3ee/0x496 init/main.c:1028
  secondary_startup_64_no_verify+0xb0/0xbb

to a HARDIRQ-irq-unsafe lock:
 (&kvm->arch.pvclock_gtod_sync_lock){+.+.}-{2:2}

... which became HARDIRQ-irq-unsafe at:
...
  lock_acquire kernel/locking/lockdep.c:5510 [inline]
  lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
  __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
  _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
  spin_lock include/linux/spinlock.h:354 [inline]
  kvm_synchronize_tsc+0x459/0x1230 arch/x86/kvm/x86.c:2332
  kvm_arch_vcpu_postcreate+0x73/0x180 arch/x86/kvm/x86.c:10183
  kvm_vm_ioctl_create_vcpu arch/x86/kvm/../../../virt/kvm/kvm_main.c:3239 [inline]
  kvm_vm_ioctl+0x1b2d/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3839
  vfs_ioctl fs/ioctl.c:48 [inline]
  __do_sys_ioctl fs/ioctl.c:753 [inline]
  __se_sys_ioctl fs/ioctl.c:739 [inline]
  __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
  do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
  entry_SYSCALL_64_after_hwframe+0x44/0xae

other info that might help us debug this:

 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&kvm->arch.pvclock_gtod_sync_lock);
                               local_irq_disable();
                               lock(&rq->lock);
                               lock(&kvm->arch.pvclock_gtod_sync_lock);
  <Interrupt>
    lock(&rq->lock);

 *** DEADLOCK ***

3 locks held by syz-executor.1/16056:
 #0: ffff88802b4a5048 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0x175/0xd90 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3293
 #1: ffffc9000998abe0 (&kvm->srcu){....}-{0:0}, at: vcpu_run arch/x86/kvm/x86.c:9275 [inline]
 #1: ffffc9000998abe0 (&kvm->srcu){....}-{0:0}, at: kvm_arch_vcpu_ioctl_run+0x34d/0x19b0 arch/x86/kvm/x86.c:9510
 #2: ffff8880b9c35198 (&rq->lock){-.-.}-{2:2}, at: rq_lock kernel/sched/sched.h:1321 [inline]
 #2: ffff8880b9c35198 (&rq->lock){-.-.}-{2:2}, at: __schedule+0x21c/0x21b0 kernel/sched/core.c:4990

the dependencies between HARDIRQ-irq-safe lock and the holding lock:
-> (&rq->lock){-.-.}-{2:2} {
   IN-HARDIRQ-W at:
     lock_acquire kernel/locking/lockdep.c:5510 [inline]
     lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
     __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
     _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
     rq_lock kernel/sched/sched.h:1321 [inline]
     scheduler_tick+0xa4/0x4b0 kernel/sched/core.c:4538
     update_process_times+0x191/0x200 kernel/time/timer.c:1801
     tick_periodic+0x79/0x230 kernel/time/tick-common.c:100
     tick_handle_periodic+0x41/0x120 kernel/time/tick-common.c:112
     timer_interrupt+0x3f/0x60 arch/x86/kernel/time.c:57
     __handle_irq_event_percpu+0x303/0x8f0 kernel/irq/handle.c:156
     handle_irq_event_percpu kernel/irq/handle.c:196 [inline]
     handle_irq_event+0x102/0x290 kernel/irq/handle.c:213
     handle_level_irq+0x256/0x6e0 kernel/irq/chip.c:650
     generic_handle_irq_desc include/linux/irqdesc.h:158 [inline]
     handle_irq arch/x86/kernel/irq.c:231 [inline]
     __common_interrupt+0x9e/0x200 arch/x86/kernel/irq.c:250
     common_interrupt+0x9f/0xd0 arch/x86/kernel/irq.c:240
     asm_common_interrupt+0x1e/0x40 arch/x86/include/asm/idtentry.h:623
     __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:161 [inline]
     _raw_spin_unlock_irqrestore+0x38/0x70 kernel/locking/spinlock.c:191
     __setup_irq+0xc72/0x1ce0 kernel/irq/manage.c:1741
     request_threaded_irq+0x28a/0x3b0 kernel/irq/manage.c:2131
     request_irq include/linux/interrupt.h:160 [inline]
     setup_default_timer_irq arch/x86/kernel/time.c:70 [inline]
     hpet_time_init+0x28/0x42 arch/x86/kernel/time.c:82
     x86_late_time_init+0x58/0x94 arch/x86/kernel/time.c:94
     start_kernel+0x3ee/0x496 init/main.c:1028
     secondary_startup_64_no_verify+0xb0/0xbb
   IN-SOFTIRQ-W at:
     lock_acquire kernel/locking/lockdep.c:5510 [inline]
     lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
     __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
     _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
     rq_lock kernel/sched/sched.h:1321 [inline]
     ttwu_queue kernel/sched/core.c:3184 [inline]
     try_to_wake_up+0x5e6/0x14a0 kernel/sched/core.c:3464
     call_timer_fn+0x1a5/0x6b0 kernel/time/timer.c:1431
     expire_timers kernel/time/timer.c:1476 [inline]
     __run_timers.part.0+0x67c/0xa50 kernel/time/timer.c:1745
     __run_timers kernel/time/timer.c:1726 [inline]
     run_timer_softirq+0xb3/0x1d0 kernel/time/timer.c:1758
     __do_softirq+0x29b/0x9f6 kernel/softirq.c:345
     invoke_softirq kernel/softirq.c:221 [inline]
     __irq_exit_rcu kernel/softirq.c:422 [inline]
     irq_exit_rcu+0x134/0x200 kernel/softirq.c:434
     sysvec_apic_timer_interrupt+0x93/0xc0 arch/x86/kernel/apic/apic.c:1100
     asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:632
     __sanitizer_cov_trace_pc+0x0/0x60 kernel/kcov.c:917
     queue_delayed_work_on+0xbb/0x120 kernel/workqueue.c:1685
     process_one_work+0x98d/0x1600 kernel/workqueue.c:2275
     worker_thread+0x64c/0x1120 kernel/workqueue.c:2421
     kthread+0x3b1/0x4a0 kernel/kthread.c:292
     ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294
   INITIAL USE at:
     lock_acquire kernel/locking/lockdep.c:5510 [inline]
     lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
     __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
     _raw_spin_lock_irqsave+0x39/0x50 kernel/locking/spinlock.c:159
     rq_attach_root+0x20/0x2e0 kernel/sched/topology.c:470
     sched_init+0x6e8/0xbf3 kernel/sched/core.c:8213
     start_kernel+0x18e/0x496 init/main.c:920
     secondary_startup_64_no_verify+0xb0/0xbb
 }
 ... key at: [] __key.298+0x0/0x40
 ... acquired at:
   lock_acquire kernel/locking/lockdep.c:5510 [inline]
   lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
   __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
   _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
   spin_lock include/linux/spinlock.h:354 [inline]
   get_kvmclock_ns+0x25/0x390 arch/x86/kvm/x86.c:2587
   kvm_xen_update_runstate+0x3d/0x2c0 arch/x86/kvm/xen.c:69
   kvm_xen_update_runstate_guest+0x74/0x320 arch/x86/kvm/xen.c:100
   kvm_xen_runstate_set_preempted arch/x86/kvm/xen.h:96 [inline]
   kvm_arch_vcpu_put+0x2d8/0x5a0 arch/x86/kvm/x86.c:4062
   kvm_sched_out+0xbf/0x100 arch/x86/kvm/../../../virt/kvm/kvm_main.c:4876
   __fire_sched_out_preempt_notifiers kernel/sched/core.c:3922 [inline]
   fire_sched_out_preempt_notifiers kernel/sched/core.c:3930 [inline]
   prepare_task_switch kernel/sched/core.c:4126 [inline]
   context_switch kernel/sched/core.c:4274 [inline]
   __schedule+0xfd0/0x21b0 kernel/sched/core.c:5073
   preempt_schedule_irq+0x4e/0x90 kernel/sched/core.c:5530
   irqentry_exit_cond_resched kernel/entry/common.c:392 [inline]
   irqentry_exit_cond_resched kernel/entry/common.c:384 [inline]
   irqentry_exit+0x7a/0xa0 kernel/entry/common.c:428
   asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:632
   __seqprop_spinlock_sequence include/linux/seqlock.h:277 [inline]
   read_seqbegin include/linux/seqlock.h:840 [inline]
   zone_span_seqbegin include/linux/memory_hotplug.h:81 [inline]
   page_outside_zone_boundaries mm/page_alloc.c:568 [inline]
   bad_range+0xcf/0x300 mm/page_alloc.c:597
   rmqueue mm/page_alloc.c:3529 [inline]
   get_page_from_freelist+0x1c4b/0x3fb0 mm/page_alloc.c:3948
   __alloc_pages_nodemask+0x2d6/0x730 mm/page_alloc.c:5001
   alloc_pages_current+0x18c/0x2a0 mm/mempolicy.c:2277
   alloc_pages include/linux/gfp.h:561 [inline]
   __get_free_pages+0x8/0x40 mm/page_alloc.c:5044
   mmu_memory_cache_alloc_obj arch/x86/kvm/../../../virt/kvm/kvm_main.c:353 [inline]
   kvm_mmu_topup_memory_cache+0x16e/0x1f0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:363
   mmu_topup_memory_caches+0x53/0xd0 arch/x86/kvm/mmu/mmu.c:669
   kvm_mmu_load+0x7d/0xf60 arch/x86/kvm/mmu/mmu.c:4802
   kvm_mmu_reload arch/x86/kvm/mmu.h:81 [inline]
   vcpu_enter_guest+0x2e68/0x47f0 arch/x86/kvm/x86.c:9061
   vcpu_run arch/x86/kvm/x86.c:9282 [inline]
   kvm_arch_vcpu_ioctl_run+0x47d/0x19b0 arch/x86/kvm/x86.c:9510
   kvm_vcpu_ioctl+0x467/0xd90 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3316
   vfs_ioctl fs/ioctl.c:48 [inline]
   __do_sys_ioctl fs/ioctl.c:753 [inline]
   __se_sys_ioctl fs/ioctl.c:739 [inline]
   __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
   do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
   entry_SYSCALL_64_after_hwframe+0x44/0xae

the dependencies between the lock to be acquired and HARDIRQ-irq-unsafe lock:
-> (&kvm->arch.pvclock_gtod_sync_lock){+.+.}-{2:2} {
   HARDIRQ-ON-W at:
     lock_acquire kernel/locking/lockdep.c:5510 [inline]
     lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
     __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
     _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
     spin_lock include/linux/spinlock.h:354 [inline]
     kvm_synchronize_tsc+0x459/0x1230 arch/x86/kvm/x86.c:2332
     kvm_arch_vcpu_postcreate+0x73/0x180 arch/x86/kvm/x86.c:10183
     kvm_vm_ioctl_create_vcpu arch/x86/kvm/../../../virt/kvm/kvm_main.c:3239 [inline]
     kvm_vm_ioctl+0x1b2d/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3839
     vfs_ioctl fs/ioctl.c:48 [inline]
     __do_sys_ioctl fs/ioctl.c:753 [inline]
     __se_sys_ioctl fs/ioctl.c:739 [inline]
     __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
     do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
     entry_SYSCALL_64_after_hwframe+0x44/0xae
   SOFTIRQ-ON-W at:
     lock_acquire kernel/locking/lockdep.c:5510 [inline]
     lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
     __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
     _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
     spin_lock include/linux/spinlock.h:354 [inline]
     kvm_synchronize_tsc+0x459/0x1230 arch/x86/kvm/x86.c:2332
     kvm_arch_vcpu_postcreate+0x73/0x180 arch/x86/kvm/x86.c:10183
     kvm_vm_ioctl_create_vcpu arch/x86/kvm/../../../virt/kvm/kvm_main.c:3239 [inline]
     kvm_vm_ioctl+0x1b2d/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3839
     vfs_ioctl fs/ioctl.c:48 [inline]
     __do_sys_ioctl fs/ioctl.c:753 [inline]
     __se_sys_ioctl fs/ioctl.c:739 [inline]
     __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
     do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
     entry_SYSCALL_64_after_hwframe+0x44/0xae
   INITIAL USE at:
     lock_acquire kernel/locking/lockdep.c:5510 [inline]
     lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
     __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
     _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
     spin_lock include/linux/spinlock.h:354 [inline]
     kvm_synchronize_tsc+0x459/0x1230 arch/x86/kvm/x86.c:2332
     kvm_arch_vcpu_postcreate+0x73/0x180 arch/x86/kvm/x86.c:10183
     kvm_vm_ioctl_create_vcpu arch/x86/kvm/../../../virt/kvm/kvm_main.c:3239 [inline]
     kvm_vm_ioctl+0x1b2d/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3839
     vfs_ioctl fs/ioctl.c:48 [inline]
     __do_sys_ioctl fs/ioctl.c:753 [inline]
     __se_sys_ioctl fs/ioctl.c:739 [inline]
     __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
     do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
     entry_SYSCALL_64_after_hwframe+0x44/0xae
 }
 ... key at: [] __key.4+0x0/0x40
 ... acquired at:
   lock_acquire kernel/locking/lockdep.c:5510 [inline]
   lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
   __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
   _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
   spin_lock include/linux/spinlock.h:354 [inline]
   get_kvmclock_ns+0x25/0x390 arch/x86/kvm/x86.c:2587
   kvm_xen_update_runstate+0x3d/0x2c0 arch/x86/kvm/xen.c:69
   kvm_xen_update_runstate_guest+0x74/0x320 arch/x86/kvm/xen.c:100
   kvm_xen_runstate_set_preempted arch/x86/kvm/xen.h:96 [inline]
   kvm_arch_vcpu_put+0x2d8/0x5a0 arch/x86/kvm/x86.c:4062
   kvm_sched_out+0xbf/0x100 arch/x86/kvm/../../../virt/kvm/kvm_main.c:4876
   __fire_sched_out_preempt_notifiers kernel/sched/core.c:3922 [inline]
   fire_sched_out_preempt_notifiers kernel/sched/core.c:3930 [inline]
   prepare_task_switch kernel/sched/core.c:4126 [inline]
   context_switch kernel/sched/core.c:4274 [inline]
   __schedule+0xfd0/0x21b0 kernel/sched/core.c:5073
   preempt_schedule_irq+0x4e/0x90 kernel/sched/core.c:5530
   irqentry_exit_cond_resched kernel/entry/common.c:392 [inline]
   irqentry_exit_cond_resched kernel/entry/common.c:384 [inline]
   irqentry_exit+0x7a/0xa0 kernel/entry/common.c:428
   asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:632
   __seqprop_spinlock_sequence include/linux/seqlock.h:277 [inline]
   read_seqbegin include/linux/seqlock.h:840 [inline]
   zone_span_seqbegin include/linux/memory_hotplug.h:81 [inline]
   page_outside_zone_boundaries mm/page_alloc.c:568 [inline]
   bad_range+0xcf/0x300 mm/page_alloc.c:597
   rmqueue mm/page_alloc.c:3529 [inline]
   get_page_from_freelist+0x1c4b/0x3fb0 mm/page_alloc.c:3948
   __alloc_pages_nodemask+0x2d6/0x730 mm/page_alloc.c:5001
   alloc_pages_current+0x18c/0x2a0 mm/mempolicy.c:2277
   alloc_pages include/linux/gfp.h:561 [inline]
   __get_free_pages+0x8/0x40 mm/page_alloc.c:5044
   mmu_memory_cache_alloc_obj arch/x86/kvm/../../../virt/kvm/kvm_main.c:353 [inline]
   kvm_mmu_topup_memory_cache+0x16e/0x1f0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:363
   mmu_topup_memory_caches+0x53/0xd0 arch/x86/kvm/mmu/mmu.c:669
   kvm_mmu_load+0x7d/0xf60 arch/x86/kvm/mmu/mmu.c:4802
   kvm_mmu_reload arch/x86/kvm/mmu.h:81 [inline]
   vcpu_enter_guest+0x2e68/0x47f0 arch/x86/kvm/x86.c:9061
   vcpu_run arch/x86/kvm/x86.c:9282 [inline]
   kvm_arch_vcpu_ioctl_run+0x47d/0x19b0 arch/x86/kvm/x86.c:9510
   kvm_vcpu_ioctl+0x467/0xd90 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3316
   vfs_ioctl fs/ioctl.c:48 [inline]
   __do_sys_ioctl fs/ioctl.c:753 [inline]
   __se_sys_ioctl fs/ioctl.c:739 [inline]
   __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
   do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
   entry_SYSCALL_64_after_hwframe+0x44/0xae

stack backtrace:
CPU: 0 PID: 16056 Comm: syz-executor.1 Not tainted 5.12.0-rc5-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:79 [inline]
 dump_stack+0x141/0x1d7 lib/dump_stack.c:120
 print_bad_irq_dependency kernel/locking/lockdep.c:2460 [inline]
 check_irq_usage.cold+0x50d/0x744 kernel/locking/lockdep.c:2689
 check_prev_add kernel/locking/lockdep.c:2940 [inline]
 check_prevs_add kernel/locking/lockdep.c:3059 [inline]
 validate_chain kernel/locking/lockdep.c:3674 [inline]
 __lock_acquire+0x2b2c/0x54c0 kernel/locking/lockdep.c:4900
 lock_acquire kernel/locking/lockdep.c:5510 [inline]
 lock_acquire+0x1ab/0x740 kernel/locking/lockdep.c:5475
 __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
 _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
 spin_lock include/linux/spinlock.h:354 [inline]
 get_kvmclock_ns+0x25/0x390 arch/x86/kvm/x86.c:2587
 kvm_xen_update_runstate+0x3d/0x2c0 arch/x86/kvm/xen.c:69
 kvm_xen_update_runstate_guest+0x74/0x320 arch/x86/kvm/xen.c:100
 kvm_xen_runstate_set_preempted arch/x86/kvm/xen.h:96 [inline]
 kvm_arch_vcpu_put+0x2d8/0x5a0 arch/x86/kvm/x86.c:4062
 kvm_sched_out+0xbf/0x100 arch/x86/kvm/../../../virt/kvm/kvm_main.c:4876
 __fire_sched_out_preempt_notifiers kernel/sched/core.c:3922 [inline]
 fire_sched_out_preempt_notifiers kernel/sched/core.c:3930 [inline]
 prepare_task_switch kernel/sched/core.c:4126 [inline]
 context_switch kernel/sched/core.c:4274 [inline]
 __schedule+0xfd0/0x21b0 kernel/sched/core.c:5073
 preempt_schedule_irq+0x4e/0x90 kernel/sched/core.c:5530
 irqentry_exit_cond_resched kernel/entry/common.c:392 [inline]
 irqentry_exit_cond_resched kernel/entry/common.c:384 [inline]
 irqentry_exit+0x7a/0xa0 kernel/entry/common.c:428
 asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:632
RIP: 0010:__seqprop_spinlock_sequence include/linux/seqlock.h:277 [inline]
RIP: 0010:read_seqbegin include/linux/seqlock.h:840 [inline]
RIP: 0010:zone_span_seqbegin include/linux/memory_hotplug.h:81 [inline]
RIP: 0010:page_outside_zone_boundaries mm/page_alloc.c:568 [inline]
RIP: 0010:bad_range+0xcf/0x300 mm/page_alloc.c:597
Code: ff e8 05 97 ab ff 48 8b 74 24 78 4c 89 ff e8 d8 8f ab ff 58 9c 5a 80 e6 02 0f 85 94 01 00 00 4d 85 e4 74 01 fb 41 0f b6 55 00 <84> d2 74 09 80 fa 03 0f 8e 99 01 00 00 44 8b a3 a8 00 00 00 41 f6
RSP: 0018:ffffc900098bf778 EFLAGS: 00000206
RAX: 000000000000068d RBX: ffff88813fffb700 RCX: 1ffffffff1f5ff62
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: dffffc0000000000 R08: 0000000000000001 R09: ffffffff8fa979b7
R10: 0000000000000001 R11: 0000000000000100 R12: 0000000000000200
R13: ffffed1027fff6f5 R14: 0000000000000000 R15: ffff88813fffb7b0
 rmqueue mm/page_alloc.c:3529 [inline]
 get_page_from_freelist+0x1c4b/0x3fb0 mm/page_alloc.c:3948
 __alloc_pages_nodemask+0x2d6/0x730 mm/page_alloc.c:5001
 alloc_pages_current+0x18c/0x2a0 mm/mempolicy.c:2277
 alloc_pages include/linux/gfp.h:561 [inline]
 __get_free_pages+0x8/0x40 mm/page_alloc.c:5044
 mmu_memory_cache_alloc_obj arch/x86/kvm/../../../virt/kvm/kvm_main.c:353 [inline]
 kvm_mmu_topup_memory_cache+0x16e/0x1f0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:363
 mmu_topup_memory_caches+0x53/0xd0 arch/x86/kvm/mmu/mmu.c:669
 kvm_mmu_load+0x7d/0xf60 arch/x86/kvm/mmu/mmu.c:4802
 kvm_mmu_reload arch/x86/kvm/mmu.h:81 [inline]
 vcpu_enter_guest+0x2e68/0x47f0 arch/x86/kvm/x86.c:9061
 vcpu_run arch/x86/kvm/x86.c:9282 [inline]
 kvm_arch_vcpu_ioctl_run+0x47d/0x19b0 arch/x86/kvm/x86.c:9510
 kvm_vcpu_ioctl+0x467/0xd90 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3316
 vfs_ioctl fs/ioctl.c:48 [inline]
 __do_sys_ioctl fs/ioctl.c:753 [inline]
 __se_sys_ioctl fs/ioctl.c:739 [inline]
 __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x466459
Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f4c10635188 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 000000000056bf60 RCX: 0000000000466459
RDX: 0000000000000000 RSI: 000000000000ae80 RDI: 0000000000000005
RBP: 00000000004bf9fb R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 000000000056bf60
R13: 0000000000a9fb1f R14: 00007f4c10635300 R15: 0000000000022000
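
The usage pattern lockdep is flagging reduces to the sketch below; the lock and function names in the sketch are illustrative placeholders, not the actual KVM code. In the report, &kvm->arch.pvclock_gtod_sync_lock is taken with a plain spin_lock() from process context with interrupts enabled (kvm_synchronize_tsc(), the HARDIRQ-ON-W trace), which marks it HARDIRQ-unsafe, while the same lock is also taken from kvm_arch_vcpu_put() via the sched-out preempt notifier, i.e. under the HARDIRQ-safe rq->lock with interrupts off. If a timer interrupt lands on the CPU that holds the clock lock while another CPU waits for it under rq->lock, the two CPUs deadlock as in the scenario table above. One possible way to break the cycle, shown at the end of the sketch, is to take the clock lock with the _irqsave variant on every path so it becomes HARDIRQ-safe; whether that is the appropriate fix here is for the maintainers to decide.

/*
 * Minimal sketch of the lock-usage pattern reported above.
 * "clock_lock" stands in for kvm->arch.pvclock_gtod_sync_lock;
 * all function names are hypothetical.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(clock_lock);

/* Process context, IRQs enabled: this marks clock_lock HARDIRQ-unsafe. */
static void sync_tsc_like_path(void)
{
	spin_lock(&clock_lock);		/* plain lock, IRQs stay enabled */
	/* ... update clock state ... */
	spin_unlock(&clock_lock);
}

/* Runs with IRQs off while rq->lock is held (sched-out notifier path). */
static void sched_out_like_path(void)
{
	spin_lock(&clock_lock);		/* records rq->lock -> clock_lock */
	/* ... read clock state ... */
	spin_unlock(&clock_lock);
}

/*
 * One way to break the inversion: acquire clock_lock with IRQs
 * disabled on every path, so a timer interrupt can never preempt
 * the critical section on the CPU that owns the lock.
 */
static void sync_tsc_like_path_irqsafe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);
	/* ... update clock state ... */
	spin_unlock_irqrestore(&clock_lock, flags);
}

In the splat, sync_tsc_like_path() corresponds to kvm_synchronize_tsc() and sched_out_like_path() to get_kvmclock_ns() called from kvm_arch_vcpu_put() inside __schedule(), which is exactly the rq->lock -> pvclock_gtod_sync_lock dependency lockdep reports.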