diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index e7e7876251b3..b3e8ff7ac5b0 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -48,8 +48,10 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 	if (!kvm->arch.tdp_mmu_enabled)
 		return;
 
-	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
-	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+	if (kvm->arch.tdp_mmu_zap_wq) {
+		flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
+		destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+	}
 
 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
@@ -119,9 +121,11 @@ static void tdp_mmu_zap_root_work(struct work_struct *work)
 
 static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-	root->tdp_mmu_async_data = kvm;
-	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
-	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
+	if (kvm->arch.tdp_mmu_zap_wq) {
+		root->tdp_mmu_async_data = kvm;
+		INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
+		queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
+	}
 }
 
 static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
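
For context, a minimal sketch of the init path that can leave tdp_mmu_zap_wq NULL. The body of kvm_mmu_init_tdp_mmu() below is an assumption for illustration only (the real function differs); it exists to show why callers must tolerate a NULL workqueue when alloc_workqueue() fails and the failure is not propagated as an error:

	/* Illustrative sketch, not part of the patch. */
	void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
	{
		if (!tdp_enabled)
			return;

		INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
		INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

		/*
		 * alloc_workqueue() returns NULL on allocation failure. If that
		 * failure is swallowed here instead of unwinding TDP MMU init,
		 * kvm->arch.tdp_mmu_zap_wq may legitimately be NULL later, so
		 * kvm_mmu_uninit_tdp_mmu() and tdp_mmu_schedule_zap_root() in
		 * the hunks above must check the pointer before using it.
		 */
		kvm->arch.tdp_mmu_zap_wq =
			alloc_workqueue("kvm", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);

		kvm->arch.tdp_mmu_enabled = true;
	}

With the guards in place, a failed workqueue allocation simply means roots are never zapped asynchronously and teardown skips the flush/destroy pair, rather than dereferencing a NULL pointer in queue_work() or flush_workqueue().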