diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index effd1ff6a4b4..4d770931a35f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1494,12 +1494,14 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
  */
 static void free_vmap_area(struct vmap_area *va)
 {
+	unsigned long flags;
+
 	/*
 	 * Remove from the busy tree/list.
 	 */
-	spin_lock(&vmap_area_lock);
+	spin_lock_irqsave(&vmap_area_lock, flags);
 	unlink_va(va, &vmap_area_root);
-	spin_unlock(&vmap_area_lock);
+	spin_unlock_irqrestore(&vmap_area_lock, flags);
 
 	/*
 	 * Insert/Merge it back to the free tree/list.
@@ -1544,6 +1546,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	struct vmap_area *va;
 	unsigned long freed;
 	unsigned long addr;
+	unsigned long flags;
 	int purged = 0;
 	int ret;
 
@@ -1583,9 +1586,9 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	va->va_end = addr + size;
 	va->vm = NULL;
 
-	spin_lock(&vmap_area_lock);
+	spin_lock_irqsave(&vmap_area_lock, flags);
 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
-	spin_unlock(&vmap_area_lock);
+	spin_unlock_irqrestore(&vmap_area_lock, flags);
 
 	BUG_ON(!IS_ALIGNED(va->va_start, align));
 	BUG_ON(va->va_start < vstart);
@@ -1763,11 +1766,12 @@ static void drain_vmap_area_work(struct work_struct *work)
  */
 static void free_vmap_area_noflush(struct vmap_area *va)
 {
+	unsigned long flags;
 	unsigned long nr_lazy;
 
-	spin_lock(&vmap_area_lock);
+	spin_lock_irqsave(&vmap_area_lock, flags);
 	unlink_va(va, &vmap_area_root);
-	spin_unlock(&vmap_area_lock);
+	spin_unlock_irqrestore(&vmap_area_lock, flags);
 
 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
 					PAGE_SHIFT, &vmap_lazy_nr);
@@ -1800,11 +1804,12 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 
 struct vmap_area *find_vmap_area(unsigned long addr)
 {
+	unsigned long flags;
 	struct vmap_area *va;
 
-	spin_lock(&vmap_area_lock);
+	spin_lock_irqsave(&vmap_area_lock, flags);
 	va = __find_vmap_area(addr);
-	spin_unlock(&vmap_area_lock);
+	spin_unlock_irqrestore(&vmap_area_lock, flags);
 
 	return va;
 }
@@ -2409,9 +2414,10 @@ static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 			      unsigned long flags, const void *caller)
 {
-	spin_lock(&vmap_area_lock);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&vmap_area_lock, irq_flags);
 	setup_vmalloc_vm_locked(vm, va, flags, caller);
-	spin_unlock(&vmap_area_lock);
+	spin_unlock_irqrestore(&vmap_area_lock, irq_flags);
 }
 
 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
@@ -3734,6 +3740,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	struct vm_struct **vms;
 	int area, area2, last_area, term_area;
 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
+	unsigned long flags;
 	bool purged = false;
 	enum fit_type type;
 
@@ -3870,14 +3877,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	}
 
 	/* insert all vm's */
-	spin_lock(&vmap_area_lock);
+	spin_lock_irqsave(&vmap_area_lock, flags);
 	for (area = 0; area < nr_vms; area++) {
 		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
 
 		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);
 	}
-	spin_unlock(&vmap_area_lock);
+	spin_unlock_irqrestore(&vmap_area_lock, flags);
 
 	/*
 	 * Mark allocated areas as accessible. Do it now as a best-effort
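
Note (not part of the patch): every hunk above applies the same conversion, replacing plain spin_lock()/spin_unlock() on vmap_area_lock with the spin_lock_irqsave()/spin_unlock_irqrestore() pair. That variant saves the caller's interrupt state into a local flags word and disables local interrupts while the lock is held, so the lock can be taken from contexts where interrupts are (or may be) disabled without risking a same-CPU deadlock against an interrupt handler that also wants the lock. A minimal sketch of the pattern follows; example_lock, example_count, and example_update() are hypothetical names for illustration, not symbols from mm/vmalloc.c:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, standing in for vmap_area_lock */
static unsigned long example_count;	/* hypothetical data protected by example_lock */

/* Safe to call from process, softirq, or hardirq context. */
static void example_update(void)
{
	unsigned long flags;

	/*
	 * Save the current interrupt state and disable local interrupts
	 * before taking the lock. With plain spin_lock(), an interrupt
	 * arriving on this CPU whose handler also takes example_lock
	 * would spin forever against us; irqsave closes that window.
	 */
	spin_lock_irqsave(&example_lock, flags);
	example_count++;
	spin_unlock_irqrestore(&example_lock, flags);

	/*
	 * flags must be a local unsigned long, saved and restored on the
	 * same code path, which is why each converted function in the
	 * patch gains its own declaration rather than sharing one.
	 */
}

One naming detail from the patch itself: setup_vmalloc_vm() already has a parameter named flags, so its local is called irq_flags to avoid shadowing it.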