Convert mm->cid_lock (sched) and pool_lock (debugobjects) acquisitions
to the irqsave/irqrestore variants so the locks are safe against
acquisition from interrupt context.

NOTE(review): mm_cid_put()/mm_cid_get() still carry
lockdep_assert_irqs_disabled() as unchanged context; if IRQs are
guaranteed off at those call sites, the irqsave conversion there is
redundant -- confirm intent before applying.

--- x/kernel/sched/sched.h
+++ y/kernel/sched/sched.h
@@ -3264,22 +3264,25 @@ static inline int __mm_cid_get(struct mm
 
 static inline void mm_cid_put(struct mm_struct *mm, int cid)
 {
+	unsigned long flags;
+
 	lockdep_assert_irqs_disabled();
 	if (cid < 0)
 		return;
-	raw_spin_lock(&mm->cid_lock);
+	raw_spin_lock_irqsave(&mm->cid_lock, flags);
 	__cpumask_clear_cpu(cid, mm_cidmask(mm));
-	raw_spin_unlock(&mm->cid_lock);
+	raw_spin_unlock_irqrestore(&mm->cid_lock, flags);
 }
 
 static inline int mm_cid_get(struct mm_struct *mm)
 {
+	unsigned long flags;
 	int ret;
 
 	lockdep_assert_irqs_disabled();
-	raw_spin_lock(&mm->cid_lock);
+	raw_spin_lock_irqsave(&mm->cid_lock, flags);
 	ret = __mm_cid_get(mm);
-	raw_spin_unlock(&mm->cid_lock);
+	raw_spin_unlock_irqrestore(&mm->cid_lock, flags);
 	return ret;
 }
 
--- x/lib/debugobjects.c
+++ y/lib/debugobjects.c
@@ -221,6 +221,7 @@ alloc_object(void *addr, struct debug_bu
 {
 	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
 	struct debug_obj *obj;
+	unsigned long flags;
 
 	if (likely(obj_cache)) {
 		obj = __alloc_object(&percpu_pool->free_objs);
@@ -230,7 +231,7 @@ alloc_object(void *addr, struct debug_bu
 		}
 	}
 
-	raw_spin_lock(&pool_lock);
+	raw_spin_lock_irqsave(&pool_lock, flags);
 	obj = __alloc_object(&obj_pool);
 	if (obj) {
 		obj_pool_used++;
@@ -263,7 +264,7 @@ alloc_object(void *addr, struct debug_bu
 		if (obj_pool_free < obj_pool_min_free)
 			obj_pool_min_free = obj_pool_free;
 	}
-	raw_spin_unlock(&pool_lock);
+	raw_spin_unlock_irqrestore(&pool_lock, flags);
 
 init_obj:
 	if (obj) {