diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ab6cb70ca1a5..f5c3cc85a1e7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -267,7 +267,11 @@ static inline unsigned int work_static(struct work_struct *work)
 	return *work_data_bits(work) & WORK_STRUCT_STATIC;
 }
 #else
-static inline void __init_work(struct work_struct *work, int onstack) { }
+static inline void __init_work(struct work_struct *work, int onstack)
+{
+	atomic_set(&work->enable_count, 0);
+	atomic_set(&work->disable_count, 0);
+}
 static inline void destroy_work_on_stack(struct work_struct *work) { }
 static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
 static inline unsigned int work_static(struct work_struct *work) { return 0; }
diff --git a/include/linux/workqueue_types.h b/include/linux/workqueue_types.h
index 4c38824f3ab4..a9f5d74fe814 100644
--- a/include/linux/workqueue_types.h
+++ b/include/linux/workqueue_types.h
@@ -20,6 +20,8 @@ struct work_struct {
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map lockdep_map;
 #endif
+	atomic_t enable_count;
+	atomic_t disable_count;
 };
 
 #endif /* _LINUX_WORKQUEUE_TYPES_H */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5f747f241a5f..48e8d572fa33 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -689,6 +689,8 @@ static inline void debug_work_deactivate(struct work_struct *work)
 
 void __init_work(struct work_struct *work, int onstack)
 {
+	atomic_set(&work->enable_count, 0);
+	atomic_set(&work->disable_count, 0);
 	if (onstack)
 		debug_object_init_on_stack(work, &work_debug_descr);
 	else
@@ -4418,22 +4420,28 @@ bool flush_rcu_work(struct rcu_work *rwork)
 }
 EXPORT_SYMBOL(flush_rcu_work);
 
-static void work_offqd_disable(struct work_offq_data *offqd)
+static void work_offqd_disable(struct work_struct *work, struct work_offq_data *offqd)
 {
 	const unsigned long max = (1lu << WORK_OFFQ_DISABLE_BITS) - 1;
 
+	atomic_inc(&work->disable_count);
 	if (likely(offqd->disable < max))
 		offqd->disable++;
 	else
-		WARN_ONCE(true, "workqueue: work disable count overflowed\n");
+		WARN_ONCE(true, "workqueue: work disable count overflowed: %u %d %d\n",
+			  offqd->disable, atomic_read(&work->disable_count),
+			  atomic_read(&work->enable_count));
 }
 
-static void work_offqd_enable(struct work_offq_data *offqd)
+static void work_offqd_enable(struct work_struct *work, struct work_offq_data *offqd)
 {
+	atomic_inc(&work->enable_count);
 	if (likely(offqd->disable > 0))
 		offqd->disable--;
 	else
-		WARN_ONCE(true, "workqueue: work disable count underflowed\n");
+		WARN_ONCE(true, "workqueue: work disable count underflowed: %u %d %d\n",
+			  offqd->disable, atomic_read(&work->disable_count),
+			  atomic_read(&work->enable_count));
 }
 
 static bool __cancel_work(struct work_struct *work, u32 cflags)
@@ -4447,7 +4455,7 @@ static bool __cancel_work(struct work_struct *work, u32 cflags)
 	work_offqd_unpack(&offqd, *work_data_bits(work));
 
 	if (cflags & WORK_CANCEL_DISABLE)
-		work_offqd_disable(&offqd);
+		work_offqd_disable(work, &offqd);
 
 	set_work_pool_and_clear_pending(work, offqd.pool_id,
 					work_offqd_pack_flags(&offqd));
@@ -4604,7 +4612,7 @@ bool enable_work(struct work_struct *work)
 	work_grab_pending(work, 0, &irq_flags);
 
 	work_offqd_unpack(&offqd, *work_data_bits(work));
-	work_offqd_enable(&offqd);
+	work_offqd_enable(work, &offqd);
 	set_work_pool_and_clear_pending(work, offqd.pool_id,
 					work_offqd_pack_flags(&offqd));
 	local_irq_restore(irq_flags);