diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 12e2e42e6a31..621394e2242d 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -711,6 +711,15 @@ static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket
 	return NULL;
 }
 
+static inline bool task_pi_blocked(void)
+{
+#ifdef CONFIG_PREEMPT_RT
+	return current->pi_blocked_on != NULL;
+#else
+	return false;
+#endif
+}
+
 static void debug_objects_fill_pool(void)
 {
 	if (!static_branch_likely(&obj_cache_enabled))
@@ -726,12 +735,21 @@ static void debug_objects_fill_pool(void)
 		return;
 
 	/*
-	 * On RT enabled kernels the pool refill must happen in preemptible
-	 * context -- for !RT kernels we rely on the fact that spinlock_t and
-	 * raw_spinlock_t are basically the same type and this lock-type
-	 * inversion works just fine.
+	 * On RT enabled kernels the pool refill must happen in a context that
+	 * can take a sleeping spinlock_t. preemptible() catches the
+	 * preempt_count and IRQ-disabled cases, but not a task that is already
+	 * enqueued as a waiter on an rt_mutex (current->pi_blocked_on != NULL).
+	 * The latter happens for example inside futex_lock_pi(), between
+	 * __rt_mutex_start_proxy_lock() and rt_mutex_wait_proxy_lock(), where
+	 * hrtimer_sleeper_start_expires() reaches debug_objects_fill_pool().
+	 * Acquiring a sleeping spinlock_t in that state corrupts the PI chain
+	 * and trips lockdep_assert(!current->pi_blocked_on) in rtlock_lock().
+	 *
+	 * For !RT kernels spinlock_t and raw_spinlock_t are basically the same
+	 * type and this lock-type inversion works just fine.
 	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible() || system_state < SYSTEM_SCHEDULING) {
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || system_state < SYSTEM_SCHEDULING ||
+	    (preemptible() && !task_pi_blocked())) {
 		/*
 		 * Annotate away the spinlock_t inside raw_spinlock_t warning
 		 * by temporarily raising the wait-type to LD_WAIT_CONFIG, matching