--- y/block/blk-core.c	2022-04-07 16:21:49.955001600 +0800
+++ x/block/blk-core.c	2022-04-07 16:31:19.966670200 +0800
@@ -420,6 +420,7 @@ static void blk_queue_usage_counter_rele
 	struct request_queue *q =
 		container_of(ref, struct request_queue, q_usage_counter);
 
+	atomic_inc(&q->mq_freeze_wq_sequence);
 	wake_up_all(&q->mq_freeze_wq);
 }
 
--- y/block/blk-mq.c	2022-04-07 14:30:39.689915100 +0800
+++ x/block/blk-mq.c	2022-04-07 16:31:04.105155500 +0800
@@ -176,7 +176,10 @@ EXPORT_SYMBOL_GPL(blk_freeze_queue_start
 
 void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
+	int seq = atomic_read(&q->mq_freeze_wq_sequence);
+
+	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter) ||
+		   seq != atomic_read(&q->mq_freeze_wq_sequence));
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
 
@@ -225,6 +228,7 @@ void __blk_mq_unfreeze_queue(struct requ
 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
 	if (!q->mq_freeze_depth) {
 		percpu_ref_resurrect(&q->q_usage_counter);
+		atomic_inc(&q->mq_freeze_wq_sequence);
 		wake_up_all(&q->mq_freeze_wq);
 	}
 	mutex_unlock(&q->mq_freeze_lock);
--- y/include/linux/blkdev.h	2022-04-07 16:07:19.658671600 +0800
+++ x/include/linux/blkdev.h	2022-04-07 16:13:19.903660400 +0800
@@ -496,6 +496,7 @@ struct request_queue {
 #endif
 	struct rcu_head		rcu_head;
 	wait_queue_head_t	mq_freeze_wq;
+	atomic_t		mq_freeze_wq_sequence;
 	/*
 	 * Protect concurrent access to q_usage_counter by
 	 * percpu_ref_kill() and percpu_ref_reinit().
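
The idea of the patch: a waiter snapshots a sequence counter before sleeping, and both wakeup
paths (the last q_usage_counter reference dropping, and __blk_mq_unfreeze_queue()) bump that
counter before calling wake_up_all(), so the waiter also gets out if the queue was unfrozen and
refrozen without it ever sampling the counter at zero. Below is a standalone userspace sketch of
that same pattern, with a pthread condition variable standing in for mq_freeze_wq and C11 atomics
for mq_freeze_wq_sequence / q_usage_counter; all names here (freeze_ctx, freeze_wait,
usage_release) are illustrative and not kernel APIs.

/* Sketch of the sequence-counter wakeup pattern; build with -pthread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct freeze_ctx {
	pthread_mutex_t	lock;
	pthread_cond_t	wq;	/* plays the role of mq_freeze_wq */
	atomic_int	seq;	/* plays the role of mq_freeze_wq_sequence */
	atomic_int	usage;	/* plays the role of q_usage_counter */
};

/* Waiter: snapshot the sequence first, then sleep until the usage count
 * reaches zero OR the sequence has moved on since the snapshot. */
static void freeze_wait(struct freeze_ctx *c)
{
	int seq = atomic_load(&c->seq);

	pthread_mutex_lock(&c->lock);
	while (atomic_load(&c->usage) != 0 && atomic_load(&c->seq) == seq)
		pthread_cond_wait(&c->wq, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Release side: drop a usage reference, bump the sequence, then wake all
 * waiters -- mirroring blk_queue_usage_counter_release() in the patch. */
static void usage_release(struct freeze_ctx *c)
{
	atomic_fetch_sub(&c->usage, 1);
	pthread_mutex_lock(&c->lock);
	atomic_fetch_add(&c->seq, 1);
	pthread_cond_broadcast(&c->wq);
	pthread_mutex_unlock(&c->lock);
}

static void *releaser(void *arg)
{
	usleep(1000);		/* let the waiter block first */
	usage_release(arg);
	return NULL;
}

int main(void)
{
	struct freeze_ctx c = {
		.lock	= PTHREAD_MUTEX_INITIALIZER,
		.wq	= PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	atomic_store(&c.usage, 1);	/* one reference still held */
	pthread_create(&t, NULL, releaser, &c);
	freeze_wait(&c);		/* returns once usage hits 0 or seq moves */
	pthread_join(t, NULL);
	printf("woken: usage=%d seq=%d\n",
	       atomic_load(&c.usage), atomic_load(&c.seq));
	return 0;
}

In the sketch, bumping the sequence under the same lock the sleeper uses closes the window
between the waiter's predicate check and its sleep; in the patch, wait_event()'s internal
prepare-to-wait handshake plays that role, and the sequence counter is what makes a concurrent
unfreeze visible to a waiter that never observes percpu_ref_is_zero() returning true.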