--- x/block/blk-cgroup.c
+++ y/block/blk-cgroup.c
@@ -89,9 +89,9 @@ static void blkg_free(struct blkcg_gq *b
 	kfree(blkg);
 }
 
-static void __blkg_release(struct rcu_head *rcu)
+static void blkg_free_workfn(struct work_struct *work)
 {
-	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
+	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, async_bio_work);
 
 	WARN_ON(!bio_list_empty(&blkg->async_bios));
 
@@ -102,6 +102,15 @@ static void __blkg_release(struct rcu_he
 	blkg_free(blkg);
 }
 
+static void __blkg_release(struct rcu_head *rcu)
+{
+	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
+
+	/* reuse work to avoid putting blkg->q in rcu context */
+	INIT_WORK(&blkg->async_bio_work, blkg_free_workfn);
+	schedule_work(&blkg->async_bio_work);
+}
+
 /*
  * A group is RCU protected, but having an rcu lock does not mean that one
  * can access all the fields of blkg and assume these are valid. For
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4277,7 +4277,6 @@ static inline void ____napi_schedule(str
 {
 	struct task_struct *thread;
 
-	lockdep_assert_softirq_will_run();
 	lockdep_assert_irqs_disabled();
 
 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
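
Note on the blk-cgroup hunks: the patch moves the actual teardown out of the RCU callback. __blkg_release() now only schedules work, and blkg_free_workfn() does the freeing in process context, because (per the added comment) putting blkg->q is not safe in RCU (softirq) context. The following is a minimal, self-contained sketch of that general "defer from RCU callback to workqueue" pattern, assuming a hypothetical struct my_obj with its own rcu_head and work_struct; it illustrates the technique, not the blk-cgroup code itself.

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct my_obj {
	struct rcu_head rcu_head;	/* for call_rcu() */
	struct work_struct free_work;	/* for deferred teardown */
	/* ... payload ... */
};

/* Runs in process context: safe to sleep, take mutexes, etc. */
static void my_obj_free_workfn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, free_work);

	/* heavyweight teardown goes here */
	kfree(obj);
}

/* Runs in softirq (RCU callback) context: only bounce to a workqueue. */
static void my_obj_rcu_release(struct rcu_head *rcu)
{
	struct my_obj *obj = container_of(rcu, struct my_obj, rcu_head);

	INIT_WORK(&obj->free_work, my_obj_free_workfn);
	schedule_work(&obj->free_work);
}

/* Caller queues the object for release after an RCU grace period. */
static void my_obj_release(struct my_obj *obj)
{
	call_rcu(&obj->rcu_head, my_obj_rcu_release);
}

One detail specific to the patch itself: rather than adding a new work_struct, it reuses the existing async_bio_work, which appears safe only because the WARN_ON(!bio_list_empty(&blkg->async_bios)) has already established that no async bios remain queued on the dying blkg, so the work item can no longer be in use for bio submission.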