--- x/include/linux/sched.h
+++ y/include/linux/sched.h
@@ -1544,6 +1544,7 @@ struct task_struct {
 	struct user_event_mm		*user_event_mm;
 #endif

+	unsigned long bfl;
 	/*
 	 * New fields for task_struct should be added above here, so that
 	 * they are included in the randomized portion of task_struct.
--- x/include/linux/buffer_head.h
+++ y/include/linux/buffer_head.h
@@ -78,6 +78,7 @@ struct buffer_head {
 	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
 					 * serialise IO completion of other
 					 * buffers in the page */
+	struct task_struct *lko;
 };

 /*
@@ -402,6 +403,9 @@ static inline void lock_buffer(struct bu
 	might_sleep();
 	if (!trylock_buffer(bh))
 		__lock_buffer(bh);
+	bh->lko = current;
+	get_task_struct(bh->lko);
+	bh->lko->bfl = (unsigned long) bh;
 }

 static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
--- x/fs/ext4/super.c
+++ y/fs/ext4/super.c
@@ -7248,6 +7248,7 @@ static ssize_t ext4_quota_write(struct s
 		brelse(bh);
 		return err;
 	}
+	BUG_ON(current->bfl == (unsigned long) bh);
 	lock_buffer(bh);
 	memcpy(bh->b_data+offset, data, len);
 	flush_dcache_page(bh->b_page);
--- x/fs/buffer.c
+++ y/fs/buffer.c
@@ -77,6 +77,11 @@ void unlock_buffer(struct buffer_head *b
 	clear_bit_unlock(BH_Lock, &bh->b_state);
 	smp_mb__after_atomic();
 	wake_up_bit(&bh->b_state, BH_Lock);
+	if (!bh->lko)
+		return;
+	bh->lko->bfl = 0;
+	put_task_struct(bh->lko);
+	bh->lko = NULL;
 }
 EXPORT_SYMBOL(unlock_buffer);
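
For illustration only, and not part of the patch above: a minimal user-space sketch of the same technique the instrumentation uses, i.e. remember which task last took the lock and trap before that same task tries to take it again, which would otherwise self-deadlock. It assumes pthreads; the names debug_lock, debug_lock_acquire and debug_lock_release are hypothetical, and the check is folded into the acquire path rather than placed at one call site as the patch does in ext4_quota_write.

/* Build with: cc -pthread sketch.c */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct debug_lock {
	pthread_mutex_t mutex;
	pthread_t owner;	/* thread that last took the lock */
	int held;		/* nonzero while the lock is held */
};

static void debug_lock_acquire(struct debug_lock *l)
{
	/* Trap an A-A deadlock before blocking on our own lock,
	 * analogous to BUG_ON(current->bfl == (unsigned long) bh). */
	assert(!(l->held && pthread_equal(l->owner, pthread_self())));
	pthread_mutex_lock(&l->mutex);
	l->owner = pthread_self();	/* analogous to bh->lko = current */
	l->held = 1;
}

static void debug_lock_release(struct debug_lock *l)
{
	/* Clear the owner record before waking waiters,
	 * analogous to the unlock_buffer() hunk. */
	l->held = 0;
	pthread_mutex_unlock(&l->mutex);
}

int main(void)
{
	struct debug_lock l = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	debug_lock_acquire(&l);
	printf("lock taken by %lu\n", (unsigned long)pthread_self());
	debug_lock_release(&l);
	return 0;
}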