diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index de009a33e0e2..b5ccab74bb6f 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include
 
 #include "v9fs.h"
 #include "v9fs_vfs.h"
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index a97ceb105cd8..7768cc70439d 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -34,6 +34,7 @@ static void v9fs_begin_writeback(struct netfs_io_request *wreq)
 {
 	struct p9_fid *fid;
 
+	printk("ino: %lx, %s\n", wreq->inode->i_ino, __func__);
 	fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
 	if (!fid) {
 		WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index e0d34e4e9076..0ce9ab0d9a9d 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -219,6 +219,15 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
 			retval = filemap_fdatawrite(inode->i_mapping);
 
 		spin_lock(&inode->i_lock);
+		printk("del, ino: %lx, ino sync: %d, %s\n", inode->i_ino, !!(inode->i_state & I_SYNC), __func__);
+		if (inode->i_state & I_SYNC) {
+			spin_unlock(&inode->i_lock);
+			if (wait_on_bit_timeout(&inode->i_state, __I_SYNC,
+						TASK_UNINTERRUPTIBLE, 5 * HZ))
+				return -EBUSY;
+			spin_lock(&inode->i_lock);
+		}
+
 		hlist_del(&fid->ilist);
 		spin_unlock(&inode->i_lock);
 		put_err = p9_fid_put(fid);
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 9258d30cffe3..60472069907e 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -502,6 +502,7 @@ int netfs_writepages(struct address_space *mapping,
 	struct netfs_io_request *wreq = NULL;
 	struct folio *folio;
 	int error = 0;
+	static DEFINE_MUTEX(wlock);
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		mutex_lock(&ictx->wb_lock);
@@ -522,12 +523,20 @@ int netfs_writepages(struct address_space *mapping,
 	trace_netfs_write(wreq, netfs_write_trace_writeback);
 	netfs_stat(&netfs_n_wh_writepages);
 
+	printk("sync: %d, tb-sync: %d, ino: %lx, %s\n", !!(wreq->inode->i_state & I_SYNC),
+	       test_bit(__I_SYNC, &wreq->inode->i_state),
+	       wreq->inode->i_ino, __func__);
+	mutex_lock(&wlock);
+	spin_lock(&wreq->inode->i_lock);
+	wreq->inode->i_state |= I_SYNC;
+	spin_unlock(&wreq->inode->i_lock);
 	do {
 		_debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
 
 		/* It appears we don't have to handle cyclic writeback wrapping. */
 		WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
 
+		printk("ino: %lx, folio: %p, %s\n", wreq->inode->i_ino, folio, __func__);
 		if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
 		    unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
 			set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
@@ -538,6 +547,12 @@ int netfs_writepages(struct address_space *mapping,
 		if (error < 0)
 			break;
 	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
+	spin_lock(&wreq->inode->i_lock);
+	wreq->inode->i_state &= ~I_SYNC;
+	spin_unlock(&wreq->inode->i_lock);
+	smp_mb(); /* waiters must see I_SYNC clear before the wakeup */
+	wake_up_bit(&wreq->inode->i_state, __I_SYNC);
+	mutex_unlock(&wlock);
 
 	for (int s = 0; s < NR_IO_STREAMS; s++)
 		netfs_issue_write(wreq, &wreq->io_streams[s]);
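
A note on the I_SYNC plumbing above: wait_on_bit_timeout(), test_bit() and wake_up_bit() take the bit number (__I_SYNC), not the I_SYNC mask, and i_state is documented in include/linux/fs.h as protected by inode->i_lock, which is why the set/clear sites take the spinlock. This is the same wait/wake protocol the writeback core uses in fs/fs-writeback.c (inode_wait_for_writeback() / inode_sync_complete()). Below is a minimal sketch of that pattern, assuming a pre-6.12 kernel where i_state is still an unsigned long; the example_* helpers are hypothetical names for illustration, not kernel API:

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait_bit.h>

/* Waiter: sleep until I_SYNC clears; returns non-zero after 5 s. */
static int example_wait_for_sync(struct inode *inode)
{
	/* The second argument is the bit *number*, not the mask. */
	return wait_on_bit_timeout(&inode->i_state, __I_SYNC,
				   TASK_UNINTERRUPTIBLE, 5 * HZ);
}

/* Setter: i_state may only be modified under inode->i_lock. */
static void example_begin_sync(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);
}

/* Clearer: waiters must observe the cleared bit before being woken. */
static void example_end_sync(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	spin_unlock(&inode->i_lock);
	smp_mb(); /* make the cleared bit visible before waking waiters */
	wake_up_bit(&inode->i_state, __I_SYNC);
}

Without the wake_up_bit() the waiter only re-checks the bit once the 5 * HZ timeout expires, so the wakeup is what makes the wait return promptly; the smp_mb() mirrors the barrier in inode_sync_complete().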