diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index de009a33e0e2..b5ccab74bb6f 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include
 
 #include "v9fs.h"
 #include "v9fs_vfs.h"
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index a97ceb105cd8..7768cc70439d 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -34,6 +34,7 @@ static void v9fs_begin_writeback(struct netfs_io_request *wreq)
 {
 	struct p9_fid *fid;
 
+	printk("ino: %lx, %s\n", wreq->inode->i_ino, __func__);
 	fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
 	if (!fid) {
 		WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index e0d34e4e9076..3fe715ab6efd 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -219,6 +219,15 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
 		retval = filemap_fdatawrite(inode->i_mapping);
 
 	spin_lock(&inode->i_lock);
+	printk("del, ino: %lx, ino sync: %d, comm: %s, %s\n", inode->i_ino, inode->i_state & I_SYNC, current->comm, __func__);
+	if (I_SYNC & inode->i_state) {
+		spin_unlock(&inode->i_lock);
+		if (wait_on_bit_timeout(&inode->i_state, I_SYNC,
+					TASK_UNINTERRUPTIBLE, 5 * HZ))
+			return -EBUSY;
+		spin_lock(&inode->i_lock);
+	}
+
 	hlist_del(&fid->ilist);
 	spin_unlock(&inode->i_lock);
 	put_err = p9_fid_put(fid);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 348cc90bf9c5..ed319921a898 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 
 #include "v9fs.h"
 #include "v9fs_vfs.h"
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 9258d30cffe3..4c03b8911375 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -522,12 +522,19 @@ int netfs_writepages(struct address_space *mapping,
 	trace_netfs_write(wreq, netfs_write_trace_writeback);
 	netfs_stat(&netfs_n_wh_writepages);
 
+	unsigned long i_state = wreq->inode->i_state;
+	wreq->inode->i_state |= I_SYNC;
+	printk("doing sync: %d, before sync: %d, ino: %lx, comm: %s, %s\n", wreq->inode->i_state & I_SYNC,
+	       i_state & I_SYNC,
+	       wreq->inode->i_ino, current->comm, __func__);
+
 	do {
 		_debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
 
 		/* It appears we don't have to handle cyclic writeback wrapping. */
 		WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
 
+		printk("ino: %lx, folio: %p, %s\n", wreq->inode->i_ino, folio, __func__);
 		if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
 		    unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
 			set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
@@ -538,6 +545,9 @@ int netfs_writepages(struct address_space *mapping,
 		if (error < 0)
 			break;
 	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
+	wreq->inode->i_state &= ~I_SYNC;
+	printk("end sync: %d, ino: %lx, comm: %s, error: %d, %s\n", wreq->inode->i_state & I_SYNC,
+	       wreq->inode->i_ino, current->comm, error, __func__);
 
 	for (int s = 0; s < NR_IO_STREAMS; s++)
 		netfs_issue_write(wreq, &wreq->io_streams[s]);