--- a/fs/file_table.c	2022-05-16 19:54:05.244159900 +0800
+++ b/fs/file_table.c	2022-05-16 19:55:14.706577300 +0800
@@ -330,7 +330,7 @@ static void __fput(struct file *file)
 		dput(dentry);
 	if (unlikely(mode & FMODE_NEED_UNMOUNT))
 		dissolve_on_fput(mnt);
-	mntput(mnt);
+	//mntput(mnt);
 out:
 	file_free(file);
 }
--- a/fs/mount.h	2022-05-16 19:33:29.792582900 +0800
+++ b/fs/mount.h	2022-05-16 19:38:05.627715000 +0800
@@ -77,6 +77,7 @@ struct mount {
 	int mnt_expiry_mark;		/* true if marked for expiry */
 	struct hlist_head mnt_pins;
 	struct hlist_head mnt_stuck_children;
+	struct wait_queue_head wwq;	/* writer wq */
 } __randomize_layout;

 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
--- a/fs/namespace.c	2022-05-16 19:34:10.676163000 +0800
+++ b/fs/namespace.c	2022-05-16 19:51:59.915219000 +0800
@@ -233,6 +233,7 @@ static struct mount *alloc_vfsmnt(const
 		INIT_LIST_HEAD(&mnt->mnt_umounting);
 		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
 		mnt->mnt.mnt_userns = &init_user_ns;
+		init_waitqueue_head(&mnt->wwq);
 	}
 	return mnt;
@@ -469,6 +470,12 @@ void mnt_drop_write(struct vfsmount *mnt
 {
 	__mnt_drop_write(mnt);
 	sb_end_write(mnt->mnt_sb);
+	if (mnt->mnt_flags & MNT_DOOMED) {
+		struct mount *m = real_mount(mnt);
+
+		if (!m->mnt_ns && !mnt_get_writers(m))
+			wake_up(&m->wwq);
+	}
 }
 EXPORT_SYMBOL_GPL(mnt_drop_write);
@@ -1174,7 +1181,7 @@ static void cleanup_mnt(struct mount *mn
 	 * The locking used to deal with mnt_count decrement provides barriers,
 	 * so mnt_get_writers() below is safe.
 	 */
-	WARN_ON(mnt_get_writers(mnt));
+	wait_event(mnt->wwq, !mnt_get_writers(mnt));
 	if (unlikely(mnt->mnt_pins.first))
 		mnt_pin_kill(mnt);
 	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
@@ -1221,10 +1228,16 @@ static void mntput_no_expire(struct moun
 		 * we are dropping is not the final one.
 		 */
 		mnt_add_count(mnt, -1);
+		count = mnt_get_count(mnt);
+		WARN_ON(count == 0);
+		WARN_ON(count < 0);
 		rcu_read_unlock();
 		return;
 	}
 	lock_mount_hash();
+	count = mnt_get_count(mnt);
+	WARN_ON(count == 0);
+	WARN_ON(count < 0);
 	/*
 	 * make sure that if __legitimize_mnt() has not seen us grab
 	 * mount_lock, we'll see their refcount increment here.
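The fs patch above hinges on a simple pairing: cleanup_mnt() now sleeps on mnt->wwq until mnt_get_writers() reaches zero, and mnt_drop_write() issues the wake-up once the last writer on a doomed, namespace-less mount drops out. Below is a minimal userspace sketch of that wait-until-drained pattern, using a pthread condition variable in place of the kernel waitqueue; all names here (writer_state, writer_get, writer_put, cleanup_wait) are hypothetical and only illustrate the wait_event()/wake_up() idea, not the actual VFS code.

/*
 * Userspace analogue (assumption: not kernel code) of the
 * wait_event(mnt->wwq, !mnt_get_writers(mnt)) / wake_up(&m->wwq) pairing.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct writer_state {
	pthread_mutex_t lock;
	pthread_cond_t  drained;	/* plays the role of mnt->wwq */
	int             writers;	/* plays the role of mnt_get_writers() */
};

static struct writer_state ws = {
	.lock    = PTHREAD_MUTEX_INITIALIZER,
	.drained = PTHREAD_COND_INITIALIZER,
	.writers = 0,
};

static void writer_get(void)		/* ~ mnt_want_write() */
{
	pthread_mutex_lock(&ws.lock);
	ws.writers++;
	pthread_mutex_unlock(&ws.lock);
}

static void writer_put(void)		/* ~ mnt_drop_write(): last writer wakes the waiter */
{
	pthread_mutex_lock(&ws.lock);
	if (--ws.writers == 0)
		pthread_cond_broadcast(&ws.drained);
	pthread_mutex_unlock(&ws.lock);
}

static void cleanup_wait(void)		/* ~ wait_event(mnt->wwq, !mnt_get_writers(mnt)) */
{
	pthread_mutex_lock(&ws.lock);
	while (ws.writers)		/* re-check the condition after every wakeup */
		pthread_cond_wait(&ws.drained, &ws.lock);
	pthread_mutex_unlock(&ws.lock);
	printf("no writers left, safe to tear down\n");
}

static void *writer_thread(void *arg)
{
	usleep(1000);			/* pretend to hold write access for a while */
	writer_put();
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++) {
		writer_get();		/* take the write reference before handing it to the thread */
		pthread_create(&t[i], NULL, writer_thread, NULL);
	}
	cleanup_wait();			/* blocks until every writer_put() has run */
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}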
--- a/include/linux/cgroup-defs.h	2022-05-16 20:01:41.873691800 +0800
+++ b/include/linux/cgroup-defs.h	2022-05-16 20:05:57.239210800 +0800
@@ -179,7 +179,7 @@ struct cgroup_subsys_state {
 	atomic_t online_cnt;

 	/* percpu_ref killing and RCU release */
-	struct work_struct destroy_work;
+	struct work_struct destroy_work, release_work;
 	struct rcu_work destroy_rwork;

 	/*
--- a/kernel/cgroup/cgroup.c	2022-05-16 20:03:31.595702700 +0800
+++ b/kernel/cgroup/cgroup.c	2022-05-16 20:05:57.255709200 +0800
@@ -5154,7 +5154,7 @@ static void css_free_rwork_fn(struct wor
 static void css_release_work_fn(struct work_struct *work)
 {
 	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+		container_of(work, struct cgroup_subsys_state, release_work);
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;

@@ -5210,8 +5210,8 @@ static void css_release(struct percpu_re
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);

-	INIT_WORK(&css->destroy_work, css_release_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	INIT_WORK(&css->release_work, css_release_work_fn);
+	queue_work(cgroup_destroy_wq, &css->release_work);
 }

 static void init_and_link_css(struct cgroup_subsys_state *css,
--- a/net/ipv4/tcp_input.c	2022-05-16 19:59:50.885069300 +0800
+++ b/net/ipv4/tcp_input.c	2022-05-16 20:05:57.183788500 +0800
@@ -5926,6 +5926,7 @@ void tcp_rcv_established(struct sock *sk
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);

 			/* Bulk data transfer: receiver */
+			skb_dst_drop(skb);
 			__skb_pull(skb, tcp_header_len);
 			eaten = tcp_queue_rcv(sk, skb, &fragstolen);
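The cgroup hunks work because css_release() now queues css->release_work and css_release_work_fn() recovers the css with container_of() against that same member; container_of() is pure pointer arithmetic (subtract the member's offset), so queuing one work_struct and naming the other in the callback would compute a bogus css pointer. Below is a minimal userspace sketch of that offset arithmetic, with a hypothetical struct state standing in for cgroup_subsys_state.

/*
 * Userspace analogue (assumption: not kernel code) of the container_of()
 * member-matching requirement in css_release_work_fn().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct state {
	int id;
	struct work destroy_work;	/* ~ css->destroy_work */
	struct work release_work;	/* ~ css->release_work */
};

static void release_fn(struct work *w)
{
	/* Correct: queued via release_work, so recover the container via release_work.
	 * Naming destroy_work here would subtract the wrong offset. */
	struct state *s = container_of(w, struct state, release_work);

	printf("release for state %d\n", s->id);
}

int main(void)
{
	struct state s = { .id = 42 };

	/* Simulate queue_work(&s.release_work) followed by the worker running the callback. */
	release_fn(&s.release_work);
	return 0;
}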