--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5926,6 +5926,7 @@ void tcp_rcv_established(struct sock *sk
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
+			skb_dst_drop(skb);	/* drop the route-cache (dst) reference before the skb is queued to the receive queue */
 			__skb_pull(skb, tcp_header_len);
 			eaten = tcp_queue_rcv(sk, skb, &fragstolen);
 
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -179,7 +179,7 @@ struct cgroup_subsys_state {
 	atomic_t online_cnt;
 
 	/* percpu_ref killing and RCU release */
-	struct work_struct destroy_work;
+	struct work_struct destroy_work, release_work;	/* NOTE(review): release gets its own work item so it cannot share destroy_work — confirm against the kill path's use of destroy_work */
 	struct rcu_work destroy_rwork;
 
 	/*
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5154,7 +5154,7 @@ static void css_free_rwork_fn(struct wor
 static void css_release_work_fn(struct work_struct *work)
 {
 	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+		container_of(work, struct cgroup_subsys_state, release_work);	/* must match the work item queued in css_release() */
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;
 
@@ -5210,8 +5210,8 @@ static void css_release(struct percpu_re
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
-	INIT_WORK(&css->destroy_work, css_release_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	INIT_WORK(&css->release_work, css_release_work_fn);	/* dedicated work item for the release path */
+	queue_work(cgroup_destroy_wq, &css->release_work);
 }
 
 static void init_and_link_css(struct cgroup_subsys_state *css,
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1221,10 +1221,16 @@ static void mntput_no_expire(struct moun
 		 * we are dropping is not the final one.
 		 */
 		mnt_add_count(mnt, -1);
+		count = mnt_get_count(mnt);	/* debug: snapshot refcount right after the drop; NOTE(review): 'count' presumably declared at function top — verify */
+		WARN_ON(count == 0);	/* the ref we just dropped must not have been the final one on this path */
+		WARN_ON(count < 0);
 		rcu_read_unlock();
 		return;
 	}
 	lock_mount_hash();
+	count = mnt_get_count(mnt);	/* debug: recheck now that mount_lock is held */
+	WARN_ON(count == 0);
+	WARN_ON(count < 0);
 	/*
 	 * make sure that if __legitimize_mnt() has not seen us grab
 	 * mount_lock, we'll see their refcount increment here.
@@ -1292,6 +1298,18 @@ struct vfsmount *mntget(struct vfsmount
 }
 EXPORT_SYMBOL(mntget);
 
+void mnt_check_count(struct vfsmount *mnt)	/* debug helper: refcount sanity check (checks currently disabled) */
+{
+	if (mnt) {
+		struct mount *m = real_mount(mnt);
+		int count __maybe_unused = mnt_get_count(m);	/* __maybe_unused: only read by the disabled checks below, silences -Wunused-variable */
+
+		//WARN_ON(count == 0);
+		//WARN_ON(count < 2 && m->mnt_ns);
+	}
+}
+EXPORT_SYMBOL(mnt_check_count);
+
 /**
  * path_is_mountpoint() - Check if path is a mount in the current namespace.
  * @path: path to check
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -89,6 +89,7 @@ extern int mnt_want_write_file(struct fi
 extern void mnt_drop_write(struct vfsmount *mnt);
 extern void mnt_drop_write_file(struct file *file);
 extern void mntput(struct vfsmount *mnt);
+extern void mnt_check_count(struct vfsmount *mnt);	/* debug-only refcount sanity check (fs/namespace.c) */
 extern struct vfsmount *mntget(struct vfsmount *mnt);
 extern struct vfsmount *mnt_clone_internal(const struct path *path);
 extern bool __mnt_is_readonly(struct vfsmount *mnt);
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -330,7 +330,7 @@ static void __fput(struct file *file)
 	dput(dentry);
 	if (unlikely(mode & FMODE_NEED_UNMOUNT))
 		dissolve_on_fput(mnt);
-	mntput(mnt);
+	//mntput(mnt);	/* XXX debug only: deliberately leaks the mount reference on every fput — must NOT be merged */
 out:
 	file_free(file);
 }
@@ -373,6 +373,8 @@ void fput_many(struct file *file, unsign
 	if (atomic_long_sub_and_test(refs, &file->f_count)) {
 		struct task_struct *task = current;
 
+		//mnt_check_count(file->f_path.mnt);	/* debug hook, currently disabled */
+
 		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
 			init_task_work(&file->f_u.fu_rcuhead, ____fput);
 			if (!task_work_add(task, &file->f_u.fu_rcuhead, TWA_RESUME))