--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5926,6 +5926,7 @@ void tcp_rcv_established(struct sock *sk
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
+			skb_dst_drop(skb);
 			__skb_pull(skb, tcp_header_len);
 			eaten = tcp_queue_rcv(sk, skb, &fragstolen);
 
--- y/include/linux/cgroup-defs.h
+++ x/include/linux/cgroup-defs.h
@@ -179,7 +179,7 @@ struct cgroup_subsys_state {
 	atomic_t online_cnt;
 
 	/* percpu_ref killing and RCU release */
-	struct work_struct destroy_work;
+	struct work_struct destroy_work, release_work;
 	struct rcu_work destroy_rwork;
 
 	/*
--- y/kernel/cgroup/cgroup.c
+++ x/kernel/cgroup/cgroup.c
@@ -5154,7 +5154,7 @@ static void css_free_rwork_fn(struct wor
 static void css_release_work_fn(struct work_struct *work)
 {
 	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+		container_of(work, struct cgroup_subsys_state, release_work);
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;
 
@@ -5210,8 +5210,8 @@ static void css_release(struct percpu_re
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
-	INIT_WORK(&css->destroy_work, css_release_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	INIT_WORK(&css->release_work, css_release_work_fn);
+	queue_work(cgroup_destroy_wq, &css->release_work);
 }
 
 static void init_and_link_css(struct cgroup_subsys_state *css,
--- y/fs/namespace.c
+++ x/fs/namespace.c
@@ -215,7 +215,7 @@ static struct mount *alloc_vfsmnt(const
 	if (!mnt->mnt_pcp)
 		goto out_free_devname;
 
-	this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
+	this_cpu_add(mnt->mnt_pcp->mnt_count, 2);
 #else
 	mnt->mnt_count = 1;
 	mnt->mnt_writers = 0;
@@ -1221,10 +1221,16 @@ static void mntput_no_expire(struct moun
 		 * we are dropping is not the final one.
 		 */
 		mnt_add_count(mnt, -1);
+		count = mnt_get_count(mnt);
+		WARN_ON(count == 0);
+		WARN_ON(count < 0);
 		rcu_read_unlock();
 		return;
 	}
 	lock_mount_hash();
+	count = mnt_get_count(mnt);
+	WARN_ON(count == 0);
+	WARN_ON(count < 0);
 	/*
 	 * make sure that if __legitimize_mnt() has not seen us grab
 	 * mount_lock, we'll see their refcount increment here.
@@ -1498,7 +1504,6 @@ static void namespace_unlock(void)
 
 	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
 		hlist_del(&m->mnt_umount);
-		mntput(&m->mnt);
 	}
 }
 
@@ -1595,8 +1600,10 @@ static void umount_tree(struct mount *mn
 			}
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
-		if (disconnect)
+		if (disconnect) {
 			hlist_add_head(&p->mnt_umount, &unmounted);
+			WARN_ON(mnt_get_count(p) == 0);
+		}
 	}
 }
 
@@ -1748,6 +1755,7 @@ void __detach_mounts(struct dentry *dent
 		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
 			umount_mnt(mnt);
 			hlist_add_head(&mnt->mnt_umount, &unmounted);
+			WARN_ON(mnt_get_count(mnt) == 0);
 		}
 		else umount_tree(mnt, UMOUNT_CONNECTED);
 	}
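
A note on the two kernel/cgroup/cgroup.c hunks: they have to change together, because css_release_work_fn() recovers the css from the embedded work item with container_of(), so the member named there must be the same one handed to INIT_WORK()/queue_work() in css_release(). Below is a minimal, standalone C sketch of that invariant; the struct and function names are illustrative stand-ins, not the kernel API.

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };		/* stand-in for struct work_struct */

struct state {				/* stand-in for struct cgroup_subsys_state */
	int id;
	struct work destroy_work;	/* existing member */
	struct work release_work;	/* member the patch adds */
};

static void release_fn(struct work *w)
{
	/* Must name the member the work was queued on, or the pointer math is wrong. */
	struct state *s = container_of(w, struct state, release_work);
	printf("releasing state %d\n", s->id);
}

int main(void)
{
	struct state s = { .id = 42 };
	release_fn(&s.release_work);	/* "queued" on release_work, recovered via release_work */
	return 0;
}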