--- a/fs/namespace.c	2022-05-16 19:34:10.676163000 +0800
+++ j/fs/namespace.c	2022-05-18 18:38:01.165374300 +0800
@@ -1221,10 +1221,16 @@ static void mntput_no_expire(struct moun
 		 * we are dropping is not the final one.
 		 */
 		mnt_add_count(mnt, -1);
+		count = mnt_get_count(mnt);
+		WARN_ON(count == 0);
+		WARN_ON(count < 0);
 		rcu_read_unlock();
 		return;
 	}
 	lock_mount_hash();
+	count = mnt_get_count(mnt);
+	WARN_ON(count == 0);
+	WARN_ON(count < 0);
 	/*
 	 * make sure that if __legitimize_mnt() has not seen us grab
 	 * mount_lock, we'll see their refcount increment here.
--- a/include/linux/cgroup-defs.h	2022-05-16 20:01:41.873691800 +0800
+++ j/include/linux/cgroup-defs.h	2022-05-18 18:38:01.178665800 +0800
@@ -179,7 +179,7 @@ struct cgroup_subsys_state {
 	atomic_t online_cnt;
 
 	/* percpu_ref killing and RCU release */
-	struct work_struct destroy_work;
+	struct work_struct destroy_work, release_work;
 	struct rcu_work destroy_rwork;
 
 	/*
--- a/kernel/cgroup/cgroup.c	2022-05-16 20:03:31.595702700 +0800
+++ j/kernel/cgroup/cgroup.c	2022-05-18 18:38:01.194420100 +0800
@@ -5154,7 +5154,7 @@ static void css_free_rwork_fn(struct wor
 static void css_release_work_fn(struct work_struct *work)
 {
 	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+		container_of(work, struct cgroup_subsys_state, release_work);
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;
 
@@ -5210,8 +5210,8 @@ static void css_release(struct percpu_re
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
-	INIT_WORK(&css->destroy_work, css_release_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	INIT_WORK(&css->release_work, css_release_work_fn);
+	queue_work(cgroup_destroy_wq, &css->release_work);
 }
 
 static void init_and_link_css(struct cgroup_subsys_state *css,
--- a/lib/percpu-refcount.c	2022-05-18 18:18:40.337365100 +0800
+++ j/lib/percpu-refcount.c	2022-05-18 18:38:01.215498900 +0800
@@ -76,6 +76,7 @@ int percpu_ref_init(struct percpu_ref *r
 	data = kzalloc(sizeof(*ref->data), gfp);
 	if (!data) {
 		free_percpu((void __percpu *)ref->percpu_count_ptr);
+		ref->percpu_count_ptr = 0;
 		return -ENOMEM;
 	}
 
--- a/net/ipv4/tcp_input.c	2022-05-16 19:59:50.885069300 +0800
+++ j/net/ipv4/tcp_input.c	2022-05-18 18:38:01.244757300 +0800
@@ -5926,6 +5926,7 @@ void tcp_rcv_established(struct sock *sk
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
+			skb_dst_drop(skb);
 			__skb_pull(skb, tcp_header_len);
 			eaten = tcp_queue_rcv(sk, skb, &fragstolen);