diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 111607d91489..124b4b7b793f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5009,6 +5009,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (unlikely(!debug_locks))
 		return 0;
 
+	if (unlikely(!lock))
+		return 0;
+
 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
 		check = 0;
 
diff --git a/include/net/sock.h b/include/net/sock.h
index c4b91fc19b9c..4edc64674eb0 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2014,8 +2014,12 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 
 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
 {
+	wait_queue_head_t *wq = NULL;
 	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
-	return &rcu_dereference_raw(sk->sk_wq)->wait;
+	wq = &rcu_dereference_raw(sk->sk_wq)->wait;
+	if (!wq)
+		wq = &sk->sk_socket->wq.wait;
+	return wq;
 }
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
@@ -2029,7 +2033,7 @@ static inline void sock_orphan(struct sock *sk)
 	write_lock_bh(&sk->sk_callback_lock);
 	sock_set_flag(sk, SOCK_DEAD);
 	sk_set_socket(sk, NULL);
-	sk->sk_wq = NULL;
+	rcu_assign_pointer(sk->sk_wq, NULL);
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
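For reference, the NULL check added in the sk_sleep() hunk is only meaningful because wait is the first member of struct socket_wq: taking the address of a member at offset 0 through a NULL struct pointer yields NULL, which is exactly what the existing BUILD_BUG_ON() guarantees. The stand-alone user-space sketch below is not part of the patch; struct demo_wq is a hypothetical stand-in for struct socket_wq, and it assumes the compiler lowers &ptr->member to plain pointer arithmetic, the same assumption the patched kernel code makes.

/*
 * Stand-alone user-space sketch (not part of the patch).  struct demo_wq
 * is a hypothetical stand-in for struct socket_wq; the point is only to
 * show why checking &...->wait for NULL in sk_sleep() works: with "wait"
 * at offset 0, taking its address through a NULL struct pointer yields
 * NULL, assuming &ptr->member is lowered to plain pointer arithmetic.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct demo_wq {		/* hypothetical stand-in for struct socket_wq */
	int wait;		/* stand-in for wait_queue_head_t wait */
	int flags;
};

int main(void)
{
	struct demo_wq *wq = NULL;

	/* Mirrors BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0). */
	static_assert(offsetof(struct demo_wq, wait) == 0,
		      "wait must stay the first member");

	/* Same shape as &rcu_dereference_raw(sk->sk_wq)->wait with sk_wq == NULL. */
	int *wait_addr = &wq->wait;

	printf("wait_addr is %sNULL\n", wait_addr ? "not " : "");
	return 0;
}

The sock_orphan() hunk is the writer side of the same pattern: publishing NULL through rcu_assign_pointer() pairs with the rcu_dereference_raw() read in sk_sleep(), so a reader observes either the old wait-queue pointer or NULL, never a torn update.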