================================
WARNING: inconsistent lock state
4.19.106-syzkaller #0 Not tainted
--------------------------------
inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
kworker/u4:4/670 [HC0[0]:SC1[1]:HE1:SE0] takes:
00000000c687667d (&(&local->client_conns_lock)->rlock){+.?.}, at: spin_lock include/linux/spinlock.h:329 [inline]
00000000c687667d (&(&local->client_conns_lock)->rlock){+.?.}, at: rxrpc_put_one_client_conn net/rxrpc/conn_client.c:949 [inline]
00000000c687667d (&(&local->client_conns_lock)->rlock){+.?.}, at: rxrpc_put_client_conn+0x666/0xc00 net/rxrpc/conn_client.c:1002
{SOFTIRQ-ON-W} state was registered at:
  __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
  _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:144
  spin_lock include/linux/spinlock.h:329 [inline]
  rxrpc_get_client_conn net/rxrpc/conn_client.c:309 [inline]
  rxrpc_connect_call+0x301/0x4630 net/rxrpc/conn_client.c:702
  rxrpc_new_client_call+0x8c6/0x1850 net/rxrpc/call_object.c:291
  rxrpc_new_client_call_for_sendmsg net/rxrpc/sendmsg.c:596 [inline]
  rxrpc_do_sendmsg+0xf2e/0x1bc1 net/rxrpc/sendmsg.c:652
  rxrpc_sendmsg+0x4a8/0x5b0 net/rxrpc/af_rxrpc.c:593
  sock_sendmsg_nosec net/socket.c:622 [inline]
  sock_sendmsg+0xcf/0x120 net/socket.c:632
  ___sys_sendmsg+0x3e2/0x920 net/socket.c:2115
  __sys_sendmmsg+0x195/0x470 net/socket.c:2210
  __do_sys_sendmmsg net/socket.c:2239 [inline]
  __se_sys_sendmmsg net/socket.c:2236 [inline]
  __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2236
  do_syscall_64+0xf9/0x620 arch/x86/entry/common.c:293
  entry_SYSCALL_64_after_hwframe+0x49/0xbe
irq event stamp: 21258254
hardirqs last enabled at (21258254): [] __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:160 [inline]
hardirqs last enabled at (21258254): [] _raw_spin_unlock_irqrestore+0x67/0xe0 kernel/locking/spinlock.c:184
hardirqs last disabled at (21258253): [] __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:108 [inline]
hardirqs last disabled at (21258253): [] _raw_spin_lock_irqsave+0x66/0xbf kernel/locking/spinlock.c:152
softirqs last enabled at (21258202): [] spin_unlock_bh include/linux/spinlock.h:374 [inline]
softirqs last enabled at (21258202): [] batadv_nc_purge_paths+0x28e/0x3b0 net/batman-adv/network-coding.c:482
softirqs last disabled at (21258209): [] invoke_softirq kernel/softirq.c:372 [inline]
softirqs last disabled at (21258209): [] irq_exit+0x17b/0x1c0 kernel/softirq.c:412

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(&(&local->client_conns_lock)->rlock);
  <Interrupt>
    lock(&(&local->client_conns_lock)->rlock);

 *** DEADLOCK ***

4 locks held by kworker/u4:4/670:
 #0: 00000000da94a8fc ((wq_completion)"%s""bat_events"){+.+.}, at: __write_once_size include/linux/compiler.h:220 [inline]
 #0: 00000000da94a8fc ((wq_completion)"%s""bat_events"){+.+.}, at: arch_atomic64_set arch/x86/include/asm/atomic64_64.h:34 [inline]
 #0: 00000000da94a8fc ((wq_completion)"%s""bat_events"){+.+.}, at: atomic64_set include/asm-generic/atomic-instrumented.h:40 [inline]
 #0: 00000000da94a8fc ((wq_completion)"%s""bat_events"){+.+.}, at: atomic_long_set include/asm-generic/atomic-long.h:59 [inline]
 #0: 00000000da94a8fc ((wq_completion)"%s""bat_events"){+.+.}, at: set_work_data kernel/workqueue.c:617 [inline]
 #0: 00000000da94a8fc ((wq_completion)"%s""bat_events"){+.+.}, at: set_work_pool_and_clear_pending kernel/workqueue.c:644 [inline]
 #0: 00000000da94a8fc ((wq_completion)"%s""bat_events"){+.+.}, at: process_one_work+0x81a/0x1640 kernel/workqueue.c:2124
 #1: 0000000053612252 ((work_completion)(&(&bat_priv->nc.work)->work)){+.+.}, at: process_one_work+0x84e/0x1640 kernel/workqueue.c:2128
 #2: 0000000068f88a15 (rcu_read_lock){....}, at: batadv_nc_purge_orig_hash net/batman-adv/network-coding.c:417 [inline]
 #2: 0000000068f88a15 (rcu_read_lock){....}, at: batadv_nc_worker+0xe0/0x760 net/batman-adv/network-coding.c:730
 #3: 00000000a6a1561d (rcu_callback){....}, at: __rcu_reclaim kernel/rcu/rcu.h:226 [inline]
 #3: 00000000a6a1561d (rcu_callback){....}, at: rcu_do_batch kernel/rcu/tree.c:2584 [inline]
 #3: 00000000a6a1561d (rcu_callback){....}, at: invoke_rcu_callbacks kernel/rcu/tree.c:2897 [inline]
 #3: 00000000a6a1561d (rcu_callback){....}, at: __rcu_process_callbacks kernel/rcu/tree.c:2864 [inline]
 #3: 00000000a6a1561d (rcu_callback){....}, at: rcu_process_callbacks+0xbff/0x17f0 kernel/rcu/tree.c:2881

stack backtrace:
CPU: 1 PID: 670 Comm: kworker/u4:4 Not tainted 4.19.106-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Workqueue: bat_events batadv_nc_worker
Call Trace:
 __dump_stack lib/dump_stack.c:77 [inline]
 dump_stack+0x188/0x20d lib/dump_stack.c:118
 print_usage_bug.cold+0x327/0x425 kernel/locking/lockdep.c:2540
 valid_state kernel/locking/lockdep.c:2553 [inline]
 mark_lock_irq kernel/locking/lockdep.c:2747 [inline]
 mark_lock+0xc71/0x11b0 kernel/locking/lockdep.c:3127
 mark_irqflags kernel/locking/lockdep.c:3005 [inline]
 __lock_acquire+0xc62/0x49c0 kernel/locking/lockdep.c:3368
 lock_acquire+0x170/0x400 kernel/locking/lockdep.c:3903
 __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
 _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:144
 spin_lock include/linux/spinlock.h:329 [inline]
 rxrpc_put_one_client_conn net/rxrpc/conn_client.c:949 [inline]
 rxrpc_put_client_conn+0x666/0xc00 net/rxrpc/conn_client.c:1002
 rxrpc_put_connection net/rxrpc/ar-internal.h:951 [inline]
 rxrpc_rcu_destroy_call+0xb6/0x1e0 net/rxrpc/call_object.c:657
 __rcu_reclaim kernel/rcu/rcu.h:236 [inline]
 rcu_do_batch kernel/rcu/tree.c:2584 [inline]
 invoke_rcu_callbacks kernel/rcu/tree.c:2897 [inline]
 __rcu_process_callbacks kernel/rcu/tree.c:2864 [inline]
 rcu_process_callbacks+0xb2d/0x17f0 kernel/rcu/tree.c:2881
 __do_softirq+0x26c/0x93c kernel/softirq.c:292
 invoke_softirq kernel/softirq.c:372 [inline]
 irq_exit+0x17b/0x1c0 kernel/softirq.c:412
 exiting_irq arch/x86/include/asm/apic.h:544 [inline]
 smp_apic_timer_interrupt+0x136/0x550 arch/x86/kernel/apic/apic.c:1094
 apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:893
RIP: 0010:arch_local_irq_restore arch/x86/include/asm/paravirt.h:789 [inline]
RIP: 0010:lock_acquire+0x1ec/0x400 kernel/locking/lockdep.c:3906
Code: 08 00 00 00 00 00 00 48 c1 e8 03 80 3c 10 00 0f 85 cd 01 00 00 48 83 3d d9 c1 62 07 00 0f 84 38 01 00 00 48 8b 7c 24 08 57 9d <0f> 1f 44 00 00 48 83 c4 20 5b 5d 41 5c 41 5d 41 5e 41 5f c3 65 8b
RSP: 0018:ffff8880a8637ca0 EFLAGS: 00000282 ORIG_RAX: ffffffffffffff13
RAX: 1ffffffff1164ac1 RBX: ffff8880a8628180 RCX: 0000000070321728
RDX: dffffc0000000000 RSI: 0000000000000000 RDI: 0000000000000282
RBP: ffffffff88b92960 R08: 0000000000000000 R09: 0000000000000002
R10: ffff8880a8628a50 R11: 0000000031b0c51a R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000002 R15: dffffc0000000000
 rcu_lock_acquire include/linux/rcupdate.h:242 [inline]
 rcu_read_lock include/linux/rcupdate.h:627 [inline]
 batadv_nc_purge_orig_hash net/batman-adv/network-coding.c:419 [inline]
 batadv_nc_worker+0x114/0x760 net/batman-adv/network-coding.c:730
 process_one_work+0x91f/0x1640 kernel/workqueue.c:2153
 worker_thread+0x96/0xe20 kernel/workqueue.c:2296
 kthread+0x34a/0x420 kernel/kthread.c:246
 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:415
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
ceph: No mds server is up or the cluster is laggy
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
ceph: No mds server is up or the cluster is laggy
ceph: No mds server is up or the cluster is laggy
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
ceph: No mds server is up or the cluster is laggy
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
ceph: No mds server is up or the cluster is laggy
ceph: No mds server is up or the cluster is laggy
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
ceph: No mds server is up or the cluster is laggy
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
libceph: connect [c::]:6789 error -101
ceph: No mds server is up or the cluster is laggy
libceph: mon0 [c::]:6789 connect error
ceph: No mds server is up or the cluster is laggy
libceph: connect [c::]:6789 error -101
libceph: mon0 [c::]:6789 connect error
ceph: No mds server is up or the cluster is laggy
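What lockdep is reporting: &local->client_conns_lock was first taken with plain
spin_lock() in process context with softirqs enabled ({SOFTIRQ-ON-W}, via
rxrpc_get_client_conn() on the sendmsg path), and is now being taken from softirq
context ({IN-SOFTIRQ-W}, the RCU callback rxrpc_rcu_destroy_call() ->
rxrpc_put_client_conn() running inside rcu_process_callbacks()). If that softirq
fires on a CPU that already holds the lock, the CPU spins on itself, which is the
single-CPU deadlock shown in the "Possible unsafe locking scenario" box. Below is a
minimal, self-contained sketch of the same pattern; it is not the rxrpc code, and all
identifiers (demo_lock, demo_timer, demo_timer_fn) are illustrative, with a timer
callback standing in for the RCU callback in the report.

/*
 * Minimal sketch of the inconsistency lockdep flags above: a lock taken
 * with plain spin_lock() in process context is also taken from softirq
 * context. Builds as an ordinary kernel module; names are illustrative.
 */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(demo_lock);
static struct timer_list demo_timer;

/* Runs in softirq context, like rcu_process_callbacks() in the trace. */
static void demo_timer_fn(struct timer_list *t)
{
	spin_lock(&demo_lock);		/* {IN-SOFTIRQ-W} usage */
	/* ... */
	spin_unlock(&demo_lock);
}

static int __init demo_init(void)
{
	timer_setup(&demo_timer, demo_timer_fn, 0);
	mod_timer(&demo_timer, jiffies + HZ);

	/*
	 * Process context, like rxrpc_get_client_conn() in the trace.
	 * Plain spin_lock() leaves softirqs enabled, so the timer can
	 * fire on this CPU while the lock is held: self-deadlock.
	 * Lockdep flags the inconsistent usage pair even if the race
	 * never actually triggers. The conventional fix is to use
	 * spin_lock_bh()/spin_unlock_bh() here instead.
	 */
	spin_lock(&demo_lock);		/* {SOFTIRQ-ON-W} usage */
	/* ... */
	spin_unlock(&demo_lock);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Why _bh rather than _irqsave: the header annotation HC0[0]:SC1[1] says the conflicting
acquisition happens in softirq, not hardirq, context, so disabling bottom halves around
the process-context critical section is sufficient. The other common remedy is to take
the work out of softirq context entirely, e.g. have the RCU callback defer the final
put to a workqueue; which approach (if either) rxrpc upstream chose is not claimed here.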