INFO: task kworker/0:0H:11 blocked for more than 143 seconds.
Not tainted syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:kworker/0:0H state:D stack:25896 pid:11 tgid:11 ppid:2 task_flags:0x4208060 flags:0x00080000
Workqueue: kblockd blk_mq_requeue_work
Call Trace:
context_switch kernel/sched/core.c:5256 [inline]
__schedule+0x14bc/0x5000 kernel/sched/core.c:6863
__schedule_loop kernel/sched/core.c:6945 [inline]
schedule+0x165/0x360 kernel/sched/core.c:6960
schedule_timeout+0x12b/0x270 kernel/time/sleep_timeout.c:99
wait_for_reconnect drivers/block/nbd.c:1107 [inline]
nbd_handle_cmd drivers/block/nbd.c:1149 [inline]
nbd_queue_rq+0x662/0xf10 drivers/block/nbd.c:1207
blk_mq_dispatch_rq_list+0x4c0/0x1900 block/blk-mq.c:2129
__blk_mq_do_dispatch_sched block/blk-mq-sched.c:168 [inline]
blk_mq_do_dispatch_sched block/blk-mq-sched.c:182 [inline]
__blk_mq_sched_dispatch_requests+0xda4/0x1570 block/blk-mq-sched.c:307
blk_mq_sched_dispatch_requests+0xd7/0x190 block/blk-mq-sched.c:329
blk_mq_run_hw_queue+0x348/0x4f0 block/blk-mq.c:2367
blk_mq_run_hw_queues+0x33e/0x430 block/blk-mq.c:2416
blk_mq_requeue_work+0x717/0x760 block/blk-mq.c:1583
process_one_work kernel/workqueue.c:3257 [inline]
process_scheduled_works+0xad1/0x1770 kernel/workqueue.c:3340
worker_thread+0x8a0/0xda0 kernel/workqueue.c:3421
kthread+0x711/0x8a0 kernel/kthread.c:463
ret_from_fork+0x599/0xb30 arch/x86/kernel/process.c:158
ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:246
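The blocked path above bottoms out in wait_for_reconnect (drivers/block/nbd.c:1107): the kblockd worker is parked in D state waiting for an nbd server connection to come back. For context, a simplified sketch of that wait, paraphrased from the mainline driver (field and flag names follow current upstream and may differ in other kernel versions):

    /* Simplified sketch of drivers/block/nbd.c:wait_for_reconnect().
     * Sleeps via wait_event_timeout() -> schedule_timeout() (matching the
     * trace above) until a connection comes back, the device is marked
     * disconnected, or dead_conn_timeout expires.
     */
    static int wait_for_reconnect(struct nbd_device *nbd)
    {
            struct nbd_config *config = nbd->config;

            if (!config->dead_conn_timeout)
                    return 0;

            if (!wait_event_timeout(config->conn_wait,
                            test_bit(NBD_RT_DISCONNECTED,
                                     &config->runtime_flags) ||
                            atomic_read(&config->live_connections) > 0,
                            config->dead_conn_timeout))
                    return 0;       /* timed out: fail the command */

            return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
    }

With a large dead_conn_timeout and no live connections, this wait can exceed hung_task_timeout_secs, which is what the watchdog is flagging here.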
Showing all locks held in the system:
3 locks held by kworker/0:1/10:
#0: ffff8880b883a7d8 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:639
#1: ffff8880b8824448 (psi_seq){-.-.}-{0:0}, at: psi_task_switch+0x53/0x880 kernel/sched/psi.c:933
#2: ffff8880806fa930 (&ht->mutex){+.+.}-{4:4}, at: rhashtable_free_and_destroy+0x48/0x940 lib/rhashtable.c:1153
4 locks held by kworker/0:0H/11:
#0: ffff888140af5548 ((wq_completion)kblockd){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3232 [inline]
#0: ffff888140af5548 ((wq_completion)kblockd){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x1770 kernel/workqueue.c:3340
#1: ffffc90000107b80 ((work_completion)(&(&q->requeue_work)->work)){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3233 [inline]
#1: ffffc90000107b80 ((work_completion)(&(&q->requeue_work)->work)){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x1770 kernel/workqueue.c:3340
#2: ffff8881437a8698 (set->srcu){.+.+}-{0:0}, at: srcu_lock_acquire include/linux/srcu.h:185 [inline]
#2: ffff8881437a8698 (set->srcu){.+.+}-{0:0}, at: srcu_read_lock include/linux/srcu.h:277 [inline]
#2: ffff8881437a8698 (set->srcu){.+.+}-{0:0}, at: blk_mq_run_hw_queue+0x31f/0x4f0 block/blk-mq.c:2367
#3: ffff888024bd0178 (&cmd->lock){+.+.}-{4:4}, at: nbd_queue_rq+0xc8/0xf10 drivers/block/nbd.c:1199
1 lock held by khungtaskd/31:
#0: ffffffff8df41cc0 (rcu_read_lock){....}-{1:3}, at: rcu_lock_acquire include/linux/rcupdate.h:331 [inline]
#0: ffffffff8df41cc0 (rcu_read_lock){....}-{1:3}, at: rcu_read_lock include/linux/rcupdate.h:867 [inline]
#0: ffffffff8df41cc0 (rcu_read_lock){....}-{1:3}, at: debug_show_all_locks+0x2e/0x180 kernel/locking/lockdep.c:6775
2 locks held by kworker/u8:7/1105:
3 locks held by kworker/u8:10/3428:
#0: ffff88802f12d148 ((wq_completion)ipv6_addrconf){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3232 [inline]
#0: ffff88802f12d148 ((wq_completion)ipv6_addrconf){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x1770 kernel/workqueue.c:3340
#1: ffffc9000c857b80 ((work_completion)(&(&ifa->dad_work)->work)){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3233 [inline]
#1: ffffc9000c857b80 ((work_completion)(&(&ifa->dad_work)->work)){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x1770 kernel/workqueue.c:3340
#2: ffffffff8f2f55c8 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_net_lock include/linux/rtnetlink.h:130 [inline]
#2: ffffffff8f2f55c8 (rtnl_mutex){+.+.}-{4:4}, at: addrconf_dad_work+0x112/0x14b0 net/ipv6/addrconf.c:4194
6 locks held by kworker/u8:12/3564:
#0: ffff88801aedf148 ((wq_completion)netns){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3232 [inline]
#0: ffff88801aedf148 ((wq_completion)netns){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x1770 kernel/workqueue.c:3340
#1: ffffc9000cd57b80 (net_cleanup_work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3233 [inline]
#1: ffffc9000cd57b80 (net_cleanup_work){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x1770 kernel/workqueue.c:3340
#2: ffffffff8f2e8510 (pernet_ops_rwsem){++++}-{4:4}, at: cleanup_net+0xf7/0x7a0 net/core/net_namespace.c:670
#3: ffffffff8f2f55c8 (rtnl_mutex){+.+.}-{4:4}, at: caif_exit_net+0x6a/0x4a0 net/caif/caif_dev.c:528
#4: ffff88814d52df80 (&caifn->caifdevs.lock){+.+.}-{4:4}, at: caif_exit_net+0x7d/0x4a0 net/caif/caif_dev.c:529
#5: ffffffff8df477f8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: exp_funnel_lock kernel/rcu/tree_exp.h:311 [inline]
#5: ffffffff8df477f8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: synchronize_rcu_expedited+0x2f6/0x730 kernel/rcu/tree_exp.h:956
2 locks held by getty/5592:
#0: ffff88802f6df0a0 (&tty->ldisc_sem){++++}-{0:0}, at: tty_ldisc_ref_wait+0x25/0x70 drivers/tty/tty_ldisc.c:243
#1: ffffc9000332b2f0 (&ldata->atomic_read_lock){+.+.}-{4:4}, at: n_tty_read+0x43e/0x1400 drivers/tty/n_tty.c:2222
2 locks held by syz-executor/5820:
#0: ffff8880b893a7d8 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:639
#1: ffff8880b8924448 (psi_seq){-.-.}-{0:0}, at: psi_task_switch+0x53/0x880 kernel/sched/psi.c:933
1 lock held by udevd/5831:
#0: ffff888143b18358 (&disk->open_mutex){+.+.}-{4:4}, at: bdev_open+0xe0/0xd30 block/bdev.c:962
1 lock held by udevd/5850:
#0: ffff888024a24358 (&disk->open_mutex){+.+.}-{4:4}, at: bdev_open+0xe0/0xd30 block/bdev.c:962
1 lock held by udevd/6525:
#0: ffff888024b7b358 (&disk->open_mutex){+.+.}-{4:4}, at: bdev_open+0xe0/0xd30 block/bdev.c:962
1 lock held by syz-executor/16995:
#0: ffffffff8f2f55c8 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_lock net/core/rtnetlink.c:80 [inline]
#0: ffffffff8f2f55c8 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_nets_lock net/core/rtnetlink.c:341 [inline]
#0: ffffffff8f2f55c8 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_newlink+0x8ec/0x1c90 net/core/rtnetlink.c:4071
2 locks held by syz.3.3015/17126:
#0: ffffffff8f2e8510 (pernet_ops_rwsem){++++}-{4:4}, at: copy_net_ns+0x3cc/0x570 net/core/net_namespace.c:577
#1: ffffffff8df476c0 (rcu_state.barrier_mutex){+.+.}-{4:4}, at: rcu_barrier+0x4c/0x570 kernel/rcu/tree.c:3816
2 locks held by syz.2.3018/17142:
#0: ffffffff8f2e8510 (pernet_ops_rwsem){++++}-{4:4}, at: copy_net_ns+0x3cc/0x570 net/core/net_namespace.c:577
#1: ffffffff8df476c0 (rcu_state.barrier_mutex){+.+.}-{4:4}, at: rcu_barrier+0x4c/0x570 kernel/rcu/tree.c:3816
2 locks held by syz.1.3024/17169:
#0: ffffffff8f2e8510 (pernet_ops_rwsem){++++}-{4:4}, at: copy_net_ns+0x3cc/0x570 net/core/net_namespace.c:577
#1: ffffffff8f2f55c8 (rtnl_mutex){+.+.}-{4:4}, at: ip_tunnel_init_net+0x2ab/0x800 net/ipv4/ip_tunnel.c:1146
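Note lock #3 in the hung kworker/0:0H list above: cmd->lock is taken in nbd_queue_rq() (drivers/block/nbd.c:1199) before the reconnect wait runs, so the sleep happens with the per-command mutex and the queue's SRCU read lock (#2) held, stalling the whole dispatch path. A minimal sketch of that call shape, again paraphrased from mainline nbd.c:

    /* Sketch of nbd_queue_rq(): the blk-mq ->queue_rq hook can sleep
     * inside nbd_handle_cmd() -> wait_for_reconnect() while holding
     * cmd->lock (lock #3 in the dump above).
     */
    static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
    {
            struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
            blk_status_t ret;

            mutex_lock(&cmd->lock);
            ret = nbd_handle_cmd(cmd, hctx->queue_num); /* may sleep */
            mutex_unlock(&cmd->lock);

            return ret;
    }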
=============================================
NMI backtrace for cpu 1
CPU: 1 UID: 0 PID: 31 Comm: khungtaskd Not tainted syzkaller #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/25/2025
Call Trace:
dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
nmi_cpu_backtrace+0x39e/0x3d0 lib/nmi_backtrace.c:113
nmi_trigger_cpumask_backtrace+0x17a/0x300 lib/nmi_backtrace.c:62
trigger_all_cpu_backtrace include/linux/nmi.h:160 [inline]
check_hung_uninterruptible_tasks kernel/hung_task.c:332 [inline]
watchdog+0xf3c/0xf80 kernel/hung_task.c:495
kthread+0x711/0x8a0 kernel/kthread.c:463
ret_from_fork+0x599/0xb30 arch/x86/kernel/process.c:158
ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:246
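The trace above is just the khungtaskd watchdog itself collecting the backtraces. Its detection logic is roughly the following, simplified from kernel/hung_task.c (the real code also handles rate limiting and the hung_task_panic/warnings knobs):

    /* Sketch of the hung-task check: a task counts as hung when it has
     * sat in TASK_UNINTERRUPTIBLE (state D) without being scheduled for
     * more than hung_task_timeout_secs.
     */
    static void check_hung_task(struct task_struct *t, unsigned long timeout)
    {
            unsigned long switch_count = t->nvcsw + t->nivcsw;

            if (switch_count != t->last_switch_count) {
                    /* Task ran since the last scan: not hung. */
                    t->last_switch_count = switch_count;
                    t->last_switch_time = jiffies;
                    return;
            }
            if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
                    return;

            pr_err("INFO: task %s:%d blocked for more than %lu seconds.\n",
                   t->comm, t->pid, timeout);
            sched_show_task(t);
    }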
Sending NMI from CPU 1 to CPUs 0:
NMI backtrace for cpu 0
CPU: 0 UID: 0 PID: 10 Comm: kworker/0:1 Not tainted syzkaller #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/25/2025
Workqueue: events drain_vmap_area_work
RIP: 0010:lookup_chain_cache kernel/locking/lockdep.c:3801 [inline]
RIP: 0010:lookup_chain_cache_add kernel/locking/lockdep.c:3821 [inline]
RIP: 0010:validate_chain kernel/locking/lockdep.c:3876 [inline]
RIP: 0010:__lock_acquire+0xb18/0x2cf0 kernel/locking/lockdep.c:5237
Code: 65 e5 0d 48 c7 c6 9b ad 84 8d 67 48 0f b9 3a 90 48 bb eb 83 b5 80 46 86 c8 61 48 0f af dd 48 c1 eb 2d 48 8b 04 dd e0 a3 3a 93 <48> 85 c0 0f 94 c1 48 83 c0 f8 0f 94 c2 08 ca 0f 84 1c 01 00 00 e8
RSP: 0018:ffffc900000f7688 EFLAGS: 00000806
RAX: ffffffff93911358 RBX: 0000000000063d8e RCX: 0000000000040000
RDX: 00000000faa4cfac RSI: 00000000be00ed8f RDI: ffff88801c6f9e80
RBP: f2c486c998bc8ec4 R08: ffffffff82170acd R09: ffff88801a041818
R10: dffffc0000000000 R11: ffffed1003408301 R12: ffff88801c6faa28
R13: ffff88801c6faa28 R14: ffff88801c6f9e80 R15: 0000000000000003
FS: 0000000000000000(0000) GS:ffff8881260b1000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fa8a69b42f8 CR3: 0000000075104000 CR4: 00000000003526f0
Call Trace:
lock_acquire+0x117/0x340 kernel/locking/lockdep.c:5868
__raw_spin_lock include/linux/spinlock_api_smp.h:133 [inline]
_raw_spin_lock+0x2e/0x40 kernel/locking/spinlock.c:154
spin_lock include/linux/spinlock.h:351 [inline]
node_pool_add_va mm/vmalloc.c:1917 [inline]
purge_vmap_node+0x73d/0x8d0 mm/vmalloc.c:2286
__purge_vmap_area_lazy+0x77a/0xb00 mm/vmalloc.c:2362
drain_vmap_area_work+0x27/0x40 mm/vmalloc.c:2396
process_one_work kernel/workqueue.c:3257 [inline]
process_scheduled_works+0xad1/0x1770 kernel/workqueue.c:3340
worker_thread+0x8a0/0xda0 kernel/workqueue.c:3421
kthread+0x711/0x8a0 kernel/kthread.c:463
ret_from_fork+0x599/0xb30 arch/x86/kernel/process.c:158
ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:246