INFO: task syz-executor:5848 blocked for more than 143 seconds.
Not tainted 6.16.0-rc5-next-20250711-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor state:D stack:17320 pid:5848 tgid:5848 ppid:1 task_flags:0x400140 flags:0x00004006
Call Trace:
context_switch kernel/sched/core.c:5314 [inline]
__schedule+0x16f5/0x4d00 kernel/sched/core.c:6697
__schedule_loop kernel/sched/core.c:6775 [inline]
schedule+0x165/0x360 kernel/sched/core.c:6790
schedule_timeout+0x9a/0x270 kernel/time/sleep_timeout.c:75
do_wait_for_common kernel/sched/completion.c:100 [inline]
__wait_for_common kernel/sched/completion.c:121 [inline]
wait_for_common kernel/sched/completion.c:132 [inline]
wait_for_completion+0x2bf/0x5d0 kernel/sched/completion.c:153
kthread_stop+0x194/0x5f0 kernel/kthread.c:790
bch2_copygc_stop+0x4f/0x150 fs/bcachefs/movinggc.c:438
__bch2_fs_read_only+0x47/0x5b0 fs/bcachefs/super.c:280
bch2_fs_read_only+0x42a/0xb00 fs/bcachefs/super.c:366
__bch2_fs_stop+0x100/0x900 fs/bcachefs/super.c:677
generic_shutdown_super+0x135/0x2c0 fs/super.c:643
bch2_kill_sb+0x41/0x50 fs/bcachefs/fs.c:2624
deactivate_locked_super+0xb9/0x130 fs/super.c:474
cleanup_mnt+0x425/0x4c0 fs/namespace.c:1378
task_work_run+0x1d4/0x260 kernel/task_work.c:227
resume_user_mode_work include/linux/resume_user_mode.h:50 [inline]
exit_to_user_mode_loop+0xec/0x110 kernel/entry/common.c:43
exit_to_user_mode_prepare include/linux/irq-entry-common.h:208 [inline]
syscall_exit_to_user_mode_work include/linux/entry-common.h:175 [inline]
syscall_exit_to_user_mode include/linux/entry-common.h:210 [inline]
do_syscall_64+0x2bd/0x3b0 arch/x86/entry/syscall_64.c:100
entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f570df8fc57
RSP: 002b:00007ffc1b6eb418 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6
RAX: 0000000000000000 RBX: 00007f570e010925 RCX: 00007f570df8fc57
RDX: 0000000000000000 RSI: 0000000000000009 RDI: 00007ffc1b6eb4d0
RBP: 00007ffc1b6eb4d0 R08: 0000000000000000 R09: 0000000000000000
R10: 00000000ffffffff R11: 0000000000000246 R12: 00007ffc1b6ec560
R13: 00007f570e010925 R14: 000000000004d5f1 R15: 00007ffc1b6ec5a0
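
The blocked path above (bch2_copygc_stop -> kthread_stop -> wait_for_completion, kernel/kthread.c:790) is the standard kthread teardown sequence: kthread_stop() blocks until the target thread's function returns, so __bch2_fs_read_only() cannot make progress until the copygc thread reaches a kthread_should_stop() check and exits. A minimal sketch of that pattern, with hypothetical names rather than the actual bcachefs code:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static struct task_struct *copygc_task;  /* hypothetical stand-in for the copygc thread */

    static int copygc_fn(void *arg)
    {
            /*
             * kthread_stop() only returns once this function returns, so the
             * loop must keep reaching the kthread_should_stop() check and must
             * not block indefinitely elsewhere.
             */
            while (!kthread_should_stop()) {
                    /* ... one pass of work ... */

                    set_current_state(TASK_INTERRUPTIBLE);
                    if (!kthread_should_stop())
                            schedule();             /* woken by kthread_stop() */
                    __set_current_state(TASK_RUNNING);
            }
            return 0;
    }

    static int copygc_start(void)
    {
            copygc_task = kthread_run(copygc_fn, NULL, "copygc-demo");
            return IS_ERR(copygc_task) ? PTR_ERR(copygc_task) : 0;
    }

    static void copygc_stop(void)
    {
            if (!IS_ERR_OR_NULL(copygc_task)) {
                    kthread_stop(copygc_task);      /* waits for copygc_fn() to return */
                    copygc_task = NULL;
            }
    }

The lock dump below suggests why that wait never completes here: bch-copygc/loop (pid 9229) still holds the &c->btree_trans_barrier SRCU and &c->gc_lock taken in bch2_btree_update_start(), i.e. it appears to be stuck inside a btree update rather than back at its stop check.
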
Showing all locks held in the system:
1 lock held by khungtaskd/31:
#0: ffffffff8e13c5a0 (rcu_read_lock){....}-{1:3}, at: rcu_lock_acquire include/linux/rcupdate.h:331 [inline]
#0: ffffffff8e13c5a0 (rcu_read_lock){....}-{1:3}, at: rcu_read_lock include/linux/rcupdate.h:841 [inline]
#0: ffffffff8e13c5a0 (rcu_read_lock){....}-{1:3}, at: debug_show_all_locks+0x2e/0x180 kernel/locking/lockdep.c:6770
2 locks held by kworker/u8:7/3064:
#0: ffff88801ebae148 ((wq_completion)iou_exit){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3214 [inline]
#0: ffff88801ebae148 ((wq_completion)iou_exit){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x17b0 kernel/workqueue.c:3322
#1: ffffc9000bee7bc0 ((work_completion)(&ctx->exit_work)){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3215 [inline]
#1: ffffc9000bee7bc0 ((work_completion)(&ctx->exit_work)){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x17b0 kernel/workqueue.c:3322
2 locks held by getty/5615:
#0: ffff88802fde60a0 (&tty->ldisc_sem){++++}-{0:0}, at: tty_ldisc_ref_wait+0x25/0x70 drivers/tty/tty_ldisc.c:243
#1: ffffc9000332b2f0 (&ldata->atomic_read_lock){+.+.}-{4:4}, at: n_tty_read+0x43e/0x1400 drivers/tty/n_tty.c:2222
2 locks held by syz-executor/5848:
#0: ffff88802f1560e0 (&type->s_umount_key#54){+.+.}-{4:4}, at: __super_lock fs/super.c:57 [inline]
#0: ffff88802f1560e0 (&type->s_umount_key#54){+.+.}-{4:4}, at: __super_lock_excl fs/super.c:72 [inline]
#0: ffff88802f1560e0 (&type->s_umount_key#54){+.+.}-{4:4}, at: deactivate_super+0xa9/0xe0 fs/super.c:506
#1: ffff88803a080278 (&c->state_lock){++++}-{4:4}, at: __bch2_fs_stop+0xf8/0x900 fs/bcachefs/super.c:676
5 locks held by kworker/1:3/5866:
3 locks held by kworker/1:6/5970:
#0: ffff88801a480d48 ((wq_completion)events){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3214 [inline]
#0: ffff88801a480d48 ((wq_completion)events){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x17b0 kernel/workqueue.c:3322
#1: ffffc900057b7bc0 ((work_completion)(&aux->work)){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3215 [inline]
#1: ffffc900057b7bc0 ((work_completion)(&aux->work)){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x17b0 kernel/workqueue.c:3322
#2: ffffffff8f537a08 (rtnl_mutex){+.+.}-{4:4}, at: bpf_prog_dev_bound_destroy+0x75/0x590 kernel/bpf/offload.c:387
2 locks held by kworker/u8:9/6043:
#0: ffff88801ebae148 ((wq_completion)iou_exit){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3214 [inline]
#0: ffff88801ebae148 ((wq_completion)iou_exit){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x17b0 kernel/workqueue.c:3322
#1: ffffc9000bef7bc0 ((work_completion)(&ctx->exit_work)){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3215 [inline]
#1: ffffc9000bef7bc0 ((work_completion)(&ctx->exit_work)){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x17b0 kernel/workqueue.c:3322
5 locks held by kworker/u8:17/6321:
#0: ffff88801b2fe948 ((wq_completion)netns){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3214 [inline]
#0: ffff88801b2fe948 ((wq_completion)netns){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x17b0 kernel/workqueue.c:3322
#1: ffffc90003377bc0 (net_cleanup_work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3215 [inline]
#1: ffffc90003377bc0 (net_cleanup_work){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x17b0 kernel/workqueue.c:3322
#2: ffffffff8f52ac10 (pernet_ops_rwsem){++++}-{4:4}, at: cleanup_net+0xf7/0x800 net/core/net_namespace.c:658
#3: ffffffff8f537a08 (rtnl_mutex){+.+.}-{4:4}, at: default_device_exit_batch+0xdc/0x890 net/core/dev.c:12657
#4: ffffffff8e1420f8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: exp_funnel_lock kernel/rcu/tree_exp.h:343 [inline]
#4: ffffffff8e1420f8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: synchronize_rcu_expedited+0x3b9/0x730 kernel/rcu/tree_exp.h:967
3 locks held by kworker/u8:18/6590:
#0: ffff88801a489148 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3214 [inline]
#0: ffff88801a489148 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_scheduled_works+0x9b4/0x17b0 kernel/workqueue.c:3322
#1: ffffc9000c6c7bc0 ((linkwatch_work).work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3215 [inline]
#1: ffffc9000c6c7bc0 ((linkwatch_work).work){+.+.}-{0:0}, at: process_scheduled_works+0x9ef/0x17b0 kernel/workqueue.c:3322
#2: ffffffff8f537a08 (rtnl_mutex){+.+.}-{4:4}, at: linkwatch_event+0xe/0x60 net/core/link_watch.c:303
2 locks held by bch-copygc/loop/9229:
#0: ffff88803a0843a0 (&c->btree_trans_barrier){.+.+}-{0:0}, at: srcu_lock_acquire include/linux/srcu.h:161 [inline]
#0: ffff88803a0843a0 (&c->btree_trans_barrier){.+.+}-{0:0}, at: srcu_read_lock include/linux/srcu.h:253 [inline]
#0: ffff88803a0843a0 (&c->btree_trans_barrier){.+.+}-{0:0}, at: bch2_trans_srcu_lock+0xaf/0x220 fs/bcachefs/btree_iter.c:3300
#1: ffff88803a0a6710 (&c->gc_lock){++++}-{4:4}, at: bch2_btree_update_start+0x542/0x1de0 fs/bcachefs/btree_update_interior.c:1208
1 lock held by syz-executor/9435:
#0: ffffffff8e1420f8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: exp_funnel_lock kernel/rcu/tree_exp.h:311 [inline]
#0: ffffffff8e1420f8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: synchronize_rcu_expedited+0x2f6/0x730 kernel/rcu/tree_exp.h:967
2 locks held by syz.2.835/9709:
3 locks held by kworker/0:1/10786:
2 locks held by syz-executor/10824:
#0: ffffffff8ecabb80 (&ops->srcu#2){.+.+}-{0:0}, at: rcu_lock_acquire include/linux/rcupdate.h:331 [inline]
#0: ffffffff8ecabb80 (&ops->srcu#2){.+.+}-{0:0}, at: rcu_read_lock include/linux/rcupdate.h:841 [inline]
#0: ffffffff8ecabb80 (&ops->srcu#2){.+.+}-{0:0}, at: rtnl_link_ops_get+0x23/0x250 net/core/rtnetlink.c:570
#1: ffffffff8f537a08 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_lock net/core/rtnetlink.c:80 [inline]
#1: ffffffff8f537a08 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_nets_lock net/core/rtnetlink.c:341 [inline]
#1: ffffffff8f537a08 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_newlink+0x8db/0x1c70 net/core/rtnetlink.c:4056
1 lock held by syz.8.1073/10988:
#0: ffffffff8f537a08 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_lock net/core/rtnetlink.c:80 [inline]
#0: ffffffff8f537a08 (rtnl_mutex){+.+.}-{4:4}, at: rtnetlink_rcv_msg+0x71c/0xb70 net/core/rtnetlink.c:6952
=============================================
NMI backtrace for cpu 1
CPU: 1 UID: 0 PID: 31 Comm: khungtaskd Not tainted 6.16.0-rc5-next-20250711-syzkaller #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/07/2025
Call Trace:
dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
nmi_cpu_backtrace+0x39e/0x3d0 lib/nmi_backtrace.c:113
nmi_trigger_cpumask_backtrace+0x17a/0x300 lib/nmi_backtrace.c:62
trigger_all_cpu_backtrace include/linux/nmi.h:160 [inline]
check_hung_uninterruptible_tasks kernel/hung_task.c:328 [inline]
watchdog+0xfee/0x1030 kernel/hung_task.c:491
kthread+0x711/0x8a0 kernel/kthread.c:463
ret_from_fork+0x3f9/0x770 arch/x86/kernel/process.c:148
ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:245
Sending NMI from CPU 1 to CPUs 0:
NMI backtrace for cpu 0
CPU: 0 UID: 0 PID: 10786 Comm: kworker/0:1 Not tainted 6.16.0-rc5-next-20250711-syzkaller #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/07/2025
Workqueue: events drain_vmap_area_work
RIP: 0010:validate_chain+0x24/0x2140 kernel/locking/lockdep.c:3878
Code: 90 90 90 90 90 90 55 41 57 41 56 41 55 41 54 53 48 81 ec e0 00 00 00 49 89 cf 65 48 8b 05 e4 b5 07 11 48 89 84 24 d8 00 00 00 <8b> 46 20 89 c1 81 e1 00 80 04 00 81 f9 00 00 04 00 0f 85 61 02 00
RSP: 0018:ffffc9000407f0b0 EFLAGS: 00000082
RAX: 71bb9b3b41e8d600 RBX: 0000000000000004 RCX: 8a8746b0552cdd60
RDX: 0000000000000000 RSI: ffff88802ac5e590 RDI: ffff88802ac5da00
RBP: 0000000000000000 R08: 0000000000000000 R09: ffffffff81729ea5
R10: ffffc9000407f418 R11: ffffffff81ac7b20 R12: 00000000999d3bc4
R13: ffff88802ac5e4f0 R14: ffff88802ac5e590 R15: 8a8746b0552cdd60
FS: 0000000000000000(0000) GS:ffff888125bc6000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000200000404030 CR3: 000000000df36000 CR4: 00000000003526f0
Call Trace:
__lock_acquire+0xab9/0xd20 kernel/locking/lockdep.c:5240
lock_acquire+0x120/0x360 kernel/locking/lockdep.c:5871
rcu_lock_acquire include/linux/rcupdate.h:331 [inline]
rcu_read_lock include/linux/rcupdate.h:841 [inline]
class_rcu_constructor include/linux/rcupdate.h:1155 [inline]
unwind_next_frame+0xc2/0x2390 arch/x86/kernel/unwind_orc.c:479
arch_stack_walk+0x11c/0x150 arch/x86/kernel/stacktrace.c:25
stack_trace_save+0x9c/0xe0 kernel/stacktrace.c:122
save_stack+0xf5/0x1f0 mm/page_owner.c:156
__reset_page_owner+0x71/0x1f0 mm/page_owner.c:308
reset_page_owner include/linux/page_owner.h:25 [inline]
free_pages_prepare mm/page_alloc.c:1395 [inline]
__free_frozen_pages+0xbc4/0xd30 mm/page_alloc.c:2895
kasan_depopulate_vmalloc_pte+0x74/0xa0 mm/kasan/shadow.c:472
apply_to_pte_range mm/memory.c:3028 [inline]
apply_to_pmd_range mm/memory.c:3072 [inline]
apply_to_pud_range mm/memory.c:3108 [inline]
apply_to_p4d_range mm/memory.c:3144 [inline]
__apply_to_page_range+0xb8f/0x1380 mm/memory.c:3180
kasan_release_vmalloc+0xa2/0xd0 mm/kasan/shadow.c:593
kasan_release_vmalloc_node mm/vmalloc.c:2249 [inline]
purge_vmap_node+0x214/0x8f0 mm/vmalloc.c:2266
__purge_vmap_area_lazy+0x7a4/0xb40 mm/vmalloc.c:2356
drain_vmap_area_work+0x27/0x40 mm/vmalloc.c:2390
process_one_work kernel/workqueue.c:3239 [inline]
process_scheduled_works+0xade/0x17b0 kernel/workqueue.c:3322
worker_thread+0x8a0/0xda0 kernel/workqueue.c:3403
kthread+0x711/0x8a0 kernel/kthread.c:463
ret_from_fork+0x3f9/0x770 arch/x86/kernel/process.c:148
ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:245