INFO: task syz.0.191:7291 blocked for more than 143 seconds.
      Not tainted 6.14.0-rc6-syzkaller-gb7f94fcf5546 #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz.0.191       state:D stack:25840 pid:7291  tgid:7290  ppid:6570   task_flags:0x400040 flags:0x00004004
Call Trace:
 <TASK>
 context_switch kernel/sched/core.c:5378 [inline]
 __schedule+0x190e/0x4c90 kernel/sched/core.c:6765
 __schedule_loop kernel/sched/core.c:6842 [inline]
 schedule+0x14b/0x320 kernel/sched/core.c:6857
 schedule_preempt_disabled+0x13/0x30 kernel/sched/core.c:6914
 rwsem_down_read_slowpath kernel/locking/rwsem.c:1084 [inline]
 __down_read_common kernel/locking/rwsem.c:1248 [inline]
 __down_read kernel/locking/rwsem.c:1261 [inline]
 down_read+0x705/0xa40 kernel/locking/rwsem.c:1526
 i_mmap_lock_read include/linux/fs.h:564 [inline]
 rmap_walk_file+0x69c/0x780 mm/rmap.c:2694
 remove_migration_ptes mm/migrate.c:365 [inline]
 unmap_and_move_huge_page mm/migrate.c:1518 [inline]
 migrate_hugetlbs mm/migrate.c:1640 [inline]
 migrate_pages+0xd7d/0x3680 mm/migrate.c:2070
 do_mbind mm/mempolicy.c:1394 [inline]
 kernel_mbind mm/mempolicy.c:1537 [inline]
 __do_sys_mbind mm/mempolicy.c:1611 [inline]
 __se_sys_mbind+0x1489/0x1950 mm/mempolicy.c:1607
 do_syscall_x64 arch/x86/entry/common.c:52 [inline]
 do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7fbd7e38cda9
RSP: 002b:00007fbd7f1af038 EFLAGS: 00000246 ORIG_RAX: 00000000000000ed
RAX: ffffffffffffffda RBX: 00007fbd7e5a5fa0 RCX: 00007fbd7e38cda9
RDX: 0000000000000000 RSI: 0000000000800000 RDI: 0000000020001000
RBP: 00007fbd7e40e2a0 R08: 0000000000000040 R09: 0000000000000002
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 0000000000000000 R14: 00007fbd7e5a5fa0 R15: 00007fff81c96be8
 </TASK>
INFO: task syz.0.191:7300 blocked for more than 143 seconds.
      Not tainted 6.14.0-rc6-syzkaller-gb7f94fcf5546 #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz.0.191       state:D stack:26680 pid:7300  tgid:7290  ppid:6570   task_flags:0x400040 flags:0x00000004
Call Trace:
 <TASK>
 context_switch kernel/sched/core.c:5378 [inline]
 __schedule+0x190e/0x4c90 kernel/sched/core.c:6765
 __schedule_loop kernel/sched/core.c:6842 [inline]
 schedule+0x14b/0x320 kernel/sched/core.c:6857
 io_schedule+0x8d/0x110 kernel/sched/core.c:7690
 folio_wait_bit_common+0x839/0xee0 mm/filemap.c:1318
 __folio_lock mm/filemap.c:1665 [inline]
 folio_lock include/linux/pagemap.h:1163 [inline]
 __filemap_get_folio+0x14e/0xae0 mm/filemap.c:1918
 filemap_lock_folio include/linux/pagemap.h:800 [inline]
 filemap_lock_hugetlb_folio include/linux/hugetlb.h:791 [inline]
 hugetlbfs_zero_partial_page+0xb0/0x590 fs/hugetlbfs/inode.c:656
 hugetlbfs_punch_hole fs/hugetlbfs/inode.c:710 [inline]
 hugetlbfs_fallocate+0xbd2/0x11a0 fs/hugetlbfs/inode.c:743
 vfs_fallocate+0x623/0x7a0 fs/open.c:338
 madvise_remove mm/madvise.c:1034 [inline]
 madvise_vma_behavior mm/madvise.c:1269 [inline]
 madvise_walk_vmas mm/madvise.c:1511 [inline]
 do_madvise+0x23c1/0x4db0 mm/madvise.c:1698
 __do_sys_madvise mm/madvise.c:1714 [inline]
 __se_sys_madvise mm/madvise.c:1712 [inline]
 __x64_sys_madvise+0xa6/0xc0 mm/madvise.c:1712
 do_syscall_x64 arch/x86/entry/common.c:52 [inline]
 do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7fbd7e38cda9
RSP: 002b:00007fbd7f18e038 EFLAGS: 00000246 ORIG_RAX: 000000000000001c
RAX: ffffffffffffffda RBX: 00007fbd7e5a6080 RCX: 00007fbd7e38cda9
RDX: 0000000000000009 RSI: 0000000000600002 RDI: 0000000020000000
RBP: 00007fbd7e40e2a0 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 0000000000000001 R14: 00007fbd7e5a6080 R15: 00007fff81c96be8
 </TASK>

Showing all locks held in the system:
4 locks held by kworker/u8:1/13:
 #0: ffff88801bef6148 ((wq_completion)netns){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3213 [inline]
 #0: ffff88801bef6148 ((wq_completion)netns){+.+.}-{0:0}, at: process_scheduled_works+0x98b/0x18e0 kernel/workqueue.c:3319
 #1: ffffc90000127c60 (net_cleanup_work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3214 [inline]
 #1: ffffc90000127c60 (net_cleanup_work){+.+.}-{0:0}, at: process_scheduled_works+0x9c6/0x18e0 kernel/workqueue.c:3319
 #2: ffffffff8fec9f90 (pernet_ops_rwsem){++++}-{4:4}, at: cleanup_net+0x17a/0xd60 net/core/net_namespace.c:606
 #3: ffffffff8fed67c8 (rtnl_mutex){+.+.}-{4:4}, at: default_device_exit_batch+0xdc/0x880 net/core/dev.c:12417
1 lock held by khungtaskd/31:
 #0: ffffffff8eb393e0 (rcu_read_lock){....}-{1:3}, at: rcu_lock_acquire include/linux/rcupdate.h:337 [inline]
 #0: ffffffff8eb393e0 (rcu_read_lock){....}-{1:3}, at: rcu_read_lock include/linux/rcupdate.h:849 [inline]
 #0: ffffffff8eb393e0 (rcu_read_lock){....}-{1:3}, at: debug_show_all_locks+0x55/0x2a0 kernel/locking/lockdep.c:6746
5 locks held by kworker/u8:2/36:
3 locks held by kworker/u8:5/969:
 #0: ffff88801b089148 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3213 [inline]
 #0: ffff88801b089148 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_scheduled_works+0x98b/0x18e0 kernel/workqueue.c:3319
 #1: ffffc90003bf7c60 ((linkwatch_work).work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3214 [inline]
 #1: ffffc90003bf7c60 ((linkwatch_work).work){+.+.}-{0:0}, at: process_scheduled_works+0x9c6/0x18e0 kernel/workqueue.c:3319
 #2: ffffffff8fed67c8 (rtnl_mutex){+.+.}-{4:4}, at: linkwatch_event+0xe/0x60 net/core/link_watch.c:285
2 locks held by syslogd/5191:
 #0: ffff8880b863e958 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:598
 #1: ffff888021ecefe0 (&mm->mmap_lock){++++}-{4:4}, at: __skb_try_recv_datagram+0x19c/0x6a0 net/core/datagram.c:263
1 lock held by dhcpcd/5503:
 #0: ffffffff8fed67c8 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_net_lock include/linux/rtnetlink.h:129 [inline]
 #0: ffffffff8fed67c8 (rtnl_mutex){+.+.}-{4:4}, at: devinet_ioctl+0x34c/0x1d80 net/ipv4/devinet.c:1129
2 locks held by getty/5592:
 #0: ffff8880359a20a0 (&tty->ldisc_sem){++++}-{0:0}, at: tty_ldisc_ref_wait+0x25/0x70 drivers/tty/tty_ldisc.c:243
 #1: ffffc9000331b2f0 (&ldata->atomic_read_lock){+.+.}-{4:4}, at: n_tty_read+0x616/0x1770 drivers/tty/n_tty.c:2211
1 lock held by syz.0.191/7291:
 #0: ffff88802e9ad348 (&hugetlbfs_i_mmap_rwsem_key){++++}-{4:4}, at: i_mmap_lock_read include/linux/fs.h:564 [inline]
 #0: ffff88802e9ad348 (&hugetlbfs_i_mmap_rwsem_key){++++}-{4:4}, at: rmap_walk_file+0x69c/0x780 mm/rmap.c:2694
3 locks held by syz.0.191/7300:
 #0: ffff8881452d4420 (sb_writers#13){.+.+}-{0:0}, at: file_start_write include/linux/fs.h:3035 [inline]
 #0: ffff8881452d4420 (sb_writers#13){.+.+}-{0:0}, at: vfs_fallocate+0x59d/0x7a0 fs/open.c:337
 #1: ffff88802e9ad078 (&sb->s_type->i_mutex_key#21){+.+.}-{4:4}, at: inode_lock include/linux/fs.h:877 [inline]
 #1: ffff88802e9ad078 (&sb->s_type->i_mutex_key#21){+.+.}-{4:4}, at: hugetlbfs_punch_hole fs/hugetlbfs/inode.c:685 [inline]
 #1: ffff88802e9ad078 (&sb->s_type->i_mutex_key#21){+.+.}-{4:4}, at: hugetlbfs_fallocate+0x408/0x11a0 fs/hugetlbfs/inode.c:743
 #2: ffff88802e9ad348 (&hugetlbfs_i_mmap_rwsem_key){++++}-{4:4}, at: i_mmap_lock_write include/linux/fs.h:544 [inline]
 #2: ffff88802e9ad348 (&hugetlbfs_i_mmap_rwsem_key){++++}-{4:4}, at: hugetlbfs_punch_hole fs/hugetlbfs/inode.c:693 [inline]
 #2: ffff88802e9ad348 (&hugetlbfs_i_mmap_rwsem_key){++++}-{4:4}, at: hugetlbfs_fallocate+0x4ce/0x11a0 fs/hugetlbfs/inode.c:743
1 lock held by syz-executor/20767:
 #0: ffffffff8fed67c8 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fed67c8 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_nets_lock net/core/rtnetlink.c:335 [inline]
 #0: ffffffff8fed67c8 (rtnl_mutex){+.+.}-{4:4}, at: rtnl_newlink+0xc55/0x1d30 net/core/rtnetlink.c:4021
1 lock held by syz.3.6780/21491:
 #0: ffffffff8eb3e8b8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: exp_funnel_lock kernel/rcu/tree_exp.h:334 [inline]
 #0: ffffffff8eb3e8b8 (rcu_state.exp_mutex){+.+.}-{4:4}, at: synchronize_rcu_expedited+0x451/0x820 kernel/rcu/tree_exp.h:996

=============================================

NMI backtrace for cpu 1
CPU: 1 UID: 0 PID: 31 Comm: khungtaskd Not tainted 6.14.0-rc6-syzkaller-gb7f94fcf5546 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2025
Call Trace:
 <TASK>
 __dump_stack lib/dump_stack.c:94 [inline]
 dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
 nmi_cpu_backtrace+0x49c/0x4d0 lib/nmi_backtrace.c:113
 nmi_trigger_cpumask_backtrace+0x198/0x320 lib/nmi_backtrace.c:62
 trigger_all_cpu_backtrace include/linux/nmi.h:162 [inline]
 check_hung_uninterruptible_tasks kernel/hung_task.c:236 [inline]
 watchdog+0x1058/0x10a0 kernel/hung_task.c:399
 kthread+0x7a9/0x920 kernel/kthread.c:464
 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:148
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
 </TASK>
Sending NMI from CPU 1 to CPUs 0:
NMI backtrace for cpu 0 skipped: idling at native_safe_halt arch/x86/include/asm/irqflags.h:48 [inline]
NMI backtrace for cpu 0 skipped: idling at arch_safe_halt arch/x86/include/asm/irqflags.h:106 [inline]
NMI backtrace for cpu 0 skipped: idling at acpi_safe_halt+0x21/0x30 drivers/acpi/processor_idle.c:111
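
The two stacks above look like an AB/BA wait on the same hugetlbfs inode: pid 7291 sits in the hugetlb migration path (mbind with MPOL_MF_MOVE, per ORIG_RAX 0xed and R9=0x2) waiting for i_mmap_rwsem in rmap_walk_file(), while pid 7300 already holds that rwsem for write in hugetlbfs_punch_hole() (madvise(MADV_REMOVE), per ORIG_RAX 0x1c and RDX=0x9) and is itself waiting in folio_lock(), presumably on the folio the migration side has locked. Below is only a minimal sketch of that racing syscall pair reconstructed from the register dumps, not the syzkaller reproducer; the memfd_create(MFD_HUGETLB) backing, the fixed 0x20000000 base and the 8 MiB size are assumptions.

#define _GNU_SOURCE
#include <linux/mempolicy.h>   /* MPOL_DEFAULT, MPOL_MF_MOVE */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>          /* memfd_create, MFD_HUGETLB, MADV_REMOVE */
#include <sys/syscall.h>
#include <unistd.h>

#define BASE ((void *)0x20000000ul)   /* fixed base taken from the register dumps */
#define SIZE (8ul << 20)              /* 4 x 2 MiB hugetlb pages (assumption) */

/* pid 7300's side: MADV_REMOVE -> hugetlbfs_fallocate() punch hole, which
 * takes i_mmap_rwsem for write and then waits for the folio lock. */
static void *punch_holes(void *arg)
{
	for (;;)
		madvise(arg, SIZE, MADV_REMOVE);
	return NULL;
}

/* pid 7291's side: mbind(MPOL_MF_MOVE) -> hugetlb folio migration, which
 * holds the folio lock and takes i_mmap_rwsem for read in
 * remove_migration_ptes(); args mirror the register dump (MPOL_DEFAULT,
 * NULL nodemask, maxnode 64, flags MPOL_MF_MOVE). */
static void *migrate_folios(void *arg)
{
	for (;;) {
		memset(arg, 0, SIZE);  /* fault the hugetlb folios back in */
		syscall(SYS_mbind, arg, SIZE, MPOL_DEFAULT, NULL, 64ul,
			MPOL_MF_MOVE);
	}
	return NULL;
}

int main(void)
{
	/* hugetlbfs-backed file without mounting anything; needs free
	 * default-size hugepages (e.g. echo 8 > /proc/sys/vm/nr_hugepages) */
	int fd = memfd_create("htlb", MFD_CLOEXEC | MFD_HUGETLB);
	if (fd < 0 || ftruncate(fd, SIZE)) {
		perror("memfd_create/ftruncate");
		return 1;
	}
	void *map = mmap(BASE, SIZE, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_FIXED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	pthread_t a, b;
	pthread_create(&a, NULL, punch_holes, map);
	pthread_create(&b, NULL, migrate_folios, map);
	pthread_join(a, NULL);   /* loops run until (if) the hang reproduces */
	return 0;
}

Build with "gcc -pthread" and run with enough free hugepages reserved; whether this interleaving is actually reachable depends on the migration path keeping the hugetlb folio locked across remove_migration_ptes(), which the stack for pid 7291 suggests but the sketch does not prove.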