INFO: task syz.4.942:10055 blocked for more than 143 seconds.
      Not tainted 6.12.0-rc1-syzkaller-00229-gd521db38f339 #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz.4.942 state:D stack:23808 pid:10055 tgid:10055 ppid:8640 flags:0x00004004
Call Trace:
 context_switch kernel/sched/core.c:5315 [inline]
 __schedule+0x1895/0x4b30 kernel/sched/core.c:6675
 __schedule_loop kernel/sched/core.c:6752 [inline]
 schedule+0x14b/0x320 kernel/sched/core.c:6767
 schedule_preempt_disabled+0x13/0x30 kernel/sched/core.c:6824
 __mutex_lock_common kernel/locking/mutex.c:684 [inline]
 __mutex_lock+0x6a7/0xd70 kernel/locking/mutex.c:752
 rcu_barrier+0x4c/0x530 kernel/rcu/tree.c:4561
 netdev_run_todo+0x3a0/0x1000 net/core/dev.c:10782
 tun_detach drivers/net/tun.c:704 [inline]
 tun_chr_close+0x137/0x1b0 drivers/net/tun.c:3517
 __fput+0x23f/0x880 fs/file_table.c:431
 task_work_run+0x24f/0x310 kernel/task_work.c:228
 resume_user_mode_work include/linux/resume_user_mode.h:50 [inline]
 exit_to_user_mode_loop kernel/entry/common.c:114 [inline]
 exit_to_user_mode_prepare include/linux/entry-common.h:328 [inline]
 __syscall_exit_to_user_mode_work kernel/entry/common.c:207 [inline]
 syscall_exit_to_user_mode+0x168/0x370 kernel/entry/common.c:218
 do_syscall_64+0x100/0x230 arch/x86/entry/common.c:89
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f4f21f7dff9
RSP: 002b:00007fff0b8f2068 EFLAGS: 00000246 ORIG_RAX: 00000000000001b4
RAX: 0000000000000000 RBX: 0000000000060814 RCX: 00007f4f21f7dff9
RDX: 0000000000000000 RSI: 000000000000001e RDI: 0000000000000003
RBP: 00007f4f22137a80 R08: 0000000000000001 R09: 00007fff0b8f235f
R10: 00007f4f21e00000 R11: 0000000000000246 R12: 0000000000060966
R13: 00007fff0b8f2170 R14: 0000000000000032 R15: ffffffffffffffff

Showing all locks held in the system:
6 locks held by kworker/0:0/8:
6 locks held by kworker/u8:1/12:
 #0: ffff88801baed948 ((wq_completion)netns){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3204 [inline]
 #0: ffff88801baed948 ((wq_completion)netns){+.+.}-{0:0}, at: process_scheduled_works+0x93b/0x1850 kernel/workqueue.c:3310
 #1: ffffc90000117d00 (net_cleanup_work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3205 [inline]
 #1: ffffc90000117d00 (net_cleanup_work){+.+.}-{0:0}, at: process_scheduled_works+0x976/0x1850 kernel/workqueue.c:3310
 #2: ffffffff8fcc4c50 (pernet_ops_rwsem){++++}-{3:3}, at: cleanup_net+0x16a/0xcc0 net/core/net_namespace.c:580
 #3: ffff88807f7a70e8 (&dev->mutex){....}-{3:3}, at: device_lock include/linux/device.h:1014 [inline]
 #3: ffff88807f7a70e8 (&dev->mutex){....}-{3:3}, at: devl_dev_lock net/devlink/devl_internal.h:108 [inline]
 #3: ffff88807f7a70e8 (&dev->mutex){....}-{3:3}, at: devlink_pernet_pre_exit+0x13b/0x440 net/devlink/core.c:506
 #4: ffff88807f7a2250 (&devlink->lock_key#20){+.+.}-{3:3}, at: devl_lock net/devlink/core.c:276 [inline]
 #4: ffff88807f7a2250 (&devlink->lock_key#20){+.+.}-{3:3}, at: devl_dev_lock net/devlink/devl_internal.h:109 [inline]
 #4: ffff88807f7a2250 (&devlink->lock_key#20){+.+.}-{3:3}, at: devlink_pernet_pre_exit+0x14d/0x440 net/devlink/core.c:506
 #5: ffffffff8e93d240 (rcu_state.barrier_mutex){+.+.}-{3:3}, at: rcu_barrier+0x4c/0x530 kernel/rcu/tree.c:4561
1 lock held by khungtaskd/30:
 #0: ffffffff8e937de0 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:337 [inline]
 #0: ffffffff8e937de0 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:849 [inline]
 #0: ffffffff8e937de0 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x55/0x2a0 kernel/locking/lockdep.c:6720
3 locks held by kworker/u8:4/61:
 #0: ffff88802da1f148 ((wq_completion)ipv6_addrconf){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3204 [inline]
 #0: ffff88802da1f148 ((wq_completion)ipv6_addrconf){+.+.}-{0:0}, at: process_scheduled_works+0x93b/0x1850 kernel/workqueue.c:3310
 #1: ffffc900015d7d00 ((work_completion)(&(&net->ipv6.addr_chk_work)->work)){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3205 [inline]
 #1: ffffc900015d7d00 ((work_completion)(&(&net->ipv6.addr_chk_work)->work)){+.+.}-{0:0}, at: process_scheduled_works+0x976/0x1850 kernel/workqueue.c:3310
 #2: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: addrconf_verify_work+0x19/0x30 net/ipv6/addrconf.c:4736
3 locks held by kworker/1:2/938:
 #0: ffff88801ac81948 ((wq_completion)events_power_efficient){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3204 [inline]
 #0: ffff88801ac81948 ((wq_completion)events_power_efficient){+.+.}-{0:0}, at: process_scheduled_works+0x93b/0x1850 kernel/workqueue.c:3310
 #1: ffffc90003c17d00 ((reg_check_chans).work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3205 [inline]
 #1: ffffc90003c17d00 ((reg_check_chans).work){+.+.}-{0:0}, at: process_scheduled_works+0x976/0x1850 kernel/workqueue.c:3310
 #2: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: reg_check_chans_work+0x99/0xfd0 net/wireless/reg.c:2480
3 locks held by kworker/0:3/1181:
2 locks held by getty/5007:
 #0: ffff88802e3e40a0 (&tty->ldisc_sem){++++}-{0:0}, at: tty_ldisc_ref_wait+0x25/0x70 drivers/tty/tty_ldisc.c:243
 #1: ffffc90002f062f0 (&ldata->atomic_read_lock){+.+.}-{3:3}, at: n_tty_read+0x6a6/0x1e00 drivers/tty/n_tty.c:2211
3 locks held by kworker/u8:12/7047:
 #0: ffff88801ac89148 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3204 [inline]
 #0: ffff88801ac89148 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_scheduled_works+0x93b/0x1850 kernel/workqueue.c:3310
 #1: ffffc9000b4afd00 ((linkwatch_work).work){+.+.}-{0:0}, at: process_one_work kernel/workqueue.c:3205 [inline]
 #1: ffffc9000b4afd00 ((linkwatch_work).work){+.+.}-{0:0}, at: process_scheduled_works+0x976/0x1850 kernel/workqueue.c:3310
 #2: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: linkwatch_event+0xe/0x60 net/core/link_watch.c:276
1 lock held by syz.4.942/10055:
 #0: ffffffff8e93d240 (rcu_state.barrier_mutex){+.+.}-{3:3}, at: rcu_barrier+0x4c/0x530 kernel/rcu/tree.c:4561
1 lock held by syz.0.949/10090:
 #0: ffffffff8e93d240 (rcu_state.barrier_mutex){+.+.}-{3:3}, at: rcu_barrier+0x4c/0x530 kernel/rcu/tree.c:4561
2 locks held by syz.2.950/10091:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: ppp_release+0x87/0x1f0 drivers/net/ppp/ppp_generic.c:408
 #1: ffffffff8e7d1dd0 (cpu_hotplug_lock){++++}-{0:0}, at: flush_all_backlogs net/core/dev.c:6025 [inline]
 #1: ffffffff8e7d1dd0 (cpu_hotplug_lock){++++}-{0:0}, at: unregister_netdevice_many_notify+0x5ea/0x1da0 net/core/dev.c:11384
1 lock held by syz.1.951/10093:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10096:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10103:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10105:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10107:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10111:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10117:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10124:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10126:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10129:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10138:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10143:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10150:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10152:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647
1 lock held by syz-executor/10153:
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:79 [inline]
 #0: ffffffff8fcd1748 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x6e6/0xcf0 net/core/rtnetlink.c:6647

=============================================

NMI backtrace for cpu 1
CPU: 1 UID: 0 PID: 30 Comm: khungtaskd Not tainted 6.12.0-rc1-syzkaller-00229-gd521db38f339 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
Call Trace:
 __dump_stack lib/dump_stack.c:94 [inline]
 dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
 nmi_cpu_backtrace+0x49c/0x4d0 lib/nmi_backtrace.c:113
 nmi_trigger_cpumask_backtrace+0x198/0x320 lib/nmi_backtrace.c:62
 trigger_all_cpu_backtrace include/linux/nmi.h:162 [inline]
 check_hung_uninterruptible_tasks kernel/hung_task.c:223 [inline]
 watchdog+0xff4/0x1040 kernel/hung_task.c:379
 kthread+0x2f0/0x390 kernel/kthread.c:389
 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
Sending NMI from CPU 1 to CPUs 0:
NMI backtrace for cpu 0
CPU: 0 UID: 0 PID: 8 Comm: kworker/0:0 Not tainted 6.12.0-rc1-syzkaller-00229-gd521db38f339 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
Workqueue: wg-crypt-wg0 wg_packet_tx_worker
RIP: 0010:mark_held_locks kernel/locking/lockdep.c:4309 [inline]
RIP: 0010:__trace_hardirqs_on_caller kernel/locking/lockdep.c:4339 [inline]
RIP: 0010:lockdep_hardirqs_on_prepare+0x297/0x780 kernel/locking/lockdep.c:4406
Code: 03 00 0f 94 c2 83 f2 03 48 8b 7c 24 10 48 89 de e8 5e b1 00 00 48 ba 00 00 00 00 00 fc ff df 85 c0 0f 84 9d 01 00 00 49 ff c5 <41> 0f b6 04 17 84 c0 75 33 49 63 06 48 83 c3 28 49 83 c4 28 49 39
RSP: 0018:ffffc900000067c0 EFLAGS: 00000002
RAX: 0000000000000001 RBX: ffff88801bef64e0 RCX: ffffffff8170bc6a
RDX: dffffc0000000000 RSI: 0000000000000008 RDI: ffffffff942c58e8
RBP: ffffc90000006868 R08: ffffffff942c58ef R09: 1ffffffff2858b1d
R10: dffffc0000000000 R11: fffffbfff2858b1e R12: ffff88801bef6500
R13: 0000000000000001 R14: ffff88801bef64d8 R15: 1ffff110037dec9b
FS:  0000000000000000(0000) GS:ffff8880b8600000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f7bbf226920 CR3: 000000000e734000 CR4: 00000000003526f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 trace_hardirqs_on+0x28/0x40 kernel/trace/trace_preemptirq.c:61
 __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:151 [inline]
 _raw_spin_unlock_irqrestore+0x8f/0x140 kernel/locking/spinlock.c:194
 spin_unlock_irqrestore include/linux/spinlock.h:406 [inline]
 backlog_unlock_irq_restore net/core/dev.c:250 [inline]
 enqueue_to_backlog+0x8b2/0xc80 net/core/dev.c:4887
 netif_rx_internal+0x17a/0x630 net/core/dev.c:5195
 __netif_rx+0x78/0xc0 net/core/dev.c:5215
 loopback_xmit+0x454/0x6b0 drivers/net/loopback.c:89
 __netdev_start_xmit include/linux/netdevice.h:4920 [inline]
 netdev_start_xmit include/linux/netdevice.h:4929 [inline]
 xmit_one net/core/dev.c:3588 [inline]
 dev_hard_start_xmit+0x27a/0x7e0 net/core/dev.c:3604
 __dev_queue_xmit+0x1b11/0x3ed0 net/core/dev.c:4428
 dev_queue_xmit include/linux/netdevice.h:3098 [inline]
 neigh_hh_output include/net/neighbour.h:526 [inline]
 neigh_output include/net/neighbour.h:540 [inline]
 ip_finish_output2+0xd41/0x1390 net/ipv4/ip_output.c:236
 synproxy_send_client_synack+0x8b8/0xf30 net/netfilter/nf_synproxy_core.c:484
 nft_synproxy_eval_v4+0x3ca/0x610 net/netfilter/nft_synproxy.c:59
 nft_synproxy_do_eval+0x362/0xa60 net/netfilter/nft_synproxy.c:141
 expr_call_ops_eval net/netfilter/nf_tables_core.c:240 [inline]
 nft_do_chain+0x4ad/0x1da0 net/netfilter/nf_tables_core.c:288
 nft_do_chain_inet+0x418/0x6b0 net/netfilter/nft_chain_filter.c:161
 nf_hook_entry_hookfn include/linux/netfilter.h:154 [inline]
 nf_hook_slow+0xc3/0x220 net/netfilter/core.c:626
 nf_hook include/linux/netfilter.h:269 [inline]
 NF_HOOK+0x29e/0x450 include/linux/netfilter.h:312
 NF_HOOK+0x3a4/0x450 include/linux/netfilter.h:314
 __netif_receive_skb_one_core net/core/dev.c:5666 [inline]
 __netif_receive_skb+0x2bf/0x650 net/core/dev.c:5779
 process_backlog+0x662/0x15b0 net/core/dev.c:6111
 __napi_poll+0xcb/0x490 net/core/dev.c:6775
 napi_poll net/core/dev.c:6844 [inline]
 net_rx_action+0x89b/0x1240 net/core/dev.c:6966
 handle_softirqs+0x2c5/0x980 kernel/softirq.c:554
 do_softirq+0x11b/0x1e0 kernel/softirq.c:455
 __local_bh_enable_ip+0x1bb/0x200 kernel/softirq.c:382
 wg_packet_create_data_done drivers/net/wireguard/send.c:247 [inline]
 wg_packet_tx_worker+0x160/0x810 drivers/net/wireguard/send.c:276
 process_one_work kernel/workqueue.c:3229 [inline]
 process_scheduled_works+0xa63/0x1850 kernel/workqueue.c:3310
 worker_thread+0x870/0xd30 kernel/workqueue.c:3391
 kthread+0x2f0/0x390 kernel/kthread.c:389
 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244