INFO: task syz.1.24:4225 blocked for more than 143 seconds.
      Not tainted 5.15.165-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz.1.24        state:D stack:    0 pid: 4225 ppid:  4031 flags:0x04000001
Call trace:
 __switch_to+0x308/0x5e8 arch/arm64/kernel/process.c:518
 context_switch kernel/sched/core.c:5027 [inline]
 __schedule+0xf10/0x1e48 kernel/sched/core.c:6373
 schedule+0x11c/0x1c8 kernel/sched/core.c:6456
 schedule_preempt_disabled+0x18/0x2c kernel/sched/core.c:6515
 rwsem_down_read_slowpath+0x5b0/0x988 kernel/locking/rwsem.c:1055
 __down_read_common kernel/locking/rwsem.c:1239 [inline]
 __down_read kernel/locking/rwsem.c:1252 [inline]
 down_read+0x10c/0x398 kernel/locking/rwsem.c:1500
 inode_lock_shared include/linux/fs.h:799 [inline]
 lookup_slow+0x50/0x84 fs/namei.c:1679
 walk_component+0x394/0x4cc fs/namei.c:1976
 lookup_last fs/namei.c:2431 [inline]
 path_lookupat+0x13c/0x3d0 fs/namei.c:2455
 filename_lookup+0x1c4/0x4c8 fs/namei.c:2484
 user_path_at_empty+0x5c/0x1a4 fs/namei.c:2883
 user_path_at include/linux/namei.h:57 [inline]
 do_mount fs/namespace.c:3345 [inline]
 __do_sys_mount fs/namespace.c:3556 [inline]
 __se_sys_mount fs/namespace.c:3533 [inline]
 __arm64_sys_mount+0x4dc/0x5e0 fs/namespace.c:3533
 __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
 invoke_syscall+0x98/0x2b8 arch/arm64/kernel/syscall.c:52
 el0_svc_common+0x138/0x258 arch/arm64/kernel/syscall.c:142
 do_el0_svc+0x58/0x14c arch/arm64/kernel/syscall.c:181
 el0_svc+0x7c/0x1f0 arch/arm64/kernel/entry-common.c:608
 el0t_64_sync_handler+0x84/0xe4 arch/arm64/kernel/entry-common.c:626
 el0t_64_sync+0x1a0/0x1a4 arch/arm64/kernel/entry.S:584
INFO: task syz.1.24:4228 blocked for more than 143 seconds.
      Not tainted 5.15.165-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz.1.24        state:D stack:    0 pid: 4228 ppid:  4031 flags:0x00000001
Call trace:
 __switch_to+0x308/0x5e8 arch/arm64/kernel/process.c:518
 context_switch kernel/sched/core.c:5027 [inline]
 __schedule+0xf10/0x1e48 kernel/sched/core.c:6373
 schedule+0x11c/0x1c8 kernel/sched/core.c:6456
 schedule_preempt_disabled+0x18/0x2c kernel/sched/core.c:6515
 rwsem_down_read_slowpath+0x5b0/0x988 kernel/locking/rwsem.c:1055
 __down_read_common kernel/locking/rwsem.c:1239 [inline]
 __down_read kernel/locking/rwsem.c:1252 [inline]
 down_read+0x10c/0x398 kernel/locking/rwsem.c:1500
 inode_lock_shared include/linux/fs.h:799 [inline]
 lookup_slow+0x50/0x84 fs/namei.c:1679
 walk_component+0x394/0x4cc fs/namei.c:1976
 lookup_last fs/namei.c:2431 [inline]
 path_lookupat+0x13c/0x3d0 fs/namei.c:2455
 filename_lookup+0x1c4/0x4c8 fs/namei.c:2484
 user_path_at_empty+0x5c/0x1a4 fs/namei.c:2883
 user_path_at include/linux/namei.h:57 [inline]
 __do_sys_chdir fs/open.c:490 [inline]
 __se_sys_chdir fs/open.c:484 [inline]
 __arm64_sys_chdir+0xc0/0x2d0 fs/open.c:484
 __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
 invoke_syscall+0x98/0x2b8 arch/arm64/kernel/syscall.c:52
 el0_svc_common+0x138/0x258 arch/arm64/kernel/syscall.c:142
 do_el0_svc+0x58/0x14c arch/arm64/kernel/syscall.c:181
 el0_svc+0x7c/0x1f0 arch/arm64/kernel/entry-common.c:608
 el0t_64_sync_handler+0x84/0xe4 arch/arm64/kernel/entry-common.c:626
 el0t_64_sync+0x1a0/0x1a4 arch/arm64/kernel/entry.S:584
INFO: task syz.1.24:4229 blocked for more than 143 seconds.
      Not tainted 5.15.165-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz.1.24        state:D stack:    0 pid: 4229 ppid:  4031 flags:0x00000001
Call trace:
 __switch_to+0x308/0x5e8 arch/arm64/kernel/process.c:518
 context_switch kernel/sched/core.c:5027 [inline]
 __schedule+0xf10/0x1e48 kernel/sched/core.c:6373
 schedule+0x11c/0x1c8 kernel/sched/core.c:6456
 rwsem_down_write_slowpath+0xd94/0x17e0 kernel/locking/rwsem.c:1165
 __down_write_common kernel/locking/rwsem.c:1292 [inline]
 __down_write kernel/locking/rwsem.c:1301 [inline]
 down_write+0xe8/0x12c kernel/locking/rwsem.c:1552
 inode_lock include/linux/fs.h:789 [inline]
 open_last_lookups fs/namei.c:3529 [inline]
 path_openat+0x640/0x26cc fs/namei.c:3739
 do_filp_open+0x1a8/0x3b4 fs/namei.c:3769
 do_sys_openat2+0x128/0x3e0 fs/open.c:1253
 do_sys_open fs/open.c:1269 [inline]
 __do_sys_openat fs/open.c:1285 [inline]
 __se_sys_openat fs/open.c:1280 [inline]
 __arm64_sys_openat+0x1f0/0x240 fs/open.c:1280
 __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
 invoke_syscall+0x98/0x2b8 arch/arm64/kernel/syscall.c:52
 el0_svc_common+0x138/0x258 arch/arm64/kernel/syscall.c:142
 do_el0_svc+0x58/0x14c arch/arm64/kernel/syscall.c:181
 el0_svc+0x7c/0x1f0 arch/arm64/kernel/entry-common.c:608
 el0t_64_sync_handler+0x84/0xe4 arch/arm64/kernel/entry-common.c:626
 el0t_64_sync+0x1a0/0x1a4 arch/arm64/kernel/entry.S:584

Showing all locks held in the system:
1 lock held by khungtaskd/27:
 #0: ffff800014c917a0 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire+0xc/0x44 include/linux/rcupdate.h:311
3 locks held by kworker/1:2/3603:
 #0: ffff0000d16ca138 ((wq_completion)ipv6_addrconf){+.+.}-{0:0}, at: process_one_work+0x66c/0x11b8 kernel/workqueue.c:2283
 #1: ffff80001f337c00 ((work_completion)(&(&ifa->dad_work)->work)){+.+.}-{0:0}, at: process_one_work+0x6ac/0x11b8 kernel/workqueue.c:2285
 #2: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
2 locks held by getty/3780:
 #0: ffff0000d3591098 (&tty->ldisc_sem){++++}-{0:0}, at: ldsem_down_read+0x40/0x50 drivers/tty/tty_ldsem.c:340
 #1: ffff80001a4ce2e8 (&ldata->atomic_read_lock){+.+.}-{3:3}, at: n_tty_read+0x414/0x1204 drivers/tty/n_tty.c:2158
1 lock held by syz-executor/4039:
 #0: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
3 locks held by kworker/1:8/4079:
 #0: ffff0000c0020938 ((wq_completion)events){+.+.}-{0:0}, at: process_one_work+0x66c/0x11b8 kernel/workqueue.c:2283
 #1: ffff80001d177c00 (deferred_process_work){+.+.}-{0:0}, at: process_one_work+0x6ac/0x11b8 kernel/workqueue.c:2285
 #2: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
3 locks held by kworker/u4:7/4136:
 #0: ffff0000c0029138 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work+0x66c/0x11b8 kernel/workqueue.c:2283
 #1: ffff80001d237c00 ((linkwatch_work).work){+.+.}-{0:0}, at: process_one_work+0x6ac/0x11b8 kernel/workqueue.c:2285
 #2: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
4 locks held by kworker/u4:8/4192:
 #0: ffff0000c03f4138 ((wq_completion)netns){+.+.}-{0:0}, at: process_one_work+0x66c/0x11b8 kernel/workqueue.c:2283
 #1: ffff80001de97c00 (net_cleanup_work){+.+.}-{0:0}, at: process_one_work+0x6ac/0x11b8 kernel/workqueue.c:2285
 #2: ffff800016be8f90 (pernet_ops_rwsem){++++}-{3:3}, at: cleanup_net+0xf4/0x9bc net/core/net_namespace.c:561
 #3: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
2 locks held by syz.1.24/4194:
1 lock held by syz.1.24/4225:
 #0: ffff0000e6244188 (&type->i_mutex_dir_key#13){++++}-{3:3}, at: inode_lock_shared include/linux/fs.h:799 [inline]
 #0: ffff0000e6244188 (&type->i_mutex_dir_key#13){++++}-{3:3}, at: lookup_slow+0x50/0x84 fs/namei.c:1679
1 lock held by syz.1.24/4228:
 #0: ffff0000e6244188 (&type->i_mutex_dir_key#13){++++}-{3:3}, at: inode_lock_shared include/linux/fs.h:799 [inline]
 #0: ffff0000e6244188 (&type->i_mutex_dir_key#13){++++}-{3:3}, at: lookup_slow+0x50/0x84 fs/namei.c:1679
2 locks held by syz.1.24/4229:
 #0: ffff0000ceda0460 (sb_writers#16){.+.+}-{0:0}, at: mnt_want_write+0x44/0x9c fs/namespace.c:377
 #1: ffff0000e6244188 (&type->i_mutex_dir_key#13){++++}-{3:3}, at: inode_lock include/linux/fs.h:789 [inline]
 #1: ffff0000e6244188 (&type->i_mutex_dir_key#13){++++}-{3:3}, at: open_last_lookups fs/namei.c:3529 [inline]
 #1: ffff0000e6244188 (&type->i_mutex_dir_key#13){++++}-{3:3}, at: path_openat+0x640/0x26cc fs/namei.c:3739
3 locks held by kworker/0:6/4287:
 #0: ffff0000d16ca138 ((wq_completion)ipv6_addrconf){+.+.}-{0:0}, at: process_one_work+0x66c/0x11b8 kernel/workqueue.c:2283
 #1: ffff80001cd87c00 ((work_completion)(&(&ifa->dad_work)->work)){+.+.}-{0:0}, at: process_one_work+0x6ac/0x11b8 kernel/workqueue.c:2285
 #2: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
1 lock held by syz-executor/6341:
 #0: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:72 [inline]
 #0: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0xa2c/0xdac net/core/rtnetlink.c:5615
1 lock held by syz.3.593/6525:
 #0: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:72 [inline]
 #0: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0xa2c/0xdac net/core/rtnetlink.c:5615
1 lock held by syz.0.599/6534:
 #0: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
1 lock held by syz.0.599/6536:
 #0: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
4 locks held by syz.2.601/6543:
 #0: ffff0000c84e2460 (sb_writers#9){.+.+}-{0:0}, at: mnt_want_write+0x44/0x9c fs/namespace.c:377
 #1: ffff0000dc2e5aa0 (&type->i_mutex_dir_key#7/1){+.+.}-{3:3}, at: inode_lock_nested include/linux/fs.h:824 [inline]
 #1: ffff0000dc2e5aa0 (&type->i_mutex_dir_key#7/1){+.+.}-{3:3}, at: filename_create+0x204/0x468 fs/namei.c:3835
 #2: ffff800014cb6aa8 (cgroup_mutex){+.+.}-{3:3}, at: cgroup_kn_lock_live+0xf8/0x258 kernel/cgroup/cgroup.c:1639
 #3: ffff800016bf4768 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:72
1 lock held by syz.2.601/6544:
 #0: ffff0000dc2e5aa0 (&type->i_mutex_dir_key#7){++++}-{3:3}, at: inode_lock_shared include/linux/fs.h:799 [inline]
 #0: ffff0000dc2e5aa0 (&type->i_mutex_dir_key#7){++++}-{3:3}, at: lookup_slow+0x50/0x84 fs/namei.c:1679
2 locks held by syz.2.601/6545:
 #0: ffff0000eae88460 (sb_writers#26){.+.+}-{0:0}, at: vfs_writev fs/read_write.c:927 [inline]
 #0: ffff0000eae88460 (sb_writers#26){.+.+}-{0:0}, at: do_pwritev+0x1d8/0x334 fs/read_write.c:1025
 #1: ffff0000ddbf3e80 (&sb->s_type->i_mutex_key#30){+.+.}-{3:3}, at: inode_lock include/linux/fs.h:789 [inline]
 #1: ffff0000ddbf3e80 (&sb->s_type->i_mutex_key#30){+.+.}-{3:3}, at: reiserfs_sync_file+0xa0/0x254 fs/reiserfs/file.c:155

=============================================