INFO: task syz.0.924:7403 blocked for more than 143 seconds.
      Not tainted syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz.0.924       state:D stack:0     pid:7403  ppid:4321   flags:0x00000009
Call trace:
 __switch_to+0x2f4/0x550 arch/arm64/kernel/process.c:555
 context_switch kernel/sched/core.c:5245 [inline]
 __schedule+0xdd0/0x1b0c kernel/sched/core.c:6562
 schedule+0xc4/0x170 kernel/sched/core.c:6638
 rwsem_down_write_slowpath+0xb60/0x1280 kernel/locking/rwsem.c:1189
 __down_write_common kernel/locking/rwsem.c:1314 [inline]
 __down_write kernel/locking/rwsem.c:1323 [inline]
 down_write+0x84/0x88 kernel/locking/rwsem.c:1574
 filemap_invalidate_lock include/linux/fs.h:803 [inline]
 set_blocksize+0x1c8/0x3f8 block/bdev.c:161
 sb_set_blocksize block/bdev.c:178 [inline]
 sb_min_blocksize+0xb8/0x184 block/bdev.c:194
 fat_fill_super+0x1570/0x3d30 fs/fat/inode.c:1650
 msdos_fill_super+0x40/0x54 fs/fat/namei_msdos.c:655
 mount_bdev+0x264/0x358 fs/super.c:1443
 msdos_mount+0x44/0x58 fs/fat/namei_msdos.c:662
 legacy_get_tree+0xd4/0x16c fs/fs_context.c:632
 vfs_get_tree+0x90/0x274 fs/super.c:1573
 do_new_mount+0x228/0x810 fs/namespace.c:3078
 path_mount+0x5bc/0xe80 fs/namespace.c:3408
 do_mount fs/namespace.c:3421 [inline]
 __do_sys_mount fs/namespace.c:3629 [inline]
 __se_sys_mount fs/namespace.c:3606 [inline]
 __arm64_sys_mount+0x49c/0x59c fs/namespace.c:3606
 __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline]
 invoke_syscall+0x98/0x2b4 arch/arm64/kernel/syscall.c:52
 el0_svc_common+0x138/0x258 arch/arm64/kernel/syscall.c:140
 do_el0_svc+0x58/0x130 arch/arm64/kernel/syscall.c:204
 el0_svc+0x58/0x128 arch/arm64/kernel/entry-common.c:637
 el0t_64_sync_handler+0x84/0xf0 arch/arm64/kernel/entry-common.c:655
 el0t_64_sync+0x18c/0x190 arch/arm64/kernel/entry.S:585

Showing all locks held in the system:
1 lock held by rcu_tasks_kthre/12:
 #0: ffff8000153e7c30 (rcu_tasks.tasks_gp_mutex){+.+.}-{3:3}, at: rcu_tasks_one_gp+0x40/0xbb4 kernel/rcu/tasks.h:517
1 lock held by rcu_tasks_trace/13:
 #0: ffff8000153e8450 (rcu_tasks_trace.tasks_gp_mutex){+.+.}-{3:3}, at: rcu_tasks_one_gp+0x40/0xbb4 kernel/rcu/tasks.h:517
1 lock held by khungtaskd/28:
 #0: ffff8000153e72c0 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire+0xc/0x44 include/linux/rcupdate.h:349
1 lock held by udevd/3936:
2 locks held by getty/4078:
 #0: ffff0000d8d68098 (&tty->ldisc_sem){++++}-{0:0}, at: ldsem_down_read+0x3c/0x4c drivers/tty/tty_ldsem.c:340
 #1: ffff80001cfe62f0 (&ldata->atomic_read_lock){+.+.}-{3:3}, at: n_tty_read+0x2ec/0xfa0 drivers/tty/n_tty.c:2198
3 locks held by kworker/u4:6/4419:
 #0: ffff0000c0029138 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work+0x6b8/0x13a4 kernel/workqueue.c:2265
 #1: ffff800020f47c20 ((linkwatch_work).work){+.+.}-{0:0}, at: process_one_work+0x6fc/0x13a4 kernel/workqueue.c:2267
 #2: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:74
1 lock held by syz.3.742/6774:
3 locks held by syz.0.924/7403:
 #0: ffff0001112380e0 (&type->s_umount_key#61/1){+.+.}-{3:3}, at: alloc_super+0x1a4/0x800 fs/super.c:228
 #1: ffff0000c04a5f38 (&sb->s_type->i_mutex_key#9){++++}-{3:3}, at: inode_lock include/linux/fs.h:758 [inline]
 #1: ffff0000c04a5f38 (&sb->s_type->i_mutex_key#9){++++}-{3:3}, at: set_blocksize+0x190/0x3f8 block/bdev.c:160
 #2: ffff0000c04a60d8 (mapping.invalidate_lock){++++}-{3:3}, at: filemap_invalidate_lock include/linux/fs.h:803 [inline]
 #2: ffff0000c04a60d8 (mapping.invalidate_lock){++++}-{3:3}, at: set_blocksize+0x1c8/0x3f8 block/bdev.c:161
1 lock held by udevd/10037:
 #0: ffff00019f578198 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested kernel/sched/core.c:538 [inline]
 #0: ffff00019f578198 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock kernel/sched/sched.h:1362 [inline]
 #0: ffff00019f578198 (&rq->__lock){-.-.}-{2:2}, at: rq_lock kernel/sched/sched.h:1652 [inline]
 #0: ffff00019f578198 (&rq->__lock){-.-.}-{2:2}, at: __schedule+0x2b8/0x1b0c kernel/sched/core.c:6478
1 lock held by udevd/10039:
 #0: ffff00019f557198 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested kernel/sched/core.c:538 [inline]
 #0: ffff00019f557198 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock kernel/sched/sched.h:1362 [inline]
 #0: ffff00019f557198 (&rq->__lock){-.-.}-{2:2}, at: rq_lock kernel/sched/sched.h:1652 [inline]
 #0: ffff00019f557198 (&rq->__lock){-.-.}-{2:2}, at: __schedule+0x2b8/0x1b0c kernel/sched/core.c:6478
1 lock held by syz.2.1800/10042:
 #0: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:74
2 locks held by syz.1.1805/10059:
 #0: ffff0000d20aa0f0 (&smc->clcsock_release_lock){+.+.}-{3:3}, at: smc_setsockopt+0x158/0xbfc net/smc/af_smc.c:2977
 #1: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:74
1 lock held by syz.1.1805/10062:
 #0: ffff0000d20aa0f0 (&smc->clcsock_release_lock){+.+.}-{3:3}, at: smc_getsockopt+0x15c/0x460 net/smc/af_smc.c:3057
1 lock held by syz.6.1808/10067:
 #0: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:74 [inline]
 #0: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x650/0xcdc net/core/rtnetlink.c:6147
1 lock held by syz.6.1808/10071:
 #0: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:74 [inline]
 #0: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x650/0xcdc net/core/rtnetlink.c:6147
1 lock held by syz.5.1809/10069:
 #0: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:74
1 lock held by syz.5.1809/10074:
 #0: ffff80001787f1c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock+0x20/0x2c net/core/rtnetlink.c:74
4 locks held by syz.4.1811/10080:
 #0: ffff0000d4192098 (&tty->ldisc_sem){++++}-{0:0}, at: ldsem_down_write+0x3c/0x4c drivers/tty/tty_ldsem.c:366
 #1: ffff0000d46a7098 (&tty->ldisc_sem/1){+.+.}-{0:0}, at: ldsem_down_write_nested+0x44/0x58 drivers/tty/tty_ldsem.c:427
 #2: ffff0000f84920b0 (&gsm->mutex){+.+.}-{3:3}, at: gsm_cleanup_mux+0xac/0x7e4 drivers/tty/n_gsm.c:2542
 #3: ffff0000c0023148 (&root->kernfs_rwsem){++++}-{3:3}, at: kernfs_remove_by_name_ns+0x78/0x180 fs/kernfs/dir.c:1662

=============================================