======================================================
WARNING: possible circular locking dependency detected
5.15.118-syzkaller #0 Not tainted
------------------------------------------------------
syz-executor.5/3525 is trying to acquire lock:
ffff88807c452138 ((wq_completion)loop5){+.+.}-{0:0}, at: flush_workqueue+0x154/0x1610 kernel/workqueue.c:2827

but task is already holding lock:
ffff888146f69468 (&lo->lo_mutex){+.+.}-{3:3}, at: __loop_clr_fd+0xa9/0xbe0 drivers/block/loop.c:1365

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #8 (&lo->lo_mutex){+.+.}-{3:3}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       __mutex_lock_common+0x1da/0x25a0 kernel/locking/mutex.c:596
       __mutex_lock kernel/locking/mutex.c:729 [inline]
       mutex_lock_killable_nested+0x17/0x20 kernel/locking/mutex.c:758
       lo_open+0x68/0x100 drivers/block/loop.c:2055
       blkdev_get_whole+0x94/0x390 block/bdev.c:669
       blkdev_get_by_dev+0x2b2/0xa50 block/bdev.c:824
       blkdev_open+0x138/0x2d0 block/fops.c:463
       do_dentry_open+0x807/0xfb0 fs/open.c:826
       do_open fs/namei.c:3538 [inline]
       path_openat+0x2702/0x2f20 fs/namei.c:3672
       do_filp_open+0x21c/0x460 fs/namei.c:3699
       do_sys_openat2+0x13b/0x500 fs/open.c:1211
       do_sys_open fs/open.c:1227 [inline]
       __do_sys_openat fs/open.c:1243 [inline]
       __se_sys_openat fs/open.c:1238 [inline]
       __x64_sys_openat+0x243/0x290 fs/open.c:1238
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x61/0xcb

-> #7 (&disk->open_mutex){+.+.}-{3:3}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       __mutex_lock_common+0x1da/0x25a0 kernel/locking/mutex.c:596
       __mutex_lock kernel/locking/mutex.c:729 [inline]
       mutex_lock_nested+0x17/0x20 kernel/locking/mutex.c:743
       bd_register_pending_holders+0x33/0x320 block/holder.c:161
       device_add_disk+0x526/0xc50 block/genhd.c:484
       add_disk include/linux/genhd.h:212 [inline]
       md_alloc+0x8ad/0xd40 drivers/md/md.c:5723
       blk_request_module+0x181/0x1a0 block/genhd.c:684
       blkdev_get_no_open+0x39/0x190 block/bdev.c:740
       blkdev_get_by_dev+0x89/0xa50 block/bdev.c:804
       swsusp_check+0xb1/0x2c0 kernel/power/swap.c:1526
       software_resume+0xc6/0x3c0 kernel/power/hibernate.c:977
       resume_store+0xe3/0x130 kernel/power/hibernate.c:1179
       kernfs_fop_write_iter+0x3a2/0x4f0 fs/kernfs/file.c:296
       call_write_iter include/linux/fs.h:2103 [inline]
       new_sync_write fs/read_write.c:507 [inline]
       vfs_write+0xacf/0xe50 fs/read_write.c:594
       ksys_write+0x1a2/0x2c0 fs/read_write.c:647
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x61/0xcb

-> #6 (disks_mutex){+.+.}-{3:3}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       __mutex_lock_common+0x1da/0x25a0 kernel/locking/mutex.c:596
       __mutex_lock kernel/locking/mutex.c:729 [inline]
       mutex_lock_nested+0x17/0x20 kernel/locking/mutex.c:743
       md_alloc+0x3e/0xd40 drivers/md/md.c:5664
       blk_request_module+0x181/0x1a0 block/genhd.c:684
       blkdev_get_no_open+0x39/0x190 block/bdev.c:740
       blkdev_get_by_dev+0x89/0xa50 block/bdev.c:804
       swsusp_check+0xb1/0x2c0 kernel/power/swap.c:1526
       software_resume+0xc6/0x3c0 kernel/power/hibernate.c:977
       resume_store+0xe3/0x130 kernel/power/hibernate.c:1179
       kernfs_fop_write_iter+0x3a2/0x4f0 fs/kernfs/file.c:296
       call_write_iter include/linux/fs.h:2103 [inline]
       new_sync_write fs/read_write.c:507 [inline]
       vfs_write+0xacf/0xe50 fs/read_write.c:594
       ksys_write+0x1a2/0x2c0 fs/read_write.c:647
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x61/0xcb

-> #5 (major_names_lock){+.+.}-{3:3}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       __mutex_lock_common+0x1da/0x25a0 kernel/locking/mutex.c:596
       __mutex_lock kernel/locking/mutex.c:729 [inline]
       mutex_lock_nested+0x17/0x20 kernel/locking/mutex.c:743
       blk_request_module+0x2f/0x1a0 block/genhd.c:681
       blkdev_get_no_open+0x39/0x190 block/bdev.c:740
       blkdev_get_by_dev+0x89/0xa50 block/bdev.c:804
       swsusp_check+0xb1/0x2c0 kernel/power/swap.c:1526
       software_resume+0xc6/0x3c0 kernel/power/hibernate.c:977
       resume_store+0xe3/0x130 kernel/power/hibernate.c:1179
       kernfs_fop_write_iter+0x3a2/0x4f0 fs/kernfs/file.c:296
       call_write_iter include/linux/fs.h:2103 [inline]
       new_sync_write fs/read_write.c:507 [inline]
       vfs_write+0xacf/0xe50 fs/read_write.c:594
       ksys_write+0x1a2/0x2c0 fs/read_write.c:647
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x61/0xcb

-> #4 (system_transition_mutex/1){+.+.}-{3:3}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       __mutex_lock_common+0x1da/0x25a0 kernel/locking/mutex.c:596
       __mutex_lock kernel/locking/mutex.c:729 [inline]
       mutex_lock_nested+0x17/0x20 kernel/locking/mutex.c:743
       software_resume+0x7c/0x3c0 kernel/power/hibernate.c:932
       resume_store+0xe3/0x130 kernel/power/hibernate.c:1179
       kernfs_fop_write_iter+0x3a2/0x4f0 fs/kernfs/file.c:296
       call_write_iter include/linux/fs.h:2103 [inline]
       new_sync_write fs/read_write.c:507 [inline]
       vfs_write+0xacf/0xe50 fs/read_write.c:594
       ksys_write+0x1a2/0x2c0 fs/read_write.c:647
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x61/0xcb

-> #3 (&of->mutex){+.+.}-{3:3}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       __mutex_lock_common+0x1da/0x25a0 kernel/locking/mutex.c:596
       __mutex_lock kernel/locking/mutex.c:729 [inline]
       mutex_lock_nested+0x17/0x20 kernel/locking/mutex.c:743
       kernfs_seq_start+0x50/0x3b0 fs/kernfs/file.c:112
       seq_read_iter+0x3d0/0xd10 fs/seq_file.c:225
       call_read_iter include/linux/fs.h:2097 [inline]
       new_sync_read fs/read_write.c:404 [inline]
       vfs_read+0xa9f/0xe10 fs/read_write.c:485
       ksys_read+0x1a2/0x2c0 fs/read_write.c:623
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x61/0xcb

-> #2 (&p->lock){+.+.}-{3:3}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       __mutex_lock_common+0x1da/0x25a0 kernel/locking/mutex.c:596
       __mutex_lock kernel/locking/mutex.c:729 [inline]
       mutex_lock_nested+0x17/0x20 kernel/locking/mutex.c:743
       seq_read_iter+0xae/0xd10 fs/seq_file.c:182
       do_iter_readv_writev+0x594/0x7a0
       do_iter_read+0x1ec/0x760 fs/read_write.c:790
       lo_read_simple drivers/block/loop.c:392 [inline]
       do_req_filebacked drivers/block/loop.c:663 [inline]
       loop_handle_cmd drivers/block/loop.c:2234 [inline]
       loop_process_work+0x1d18/0x2af0 drivers/block/loop.c:2274
       process_one_work+0x8a1/0x10c0 kernel/workqueue.c:2307
       worker_thread+0xaca/0x1280 kernel/workqueue.c:2454
       kthread+0x3f6/0x4f0 kernel/kthread.c:319
       ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298

-> #1 ((work_completion)(&worker->work)){+.+.}-{0:0}:
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       process_one_work+0x7f1/0x10c0 kernel/workqueue.c:2283
       worker_thread+0xaca/0x1280 kernel/workqueue.c:2454
       kthread+0x3f6/0x4f0 kernel/kthread.c:319
       ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298

-> #0 ((wq_completion)loop5){+.+.}-{0:0}:
       check_prev_add kernel/locking/lockdep.c:3053 [inline]
       check_prevs_add kernel/locking/lockdep.c:3172 [inline]
       validate_chain+0x1646/0x58b0 kernel/locking/lockdep.c:3787
       __lock_acquire+0x1295/0x1ff0 kernel/locking/lockdep.c:5011
       lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
       flush_workqueue+0x170/0x1610 kernel/workqueue.c:2827
       drain_workqueue+0xc5/0x390 kernel/workqueue.c:2992
       destroy_workqueue+0x7b/0xae0 kernel/workqueue.c:4427
       __loop_clr_fd+0x241/0xbe0 drivers/block/loop.c:1383
       loop_clr_fd drivers/block/loop.c:1509 [inline]
       lo_ioctl+0x1529/0x23b0 drivers/block/loop.c:1865
       blkdev_ioctl+0x333/0x6b0 block/ioctl.c:601
       block_ioctl+0xb1/0xf0 block/fops.c:493
       vfs_ioctl fs/ioctl.c:51 [inline]
       __do_sys_ioctl fs/ioctl.c:874 [inline]
       __se_sys_ioctl+0xf1/0x160 fs/ioctl.c:860
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x61/0xcb

other info that might help us debug this:

Chain exists of:
  (wq_completion)loop5 --> &disk->open_mutex --> &lo->lo_mutex

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&lo->lo_mutex);
                               lock(&disk->open_mutex);
                               lock(&lo->lo_mutex);
  lock((wq_completion)loop5);

 *** DEADLOCK ***

1 lock held by syz-executor.5/3525:
 #0: ffff888146f69468 (&lo->lo_mutex){+.+.}-{3:3}, at: __loop_clr_fd+0xa9/0xbe0 drivers/block/loop.c:1365

stack backtrace:
CPU: 1 PID: 3525 Comm: syz-executor.5 Not tainted 5.15.118-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/27/2023
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x1e3/0x2cb lib/dump_stack.c:106
 check_noncircular+0x2f8/0x3b0 kernel/locking/lockdep.c:2133
 check_prev_add kernel/locking/lockdep.c:3053 [inline]
 check_prevs_add kernel/locking/lockdep.c:3172 [inline]
 validate_chain+0x1646/0x58b0 kernel/locking/lockdep.c:3787
 __lock_acquire+0x1295/0x1ff0 kernel/locking/lockdep.c:5011
 lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
 flush_workqueue+0x170/0x1610 kernel/workqueue.c:2827
 drain_workqueue+0xc5/0x390 kernel/workqueue.c:2992
 destroy_workqueue+0x7b/0xae0 kernel/workqueue.c:4427
 __loop_clr_fd+0x241/0xbe0 drivers/block/loop.c:1383
 loop_clr_fd drivers/block/loop.c:1509 [inline]
 lo_ioctl+0x1529/0x23b0 drivers/block/loop.c:1865
 blkdev_ioctl+0x333/0x6b0 block/ioctl.c:601
 block_ioctl+0xb1/0xf0 block/fops.c:493
 vfs_ioctl fs/ioctl.c:51 [inline]
 __do_sys_ioctl fs/ioctl.c:874 [inline]
 __se_sys_ioctl+0xf1/0x160 fs/ioctl.c:860
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x61/0xcb
RIP: 0033:0x7fa625e9a117
Code: 3c 1c 48 f7 d8 49 39 c4 72 b8 e8 24 58 02 00 85 c0 78 bd 48 83 c4 08 4c 89 e0 5b 41 5c c3 0f 1f 44 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007ffc941a6368 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 00007ffc941a63f0 RCX: 00007fa625e9a117
RDX: 0000000000000000 RSI: 0000000000004c01 RDI: 0000000000000003
RBP: 0000000000000003 R08: 0000000000000000 R09: 00007ffc941a6200
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000003
R13: 00007fa625f91140 R14: 00007fa625f91e48 R15: 00007ffc941a6430
Dev loop5: unable to read RDB block 8
loop5: unable to read partition table
loop5: partition table beyond EOD, truncated
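
Not part of the log above: the shape lockdep is flagging is a task that holds &lo->lo_mutex while waiting for the loop workqueue to drain (flush_workqueue via destroy_workqueue in __loop_clr_fd), while a queued work item can, through the #2..#8 chain, end up needing that same mutex. A minimal user-space sketch of that pattern, assuming pthreads and purely illustrative names (lo_mutex, loop_worker are not the driver's code), hangs the same way:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for &lo->lo_mutex. */
static pthread_mutex_t lo_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a queued loop work item whose lock chain reaches back
 * to lo_mutex (via &p->lock -> ... -> &disk->open_mutex above). */
static void *loop_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lo_mutex);   /* blocks: main already holds it */
	puts("work item would run here");
	pthread_mutex_unlock(&lo_mutex);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_mutex_lock(&lo_mutex);              /* __loop_clr_fd() takes lo_mutex ...        */
	pthread_create(&worker, NULL, loop_worker, NULL);
	pthread_join(worker, NULL);                 /* ... then waits for outstanding work,      */
	                                            /* like destroy_workqueue(): never returns.  */
	pthread_mutex_unlock(&lo_mutex);
	return 0;
}

Built with cc -pthread, this hangs by design, which is why the (wq_completion)loop5 --> &disk->open_mutex --> &lo->lo_mutex chain is reported as a possible deadlock even though the two paths were recorded at different times.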