======================================================
WARNING: possible circular locking dependency detected
6.13.0-rc3-syzkaller-00193-ge9b8ffafd20a #0 Not tainted
------------------------------------------------------
syz.5.327/8509 is trying to acquire lock:
ffff888030420520 (&lp->qli_lock){+.+.}-{3:3}, at: spin_lock include/linux/spinlock.h:351 [inline]
ffff888030420520 (&lp->qli_lock){+.+.}-{3:3}, at: xfs_dquot_detach_buf+0x2f/0x1a0 fs/xfs/xfs_dquot.c:83

but task is already holding lock:
ffff8880271d0630 (&l->lock){+.+.}-{3:3}, at: spin_lock include/linux/spinlock.h:351 [inline]
ffff8880271d0630 (&l->lock){+.+.}-{3:3}, at: lock_list_lru_of_memcg+0x24b/0x4e0 mm/list_lru.c:77

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #2 (&l->lock){+.+.}-{3:3}:
       lock_acquire+0x1ed/0x550 kernel/locking/lockdep.c:5849
       __raw_spin_lock include/linux/spinlock_api_smp.h:133 [inline]
       _raw_spin_lock+0x2e/0x40 kernel/locking/spinlock.c:154
       spin_lock include/linux/spinlock.h:351 [inline]
       lock_list_lru_of_memcg+0x24b/0x4e0 mm/list_lru.c:77
       list_lru_del+0x58/0x1f0 mm/list_lru.c:203
       xfs_buf_stale+0x1f5/0x320 fs/xfs/xfs_buf.c:178
       xfs_buf_ioend_fail+0x36/0x70 fs/xfs/xfs_buf.c:1471
       __xfs_buf_submit+0x321/0x7e0 fs/xfs/xfs_buf.c:1733
       xfs_buf_delwri_submit_buffers+0x6d4/0x970 fs/xfs/xfs_buf.c:2330
       xfs_buf_delwri_submit+0xba/0x270 fs/xfs/xfs_buf.c:2376
       xlog_do_recovery_pass+0xbf0/0xdc0 fs/xfs/xfs_log_recover.c:3266
       xlog_do_log_recovery+0x79/0x90 fs/xfs/xfs_log_recover.c:3328
       xlog_do_recover+0x123/0x4a0 fs/xfs/xfs_log_recover.c:3356
       xlog_recover+0x43e/0x540 fs/xfs/xfs_log_recover.c:3481
       xfs_log_mount+0x252/0x3e0 fs/xfs/xfs_log.c:666
       xfs_mountfs+0xf2f/0x2410 fs/xfs/xfs_mount.c:867
       xfs_fs_fill_super+0x12db/0x1590 fs/xfs/xfs_super.c:1791
       get_tree_bdev_flags+0x48c/0x5c0 fs/super.c:1636
       vfs_get_tree+0x90/0x2b0 fs/super.c:1814
       do_new_mount+0x2be/0xb40 fs/namespace.c:3507
       do_mount fs/namespace.c:3847 [inline]
       __do_sys_mount fs/namespace.c:4057 [inline]
       __se_sys_mount+0x2d6/0x3c0 fs/namespace.c:4034
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #1 (&bp->b_lock){+.+.}-{3:3}:
       lock_acquire+0x1ed/0x550 kernel/locking/lockdep.c:5849
       __raw_spin_lock include/linux/spinlock_api_smp.h:133 [inline]
       _raw_spin_lock+0x2e/0x40 kernel/locking/spinlock.c:154
       spin_lock include/linux/spinlock.h:351 [inline]
       xfs_buf_rele_cached fs/xfs/xfs_buf.c:1084 [inline]
       xfs_buf_rele+0x164/0x15b0 fs/xfs/xfs_buf.c:1151
       xfs_dquot_attach_buf+0x33e/0x560 fs/xfs/xfs_dquot.c:1345
       xfs_qm_quotacheck_dqadjust+0x13f/0x5e0 fs/xfs/xfs_qm.c:1341
       xfs_qm_dqusage_adjust+0x5e1/0x850 fs/xfs/xfs_qm.c:1457
       xfs_iwalk_ag_recs+0x4e3/0x820 fs/xfs/xfs_iwalk.c:209
       xfs_iwalk_run_callbacks+0x218/0x470 fs/xfs/xfs_iwalk.c:370
       xfs_iwalk_ag+0xa9a/0xbb0 fs/xfs/xfs_iwalk.c:476
       xfs_iwalk_ag_work+0xfb/0x1b0 fs/xfs/xfs_iwalk.c:625
       xfs_pwork_work+0x7f/0x190 fs/xfs/xfs_pwork.c:47
       process_one_work kernel/workqueue.c:3229 [inline]
       process_scheduled_works+0xa66/0x1840 kernel/workqueue.c:3310
       worker_thread+0x870/0xd30 kernel/workqueue.c:3391
       kthread+0x2f0/0x390 kernel/kthread.c:389
       ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
       ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244

-> #0 (&lp->qli_lock){+.+.}-{3:3}:
       check_prev_add kernel/locking/lockdep.c:3161 [inline]
       check_prevs_add kernel/locking/lockdep.c:3280 [inline]
       validate_chain+0x18ef/0x5920 kernel/locking/lockdep.c:3904
       __lock_acquire+0x1397/0x2100 kernel/locking/lockdep.c:5226
       lock_acquire+0x1ed/0x550 kernel/locking/lockdep.c:5849
       __raw_spin_lock include/linux/spinlock_api_smp.h:133 [inline]
       _raw_spin_lock+0x2e/0x40 kernel/locking/spinlock.c:154
       spin_lock include/linux/spinlock.h:351 [inline]
       xfs_dquot_detach_buf+0x2f/0x1a0 fs/xfs/xfs_dquot.c:83
       xfs_qm_dquot_isolate+0x49d/0x1420 fs/xfs/xfs_qm.c:528
       __list_lru_walk_one+0x170/0x470 mm/list_lru.c:301
       list_lru_walk_one+0x3c/0x50 mm/list_lru.c:338
       list_lru_shrink_walk include/linux/list_lru.h:240 [inline]
       xfs_qm_shrink_scan+0x1e1/0x400 fs/xfs/xfs_qm.c:574
       do_shrink_slab+0x72d/0x1160 mm/shrinker.c:437
       shrink_slab+0x1093/0x14d0 mm/shrinker.c:664
       drop_slab_node mm/vmscan.c:414 [inline]
       drop_slab+0x142/0x280 mm/vmscan.c:432
       drop_caches_sysctl_handler+0xbc/0x160 fs/drop_caches.c:68
       proc_sys_call_handler+0x5ec/0x920 fs/proc/proc_sysctl.c:601
       do_iter_readv_writev+0x600/0x880
       vfs_writev+0x376/0xba0 fs/read_write.c:1050
       do_writev+0x1b6/0x360 fs/read_write.c:1096
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

other info that might help us debug this:

Chain exists of:
  &lp->qli_lock --> &bp->b_lock --> &l->lock

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&l->lock);
                               lock(&bp->b_lock);
                               lock(&l->lock);
  lock(&lp->qli_lock);

 *** DEADLOCK ***

4 locks held by syz.5.327/8509:
 #0: ffff88805cbd5b38 (&f->f_pos_lock){+.+.}-{4:4}, at: fdget_pos+0x254/0x320 fs/file.c:1191
 #1: ffff88805789c420 (sb_writers#3){.+.+}-{0:0}, at: file_start_write include/linux/fs.h:2964 [inline]
 #1: ffff88805789c420 (sb_writers#3){.+.+}-{0:0}, at: vfs_writev+0x2d1/0xba0 fs/read_write.c:1048
 #2: ffff8880271d0630 (&l->lock){+.+.}-{3:3}, at: spin_lock include/linux/spinlock.h:351 [inline]
 #2: ffff8880271d0630 (&l->lock){+.+.}-{3:3}, at: lock_list_lru_of_memcg+0x24b/0x4e0 mm/list_lru.c:77
 #3: ffff888030420608 (&xfs_dquot_project_class){+.+.}-{4:4}, at: xfs_dqlock_nowait fs/xfs/xfs_dquot.h:126 [inline]
 #3: ffff888030420608 (&xfs_dquot_project_class){+.+.}-{4:4}, at: xfs_qm_dquot_isolate+0x8d/0x1420 fs/xfs/xfs_qm.c:467

stack backtrace:
CPU: 0 UID: 0 PID: 8509 Comm: syz.5.327 Not tainted 6.13.0-rc3-syzkaller-00193-ge9b8ffafd20a #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
Call Trace:
 __dump_stack lib/dump_stack.c:94 [inline]
 dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
 print_circular_bug+0x13a/0x1b0 kernel/locking/lockdep.c:2074
 check_noncircular+0x36a/0x4a0 kernel/locking/lockdep.c:2206
 check_prev_add kernel/locking/lockdep.c:3161 [inline]
 check_prevs_add kernel/locking/lockdep.c:3280 [inline]
 validate_chain+0x18ef/0x5920 kernel/locking/lockdep.c:3904
 __lock_acquire+0x1397/0x2100 kernel/locking/lockdep.c:5226
 lock_acquire+0x1ed/0x550 kernel/locking/lockdep.c:5849
 __raw_spin_lock include/linux/spinlock_api_smp.h:133 [inline]
 _raw_spin_lock+0x2e/0x40 kernel/locking/spinlock.c:154
 spin_lock include/linux/spinlock.h:351 [inline]
 xfs_dquot_detach_buf+0x2f/0x1a0 fs/xfs/xfs_dquot.c:83
 xfs_qm_dquot_isolate+0x49d/0x1420 fs/xfs/xfs_qm.c:528
 __list_lru_walk_one+0x170/0x470 mm/list_lru.c:301
 list_lru_walk_one+0x3c/0x50 mm/list_lru.c:338
 list_lru_shrink_walk include/linux/list_lru.h:240 [inline]
 xfs_qm_shrink_scan+0x1e1/0x400 fs/xfs/xfs_qm.c:574
 do_shrink_slab+0x72d/0x1160 mm/shrinker.c:437
 shrink_slab+0x1093/0x14d0 mm/shrinker.c:664
 drop_slab_node mm/vmscan.c:414 [inline]
 drop_slab+0x142/0x280 mm/vmscan.c:432
 drop_caches_sysctl_handler+0xbc/0x160 fs/drop_caches.c:68
 proc_sys_call_handler+0x5ec/0x920 fs/proc/proc_sysctl.c:601
 do_iter_readv_writev+0x600/0x880
 vfs_writev+0x376/0xba0 fs/read_write.c:1050
 do_writev+0x1b6/0x360 fs/read_write.c:1096
 do_syscall_x64 arch/x86/entry/common.c:52 [inline]
 do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7fdbb9585d29
Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fdbba33b038 EFLAGS: 00000246 ORIG_RAX: 0000000000000014
RAX: ffffffffffffffda RBX: 00007fdbb9775fa0 RCX: 00007fdbb9585d29
RDX: 0000000000000001 RSI: 00000000200000c0 RDI: 0000000000000003
RBP: 00007fdbb9601aa8 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 0000000000000000 R14: 00007fdbb9775fa0 R15: 00007ffcbf23e2c8
syz.5.327 (8509): drop_caches: 2
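
The report reduces to a three-way lock-order inversion: the quotacheck path (trace #1) records qli_lock -> b_lock, log recovery (trace #2) records b_lock -> l->lock, and the dquot shrinker (trace #0) now takes &lp->qli_lock while holding the list_lru's &l->lock, closing the cycle. Below is a minimal userspace sketch of that same pattern, with pthread mutexes standing in for the three kernel spinlocks; the lock names, thread names, and helper are invented for illustration only and are not the XFS code. If the three orderings run concurrently, each thread holds one lock while waiting for the next and the program hangs, which is the circular wait lockdep warns about before it can actually occur.

    /*
     * inversion.c - illustrative only, not kernel code.
     * Mirrors the reported chain:
     *   qli_lock -> b_lock   (quotacheck path, trace #1)
     *   b_lock   -> l_lock   (log recovery path, trace #2)
     *   l_lock   -> qli_lock (shrinker path, trace #0)
     * Build: cc -pthread inversion.c -o inversion
     * Running it is expected to hang in a circular wait.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t qli_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for &lp->qli_lock */
    static pthread_mutex_t b_lock   = PTHREAD_MUTEX_INITIALIZER; /* stand-in for &bp->b_lock  */
    static pthread_mutex_t l_lock   = PTHREAD_MUTEX_INITIALIZER; /* stand-in for &l->lock     */

    /* Take @first, pause so the other threads can take their first lock,
     * then try to take @second: with the orders below this forms a cycle. */
    static void *lock_pair(pthread_mutex_t *first, pthread_mutex_t *second, const char *who)
    {
        pthread_mutex_lock(first);
        printf("%s: holding first lock, now waiting for second\n", who);
        sleep(1);                   /* deliberately widen the race window */
        pthread_mutex_lock(second); /* circular wait: no thread gets past this */
        printf("%s: got both locks\n", who);
        pthread_mutex_unlock(second);
        pthread_mutex_unlock(first);
        return NULL;
    }

    static void *quotacheck(void *arg) { (void)arg; return lock_pair(&qli_lock, &b_lock, "quotacheck"); }
    static void *recovery(void *arg)   { (void)arg; return lock_pair(&b_lock, &l_lock, "log recovery"); }
    static void *shrinker(void *arg)   { (void)arg; return lock_pair(&l_lock, &qli_lock, "shrinker"); }

    int main(void)
    {
        pthread_t t[3];

        pthread_create(&t[0], NULL, quotacheck, NULL);
        pthread_create(&t[1], NULL, recovery, NULL);
        pthread_create(&t[2], NULL, shrinker, NULL);

        for (int i = 0; i < 3; i++)
            pthread_join(t[i], NULL); /* never returns once the cycle forms */

        return 0;
    }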