======================================================
WARNING: possible circular locking dependency detected
6.7.0-rc8-syzkaller #0 Not tainted
------------------------------------------------------
syz-executor.2/19999 is trying to acquire lock:
ffff88801d5788f8 (&sbi->alloc_mutex){+.+.}-{3:3}, at: hfsplus_block_allocate+0x9e/0x8b0 fs/hfsplus/bitmap.c:35

but task is already holding lock:
ffff888081c36648 (&HFSPLUS_I(inode)->extents_lock){+.+.}-{3:3}, at: hfsplus_file_extend+0x21b/0x1b70 fs/hfsplus/extents.c:457

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #1 (&HFSPLUS_I(inode)->extents_lock){+.+.}-{3:3}:
       lock_acquire+0x1e3/0x530 kernel/locking/lockdep.c:5754
       __mutex_lock_common kernel/locking/mutex.c:603 [inline]
       __mutex_lock+0x136/0xd60 kernel/locking/mutex.c:747
       hfsplus_get_block+0x383/0x14e0 fs/hfsplus/extents.c:260
       block_read_full_folio+0x474/0xea0 fs/buffer.c:2399
       filemap_read_folio+0x19c/0x780 mm/filemap.c:2323
       do_read_cache_folio+0x134/0x810 mm/filemap.c:3700
       do_read_cache_page+0x30/0x200 mm/filemap.c:3766
       read_mapping_page include/linux/pagemap.h:871 [inline]
       hfsplus_block_allocate+0xee/0x8b0 fs/hfsplus/bitmap.c:37
       hfsplus_file_extend+0xade/0x1b70 fs/hfsplus/extents.c:468
       hfsplus_get_block+0x406/0x14e0 fs/hfsplus/extents.c:245
       __block_write_begin_int+0x54d/0x1ad0 fs/buffer.c:2119
       __block_write_begin fs/buffer.c:2168 [inline]
       block_write_begin+0x9b/0x1e0 fs/buffer.c:2227
       cont_write_begin+0x643/0x880 fs/buffer.c:2582
       hfsplus_write_begin+0x8a/0xd0 fs/hfsplus/inode.c:52
       generic_perform_write+0x31b/0x630 mm/filemap.c:3927
       generic_file_write_iter+0xaf/0x310 mm/filemap.c:4048
       call_write_iter include/linux/fs.h:2020 [inline]
       new_sync_write fs/read_write.c:491 [inline]
       vfs_write+0x792/0xb20 fs/read_write.c:584
       ksys_write+0x1a0/0x2c0 fs/read_write.c:637
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0x45/0x110 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x63/0x6b

-> #0 (&sbi->alloc_mutex){+.+.}-{3:3}:
       check_prev_add kernel/locking/lockdep.c:3134 [inline]
       check_prevs_add kernel/locking/lockdep.c:3253 [inline]
       validate_chain+0x1909/0x5ab0 kernel/locking/lockdep.c:3869
       __lock_acquire+0x1345/0x1fd0 kernel/locking/lockdep.c:5137
       lock_acquire+0x1e3/0x530 kernel/locking/lockdep.c:5754
       __mutex_lock_common kernel/locking/mutex.c:603 [inline]
       __mutex_lock+0x136/0xd60 kernel/locking/mutex.c:747
       hfsplus_block_allocate+0x9e/0x8b0 fs/hfsplus/bitmap.c:35
       hfsplus_file_extend+0xade/0x1b70 fs/hfsplus/extents.c:468
       hfsplus_bmap_reserve+0x105/0x4e0 fs/hfsplus/btree.c:358
       hfsplus_rename_cat+0x1d0/0x1050 fs/hfsplus/catalog.c:456
       hfsplus_link+0x3ab/0x800 fs/hfsplus/dir.c:323
       vfs_link+0x4ed/0x680 fs/namei.c:4588
       do_linkat+0x356/0x750 fs/namei.c:4659
       __do_sys_link fs/namei.c:4693 [inline]
       __se_sys_link fs/namei.c:4691 [inline]
       __x64_sys_link+0x86/0x90 fs/namei.c:4691
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0x45/0x110 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x63/0x6b

other info that might help us debug this:

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&HFSPLUS_I(inode)->extents_lock);
                               lock(&sbi->alloc_mutex);
                               lock(&HFSPLUS_I(inode)->extents_lock);
  lock(&sbi->alloc_mutex);

 *** DEADLOCK ***

6 locks held by syz-executor.2/19999:
 #0: ffff888079de8418 (sb_writers#32){.+.+}-{0:0}, at: mnt_want_write+0x3f/0x90 fs/namespace.c:404
 #1: ffff888081c31080 (&type->i_mutex_dir_key#22/1){+.+.}-{3:3}, at: inode_lock_nested include/linux/fs.h:837 [inline]
 #1: ffff888081c31080 (&type->i_mutex_dir_key#22/1){+.+.}-{3:3}, at: filename_create+0x260/0x530 fs/namei.c:3875
 #2: ffff888081c31740 (&sb->s_type->i_mutex_key#40){+.+.}-{3:3}, at: inode_lock include/linux/fs.h:802 [inline]
 #2: ffff888081c31740 (&sb->s_type->i_mutex_key#40){+.+.}-{3:3}, at: vfs_link+0x3af/0x680 fs/namei.c:4579
 #3: ffff88801d578998 (&sbi->vh_mutex){+.+.}-{3:3}, at: hfsplus_link+0x237/0x800 fs/hfsplus/dir.c:316
 #4: ffff88807c6f60b0 (&tree->tree_lock#2){+.+.}-{3:3}, at: hfsplus_find_init+0x14a/0x1c0
 #5: ffff888081c36648 (&HFSPLUS_I(inode)->extents_lock){+.+.}-{3:3}, at: hfsplus_file_extend+0x21b/0x1b70 fs/hfsplus/extents.c:457

stack backtrace:
CPU: 0 PID: 19999 Comm: syz-executor.2 Not tainted 6.7.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 11/17/2023
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x1e7/0x2d0 lib/dump_stack.c:106
 check_noncircular+0x366/0x490 kernel/locking/lockdep.c:2187
 check_prev_add kernel/locking/lockdep.c:3134 [inline]
 check_prevs_add kernel/locking/lockdep.c:3253 [inline]
 validate_chain+0x1909/0x5ab0 kernel/locking/lockdep.c:3869
 __lock_acquire+0x1345/0x1fd0 kernel/locking/lockdep.c:5137
 lock_acquire+0x1e3/0x530 kernel/locking/lockdep.c:5754
 __mutex_lock_common kernel/locking/mutex.c:603 [inline]
 __mutex_lock+0x136/0xd60 kernel/locking/mutex.c:747
 hfsplus_block_allocate+0x9e/0x8b0 fs/hfsplus/bitmap.c:35
 hfsplus_file_extend+0xade/0x1b70 fs/hfsplus/extents.c:468
 hfsplus_bmap_reserve+0x105/0x4e0 fs/hfsplus/btree.c:358
 hfsplus_rename_cat+0x1d0/0x1050 fs/hfsplus/catalog.c:456
 hfsplus_link+0x3ab/0x800 fs/hfsplus/dir.c:323
 vfs_link+0x4ed/0x680 fs/namei.c:4588
 do_linkat+0x356/0x750 fs/namei.c:4659
 __do_sys_link fs/namei.c:4693 [inline]
 __se_sys_link fs/namei.c:4691 [inline]
 __x64_sys_link+0x86/0x90 fs/namei.c:4691
 do_syscall_x64 arch/x86/entry/common.c:52 [inline]
 do_syscall_64+0x45/0x110 arch/x86/entry/common.c:83
 entry_SYSCALL_64_after_hwframe+0x63/0x6b
RIP: 0033:0x7fb28667cce9
Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fb2874730c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000056
RAX: ffffffffffffffda RBX: 00007fb28679c120 RCX: 00007fb28667cce9
RDX: 0000000000000000 RSI: 0000000020000280 RDI: 00000000200000c0
RBP: 00007fb2866c947a R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 000000000000000b R14: 00007fb28679c120 R15: 00007ffc648f4598
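
The "Possible unsafe locking scenario" above is the classic AB-BA inversion: chain #0 takes extents_lock in hfsplus_file_extend() and then alloc_mutex in hfsplus_block_allocate(), while chain #1 holds alloc_mutex and re-enters hfsplus_get_block(), taking an extents_lock, via read_mapping_page(). Below is a minimal user-space sketch of that ordering, not HFS+ code: the two pthread mutexes merely stand in for the two kernel lock classes, and the thread names are hypothetical. Running it can genuinely hang, which is the behaviour lockdep is warning about.

/*
 * Minimal AB-BA inversion sketch (assumption: illustrative only,
 * not taken from fs/hfsplus). Build with: cc -pthread abba.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t extents_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for &HFSPLUS_I(inode)->extents_lock */
static pthread_mutex_t alloc_mutex  = PTHREAD_MUTEX_INITIALIZER; /* stands in for &sbi->alloc_mutex */

/* Mirrors chain #0: extents_lock first, then alloc_mutex. */
static void *file_extend_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&extents_lock);
	usleep(1000);                      /* widen the race window */
	pthread_mutex_lock(&alloc_mutex);  /* can block forever */
	puts("file_extend_path: got both locks");
	pthread_mutex_unlock(&alloc_mutex);
	pthread_mutex_unlock(&extents_lock);
	return NULL;
}

/* Mirrors chain #1: alloc_mutex first, then an extents_lock. */
static void *block_allocate_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&alloc_mutex);
	usleep(1000);
	pthread_mutex_lock(&extents_lock); /* can block forever */
	puts("block_allocate_path: got both locks");
	pthread_mutex_unlock(&extents_lock);
	pthread_mutex_unlock(&alloc_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, file_extend_path, NULL);
	pthread_create(&b, NULL, block_allocate_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no deadlock on this run");
	return 0;
}

One caveat the sketch flattens away: in the report the extents_lock taken under alloc_mutex belongs to the allocation file's inode, a different instance of the same lock class as the one already held, which is exactly how lockdep groups them when it reports the cycle.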