possible deadlock in jffs2_read_folio

Status: auto-obsoleted due to no activity on 2024/07/23 00:31
Subsystems: jffs2
Reported-by: syzbot+7cab786a93f00e566e5b@syzkaller.appspotmail.com
First crash: 225d, last: 224d
Discussions (1)
Title                                                     Replies (including bot)  Last reply
[syzbot] [jffs2?] possible deadlock in jffs2_read_folio   0 (1)                    2024/04/17 08:55

Sample crash report:
jffs2: notice: (9998) jffs2_build_xattr_subsystem: complete building xattr subsystem, 1 of xdatum (0 unchecked, 0 orphan) and 2 of xref (0 dead, 0 orphan) found.
======================================================
WARNING: possible circular locking dependency detected
6.9.0-rc3-syzkaller-gb5d2afe8745b #0 Not tainted
------------------------------------------------------
syz-executor.4/9998 is trying to acquire lock:
ffff0000efee8c18 (&f->sem){+.+.}-{3:3}, at: jffs2_read_folio+0x6c/0xd0 fs/jffs2/file.c:125

but task is already holding lock:
ffff0000efee8f68 (mapping.invalidate_lock#11){.+.+}-{3:3}, at: filemap_invalidate_lock_shared include/linux/fs.h:850 [inline]
ffff0000efee8f68 (mapping.invalidate_lock#11){.+.+}-{3:3}, at: filemap_fault+0x6b4/0x1004 mm/filemap.c:3296

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #2 (mapping.invalidate_lock#11){.+.+}-{3:3}:
       down_read+0x58/0x2fc kernel/locking/rwsem.c:1526
       filemap_invalidate_lock_shared include/linux/fs.h:850 [inline]
       filemap_fault+0x6b4/0x1004 mm/filemap.c:3296
       __do_fault+0x11c/0x374 mm/memory.c:4531
       do_read_fault mm/memory.c:4894 [inline]
       do_fault mm/memory.c:5024 [inline]
       do_pte_missing mm/memory.c:3880 [inline]
       handle_pte_fault mm/memory.c:5300 [inline]
       __handle_mm_fault+0x36d0/0x5920 mm/memory.c:5441
       handle_mm_fault+0x1e8/0x63c mm/memory.c:5606
       __do_page_fault arch/arm64/mm/fault.c:505 [inline]
       do_page_fault+0x550/0xaec arch/arm64/mm/fault.c:620
       do_translation_fault+0xc4/0x114 arch/arm64/mm/fault.c:704
       do_mem_abort+0x74/0x200 arch/arm64/mm/fault.c:840
       el1_abort+0x3c/0x5c arch/arm64/kernel/entry-common.c:432
       el1h_64_sync_handler+0x60/0xac arch/arm64/kernel/entry-common.c:492
       el1h_64_sync+0x64/0x68 arch/arm64/kernel/entry.S:593
       __arch_copy_from_user+0xb8/0x230 arch/arm64/lib/copy_template.S:110
       vfs_ioctl fs/ioctl.c:51 [inline]
       __do_sys_ioctl fs/ioctl.c:904 [inline]
       __se_sys_ioctl fs/ioctl.c:890 [inline]
       __arm64_sys_ioctl+0x14c/0x1c8 fs/ioctl.c:890
       __invoke_syscall arch/arm64/kernel/syscall.c:34 [inline]
       invoke_syscall+0x98/0x2b8 arch/arm64/kernel/syscall.c:48
       el0_svc_common+0x130/0x23c arch/arm64/kernel/syscall.c:133
       do_el0_svc+0x48/0x58 arch/arm64/kernel/syscall.c:152
       el0_svc+0x54/0x168 arch/arm64/kernel/entry-common.c:712
       el0t_64_sync_handler+0x84/0xfc arch/arm64/kernel/entry-common.c:730
       el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:598

-> #1 (&mm->mmap_lock){++++}-{3:3}:
       __might_fault+0xc4/0x124 mm/memory.c:6220
       filldir64+0x2d4/0x948 fs/readdir.c:375
       dir_emit include/linux/fs.h:3570 [inline]
       jffs2_readdir+0x314/0x42c fs/jffs2/dir.c:152
       iterate_dir+0x3f8/0x580 fs/readdir.c:110
       __do_sys_getdents64 fs/readdir.c:409 [inline]
       __se_sys_getdents64 fs/readdir.c:394 [inline]
       __arm64_sys_getdents64+0x1c4/0x4a0 fs/readdir.c:394
       __invoke_syscall arch/arm64/kernel/syscall.c:34 [inline]
       invoke_syscall+0x98/0x2b8 arch/arm64/kernel/syscall.c:48
       el0_svc_common+0x130/0x23c arch/arm64/kernel/syscall.c:133
       do_el0_svc+0x48/0x58 arch/arm64/kernel/syscall.c:152
       el0_svc+0x54/0x168 arch/arm64/kernel/entry-common.c:712
       el0t_64_sync_handler+0x84/0xfc arch/arm64/kernel/entry-common.c:730
       el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:598

-> #0 (&f->sem){+.+.}-{3:3}:
       check_prev_add kernel/locking/lockdep.c:3134 [inline]
       check_prevs_add kernel/locking/lockdep.c:3253 [inline]
       validate_chain kernel/locking/lockdep.c:3869 [inline]
       __lock_acquire+0x3384/0x763c kernel/locking/lockdep.c:5137
       lock_acquire+0x248/0x73c kernel/locking/lockdep.c:5754
       __mutex_lock_common+0x190/0x21a0 kernel/locking/mutex.c:608
       __mutex_lock kernel/locking/mutex.c:752 [inline]
       mutex_lock_nested+0x2c/0x38 kernel/locking/mutex.c:804
       jffs2_read_folio+0x6c/0xd0 fs/jffs2/file.c:125
       filemap_read_folio+0x14c/0x39c mm/filemap.c:2331
       filemap_fault+0xab8/0x1004 mm/filemap.c:3381
       __do_fault+0x11c/0x374 mm/memory.c:4531
       do_read_fault mm/memory.c:4894 [inline]
       do_fault mm/memory.c:5024 [inline]
       do_pte_missing mm/memory.c:3880 [inline]
       handle_pte_fault mm/memory.c:5300 [inline]
       __handle_mm_fault+0x36d0/0x5920 mm/memory.c:5441
       handle_mm_fault+0x1e8/0x63c mm/memory.c:5606
       __do_page_fault arch/arm64/mm/fault.c:505 [inline]
       do_page_fault+0x550/0xaec arch/arm64/mm/fault.c:620
       do_translation_fault+0xc4/0x114 arch/arm64/mm/fault.c:704
       do_mem_abort+0x74/0x200 arch/arm64/mm/fault.c:840
       el1_abort+0x3c/0x5c arch/arm64/kernel/entry-common.c:432
       el1h_64_sync_handler+0x60/0xac arch/arm64/kernel/entry-common.c:492
       el1h_64_sync+0x64/0x68 arch/arm64/kernel/entry.S:593
       __arch_copy_from_user+0xb8/0x230 arch/arm64/lib/copy_template.S:110
       vfs_ioctl fs/ioctl.c:51 [inline]
       __do_sys_ioctl fs/ioctl.c:904 [inline]
       __se_sys_ioctl fs/ioctl.c:890 [inline]
       __arm64_sys_ioctl+0x14c/0x1c8 fs/ioctl.c:890
       __invoke_syscall arch/arm64/kernel/syscall.c:34 [inline]
       invoke_syscall+0x98/0x2b8 arch/arm64/kernel/syscall.c:48
       el0_svc_common+0x130/0x23c arch/arm64/kernel/syscall.c:133
       do_el0_svc+0x48/0x58 arch/arm64/kernel/syscall.c:152
       el0_svc+0x54/0x168 arch/arm64/kernel/entry-common.c:712
       el0t_64_sync_handler+0x84/0xfc arch/arm64/kernel/entry-common.c:730
       el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:598

other info that might help us debug this:

Chain exists of:
  &f->sem --> &mm->mmap_lock --> mapping.invalidate_lock#11

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  rlock(mapping.invalidate_lock#11);
                               lock(&mm->mmap_lock);
                               lock(mapping.invalidate_lock#11);
  lock(&f->sem);

 *** DEADLOCK ***

2 locks held by syz-executor.4/9998:
 #0: ffff0000cd860190 (&dev->mutex){....}-{3:3}, at: device_lock include/linux/device.h:990 [inline]
 #0: ffff0000cd860190 (&dev->mutex){....}-{3:3}, at: usbdev_do_ioctl drivers/usb/core/devio.c:2608 [inline]
 #0: ffff0000cd860190 (&dev->mutex){....}-{3:3}, at: usbdev_ioctl+0x24c/0x66e0 drivers/usb/core/devio.c:2824
 #1: ffff0000efee8f68 (mapping.invalidate_lock#11){.+.+}-{3:3}, at: filemap_invalidate_lock_shared include/linux/fs.h:850 [inline]
 #1: ffff0000efee8f68 (mapping.invalidate_lock#11){.+.+}-{3:3}, at: filemap_fault+0x6b4/0x1004 mm/filemap.c:3296

stack backtrace:
CPU: 1 PID: 9998 Comm: syz-executor.4 Not tainted 6.9.0-rc3-syzkaller-gb5d2afe8745b #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
Call trace:
 dump_backtrace+0x1b8/0x1e4 arch/arm64/kernel/stacktrace.c:317
 show_stack+0x2c/0x3c arch/arm64/kernel/stacktrace.c:324
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0xe4/0x150 lib/dump_stack.c:114
 dump_stack+0x1c/0x28 lib/dump_stack.c:123
 print_circular_bug+0x150/0x1b8 kernel/locking/lockdep.c:2060
 check_noncircular+0x310/0x404 kernel/locking/lockdep.c:2187
 check_prev_add kernel/locking/lockdep.c:3134 [inline]
 check_prevs_add kernel/locking/lockdep.c:3253 [inline]
 validate_chain kernel/locking/lockdep.c:3869 [inline]
 __lock_acquire+0x3384/0x763c kernel/locking/lockdep.c:5137
 lock_acquire+0x248/0x73c kernel/locking/lockdep.c:5754
 __mutex_lock_common+0x190/0x21a0 kernel/locking/mutex.c:608
 __mutex_lock kernel/locking/mutex.c:752 [inline]
 mutex_lock_nested+0x2c/0x38 kernel/locking/mutex.c:804
 jffs2_read_folio+0x6c/0xd0 fs/jffs2/file.c:125
 filemap_read_folio+0x14c/0x39c mm/filemap.c:2331
 filemap_fault+0xab8/0x1004 mm/filemap.c:3381
 __do_fault+0x11c/0x374 mm/memory.c:4531
 do_read_fault mm/memory.c:4894 [inline]
 do_fault mm/memory.c:5024 [inline]
 do_pte_missing mm/memory.c:3880 [inline]
 handle_pte_fault mm/memory.c:5300 [inline]
 __handle_mm_fault+0x36d0/0x5920 mm/memory.c:5441
 handle_mm_fault+0x1e8/0x63c mm/memory.c:5606
 __do_page_fault arch/arm64/mm/fault.c:505 [inline]
 do_page_fault+0x550/0xaec arch/arm64/mm/fault.c:620
 do_translation_fault+0xc4/0x114 arch/arm64/mm/fault.c:704
 do_mem_abort+0x74/0x200 arch/arm64/mm/fault.c:840
 el1_abort+0x3c/0x5c arch/arm64/kernel/entry-common.c:432
 el1h_64_sync_handler+0x60/0xac arch/arm64/kernel/entry-common.c:492
 el1h_64_sync+0x64/0x68 arch/arm64/kernel/entry.S:593
 __arch_copy_from_user+0xb8/0x230 arch/arm64/lib/copy_template.S:110
 vfs_ioctl fs/ioctl.c:51 [inline]
 __do_sys_ioctl fs/ioctl.c:904 [inline]
 __se_sys_ioctl fs/ioctl.c:890 [inline]
 __arm64_sys_ioctl+0x14c/0x1c8 fs/ioctl.c:890
 __invoke_syscall arch/arm64/kernel/syscall.c:34 [inline]
 invoke_syscall+0x98/0x2b8 arch/arm64/kernel/syscall.c:48
 el0_svc_common+0x130/0x23c arch/arm64/kernel/syscall.c:133
 do_el0_svc+0x48/0x58 arch/arm64/kernel/syscall.c:152
 el0_svc+0x54/0x168 arch/arm64/kernel/entry-common.c:712
 el0t_64_sync_handler+0x84/0xfc arch/arm64/kernel/entry-common.c:730
 el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:598
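
The report above reduces to a single lock-ordering cycle: jffs2_readdir() holds f->sem while filldir64() may fault and take mmap_lock (edge #1); a page fault runs with mmap_lock held while filemap_fault() takes the mapping's invalidate_lock (edge #2); and, still inside the fault, filemap_read_folio() -> jffs2_read_folio() takes f->sem while invalidate_lock is held (edge #0). Below is a minimal userspace sketch of that cycle, not kernel code: plain pthread mutexes named after the kernel locks stand in for the real rwsems/mutex (mmap_lock and invalidate_lock are actually taken shared in the traces), and the thread names are illustrative. With the staggered sleeps it is expected to hang, which is the point.

/* Userspace illustration of the f->sem -> mmap_lock -> invalidate_lock -> f->sem
 * cycle from the lockdep report above. Plain mutexes stand in for the kernel
 * rwsems/mutex; compile with: cc -pthread cycle.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t f_sem           = PTHREAD_MUTEX_INITIALIZER; /* &f->sem */
static pthread_mutex_t mmap_lock       = PTHREAD_MUTEX_INITIALIZER; /* &mm->mmap_lock */
static pthread_mutex_t invalidate_lock = PTHREAD_MUTEX_INITIALIZER; /* mapping.invalidate_lock */

/* Edge #1: jffs2_readdir() holds f->sem; filldir64() copies dirents to
 * userspace and may fault, taking mmap_lock. */
static void *readdir_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&f_sem);
	sleep(1);
	pthread_mutex_lock(&mmap_lock);
	pthread_mutex_unlock(&mmap_lock);
	pthread_mutex_unlock(&f_sem);
	return NULL;
}

/* Edge #2: the page-fault path runs with mmap_lock held and filemap_fault()
 * takes the mapping's invalidate_lock. */
static void *fault_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mmap_lock);
	sleep(1);
	pthread_mutex_lock(&invalidate_lock);
	pthread_mutex_unlock(&invalidate_lock);
	pthread_mutex_unlock(&mmap_lock);
	return NULL;
}

/* Edge #0: still inside the fault, filemap_read_folio() -> jffs2_read_folio()
 * takes f->sem while invalidate_lock is held, closing the cycle. */
static void *readpage_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&invalidate_lock);
	sleep(1);
	pthread_mutex_lock(&f_sem);
	pthread_mutex_unlock(&f_sem);
	pthread_mutex_unlock(&invalidate_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2, t3;

	pthread_create(&t1, NULL, readdir_path, NULL);
	pthread_create(&t2, NULL, fault_path, NULL);
	pthread_create(&t3, NULL, readpage_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	pthread_join(t3, NULL);
	puts("no deadlock this run");
	return 0;
}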

Crashes (2):
Time Kernel Commit Syzkaller Config Log Report Syz repro C repro VM info Assets Manager Title
2024/04/14 00:30 git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-kernelci b5d2afe8745b c8349e48 .config console log report info [disk image] [vmlinux] [kernel image] ci-upstream-gce-arm64 possible deadlock in jffs2_read_folio
2024/04/13 08:45 git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-kernelci b5d2afe8745b c8349e48 .config console log report info [disk image] [vmlinux] [kernel image] ci-upstream-gce-arm64 possible deadlock in jffs2_read_folio
* Struck through repros no longer work on HEAD.