./strace-static-x86_64 -e \!wait4,clock_nanosleep,nanosleep -s 100 -x -f ./syz-executor1695048323 <...>
Warning: Permanently added '10.128.1.80' (ECDSA) to the list of known hosts.
execve("./syz-executor1695048323", ["./syz-executor1695048323"], 0x7ffc2c277130 /* 10 vars */) = 0
brk(NULL) = 0x55555749d000
brk(0x55555749dc40) = 0x55555749dc40
arch_prctl(ARCH_SET_FS, 0x55555749d300) = 0
uname({sysname="Linux", nodename="syzkaller", ...}) = 0
readlink("/proc/self/exe", "/root/syz-executor1695048323", 4096) = 28
brk(0x5555574bec40) = 0x5555574bec40
brk(0x5555574bf000) = 0x5555574bf000
mprotect(0x7ff52d183000, 16384, PROT_READ) = 0
mmap(0x1ffff000, 4096, PROT_NONE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x1ffff000
mmap(0x20000000, 16777216, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x20000000
mmap(0x21000000, 4096, PROT_NONE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x21000000
unshare(CLONE_NEWPID) = 0
clone(child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD./strace-static-x86_64: Process 5072 attached
, child_tidptr=0x55555749d5d0) = 5072
[pid 5072] mount(NULL, "/sys/fs/fuse/connections", "fusectl", 0, NULL) = -1 EBUSY (Device or resource busy)
[pid 5072] prctl(PR_SET_PDEATHSIG, SIGKILL) = 0
[pid 5072] setsid() = 1
[pid 5072] prlimit64(0, RLIMIT_AS, {rlim_cur=204800*1024, rlim_max=204800*1024}, NULL) = 0
[pid 5072] prlimit64(0, RLIMIT_MEMLOCK, {rlim_cur=32768*1024, rlim_max=32768*1024}, NULL) = 0
[pid 5072] prlimit64(0, RLIMIT_FSIZE, {rlim_cur=139264*1024, rlim_max=139264*1024}, NULL) = 0
[pid 5072] prlimit64(0, RLIMIT_STACK, {rlim_cur=1024*1024, rlim_max=1024*1024}, NULL) = 0
[pid 5072] prlimit64(0, RLIMIT_CORE, {rlim_cur=131072*1024, rlim_max=131072*1024}, NULL) = 0
[pid 5072] prlimit64(0, RLIMIT_NOFILE, {rlim_cur=256, rlim_max=256}, NULL) = 0
[pid 5072] unshare(CLONE_NEWNS) = 0
[pid 5072] mount(NULL, "/", NULL, MS_REC|MS_PRIVATE, NULL) = 0
[pid 5072] unshare(CLONE_NEWIPC) = 0
[pid 5072] unshare(CLONE_NEWCGROUP) = 0
[pid 5072] unshare(CLONE_NEWUTS) = 0
[pid 5072] unshare(CLONE_SYSVSEM) = 0
[pid 5072] openat(AT_FDCWD, "/proc/sys/kernel/shmmax", O_WRONLY|O_CLOEXEC) = 3
[pid 5072] write(3, "16777216", 8) = 8
[pid 5072] close(3) = 0
[pid 5072] openat(AT_FDCWD, "/proc/sys/kernel/shmall", O_WRONLY|O_CLOEXEC) = 3
[pid 5072] write(3, "536870912", 9) = 9
[pid 5072] close(3) = 0
[pid 5072] openat(AT_FDCWD, "/proc/sys/kernel/shmmni", O_WRONLY|O_CLOEXEC) = 3
[pid 5072] write(3, "1024", 4) = 4
[pid 5072] close(3) = 0
[pid 5072] openat(AT_FDCWD, "/proc/sys/kernel/msgmax", O_WRONLY|O_CLOEXEC) = 3
[pid 5072] write(3, "8192", 4) = 4
[pid 5072] close(3) = 0
[pid 5072] openat(AT_FDCWD, "/proc/sys/kernel/msgmni", O_WRONLY|O_CLOEXEC) = 3
[pid 5072] write(3, "1024", 4) = 4
[pid 5072] close(3) = 0
[pid 5072] openat(AT_FDCWD, "/proc/sys/kernel/msgmnb", O_WRONLY|O_CLOEXEC) = 3
[pid 5072] write(3, "1024", 4) = 4
[pid 5072] close(3) = 0
[pid 5072] openat(AT_FDCWD, "/proc/sys/kernel/sem", O_WRONLY|O_CLOEXEC) = 3
[pid 5072] write(3, "1024 1048576 500 1024", 21) = 21
[pid 5072] close(3) = 0
[pid 5072] getpid() = 1
[pid 5072] capget({version=_LINUX_CAPABILITY_VERSION_3, pid=1}, {effective=1<tree_lock/1){+.+.}-{3:3}, at: hfsplus_find_init+0x14a/0x1c0
[ 59.543622][ T9]
[ 59.543622][ T9] but task is already holding lock:
[ 59.550975][ T9] ffff88801ca900b0 (&tree->tree_lock/1){+.+.}-{3:3}, at: hfsplus_find_init+0x14a/0x1c0
[ 59.560629][ T9]
[ 59.560629][ T9] other info that might help us debug this:
[ 59.568683][ T9] Possible unsafe locking scenario:
[ 59.568683][ T9]
[ 59.576123][ T9] CPU0
[ 59.579396][ T9] ----
[ 59.582700][ T9] lock(&tree->tree_lock/1);
[ 59.587379][ T9] lock(&tree->tree_lock/1);
[ 59.592057][ T9]
[ 59.592057][ T9] *** DEADLOCK ***
[ 59.592057][ T9]
[ 59.600194][ T9] May be due to missing lock nesting notation
[ 59.600194][ T9]
[ 59.608511][ T9] 5 locks held by kworker/u4:0/9:
[ 59.613537][ T9] #0: ffff888017513138 ((wq_completion)writeback){+.+.}-{0:0}, at: process_one_work+0x77e/0x10e0
[ 59.624253][ T9] #1: ffffc900000e7d20 ((work_completion)(&(&wb->dwork)->work)){+.+.}-{0:0}, at: process_one_work+0x7c8/0x10e0
[ 59.636095][ T9] #2: ffff8880774fa988 (&hip->extents_lock){+.+.}-{3:3}, at: hfsplus_ext_write_extent+0x8e/0x1f0
[ 59.646734][ T9] #3: ffff88801ca900b0 (&tree->tree_lock/1){+.+.}-{3:3}, at: hfsplus_find_init+0x14a/0x1c0
[ 59.656837][ T9] #4: ffff8880774f8108 (&HFSPLUS_I(inode)->extents_lock){+.+.}-{3:3}, at: hfsplus_file_extend+0x1d6/0x1b10
[ 59.668416][ T9]
[ 59.668416][ T9] stack backtrace:
[ 59.674298][ T9] CPU: 0 PID: 9 Comm: kworker/u4:0 Not tainted 6.3.0-rc4-syzkaller-00199-g7b50567bdcad #0
[ 59.684183][ T9] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/02/2023
[ 59.694255][ T9] Workqueue: writeback wb_workfn (flush-7:0)
[ 59.700259][ T9] Call Trace:
[ 59.703548][ T9]
[ 59.706477][ T9] dump_stack_lvl+0x1e7/0x2d0
[ 59.711168][ T9] ? nf_tcp_handle_invalid+0x650/0x650
[ 59.716630][ T9] ? panic+0x770/0x770
[ 59.720698][ T9] validate_chain+0x472a/0x58e0
[ 59.725553][ T9] ? reacquire_held_locks+0x660/0x660
[ 59.730927][ T9] ? mark_lock+0x9a/0x340
[ 59.735259][ T9] ? lockdep_hardirqs_on_prepare+0x43c/0x7a0
[ 59.741250][ T9] ? mark_lock+0x9a/0x340
[ 59.745600][ T9] __lock_acquire+0x125b/0x1f80
[ 59.750456][ T9] lock_acquire+0x1e1/0x520
[ 59.754958][ T9] ? hfsplus_find_init+0x14a/0x1c0
[ 59.760089][ T9] ? read_lock_is_recursive+0x20/0x20
[ 59.765469][ T9] ? hfsplus_bmap_reserve+0x105/0x4e0
[ 59.770851][ T9] ? __hfsplus_ext_write_extent+0x2a4/0x5b0
[ 59.776743][ T9] ? __might_sleep+0xc0/0xc0
[ 59.781325][ T9] ? wb_writeback+0x458/0xc70
[ 59.785997][ T9] ? wb_workfn+0x400/0xff0
[ 59.790411][ T9] ? process_one_work+0x8a0/0x10e0
[ 59.795522][ T9] ? worker_thread+0xa63/0x1210
[ 59.800457][ T9] ? kthread+0x270/0x300
[ 59.804700][ T9] ? ret_from_fork+0x1f/0x30
[ 59.809303][ T9] __mutex_lock_common+0x1d8/0x2530
[ 59.814507][ T9] ? hfsplus_find_init+0x14a/0x1c0
[ 59.819635][ T9] ? hfsplus_find_init+0x14a/0x1c0
[ 59.824755][ T9] ? mutex_lock_io_nested+0x60/0x60
[ 59.829964][ T9] ? hfsplus_find_init+0x85/0x1c0
[ 59.834998][ T9] ? rcu_is_watching+0x15/0xb0
[ 59.839763][ T9] ? hfsplus_find_init+0x85/0x1c0
[ 59.844796][ T9] ? __kmalloc+0xe6/0x230
[ 59.849131][ T9] mutex_lock_nested+0x1b/0x20
[ 59.853902][ T9] hfsplus_find_init+0x14a/0x1c0
[ 59.858845][ T9] hfsplus_file_extend+0x40e/0x1b10
[ 59.864054][ T9] ? hfsplus_get_block+0x14e0/0x14e0
[ 59.869366][ T9] ? rcu_is_watching+0x15/0xb0
[ 59.874134][ T9] ? trace_contention_end+0x3c/0xf0
[ 59.879340][ T9] ? __mutex_lock_common+0x42d/0x2530
[ 59.884723][ T9] ? hfsplus_brec_find+0x19d/0x570
[ 59.889836][ T9] hfsplus_bmap_reserve+0x105/0x4e0
[ 59.895052][ T9] __hfsplus_ext_write_extent+0x2a4/0x5b0
[ 59.900777][ T9] hfsplus_ext_write_extent+0x16a/0x1f0
[ 59.906321][ T9] ? hfsplus_ext_cmp_key+0x2f0/0x2f0
[ 59.911607][ T9] ? do_raw_spin_lock+0x14d/0x3a0
[ 59.916634][ T9] hfsplus_write_inode+0x22/0x5e0
[ 59.921660][ T9] __writeback_single_inode+0x69b/0xfb0
[ 59.927208][ T9] writeback_sb_inodes+0x8ef/0x11d0
[ 59.932422][ T9] ? queue_io+0x570/0x570
[ 59.936755][ T9] ? __writeback_inodes_wb+0x260/0x260
[ 59.942222][ T9] ? queue_io+0x3d5/0x570
[ 59.946555][ T9] wb_writeback+0x458/0xc70
[ 59.951063][ T9] ? rcu_lock_release+0x30/0x30
[ 59.955922][ T9] ? lockdep_hardirqs_on_prepare+0x43c/0x7a0
[ 59.961907][ T9] wb_workfn+0x400/0xff0
[ 59.966165][ T9] ? inode_wait_for_writeback+0x290/0x290
[ 59.971894][ T9] ? read_lock_is_recursive+0x20/0x20
[ 59.977256][ T9] ? lockdep_hardirqs_on_prepare+0x43c/0x7a0
[ 59.983245][ T9] ? print_irqtrace_events+0x220/0x220
[ 59.988701][ T9] ? _raw_spin_unlock_irqrestore+0xdd/0x140
[ 59.994607][ T9] process_one_work+0x8a0/0x10e0
[ 59.999554][ T9] ? worker_detach_from_pool+0x290/0x290
[ 60.005193][ T9] ? _raw_spin_lock_irqsave+0x120/0x120
[ 60.010751][ T9] ? kthread_data+0x52/0xc0
[ 60.015271][ T9] ? wq_worker_running+0x9b/0x1a0
[ 60.020289][ T9] worker_thread+0xa63/0x1210
[ 60.024990][ T9] kthread+0x270/0x300
[ 60.029062][ T9] ? pr_cont_work+0x5e0/0x5e0
[ 60.033761][ T9] ? kthread_blkcg+0xd0/0xd0
[ 60.038348][ T9] ret_from_fork+0x1f/0x30
[ 60.042792][ T9]