./strace-static-x86_64 -e \!wait4,clock_nanosleep,nanosleep -s 100 -x -f ./syz-executor1654595074 <...>
Warning: Permanently added '10.128.0.71' (ED25519) to the list of known hosts.
execve("./syz-executor1654595074", ["./syz-executor1654595074"], 0x7fff502df700 /* 10 vars */) = 0
brk(NULL) = 0x5555678ca000
brk(0x5555678cad00) = 0x5555678cad00
arch_prctl(ARCH_SET_FS, 0x5555678ca380) = 0
set_tid_address(0x5555678ca650) = 5832
set_robust_list(0x5555678ca660, 24) = 0
rseq(0x5555678caca0, 0x20, 0, 0x53053053) = 0
prlimit64(0, RLIMIT_STACK, NULL, {rlim_cur=8192*1024, rlim_max=RLIM64_INFINITY}) = 0
readlink("/proc/self/exe", "/root/syz-executor1654595074", 4096) = 28
getrandom("\x49\x01\x9d\x33\xfe\x09\xcf\x41", 8, GRND_NONBLOCK) = 8
brk(NULL) = 0x5555678cad00
brk(0x5555678ebd00) = 0x5555678ebd00
brk(0x5555678ec000) = 0x5555678ec000
mprotect(0x7fd815417000, 16384, PROT_READ) = 0
mmap(0x1ffffffff000, 4096, PROT_NONE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x1ffffffff000
mmap(0x200000000000, 16777216, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x200000000000
mmap(0x200001000000, 4096, PROT_NONE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x200001000000
unshare(CLONE_NEWPID) = 0
clone(child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x5555678ca650) = 5833
./strace-static-x86_64: Process 5833 attached
[pid 5833] set_robust_list(0x5555678ca660, 24) = 0
[pid 5833] prctl(PR_SET_PDEATHSIG, SIGKILL) = 0
[pid 5833] getppid() = 0
[pid 5833] prlimit64(0, RLIMIT_AS, {rlim_cur=204800*1024, rlim_max=204800*1024}, NULL) = 0
[pid 5833] prlimit64(0, RLIMIT_MEMLOCK, {rlim_cur=32768*1024, rlim_max=32768*1024}, NULL) = 0
[pid 5833] prlimit64(0, RLIMIT_FSIZE, {rlim_cur=139264*1024, rlim_max=139264*1024}, NULL) = 0
[pid 5833] prlimit64(0, RLIMIT_STACK, {rlim_cur=1024*1024, rlim_max=1024*1024}, NULL) = 0
[pid 5833] prlimit64(0, RLIMIT_CORE, {rlim_cur=131072*1024, rlim_max=131072*1024}, NULL) = 0
[pid 5833] prlimit64(0, RLIMIT_NOFILE, {rlim_cur=256, rlim_max=256}, NULL) = 0
[pid 5833] unshare(CLONE_NEWNS) = 0
[pid 5833] mount(NULL, "/", NULL, MS_REC|MS_PRIVATE, NULL) = 0
[pid 5833] unshare(CLONE_NEWIPC) = 0
[pid 5833] unshare(CLONE_NEWCGROUP) = 0
[pid 5833] unshare(CLONE_NEWUTS) = 0
[pid 5833] unshare(CLONE_SYSVSEM) = 0
[pid 5833] openat(AT_FDCWD, "/proc/sys/kernel/shmmax", O_WRONLY|O_CLOEXEC) = 3
[pid 5833] write(3, "16777216", 8) = 8
[pid 5833] close(3) = 0
[pid 5833] openat(AT_FDCWD, "/proc/sys/kernel/shmall", O_WRONLY|O_CLOEXEC) = 3
[pid 5833] write(3, "536870912", 9) = 9
[pid 5833] close(3) = 0
[pid 5833] openat(AT_FDCWD, "/proc/sys/kernel/shmmni", O_WRONLY|O_CLOEXEC) = 3
[pid 5833] write(3, "1024", 4) = 4
[pid 5833] close(3) = 0
[pid 5833] openat(AT_FDCWD, "/proc/sys/kernel/msgmax", O_WRONLY|O_CLOEXEC) = 3
[pid 5833] write(3, "8192", 4) = 4
[pid 5833] close(3) = 0
[pid 5833] openat(AT_FDCWD, "/proc/sys/kernel/msgmni", O_WRONLY|O_CLOEXEC) = 3
[pid 5833] write(3, "1024", 4) = 4
[pid 5833] close(3) = 0
[pid 5833] openat(AT_FDCWD, "/proc/sys/kernel/msgmnb", O_WRONLY|O_CLOEXEC) = 3
[pid 5833] write(3, "1024", 4) = 4
[pid 5833] close(3) = 0
[pid 5833] openat(AT_FDCWD, "/proc/sys/kernel/sem", O_WRONLY|O_CLOEXEC) = 3
[pid 5833] write(3, "1024 1048576 500 1024", 21) = 21
[pid 5833] close(3) = 0
[pid 5833] getpid() = 1
[pid 5833] capget({version=_LINUX_CAPABILITY_VERSION_3, pid=1}, {effective=1<team_lock_key){+.+.}-{4:4}, at: team_device_event+0x544/0xa20
[ 99.547479][ T13]
[ 99.547479][ T13] but task is already holding lock:
[ 99.554850][ T13] ffff888075178d30 (&dev_instance_lock_key#19){+.+.}-{4:4}, at: __linkwatch_run_queue+0x4a0/0x7e0
[ 99.565481][ T13]
[ 99.565481][ T13] which lock already depends on the new lock.
[ 99.565481][ T13]
[ 99.575964][ T13]
[ 99.575964][ T13] the existing dependency chain (in reverse order) is:
[ 99.584968][ T13]
[ 99.584968][ T13] -> #1 (&dev_instance_lock_key#19){+.+.}-{4:4}:
[ 99.593524][ T13]        lock_acquire+0x120/0x360
[ 99.598550][ T13]        __mutex_lock+0x182/0xe80
[ 99.603570][ T13]        dev_set_mtu+0x10e/0x260
[ 99.608498][ T13]        team_add_slave+0x8b8/0x2840
[ 99.613791][ T13]        do_set_master+0x530/0x6d0
[ 99.618901][ T13]        do_setlink+0xd47/0x40d0
[ 99.623838][ T13]        rtnl_newlink+0x160b/0x1c70
[ 99.629072][ T13]        rtnetlink_rcv_msg+0x7cc/0xb70
[ 99.634537][ T13]        netlink_rcv_skb+0x219/0x490
[ 99.639828][ T13]        netlink_unicast+0x758/0x8d0
[ 99.645115][ T13]        netlink_sendmsg+0x805/0xb30
[ 99.650402][ T13]        __sock_sendmsg+0x219/0x270
[ 99.655615][ T13]        ____sys_sendmsg+0x505/0x830
[ 99.660900][ T13]        ___sys_sendmsg+0x21f/0x2a0
[ 99.666099][ T13]        __x64_sys_sendmsg+0x19b/0x260
[ 99.671564][ T13]        do_syscall_64+0xf6/0x210
[ 99.676592][ T13]        entry_SYSCALL_64_after_hwframe+0x77/0x7f
[ 99.683008][ T13]
[ 99.683008][ T13] -> #0 (team->team_lock_key){+.+.}-{4:4}:
[ 99.691011][ T13]        validate_chain+0xb9b/0x2140
[ 99.696293][ T13]        __lock_acquire+0xaac/0xd20
[ 99.701503][ T13]        lock_acquire+0x120/0x360
[ 99.706531][ T13]        __mutex_lock+0x182/0xe80
[ 99.711559][ T13]        team_device_event+0x544/0xa20
[ 99.717019][ T13]        notifier_call_chain+0x1b3/0x3e0
[ 99.722650][ T13]        netif_state_change+0x284/0x3a0
[ 99.728202][ T13]        linkwatch_do_dev+0x117/0x170
[ 99.733580][ T13]        __linkwatch_run_queue+0x56d/0x7e0
[ 99.739401][ T13]        linkwatch_event+0x4c/0x60
[ 99.744516][ T13]        process_scheduled_works+0xadb/0x17a0
[ 99.750591][ T13]        worker_thread+0x8a0/0xda0
[ 99.755701][ T13]        kthread+0x70e/0x8a0
[ 99.760295][ T13]        ret_from_fork+0x4b/0x80
[ 99.765240][ T13]        ret_from_fork_asm+0x1a/0x30
[ 99.770528][ T13]
[ 99.770528][ T13] other info that might help us debug this:
[ 99.770528][ T13]
[ 99.780765][ T13]  Possible unsafe locking scenario:
[ 99.780765][ T13]
[ 99.788225][ T13]        CPU0                    CPU1
[ 99.793595][ T13]        ----                    ----
[ 99.798957][ T13]   lock(&dev_instance_lock_key#19);
[ 99.804257][ T13]                                lock(team->team_lock_key);
[ 99.811545][ T13]                                lock(&dev_instance_lock_key#19);
[ 99.819365][ T13]   lock(team->team_lock_key);
[ 99.824131][ T13]
[ 99.824131][ T13]  *** DEADLOCK ***
[ 99.824131][ T13]
[ 99.832269][ T13] 4 locks held by kworker/u8:1/13:
[ 99.837375][ T13]  #0: ffff88801a089148 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_scheduled_works+0x9b1/0x17a0
[ 99.849054][ T13]  #1: ffffc90000127c60 ((linkwatch_work).work){+.+.}-{0:0}, at: process_scheduled_works+0x9ec/0x17a0
[ 99.860042][ T13]  #2: ffffffff8f2f3348 (rtnl_mutex){+.+.}-{4:4}, at: linkwatch_event+0xe/0x60
[ 99.869053][ T13]  #3: ffff888075178d30 (&dev_instance_lock_key#19){+.+.}-{4:4}, at: __linkwatch_run_queue+0x4a0/0x7e0
[ 99.880214][ T13]
[ 99.880214][ T13] stack backtrace:
[ 99.886128][ T13] CPU: 0 UID: 0 PID: 13 Comm: kworker/u8:1 Not tainted 6.15.0-rc3-syzkaller-00580-g4acf6d4f6afc #0 PREEMPT(full)
[ 99.886148][ T13] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/19/2025
[ 99.886159][ T13] Workqueue: events_unbound linkwatch_event
[ 99.886183][ T13] Call Trace:
[ 99.886194][ T13]
[ 99.886202][ T13]  dump_stack_lvl+0x189/0x250
[ 99.886231][ T13]  ? __pfx_dump_stack_lvl+0x10/0x10
[ 99.886253][ T13]  ? __pfx__printk+0x10/0x10
[ 99.886269][ T13]  ? print_lock_name+0xde/0x100
[ 99.886294][ T13]  print_circular_bug+0x2ee/0x310
[ 99.886311][ T13]  check_noncircular+0x134/0x160
[ 99.886330][ T13]  validate_chain+0xb9b/0x2140
[ 99.886353][ T13]  __lock_acquire+0xaac/0xd20
[ 99.886376][ T13]  ? team_device_event+0x544/0xa20
[ 99.886390][ T13]  lock_acquire+0x120/0x360
[ 99.886410][ T13]  ? team_device_event+0x544/0xa20
[ 99.886426][ T13]  ? _raw_spin_unlock_irqrestore+0x85/0x110
[ 99.886447][ T13]  __mutex_lock+0x182/0xe80
[ 99.886466][ T13]  ? team_device_event+0x544/0xa20
[ 99.886480][ T13]  ? __asan_memset+0x22/0x50
[ 99.886495][ T13]  ? call_fib_nh_notifiers+0x33a/0x4e0
[ 99.886521][ T13]  ? __pfx_call_fib_nh_notifiers+0x10/0x10
[ 99.886543][ T13]  ? team_device_event+0x544/0xa20
[ 99.886559][ T13]  ? __pfx___mutex_lock+0x10/0x10
[ 99.886580][ T13]  ? fib_sync_down_dev+0x78a/0x7b0
[ 99.886608][ T13]  team_device_event+0x544/0xa20
[ 99.886624][ T13]  notifier_call_chain+0x1b3/0x3e0
[ 99.886642][ T13]  netif_state_change+0x284/0x3a0
[ 99.886666][ T13]  ? __pfx_netif_state_change+0x10/0x10
[ 99.886687][ T13]  ? dev_deactivate+0x129/0x1b0
[ 99.886701][ T13]  ? nsim_get_iflink+0x20/0x280
[ 99.886724][ T13]  ? rfc2863_policy+0x1c6/0x3e0
[ 99.886741][ T13]  linkwatch_do_dev+0x117/0x170
[ 99.886759][ T13]  __linkwatch_run_queue+0x56d/0x7e0
[ 99.886779][ T13]  ? __pfx___linkwatch_run_queue+0x10/0x10
[ 99.886798][ T13]  ? _raw_spin_unlock_irq+0x23/0x50
[ 99.886813][ T13]  ? process_scheduled_works+0x9ec/0x17a0
[ 99.886836][ T13]  ? process_scheduled_works+0x9ec/0x17a0
[ 99.886860][ T13]  linkwatch_event+0x4c/0x60
[ 99.886876][ T13]  process_scheduled_works+0xadb/0x17a0
[ 99.886910][ T13]  ? __pfx_process_scheduled_works+0x10/0x10
[ 99.886939][ T13]  worker_thread+0x8a0/0xda0
[ 99.886962][ T13]  kthread+0x70e/0x8a0
[ 99.886981][ T13]  ? __pfx_worker_thread+0x10/0x10
[ 99.886994][ T13]  ? __pfx_kthread+0x10/0x10
[ 99.887011][ T13]  ? __pfx_kthread+0x10/0x10
[pid 5833] exit_group(1) = ?
[ 99.887028][ T13]  ? _raw_spin_unlock_irq+0x23/0x50
[ 99.887044][ T13]  ? lockdep_hardirqs_on+0x9c/0x150
[ 99.887062][ T13]  ? __pfx_kthread+0x10/0x10
[ 99.887085][ T13]  ret_from_fork+0x4b/0x80
[ 99.887100][ T13]  ? __pfx_kthread+0x10/0x10
[ 99.887116][ T13]  ret_from_fork_asm+0x1a/0x30
[ 99.887169][ T13]
[pid 5833] +++ exited with 1 +++
--- SIGCHLD {si_signo=SIGCHLD, si_code=CLD_EXITED, si_pid=5833, si_uid=0, si_status=1, si_utime=0, si_stime=119 /* 1.19 s */} ---
exit_group(0) = ?
+++ exited with 0 +++
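
The lockdep report above describes a plain AB-BA lock-order inversion: the recorded dependency chain (#1) shows the per-port netdev instance lock (&dev_instance_lock_key#19) being taken via team_add_slave -> dev_set_mtu, i.e. after the team driver's team->team_lock_key, while the linkwatch worker (#0) already holds the instance lock and is trying to take the team lock in team_device_event. The sketch below is not kernel code; it is a minimal userspace illustration of that inversion using two pthread mutexes as stand-ins for the two kernel locks, with sleeps inserted only to make the hang easy to reproduce.

/*
 * Illustration only: the two mutexes stand in for team->team_lock_key
 * and &dev_instance_lock_key#19 from the lockdep report above.
 * Build with: cc -pthread abba.c -o abba
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t team_lock = PTHREAD_MUTEX_INITIALIZER;     /* ~ team->team_lock_key */
static pthread_mutex_t instance_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ dev_instance_lock_key */

/* Path A, like chain #1: team_add_slave() -> dev_set_mtu()
 * takes the team lock first, then the port's instance lock. */
static void *enslave_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&team_lock);
	usleep(1000);                        /* widen the race window */
	pthread_mutex_lock(&instance_lock);
	printf("enslave path: team_lock then instance_lock\n");
	pthread_mutex_unlock(&instance_lock);
	pthread_mutex_unlock(&team_lock);
	return NULL;
}

/* Path B, like chain #0: __linkwatch_run_queue() -> team_device_event()
 * holds the instance lock and then takes the team lock. */
static void *linkwatch_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&instance_lock);
	usleep(1000);
	pthread_mutex_lock(&team_lock);
	printf("linkwatch path: instance_lock then team_lock\n");
	pthread_mutex_unlock(&team_lock);
	pthread_mutex_unlock(&instance_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, enslave_path, NULL);
	pthread_create(&b, NULL, linkwatch_path, NULL);
	/* With the sleeps above, both threads usually end up holding one
	 * lock while waiting forever for the other, i.e. the scenario
	 * printed under "Possible unsafe locking scenario". */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

In the kernel the two paths run in different contexts (an rtnetlink sendmsg on one side, the linkwatch workqueue on the other), but the ordering conflict is the same one the two threads model here.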
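
For reference, the strace prologue before the splat is just the executor sandboxing itself. Below is a rough, simplified C rendering of those steps (new PID namespace, child with PR_SET_PDEATHSIG, private mount/IPC/cgroup/UTS/SysV-sem namespaces, SysV IPC sysctl writes). It is not the actual syz-executor source: the rlimit calls are omitted, fork() stands in for the raw clone() in the trace, and error handling is skipped.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>

/* Helper (not in the trace): write one value to a /proc/sys file,
 * mirroring the openat/write/close triplets logged by strace. */
static void write_sysctl(const char *path, const char *value)
{
	int fd = open(path, O_WRONLY | O_CLOEXEC);
	if (fd >= 0) {
		write(fd, value, strlen(value));
		close(fd);
	}
}

int main(void)
{
	/* unshare(CLONE_NEWPID) followed by a clone(... SIGCHLD ...) in the trace. */
	unshare(CLONE_NEWPID);
	pid_t pid = fork();
	if (pid == 0) {
		/* Child (PID 1 in the new namespace): die with the parent. */
		prctl(PR_SET_PDEATHSIG, SIGKILL);
		unshare(CLONE_NEWNS);
		/* Make mounts private so later mounts don't propagate out. */
		mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
		unshare(CLONE_NEWIPC);
		unshare(CLONE_NEWCGROUP);
		unshare(CLONE_NEWUTS);
		unshare(CLONE_SYSVSEM);
		/* SysV IPC limits written in the trace. */
		write_sysctl("/proc/sys/kernel/shmmax", "16777216");
		write_sysctl("/proc/sys/kernel/shmall", "536870912");
		write_sysctl("/proc/sys/kernel/shmmni", "1024");
		write_sysctl("/proc/sys/kernel/msgmax", "8192");
		write_sysctl("/proc/sys/kernel/msgmni", "1024");
		write_sysctl("/proc/sys/kernel/msgmnb", "1024");
		write_sysctl("/proc/sys/kernel/sem", "1024 1048576 500 1024");
		/* ... reproducer body would run here ... */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return 0;
}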