syzbot


possible deadlock in fq_pie_timer

Status: auto-obsoleted due to no activity on 2023/10/20 02:10
Reported-by: syzbot+bd5310d8028271c42133@syzkaller.appspotmail.com
First crash: 466d, last: 463d

Sample crash report:
=====================================================
WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected
5.15.120-syzkaller #0 Not tainted
-----------------------------------------------------
syz-executor.2/12312 [HC0[0]:SC0[2]:HE1:SE0] is trying to acquire:
ffffffff8c9deda0 (fs_reclaim){+.+.}-{0:0}, at: might_alloc include/linux/sched/mm.h:206 [inline]
ffffffff8c9deda0 (fs_reclaim){+.+.}-{0:0}, at: slab_pre_alloc_hook+0x22/0xc0 mm/slab.h:492

and this task is already holding:
ffff88807299b908 (&sch->q.lock){+.-.}-{2:2}, at: netem_change+0x257/0x20c0 net/sched/sch_netem.c:972
which would create a new lock dependency:
 (&sch->q.lock){+.-.}-{2:2} -> (fs_reclaim){+.+.}-{0:0}

but this new dependency connects a SOFTIRQ-irq-safe lock:
 (&sch->q.lock){+.-.}-{2:2}

... which became SOFTIRQ-irq-safe at:
  lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
  __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
  _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:154
  spin_lock include/linux/spinlock.h:363 [inline]
  fq_pie_timer+0x87/0x260 net/sched/sch_fq_pie.c:386
  call_timer_fn+0x16d/0x560 kernel/time/timer.c:1421
  expire_timers kernel/time/timer.c:1466 [inline]
  __run_timers+0x67c/0x890 kernel/time/timer.c:1737
  run_timer_softirq+0x63/0xf0 kernel/time/timer.c:1750
  __do_softirq+0x3b3/0x93a kernel/softirq.c:558
  invoke_softirq kernel/softirq.c:432 [inline]
  __irq_exit_rcu+0x155/0x240 kernel/softirq.c:636
  irq_exit_rcu+0x5/0x20 kernel/softirq.c:648
  sysvec_apic_timer_interrupt+0x91/0xb0 arch/x86/kernel/apic/apic.c:1096
  asm_sysvec_apic_timer_interrupt+0x16/0x20 arch/x86/include/asm/idtentry.h:638
  bytes_is_nonzero mm/kasan/generic.c:85 [inline]
  memory_is_nonzero mm/kasan/generic.c:102 [inline]
  memory_is_poisoned_n mm/kasan/generic.c:128 [inline]
  memory_is_poisoned mm/kasan/generic.c:159 [inline]
  check_region_inline mm/kasan/generic.c:180 [inline]
  kasan_check_range+0x81/0x290 mm/kasan/generic.c:189
  memset+0x1f/0x40 mm/kasan/shadow.c:44
  __hrtimer_init+0x45/0x260 kernel/time/hrtimer.c:1557
  __hrtimer_init_sleeper kernel/time/hrtimer.c:1996 [inline]
  hrtimer_init_sleeper_on_stack kernel/time/hrtimer.c:446 [inline]
  schedule_hrtimeout_range_clock+0xfd/0x470 kernel/time/hrtimer.c:2302
  ep_poll+0x199a/0x1c60 fs/eventpoll.c:1877
  do_epoll_wait+0x1ae/0x220 fs/eventpoll.c:2255
  do_epoll_pwait+0x56/0x1d0 fs/eventpoll.c:2289
  __do_sys_epoll_pwait fs/eventpoll.c:2302 [inline]
  __se_sys_epoll_pwait fs/eventpoll.c:2296 [inline]
  __x64_sys_epoll_pwait+0x2b4/0x300 fs/eventpoll.c:2296
  do_syscall_x64 arch/x86/entry/common.c:50 [inline]
  do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
  entry_SYSCALL_64_after_hwframe+0x61/0xcb

to a SOFTIRQ-irq-unsafe lock:
 (fs_reclaim){+.+.}-{0:0}

... which became SOFTIRQ-irq-unsafe at:
...
  lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
  __fs_reclaim_acquire mm/page_alloc.c:4547 [inline]
  fs_reclaim_acquire+0x83/0x120 mm/page_alloc.c:4561
  might_alloc include/linux/sched/mm.h:206 [inline]
  slab_pre_alloc_hook+0x22/0xc0 mm/slab.h:492
  slab_alloc_node mm/slub.c:3134 [inline]
  slab_alloc mm/slub.c:3228 [inline]
  kmem_cache_alloc_trace+0x49/0x290 mm/slub.c:3245
  kmalloc include/linux/slab.h:591 [inline]
  kzalloc include/linux/slab.h:721 [inline]
  alloc_workqueue_attrs kernel/workqueue.c:3403 [inline]
  wq_numa_init+0x122/0x49f kernel/workqueue.c:5994
  workqueue_init+0x18/0x5e1 kernel/workqueue.c:6121
  kernel_init_freeable+0x40a/0x5c5 init/main.c:1603
  kernel_init+0x19/0x290 init/main.c:1510
  ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298

other info that might help us debug this:

 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(fs_reclaim);
                               local_irq_disable();
                               lock(&sch->q.lock);
                               lock(fs_reclaim);
  <Interrupt>
    lock(&sch->q.lock);

 *** DEADLOCK ***

2 locks held by syz-executor.2/12312:
 #0: ffffffff8d9e02c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnl_lock net/core/rtnetlink.c:72 [inline]
 #0: ffffffff8d9e02c8 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x94c/0xee0 net/core/rtnetlink.c:5590
 #1: ffff88807299b908 (&sch->q.lock){+.-.}-{2:2}, at: netem_change+0x257/0x20c0 net/sched/sch_netem.c:972

the dependencies between SOFTIRQ-irq-safe lock and the holding lock:
-> (&sch->q.lock){+.-.}-{2:2} {
   HARDIRQ-ON-W at:
                    lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
                    __raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline]
                    _raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:178
                    spin_lock_bh include/linux/spinlock.h:368 [inline]
                    dev_reset_queue+0xc1/0x140 net/sched/sch_generic.c:1245
                    netdev_for_each_tx_queue include/linux/netdevice.h:2379 [inline]
                    dev_deactivate_many+0x6ad/0xbf0 net/sched/sch_generic.c:1313
                    dev_deactivate+0x177/0x270 net/sched/sch_generic.c:1336
                    linkwatch_do_dev+0x104/0x160 net/core/link_watch.c:165
                    __linkwatch_run_queue+0x4ca/0x7f0 net/core/link_watch.c:213
                    linkwatch_event+0x48/0x50 net/core/link_watch.c:252
                    process_one_work+0x8a1/0x10c0 kernel/workqueue.c:2307
                    worker_thread+0xaca/0x1280 kernel/workqueue.c:2454
                    kthread+0x3f6/0x4f0 kernel/kthread.c:319
                    ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298
   IN-SOFTIRQ-W at:
                    lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
                    __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
                    _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:154
                    spin_lock include/linux/spinlock.h:363 [inline]
                    fq_pie_timer+0x87/0x260 net/sched/sch_fq_pie.c:386
                    call_timer_fn+0x16d/0x560 kernel/time/timer.c:1421
                    expire_timers kernel/time/timer.c:1466 [inline]
                    __run_timers+0x67c/0x890 kernel/time/timer.c:1737
                    run_timer_softirq+0x63/0xf0 kernel/time/timer.c:1750
                    __do_softirq+0x3b3/0x93a kernel/softirq.c:558
                    invoke_softirq kernel/softirq.c:432 [inline]
                    __irq_exit_rcu+0x155/0x240 kernel/softirq.c:636
                    irq_exit_rcu+0x5/0x20 kernel/softirq.c:648
                    sysvec_apic_timer_interrupt+0x91/0xb0 arch/x86/kernel/apic/apic.c:1096
                    asm_sysvec_apic_timer_interrupt+0x16/0x20 arch/x86/include/asm/idtentry.h:638
                    bytes_is_nonzero mm/kasan/generic.c:85 [inline]
                    memory_is_nonzero mm/kasan/generic.c:102 [inline]
                    memory_is_poisoned_n mm/kasan/generic.c:128 [inline]
                    memory_is_poisoned mm/kasan/generic.c:159 [inline]
                    check_region_inline mm/kasan/generic.c:180 [inline]
                    kasan_check_range+0x81/0x290 mm/kasan/generic.c:189
                    memset+0x1f/0x40 mm/kasan/shadow.c:44
                    __hrtimer_init+0x45/0x260 kernel/time/hrtimer.c:1557
                    __hrtimer_init_sleeper kernel/time/hrtimer.c:1996 [inline]
                    hrtimer_init_sleeper_on_stack kernel/time/hrtimer.c:446 [inline]
                    schedule_hrtimeout_range_clock+0xfd/0x470 kernel/time/hrtimer.c:2302
                    ep_poll+0x199a/0x1c60 fs/eventpoll.c:1877
                    do_epoll_wait+0x1ae/0x220 fs/eventpoll.c:2255
                    do_epoll_pwait+0x56/0x1d0 fs/eventpoll.c:2289
                    __do_sys_epoll_pwait fs/eventpoll.c:2302 [inline]
                    __se_sys_epoll_pwait fs/eventpoll.c:2296 [inline]
                    __x64_sys_epoll_pwait+0x2b4/0x300 fs/eventpoll.c:2296
                    do_syscall_x64 arch/x86/entry/common.c:50 [inline]
                    do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
                    entry_SYSCALL_64_after_hwframe+0x61/0xcb
   INITIAL USE at:
                   lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
                   __raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline]
                   _raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:178
                   spin_lock_bh include/linux/spinlock.h:368 [inline]
                   dev_reset_queue+0xc1/0x140 net/sched/sch_generic.c:1245
                   netdev_for_each_tx_queue include/linux/netdevice.h:2379 [inline]
                   dev_deactivate_many+0x6ad/0xbf0 net/sched/sch_generic.c:1313
                   dev_deactivate+0x177/0x270 net/sched/sch_generic.c:1336
                   linkwatch_do_dev+0x104/0x160 net/core/link_watch.c:165
                   __linkwatch_run_queue+0x4ca/0x7f0 net/core/link_watch.c:213
                   linkwatch_event+0x48/0x50 net/core/link_watch.c:252
                   process_one_work+0x8a1/0x10c0 kernel/workqueue.c:2307
                   worker_thread+0xaca/0x1280 kernel/workqueue.c:2454
                   kthread+0x3f6/0x4f0 kernel/kthread.c:319
                   ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298
 }
 ... key      at: [<ffffffff91795380>] qdisc_alloc.__key+0x0/0x20

the dependencies between the lock to be acquired
 and SOFTIRQ-irq-unsafe lock:
-> (fs_reclaim){+.+.}-{0:0} {
   HARDIRQ-ON-W at:
                    lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
                    __fs_reclaim_acquire mm/page_alloc.c:4547 [inline]
                    fs_reclaim_acquire+0x83/0x120 mm/page_alloc.c:4561
                    might_alloc include/linux/sched/mm.h:206 [inline]
                    slab_pre_alloc_hook+0x22/0xc0 mm/slab.h:492
                    slab_alloc_node mm/slub.c:3134 [inline]
                    slab_alloc mm/slub.c:3228 [inline]
                    kmem_cache_alloc_trace+0x49/0x290 mm/slub.c:3245
                    kmalloc include/linux/slab.h:591 [inline]
                    kzalloc include/linux/slab.h:721 [inline]
                    alloc_workqueue_attrs kernel/workqueue.c:3403 [inline]
                    wq_numa_init+0x122/0x49f kernel/workqueue.c:5994
                    workqueue_init+0x18/0x5e1 kernel/workqueue.c:6121
                    kernel_init_freeable+0x40a/0x5c5 init/main.c:1603
                    kernel_init+0x19/0x290 init/main.c:1510
                    ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298
   SOFTIRQ-ON-W at:
                    lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
                    __fs_reclaim_acquire mm/page_alloc.c:4547 [inline]
                    fs_reclaim_acquire+0x83/0x120 mm/page_alloc.c:4561
                    might_alloc include/linux/sched/mm.h:206 [inline]
                    slab_pre_alloc_hook+0x22/0xc0 mm/slab.h:492
                    slab_alloc_node mm/slub.c:3134 [inline]
                    slab_alloc mm/slub.c:3228 [inline]
                    kmem_cache_alloc_trace+0x49/0x290 mm/slub.c:3245
                    kmalloc include/linux/slab.h:591 [inline]
                    kzalloc include/linux/slab.h:721 [inline]
                    alloc_workqueue_attrs kernel/workqueue.c:3403 [inline]
                    wq_numa_init+0x122/0x49f kernel/workqueue.c:5994
                    workqueue_init+0x18/0x5e1 kernel/workqueue.c:6121
                    kernel_init_freeable+0x40a/0x5c5 init/main.c:1603
                    kernel_init+0x19/0x290 init/main.c:1510
                    ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298
   INITIAL USE at:
                   lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
                   __fs_reclaim_acquire mm/page_alloc.c:4547 [inline]
                   fs_reclaim_acquire+0x83/0x120 mm/page_alloc.c:4561
                   might_alloc include/linux/sched/mm.h:206 [inline]
                   slab_pre_alloc_hook+0x22/0xc0 mm/slab.h:492
                   slab_alloc_node mm/slub.c:3134 [inline]
                   slab_alloc mm/slub.c:3228 [inline]
                   kmem_cache_alloc_trace+0x49/0x290 mm/slub.c:3245
                   kmalloc include/linux/slab.h:591 [inline]
                   kzalloc include/linux/slab.h:721 [inline]
                   alloc_workqueue_attrs kernel/workqueue.c:3403 [inline]
                   wq_numa_init+0x122/0x49f kernel/workqueue.c:5994
                   workqueue_init+0x18/0x5e1 kernel/workqueue.c:6121
                   kernel_init_freeable+0x40a/0x5c5 init/main.c:1603
                   kernel_init+0x19/0x290 init/main.c:1510
                   ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:298
 }
 ... key      at: [<ffffffff8c9deda0>] __fs_reclaim_map+0x0/0x160
 ... acquired at:
   lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
   __fs_reclaim_acquire mm/page_alloc.c:4547 [inline]
   fs_reclaim_acquire+0x83/0x120 mm/page_alloc.c:4561
   might_alloc include/linux/sched/mm.h:206 [inline]
   slab_pre_alloc_hook+0x22/0xc0 mm/slab.h:492
   slab_alloc_node mm/slub.c:3134 [inline]
   __kmalloc_node+0x71/0x390 mm/slub.c:4451
   kmalloc_node include/linux/slab.h:614 [inline]
   kvmalloc_node+0x80/0x140 mm/util.c:619
   kvmalloc include/linux/mm.h:805 [inline]
   get_dist_table+0x83/0x2c0 net/sched/sch_netem.c:788
   netem_change+0xa05/0x20c0 net/sched/sch_netem.c:988
   netem_init+0x58/0xb0 net/sched/sch_netem.c:1075
   qdisc_create+0x8ae/0x1390 net/sched/sch_api.c:1264
   tc_modify_qdisc+0xac5/0x1710
   rtnetlink_rcv_msg+0x993/0xee0 net/core/rtnetlink.c:5593
   netlink_rcv_skb+0x1cf/0x410 net/netlink/af_netlink.c:2504
   netlink_unicast_kernel net/netlink/af_netlink.c:1330 [inline]
   netlink_unicast+0x7b6/0x980 net/netlink/af_netlink.c:1356
   netlink_sendmsg+0xa30/0xd60 net/netlink/af_netlink.c:1923
   sock_sendmsg_nosec net/socket.c:704 [inline]
   sock_sendmsg net/socket.c:724 [inline]
   ____sys_sendmsg+0x59e/0x8f0 net/socket.c:2412
   ___sys_sendmsg+0x252/0x2e0 net/socket.c:2466
   __sys_sendmsg net/socket.c:2495 [inline]
   __do_sys_sendmsg net/socket.c:2504 [inline]
   __se_sys_sendmsg+0x19a/0x260 net/socket.c:2502
   do_syscall_x64 arch/x86/entry/common.c:50 [inline]
   do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
   entry_SYSCALL_64_after_hwframe+0x61/0xcb


stack backtrace:
CPU: 1 PID: 12312 Comm: syz-executor.2 Not tainted 5.15.120-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/03/2023
Call Trace:
 <TASK>
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x1e3/0x2cb lib/dump_stack.c:106
 print_bad_irq_dependency kernel/locking/lockdep.c:2567 [inline]
 check_irq_usage kernel/locking/lockdep.c:2806 [inline]
 check_prev_add kernel/locking/lockdep.c:3057 [inline]
 check_prevs_add kernel/locking/lockdep.c:3172 [inline]
 validate_chain+0x4cfe/0x58b0 kernel/locking/lockdep.c:3787
 __lock_acquire+0x1295/0x1ff0 kernel/locking/lockdep.c:5011
 lock_acquire+0x1db/0x4f0 kernel/locking/lockdep.c:5622
 __fs_reclaim_acquire mm/page_alloc.c:4547 [inline]
 fs_reclaim_acquire+0x83/0x120 mm/page_alloc.c:4561
 might_alloc include/linux/sched/mm.h:206 [inline]
 slab_pre_alloc_hook+0x22/0xc0 mm/slab.h:492
 slab_alloc_node mm/slub.c:3134 [inline]
 __kmalloc_node+0x71/0x390 mm/slub.c:4451
 kmalloc_node include/linux/slab.h:614 [inline]
 kvmalloc_node+0x80/0x140 mm/util.c:619
 kvmalloc include/linux/mm.h:805 [inline]
 get_dist_table+0x83/0x2c0 net/sched/sch_netem.c:788
 netem_change+0xa05/0x20c0 net/sched/sch_netem.c:988
 netem_init+0x58/0xb0 net/sched/sch_netem.c:1075
 qdisc_create+0x8ae/0x1390 net/sched/sch_api.c:1264
 tc_modify_qdisc+0xac5/0x1710
 rtnetlink_rcv_msg+0x993/0xee0 net/core/rtnetlink.c:5593
 netlink_rcv_skb+0x1cf/0x410 net/netlink/af_netlink.c:2504
 netlink_unicast_kernel net/netlink/af_netlink.c:1330 [inline]
 netlink_unicast+0x7b6/0x980 net/netlink/af_netlink.c:1356
 netlink_sendmsg+0xa30/0xd60 net/netlink/af_netlink.c:1923
 sock_sendmsg_nosec net/socket.c:704 [inline]
 sock_sendmsg net/socket.c:724 [inline]
 ____sys_sendmsg+0x59e/0x8f0 net/socket.c:2412
 ___sys_sendmsg+0x252/0x2e0 net/socket.c:2466
 __sys_sendmsg net/socket.c:2495 [inline]
 __do_sys_sendmsg net/socket.c:2504 [inline]
 __se_sys_sendmsg+0x19a/0x260 net/socket.c:2502
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x61/0xcb
RIP: 0033:0x7fecf5028b29
Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fecf35890c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
RAX: ffffffffffffffda RBX: 00007fecf5148050 RCX: 00007fecf5028b29
RDX: 0000000000000000 RSI: 00000000200007c0 RDI: 0000000000000004
RBP: 00007fecf507447a R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 000000000000006e R14: 00007fecf5148050 R15: 00007ffc12401e18
 </TASK>
BUG: sleeping function called from invalid context at include/linux/sched/mm.h:209
in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 12312, name: syz-executor.2
INFO: lockdep is turned off.
Preemption disabled at:
[<0000000000000000>] 0x0
CPU: 1 PID: 12312 Comm: syz-executor.2 Not tainted 5.15.120-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/03/2023
Call Trace:
 <TASK>
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x1e3/0x2cb lib/dump_stack.c:106
 ___might_sleep+0x547/0x6a0 kernel/sched/core.c:9625
 might_alloc include/linux/sched/mm.h:209 [inline]
 slab_pre_alloc_hook+0x44/0xc0 mm/slab.h:492
 slab_alloc_node mm/slub.c:3134 [inline]
 __kmalloc_node+0x71/0x390 mm/slub.c:4451
 kmalloc_node include/linux/slab.h:614 [inline]
 kvmalloc_node+0x80/0x140 mm/util.c:619
 kvmalloc include/linux/mm.h:805 [inline]
 get_dist_table+0x83/0x2c0 net/sched/sch_netem.c:788
 netem_change+0xa05/0x20c0 net/sched/sch_netem.c:988
 netem_init+0x58/0xb0 net/sched/sch_netem.c:1075
 qdisc_create+0x8ae/0x1390 net/sched/sch_api.c:1264
 tc_modify_qdisc+0xac5/0x1710
 rtnetlink_rcv_msg+0x993/0xee0 net/core/rtnetlink.c:5593
 netlink_rcv_skb+0x1cf/0x410 net/netlink/af_netlink.c:2504
 netlink_unicast_kernel net/netlink/af_netlink.c:1330 [inline]
 netlink_unicast+0x7b6/0x980 net/netlink/af_netlink.c:1356
 netlink_sendmsg+0xa30/0xd60 net/netlink/af_netlink.c:1923
 sock_sendmsg_nosec net/socket.c:704 [inline]
 sock_sendmsg net/socket.c:724 [inline]
 ____sys_sendmsg+0x59e/0x8f0 net/socket.c:2412
 ___sys_sendmsg+0x252/0x2e0 net/socket.c:2466
 __sys_sendmsg net/socket.c:2495 [inline]
 __do_sys_sendmsg net/socket.c:2504 [inline]
 __se_sys_sendmsg+0x19a/0x260 net/socket.c:2502
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x61/0xcb
RIP: 0033:0x7fecf5028b29
Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fecf35890c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
RAX: ffffffffffffffda RBX: 00007fecf5148050 RCX: 00007fecf5028b29
RDX: 0000000000000000 RSI: 00000000200007c0 RDI: 0000000000000004
RBP: 00007fecf507447a R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 000000000000006e R14: 00007fecf5148050 R15: 00007ffc12401e18
 </TASK>

Crashes (3):
Time Kernel Commit Syzkaller Config Log Report Syz repro C repro VM info Assets (help?) Manager Title
2023/07/12 02:10 linux-5.15.y d54cfc420586 2f19aa4f .config console log report info [disk image] [vmlinux] [kernel image] ci2-linux-5-15-kasan possible deadlock in fq_pie_timer
2023/07/09 13:34 linux-5.15.y d54cfc420586 668cb1fa .config console log report info [disk image] [vmlinux] [kernel image] ci2-linux-5-15-kasan possible deadlock in fq_pie_timer
2023/07/09 13:33 linux-5.15.y d54cfc420586 668cb1fa .config console log report info [disk image] [vmlinux] [kernel image] ci2-linux-5-15-kasan-arm64 possible deadlock in fq_pie_timer
* Struck through repros no longer work on HEAD.