INFO: task syz-executor.5:31578 blocked for more than 143 seconds.
      Not tainted 5.19.0-rc4-next-20220628-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor.5  state:D stack:28248 pid:31578 ppid: 31555 flags:0x00000004
Call Trace:
 context_switch kernel/sched/core.c:5184 [inline]
 __schedule+0xa09/0x4f10 kernel/sched/core.c:6496
 schedule+0xd2/0x1f0 kernel/sched/core.c:6568
 rwsem_down_write_slowpath+0x68a/0x11a0 kernel/locking/rwsem.c:1172
 __down_write_common kernel/locking/rwsem.c:1287 [inline]
 __down_write_common kernel/locking/rwsem.c:1284 [inline]
 __down_write kernel/locking/rwsem.c:1296 [inline]
 down_write+0x135/0x150 kernel/locking/rwsem.c:1543
 inode_lock include/linux/fs.h:761 [inline]
 process_measurement+0x6f7/0x1880 security/integrity/ima/ima_main.c:241
 ima_file_check+0xac/0x100 security/integrity/ima/ima_main.c:517
 do_open fs/namei.c:3522 [inline]
 path_openat+0x161a/0x2930 fs/namei.c:3653
 do_filp_open+0x1aa/0x400 fs/namei.c:3680
 do_sys_openat2+0x16d/0x4c0 fs/open.c:1308
 do_sys_open fs/open.c:1324 [inline]
 __do_sys_openat fs/open.c:1340 [inline]
 __se_sys_openat fs/open.c:1335 [inline]
 __x64_sys_openat+0x13f/0x1f0 fs/open.c:1335
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x46/0xb0
RIP: 0033:0x7fdc89e3c124
RSP: 002b:00007fdc8b060ca0 EFLAGS: 00000293 ORIG_RAX: 0000000000000101
RAX: ffffffffffffffda RBX: 6666666666666667 RCX: 00007fdc89e3c124
RDX: 0000000000141101 RSI: 00007fdc8b060d40 RDI: 00000000ffffff9c
RBP: 00007fdc8b060d40 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000293 R12: 0000000000141101
R13: 00007ffe64573fdf R14: 00007fdc8b061300 R15: 0000000000022000

Showing all locks held in the system:
1 lock held by rcu_tasks_kthre/12:
 #0: ffffffff8bd864f0 (rcu_tasks.tasks_gp_mutex){+.+.}-{3:3}, at: rcu_tasks_one_gp+0x26/0xc70 kernel/rcu/tasks.h:507
1 lock held by rcu_tasks_trace/13:
 #0: ffffffff8bd861f0 (rcu_tasks_trace.tasks_gp_mutex){+.+.}-{3:3}, at: rcu_tasks_one_gp+0x26/0xc70 kernel/rcu/tasks.h:507
3 locks held by kworker/1:1/26:
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: arch_atomic64_set arch/x86/include/asm/atomic64_64.h:34 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: arch_atomic_long_set include/linux/atomic/atomic-long.h:41 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: atomic_long_set include/linux/atomic/atomic-instrumented.h:1280 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: set_work_data kernel/workqueue.c:636 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: set_work_pool_and_clear_pending kernel/workqueue.c:663 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: process_one_work+0x87a/0x1610 kernel/workqueue.c:2260
 #1: ffffc90000a1fda8 ((work_completion)(&pwq->unbound_release_work)){+.+.}-{0:0}, at: process_one_work+0x8ae/0x1610 kernel/workqueue.c:2264
 #2: ffffffff8bd91938 (rcu_state.exp_mutex){+.+.}-{3:3}, at: exp_funnel_lock kernel/rcu/tree_exp.h:324 [inline]
 #2: ffffffff8bd91938 (rcu_state.exp_mutex){+.+.}-{3:3}, at: synchronize_rcu_expedited+0x24a/0x670 kernel/rcu/tree_exp.h:940
1 lock held by khungtaskd/28:
 #0: ffffffff8bd87040 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x53/0x260 kernel/locking/lockdep.c:6491
2 locks held by getty/3280:
 #0: ffff888026573098 (&tty->ldisc_sem){++++}-{0:0}, at: tty_ldisc_ref_wait+0x22/0x80 drivers/tty/tty_ldisc.c:244
 #1: ffffc90002d162f0 (&ldata->atomic_read_lock){+.+.}-{3:3}, at: n_tty_read+0xe50/0x13c0 drivers/tty/n_tty.c:2177
4 locks held by kworker/u4:16/17038:
2 locks held by kworker/0:11/26893:
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: arch_atomic64_set arch/x86/include/asm/atomic64_64.h:34 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: arch_atomic_long_set include/linux/atomic/atomic-long.h:41 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: atomic_long_set include/linux/atomic/atomic-instrumented.h:1280 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: set_work_data kernel/workqueue.c:636 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: set_work_pool_and_clear_pending kernel/workqueue.c:663 [inline]
 #0: ffff888011864d38 ((wq_completion)events){+.+.}-{0:0}, at: process_one_work+0x87a/0x1610 kernel/workqueue.c:2260
 #1: ffffc90014ae7da8 ((work_completion)(&pwq->unbound_release_work)){+.+.}-{0:0}, at: process_one_work+0x8ae/0x1610 kernel/workqueue.c:2264
2 locks held by kworker/0:18/19345:
 #0: ffff888011866538 ((wq_completion)rcu_gp){+.+.}-{0:0}, at: arch_atomic64_set arch/x86/include/asm/atomic64_64.h:34 [inline]
 #0: ffff888011866538 ((wq_completion)rcu_gp){+.+.}-{0:0}, at: arch_atomic_long_set include/linux/atomic/atomic-long.h:41 [inline]
 #0: ffff888011866538 ((wq_completion)rcu_gp){+.+.}-{0:0}, at: atomic_long_set include/linux/atomic/atomic-instrumented.h:1280 [inline]
 #0: ffff888011866538 ((wq_completion)rcu_gp){+.+.}-{0:0}, at: set_work_data kernel/workqueue.c:636 [inline]
 #0: ffff888011866538 ((wq_completion)rcu_gp){+.+.}-{0:0}, at: set_work_pool_and_clear_pending kernel/workqueue.c:663 [inline]
 #0: ffff888011866538 ((wq_completion)rcu_gp){+.+.}-{0:0}, at: process_one_work+0x87a/0x1610 kernel/workqueue.c:2260
 #1: ffffc90004aa7da8 ((work_completion)(&rew->rew_work)){+.+.}-{0:0}, at: process_one_work+0x8ae/0x1610 kernel/workqueue.c:2264
1 lock held by syz-executor.1/31359:
1 lock held by syz-executor.5/31429:
6 locks held by syz-executor.2/31516:
2 locks held by syz-executor.2/31528:
1 lock held by syz-executor.5/31578:
 #0: ffff888036460f18 (&sb->s_type->i_mutex_key#4){+.+.}-{3:3}, at: inode_lock include/linux/fs.h:761 [inline]
 #0: ffff888036460f18 (&sb->s_type->i_mutex_key#4){+.+.}-{3:3}, at: process_measurement+0x6f7/0x1880 security/integrity/ima/ima_main.c:241
1 lock held by syz-executor.5/31579:
1 lock held by syz-executor.5/31672:
 #0: ffff888036460f18 (&sb->s_type->i_mutex_key#4){+.+.}-{3:3}, at: inode_lock include/linux/fs.h:761 [inline]
 #0: ffff888036460f18 (&sb->s_type->i_mutex_key#4){+.+.}-{3:3}, at: process_measurement+0x6f7/0x1880 security/integrity/ima/ima_main.c:241
1 lock held by syz-executor.5/31673:
1 lock held by syz-executor.4/31733:
 #0: ffffffff8bd91800 (rcu_state.barrier_mutex){+.+.}-{3:3}, at: rcu_barrier+0x44/0x630 kernel/rcu/tree.c:3783
1 lock held by syz-executor.3/31735:
 #0: ffffffff8bd91800 (rcu_state.barrier_mutex){+.+.}-{3:3}, at: rcu_barrier+0x44/0x630 kernel/rcu/tree.c:3783
2 locks held by dhcpcd/31752:
 #0: ffff8880acf9e130 (sk_lock-AF_PACKET){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1664 [inline]
 #0: ffff8880acf9e130 (sk_lock-AF_PACKET){+.+.}-{0:0}, at: packet_do_bind+0x2f/0xdc0 net/packet/af_packet.c:3194
 #1: ffffffff8bd91938 (rcu_state.exp_mutex){+.+.}-{3:3}, at: exp_funnel_lock kernel/rcu/tree_exp.h:292 [inline]
 #1: ffffffff8bd91938 (rcu_state.exp_mutex){+.+.}-{3:3}, at: synchronize_rcu_expedited+0x562/0x670 kernel/rcu/tree_exp.h:940
1 lock held by dhcpcd/31753:
 #0: ffff8880990d2130 (sk_lock-AF_PACKET){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1664 [inline]
 #0: ffff8880990d2130 (sk_lock-AF_PACKET){+.+.}-{0:0}, at: packet_do_bind+0x2f/0xdc0 net/packet/af_packet.c:3194

=============================================

NMI backtrace for cpu 0
CPU: 0 PID: 28 Comm: khungtaskd Not tainted 5.19.0-rc4-next-20220628-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/22/2022
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
 nmi_cpu_backtrace.cold+0x47/0x144 lib/nmi_backtrace.c:111
 nmi_trigger_cpumask_backtrace+0x1e6/0x230 lib/nmi_backtrace.c:62
 trigger_all_cpu_backtrace include/linux/nmi.h:146 [inline]
 check_hung_uninterruptible_tasks kernel/hung_task.c:212 [inline]
 watchdog+0xc18/0xf50 kernel/hung_task.c:369
 kthread+0x2e9/0x3a0 kernel/kthread.c:376
 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:302
Sending NMI from CPU 0 to CPUs 1:
NMI backtrace for cpu 1
CPU: 1 PID: 6836 Comm: kworker/u4:9 Not tainted 5.19.0-rc4-next-20220628-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/22/2022
Workqueue: events_unbound toggle_allocation_gate
RIP: 0010:hlock_class kernel/locking/lockdep.c:227 [inline]
RIP: 0010:__lock_acquire+0x145a/0x5660 kernel/locking/lockdep.c:5049
Code: f8 66 81 e3 ff 1f 0f b7 db be 08 00 00 00 48 89 d8 48 c1 f8 06 48 8d 3c c5 40 59 6a 90 e8 6e 84 68 00 48 0f a3 1d 16 1f 0c 0f <0f> 83 2c 06 00 00 48 8d 1c 5b 48 c1 e3 06 48 81 c3 60 5d 6a 90 48
RSP: 0018:ffffc90014a27740 EFLAGS: 00000047
RAX: 0000000000000001 RBX: 0000000000000008 RCX: ffffffff815e3a22
RDX: fffffbfff20d4b29 RSI: 0000000000000008 RDI: ffffffff906a5940
RBP: ffff88803983a87a R08: 0000000000000000 R09: ffffffff906a5947
R10: fffffbfff20d4b28 R11: 0000000000000001 R12: ffff88803983a858
R13: ffff888039839d40 R14: 0000000000000000 R15: 07cd2ba08520a020
FS:  0000000000000000(0000) GS:ffff8880b9b00000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fff6a8abec8 CR3: 000000000ba8e000 CR4: 00000000003526e0
Call Trace:
 lock_acquire kernel/locking/lockdep.c:5665 [inline]
 lock_acquire+0x1ab/0x570 kernel/locking/lockdep.c:5630
 __raw_spin_lock include/linux/spinlock_api_smp.h:133 [inline]
 _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:154
 spin_lock include/linux/spinlock.h:360 [inline]
 __get_locked_pte+0x154/0x270 mm/memory.c:1853
 get_locked_pte include/linux/mm.h:2183 [inline]
 __text_poke+0x1b3/0x8e0 arch/x86/kernel/alternative.c:1052
 text_poke arch/x86/kernel/alternative.c:1137 [inline]
 text_poke_bp_batch+0x382/0x6c0 arch/x86/kernel/alternative.c:1432
 text_poke_flush arch/x86/kernel/alternative.c:1589 [inline]
 text_poke_flush arch/x86/kernel/alternative.c:1586 [inline]
 text_poke_finish+0x16/0x30 arch/x86/kernel/alternative.c:1596
 arch_jump_label_transform_apply+0x13/0x20 arch/x86/kernel/jump_label.c:146
 jump_label_update+0x32f/0x410 kernel/jump_label.c:830
 static_key_enable_cpuslocked+0x1b1/0x260 kernel/jump_label.c:177
 static_key_enable+0x16/0x20 kernel/jump_label.c:190
 toggle_allocation_gate mm/kfence/core.c:811 [inline]
 toggle_allocation_gate+0x100/0x390 mm/kfence/core.c:803
 process_one_work+0x991/0x1610 kernel/workqueue.c:2289
 worker_thread+0x665/0x1080 kernel/workqueue.c:2436
 kthread+0x2e9/0x3a0 kernel/kthread.c:376
 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:302