======================================================
WARNING: possible circular locking dependency detected
5.14.0-rc1-syzkaller #0 Not tainted
------------------------------------------------------
syz-execprog/8452 is trying to acquire lock:
ffffffff8ba9c5e0 (fs_reclaim){+.+.}-{0:0}, at: fs_reclaim_acquire+0xf7/0x160 mm/page_alloc.c:4574

but task is already holding lock:
ffff8880b9d4d660 (lock#2){..-.}-{2:2}, at: __alloc_pages_bulk+0x4ad/0x1870 mm/page_alloc.c:5279

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #2 (lock#2){..-.}-{2:2}:
       local_lock_acquire include/linux/local_lock_internal.h:42 [inline]
       rmqueue_pcplist mm/page_alloc.c:3663 [inline]
       rmqueue mm/page_alloc.c:3701 [inline]
       get_page_from_freelist+0x4aa/0x2f80 mm/page_alloc.c:4163
       __alloc_pages+0x1b2/0x500 mm/page_alloc.c:5374
       alloc_pages+0x18c/0x2a0 mm/mempolicy.c:2244
       stack_depot_save+0x39d/0x4e0 lib/stackdepot.c:303
       kasan_save_stack+0x32/0x40 mm/kasan/common.c:40
       kasan_record_aux_stack+0xe5/0x110 mm/kasan/generic.c:348
       insert_work+0x48/0x370 kernel/workqueue.c:1332
       __queue_work+0x5c1/0xed0 kernel/workqueue.c:1498
       __queue_delayed_work+0x1c8/0x270 kernel/workqueue.c:1645
       mod_delayed_work_on+0xdd/0x220 kernel/workqueue.c:1719
       kblockd_mod_delayed_work_on+0x26/0x30 block/blk-core.c:1633
       blk_mq_kick_requeue_list block/blk-mq.c:828 [inline]
       blk_mq_add_to_requeue_list+0x322/0x3f0 block/blk-mq.c:823
       blk_flush_queue_rq block/blk-flush.c:136 [inline]
       blk_kick_flush block/blk-flush.c:333 [inline]
       blk_flush_complete_seq+0x935/0xfd0 block/blk-flush.c:211
       blk_insert_flush+0x35f/0x4b0 block/blk-flush.c:427
       blk_mq_submit_bio+0x13a5/0x1860 block/blk-mq.c:2234
       __submit_bio_noacct_mq block/blk-core.c:1011 [inline]
       submit_bio_noacct block/blk-core.c:1044 [inline]
       submit_bio_noacct+0xad2/0xf20 block/blk-core.c:1027
       submit_bio+0x1ea/0x470 block/blk-core.c:1106
       submit_bio_wait+0x106/0x230 block/bio.c:1155
       blkdev_issue_flush+0xd6/0x130 block/blk-flush.c:445
       ext4_sync_file+0x60b/0xfd0 fs/ext4/fsync.c:177
       vfs_fsync_range+0x13a/0x220 fs/sync.c:200
       vfs_fsync fs/sync.c:214 [inline]
       do_fsync fs/sync.c:224 [inline]
       __do_sys_fsync fs/sync.c:232 [inline]
       __se_sys_fsync fs/sync.c:230 [inline]
       __x64_sys_fsync+0x6b/0xc0 fs/sync.c:230
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x44/0xae

-> #1 (&pool->lock){-.-.}-{2:2}:
       __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline]
       _raw_spin_lock+0x2a/0x40 kernel/locking/spinlock.c:151
       __queue_work+0x366/0xed0 kernel/workqueue.c:1455
       queue_work_on+0xee/0x110 kernel/workqueue.c:1525
       queue_work include/linux/workqueue.h:507 [inline]
       schedule_work include/linux/workqueue.h:568 [inline]
       __vfree_deferred mm/vmalloc.c:2609 [inline]
       vfree_atomic+0xac/0xe0 mm/vmalloc.c:2627
       free_thread_stack kernel/fork.c:292 [inline]
       release_task_stack kernel/fork.c:432 [inline]
       put_task_stack+0x2e0/0x4e0 kernel/fork.c:443
       finish_task_switch.isra.0+0x77f/0xa50 kernel/sched/core.c:4595
       context_switch kernel/sched/core.c:4686 [inline]
       __schedule+0x942/0x26f0 kernel/sched/core.c:5940
       preempt_schedule_irq+0x4e/0x90 kernel/sched/core.c:6328
       irqentry_exit+0x31/0x80 kernel/entry/common.c:427
       asm_sysvec_reschedule_ipi+0x12/0x20 arch/x86/include/asm/idtentry.h:643
       lock_release+0x3f1/0x720 kernel/locking/lockdep.c:5633
       might_alloc include/linux/sched/mm.h:198 [inline]
       slab_pre_alloc_hook mm/slab.h:485 [inline]
       slab_alloc_node mm/slub.c:2902 [inline]
       slab_alloc mm/slub.c:2989 [inline]
       kmem_cache_alloc+0x3e/0x3a0 mm/slub.c:2994
       kmem_cache_zalloc include/linux/slab.h:711 [inline]
       __kernfs_new_node+0xd4/0x8b0 fs/kernfs/dir.c:583
       kernfs_new_node fs/kernfs/dir.c:645 [inline]
       kernfs_create_dir_ns+0x9c/0x220 fs/kernfs/dir.c:982
       internal_create_group+0x798/0xb20 fs/sysfs/group.c:137
       kernel_add_sysfs_param kernel/params.c:796 [inline]
       param_sysfs_builtin kernel/params.c:833 [inline]
       param_sysfs_init+0x39a/0x498 kernel/params.c:952
       do_one_initcall+0x103/0x650 init/main.c:1282
       do_initcall_level init/main.c:1355 [inline]
       do_initcalls init/main.c:1371 [inline]
       do_basic_setup init/main.c:1391 [inline]
       kernel_init_freeable+0x6b8/0x741 init/main.c:1593
       kernel_init+0x1a/0x1d0 init/main.c:1485
       ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:295

-> #0 (fs_reclaim){+.+.}-{0:0}:
       check_prev_add kernel/locking/lockdep.c:3051 [inline]
       check_prevs_add kernel/locking/lockdep.c:3174 [inline]
       validate_chain kernel/locking/lockdep.c:3789 [inline]
       __lock_acquire+0x2a07/0x54a0 kernel/locking/lockdep.c:5015
       lock_acquire kernel/locking/lockdep.c:5625 [inline]
       lock_acquire+0x1ab/0x510 kernel/locking/lockdep.c:5590
       __fs_reclaim_acquire mm/page_alloc.c:4552 [inline]
       fs_reclaim_acquire+0x117/0x160 mm/page_alloc.c:4566
       prepare_alloc_pages+0x15c/0x580 mm/page_alloc.c:5164
       __alloc_pages+0x12f/0x500 mm/page_alloc.c:5363
       alloc_pages+0x18c/0x2a0 mm/mempolicy.c:2244
       stack_depot_save+0x39d/0x4e0 lib/stackdepot.c:303
       save_stack+0x15e/0x1e0 mm/page_owner.c:120
       __set_page_owner+0x50/0x290 mm/page_owner.c:181
       prep_new_page mm/page_alloc.c:2433 [inline]
       __alloc_pages_bulk+0x8b9/0x1870 mm/page_alloc.c:5301
       alloc_pages_bulk_array_node include/linux/gfp.h:557 [inline]
       vm_area_alloc_pages mm/vmalloc.c:2793 [inline]
       __vmalloc_area_node mm/vmalloc.c:2863 [inline]
       __vmalloc_node_range+0x39d/0x960 mm/vmalloc.c:2966
       vmalloc_user+0x67/0x80 mm/vmalloc.c:3101
       kcov_mmap+0x2b/0x140 kernel/kcov.c:465
       call_mmap include/linux/fs.h:2119 [inline]
       mmap_region+0xcde/0x1760 mm/mmap.c:1808
       do_mmap+0x86e/0x1180 mm/mmap.c:1584
       vm_mmap_pgoff+0x1b7/0x290 mm/util.c:519
       ksys_mmap_pgoff+0x4a8/0x620 mm/mmap.c:1635
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x44/0xae

other info that might help us debug this:

Chain exists of:
  fs_reclaim --> &pool->lock --> lock#2

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(lock#2);
                               lock(&pool->lock);
                               lock(lock#2);
  lock(fs_reclaim);

 *** DEADLOCK ***

2 locks held by syz-execprog/8452:
 #0: ffff88802c3c1d28 (&mm->mmap_lock#2){++++}-{3:3}, at: mmap_write_lock_killable include/linux/mmap_lock.h:87 [inline]
 #0: ffff88802c3c1d28 (&mm->mmap_lock#2){++++}-{3:3}, at: vm_mmap_pgoff+0x15c/0x290 mm/util.c:517
 #1: ffff8880b9d4d660 (lock#2){..-.}-{2:2}, at: __alloc_pages_bulk+0x4ad/0x1870 mm/page_alloc.c:5279

stack backtrace:
CPU: 1 PID: 8452 Comm: syz-execprog Not tainted 5.14.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:105
 check_noncircular+0x25f/0x2e0 kernel/locking/lockdep.c:2131
 check_prev_add kernel/locking/lockdep.c:3051 [inline]
 check_prevs_add kernel/locking/lockdep.c:3174 [inline]
 validate_chain kernel/locking/lockdep.c:3789 [inline]
 __lock_acquire+0x2a07/0x54a0 kernel/locking/lockdep.c:5015
 lock_acquire kernel/locking/lockdep.c:5625 [inline]
 lock_acquire+0x1ab/0x510 kernel/locking/lockdep.c:5590
 __fs_reclaim_acquire mm/page_alloc.c:4552 [inline]
 fs_reclaim_acquire+0x117/0x160 mm/page_alloc.c:4566
 prepare_alloc_pages+0x15c/0x580 mm/page_alloc.c:5164
 __alloc_pages+0x12f/0x500 mm/page_alloc.c:5363
 alloc_pages+0x18c/0x2a0 mm/mempolicy.c:2244
 stack_depot_save+0x39d/0x4e0 lib/stackdepot.c:303
 save_stack+0x15e/0x1e0 mm/page_owner.c:120
 __set_page_owner+0x50/0x290 mm/page_owner.c:181
 prep_new_page mm/page_alloc.c:2433 [inline]
 __alloc_pages_bulk+0x8b9/0x1870 mm/page_alloc.c:5301
 alloc_pages_bulk_array_node include/linux/gfp.h:557 [inline]
 vm_area_alloc_pages mm/vmalloc.c:2793 [inline]
 __vmalloc_area_node mm/vmalloc.c:2863 [inline]
 __vmalloc_node_range+0x39d/0x960 mm/vmalloc.c:2966
 vmalloc_user+0x67/0x80 mm/vmalloc.c:3101
 kcov_mmap+0x2b/0x140 kernel/kcov.c:465
 call_mmap include/linux/fs.h:2119 [inline]
 mmap_region+0xcde/0x1760 mm/mmap.c:1808
 do_mmap+0x86e/0x1180 mm/mmap.c:1584
 vm_mmap_pgoff+0x1b7/0x290 mm/util.c:519
 ksys_mmap_pgoff+0x4a8/0x620 mm/mmap.c:1635
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x4b132a
Code: e8 db 57 fb ff 48 8b 7c 24 10 48 8b 74 24 18 48 8b 54 24 20 4c 8b 54 24 28 4c 8b 44 24 30 4c 8b 4c 24 38 48 8b 44 24 08 0f 05 <48> 3d 01 f0 ff ff 76 20 48 c7 44 24 40 ff ff ff ff 48 c7 44 24 48
RSP: 002b:000000c0001a1a10 EFLAGS: 00000202 ORIG_RAX: 0000000000000009
RAX: ffffffffffffffda RBX: 000000c000020800 RCX: 00000000004b132a
RDX: 0000000000000003 RSI: 0000000000080000 RDI: 0000000000000000
RBP: 000000c0001a1a70 R08: 0000000000000006 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000202 R12: 000000000072903a
R13: 0000000000000063 R14: 0000000000000200 R15: 0000000000000100
BUG: sleeping function called from invalid context at mm/page_alloc.c:5167
in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 8452, name: syz-execprog
INFO: lockdep is turned off.
irq event stamp: 10814
hardirqs last enabled at (10813): [] __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:160 [inline]
hardirqs last enabled at (10813): [] _raw_spin_unlock_irqrestore+0x50/0x70 kernel/locking/spinlock.c:191
hardirqs last disabled at (10814): [] __alloc_pages_bulk+0x1017/0x1870 mm/page_alloc.c:5279
softirqs last enabled at (9920): [] invoke_softirq kernel/softirq.c:432 [inline]
softirqs last enabled at (9920): [] __irq_exit_rcu+0x16e/0x1c0 kernel/softirq.c:636
softirqs last disabled at (9911): [] invoke_softirq kernel/softirq.c:432 [inline]
softirqs last disabled at (9911): [] __irq_exit_rcu+0x16e/0x1c0 kernel/softirq.c:636
CPU: 1 PID: 8452 Comm: syz-execprog Not tainted 5.14.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:105
 ___might_sleep.cold+0x1f1/0x237 kernel/sched/core.c:9154
 prepare_alloc_pages+0x3da/0x580 mm/page_alloc.c:5167
 __alloc_pages+0x12f/0x500 mm/page_alloc.c:5363
 alloc_pages+0x18c/0x2a0 mm/mempolicy.c:2244
 stack_depot_save+0x39d/0x4e0 lib/stackdepot.c:303
 save_stack+0x15e/0x1e0 mm/page_owner.c:120
 __set_page_owner+0x50/0x290 mm/page_owner.c:181
 prep_new_page mm/page_alloc.c:2433 [inline]
 __alloc_pages_bulk+0x8b9/0x1870 mm/page_alloc.c:5301
 alloc_pages_bulk_array_node include/linux/gfp.h:557 [inline]
 vm_area_alloc_pages mm/vmalloc.c:2793 [inline]
 __vmalloc_area_node mm/vmalloc.c:2863 [inline]
 __vmalloc_node_range+0x39d/0x960 mm/vmalloc.c:2966
 vmalloc_user+0x67/0x80 mm/vmalloc.c:3101
 kcov_mmap+0x2b/0x140 kernel/kcov.c:465
 call_mmap include/linux/fs.h:2119 [inline]
 mmap_region+0xcde/0x1760 mm/mmap.c:1808
 do_mmap+0x86e/0x1180 mm/mmap.c:1584
 vm_mmap_pgoff+0x1b7/0x290 mm/util.c:519
 ksys_mmap_pgoff+0x4a8/0x620 mm/mmap.c:1635
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x4b132a
Code: e8 db 57 fb ff 48 8b 7c 24 10 48 8b 74 24 18 48 8b 54 24 20 4c 8b 54 24 28 4c 8b 44 24 30 4c 8b 4c 24 38 48 8b 44 24 08 0f 05 <48> 3d 01 f0 ff ff 76 20 48 c7 44 24 40 ff ff ff ff 48 c7 44 24 48
RSP: 002b:000000c0001a1a10 EFLAGS: 00000202 ORIG_RAX: 0000000000000009
RAX: ffffffffffffffda RBX: 000000c000020800 RCX: 00000000004b132a
RDX: 0000000000000003 RSI: 0000000000080000 RDI: 0000000000000000
RBP: 000000c0001a1a70 R08: 0000000000000006 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000202 R12: 000000000072903a
R13: 0000000000000063 R14: 0000000000000200 R15: 0000000000000100
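For readers less used to lockdep output: the "Possible unsafe locking scenario" table above is the classic lock-order inversion, stretched over a three-lock chain (fs_reclaim --> &pool->lock --> lock#2 on the fsync/workqueue path, lock#2 --> fs_reclaim on the bulk-allocation path). The sketch below is not part of the report and is not kernel code; it is a minimal userspace illustration of the same cycle, assuming hypothetical pthread mutexes named after the kernel locks involved.

/*
 * Minimal userspace sketch of the lock-order inversion reported above.
 * The three pthread mutexes are stand-ins for fs_reclaim, &pool->lock
 * and lock#2; the names are illustrative only. One path establishes
 * A -> B -> C, the other takes C and then wants A, closing the cycle.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fs_reclaim = PTHREAD_MUTEX_INITIALIZER; /* A */
static pthread_mutex_t pool_lock  = PTHREAD_MUTEX_INITIALIZER; /* B */
static pthread_mutex_t pcp_lock   = PTHREAD_MUTEX_INITIALIZER; /* C, i.e. lock#2 */

/* Path 1 (fsync-like): takes A, then B, then C. */
static void *writer_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&fs_reclaim);
	pthread_mutex_lock(&pool_lock);
	pthread_mutex_lock(&pcp_lock);
	puts("writer: fs_reclaim -> pool_lock -> pcp_lock");
	pthread_mutex_unlock(&pcp_lock);
	pthread_mutex_unlock(&pool_lock);
	pthread_mutex_unlock(&fs_reclaim);
	return NULL;
}

/* Path 2 (bulk-alloc-like): takes C first, then wants A.
 * Run concurrently with path 1, this ordering can deadlock. */
static void *alloc_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pcp_lock);
	pthread_mutex_lock(&fs_reclaim);
	puts("alloc: pcp_lock -> fs_reclaim");
	pthread_mutex_unlock(&fs_reclaim);
	pthread_mutex_unlock(&pcp_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, writer_path, NULL);
	pthread_create(&t2, NULL, alloc_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}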