======================================================
WARNING: possible circular locking dependency detected
5.14.0-rc1-syzkaller #0 Not tainted
------------------------------------------------------
syz-executor.0/8466 is trying to acquire lock:
ffffffff8ba9afc0 (fs_reclaim){+.+.}-{0:0}, at: fs_reclaim_acquire+0xf7/0x160 mm/page_alloc.c:4574

but task is already holding lock:
ffff8880b9c4d580 (lock#2){-.-.}-{2:2}, at: __alloc_pages_bulk+0x4ad/0x1870 mm/page_alloc.c:5279

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #1 (lock#2){-.-.}-{2:2}:
       local_lock_acquire include/linux/local_lock_internal.h:42 [inline]
       rmqueue_pcplist mm/page_alloc.c:3663 [inline]
       rmqueue mm/page_alloc.c:3701 [inline]
       get_page_from_freelist+0x4aa/0x2f80 mm/page_alloc.c:4163
       __alloc_pages+0x1b2/0x500 mm/page_alloc.c:5374
       alloc_pages+0x18c/0x2a0 mm/mempolicy.c:2244
       stack_depot_save+0x39d/0x4e0 lib/stackdepot.c:303
       save_stack+0x15e/0x1e0 mm/page_owner.c:120
       __reset_page_owner+0x5d/0x170 mm/page_owner.c:140
       reset_page_owner include/linux/page_owner.h:24 [inline]
       free_pages_prepare mm/page_alloc.c:1343 [inline]
       free_pcp_prepare+0x2c5/0x780 mm/page_alloc.c:1394
       free_unref_page_prepare mm/page_alloc.c:3329 [inline]
       free_unref_page+0x19/0x690 mm/page_alloc.c:3408
       mm_free_pgd kernel/fork.c:636 [inline]
       __mmdrop+0xcb/0x3f0 kernel/fork.c:687
       mmdrop include/linux/sched/mm.h:49 [inline]
       finish_task_switch.isra.0+0x6da/0xa50 kernel/sched/core.c:4582
       context_switch kernel/sched/core.c:4686 [inline]
       __schedule+0x942/0x26f0 kernel/sched/core.c:5940
       preempt_schedule_irq+0x4e/0x90 kernel/sched/core.c:6328
       irqentry_exit+0x31/0x80 kernel/entry/common.c:427
       asm_sysvec_reschedule_ipi+0x12/0x20 arch/x86/include/asm/idtentry.h:643
       lock_acquire+0x1ef/0x510 kernel/locking/lockdep.c:5593
       __fs_reclaim_acquire mm/page_alloc.c:4552 [inline]
       fs_reclaim_acquire+0x117/0x160 mm/page_alloc.c:4566
       might_alloc include/linux/sched/mm.h:198 [inline]
       slab_pre_alloc_hook mm/slab.h:485 [inline]
       slab_alloc_node mm/slub.c:2902 [inline]
       slab_alloc mm/slub.c:2989 [inline]
       kmem_cache_alloc+0x3e/0x3a0 mm/slub.c:2994
       vm_area_dup+0x88/0x2b0 kernel/fork.c:357
       __split_vma+0xa5/0x550 mm/mmap.c:2741
       split_vma+0x95/0xd0 mm/mmap.c:2799
       mprotect_fixup+0x71c/0x940 mm/mprotect.c:483
       do_mprotect_pkey+0x558/0x9a0 mm/mprotect.c:636
       __do_sys_mprotect mm/mprotect.c:662 [inline]
       __se_sys_mprotect mm/mprotect.c:659 [inline]
       __x64_sys_mprotect+0x74/0xb0 mm/mprotect.c:659
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x44/0xae

-> #0 (fs_reclaim){+.+.}-{0:0}:
       check_prev_add kernel/locking/lockdep.c:3051 [inline]
       check_prevs_add kernel/locking/lockdep.c:3174 [inline]
       validate_chain kernel/locking/lockdep.c:3789 [inline]
       __lock_acquire+0x2a07/0x54a0 kernel/locking/lockdep.c:5015
       lock_acquire kernel/locking/lockdep.c:5625 [inline]
       lock_acquire+0x1ab/0x510 kernel/locking/lockdep.c:5590
       __fs_reclaim_acquire mm/page_alloc.c:4552 [inline]
       fs_reclaim_acquire+0x117/0x160 mm/page_alloc.c:4566
       prepare_alloc_pages+0x15c/0x580 mm/page_alloc.c:5164
       __alloc_pages+0x12f/0x500 mm/page_alloc.c:5363
       alloc_pages+0x18c/0x2a0 mm/mempolicy.c:2244
       stack_depot_save+0x39d/0x4e0 lib/stackdepot.c:303
       save_stack+0x15e/0x1e0 mm/page_owner.c:120
       __set_page_owner+0x50/0x290 mm/page_owner.c:181
       prep_new_page mm/page_alloc.c:2433 [inline]
       __alloc_pages_bulk+0x8b9/0x1870 mm/page_alloc.c:5301
       alloc_pages_bulk_array_node include/linux/gfp.h:557 [inline]
       vm_area_alloc_pages mm/vmalloc.c:2793 [inline]
       __vmalloc_area_node mm/vmalloc.c:2863 [inline]
       __vmalloc_node_range+0x39d/0x960 mm/vmalloc.c:2966
       vmalloc_user+0x67/0x80 mm/vmalloc.c:3101
       kcov_mmap+0x2b/0x140 kernel/kcov.c:465
       call_mmap include/linux/fs.h:2119 [inline]
       mmap_region+0xcde/0x1760 mm/mmap.c:1808
       do_mmap+0x86e/0x1180 mm/mmap.c:1584
       vm_mmap_pgoff+0x1b7/0x290 mm/util.c:519
       ksys_mmap_pgoff+0x4a8/0x620 mm/mmap.c:1635
       do_syscall_x64 arch/x86/entry/common.c:50 [inline]
       do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
       entry_SYSCALL_64_after_hwframe+0x44/0xae

other info that might help us debug this:

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(lock#2);
                               lock(fs_reclaim);
                               lock(lock#2);
  lock(fs_reclaim);

 *** DEADLOCK ***

2 locks held by syz-executor.0/8466:
 #0: ffff88802af11628 (&mm->mmap_lock#2){++++}-{3:3}, at: mmap_write_lock_killable include/linux/mmap_lock.h:87 [inline]
 #0: ffff88802af11628 (&mm->mmap_lock#2){++++}-{3:3}, at: vm_mmap_pgoff+0x15c/0x290 mm/util.c:517
 #1: ffff8880b9c4d580 (lock#2){-.-.}-{2:2}, at: __alloc_pages_bulk+0x4ad/0x1870 mm/page_alloc.c:5279

stack backtrace:
CPU: 0 PID: 8466 Comm: syz-executor.0 Not tainted 5.14.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:105
 check_noncircular+0x25f/0x2e0 kernel/locking/lockdep.c:2131
 check_prev_add kernel/locking/lockdep.c:3051 [inline]
 check_prevs_add kernel/locking/lockdep.c:3174 [inline]
 validate_chain kernel/locking/lockdep.c:3789 [inline]
 __lock_acquire+0x2a07/0x54a0 kernel/locking/lockdep.c:5015
 lock_acquire kernel/locking/lockdep.c:5625 [inline]
 lock_acquire+0x1ab/0x510 kernel/locking/lockdep.c:5590
 __fs_reclaim_acquire mm/page_alloc.c:4552 [inline]
 fs_reclaim_acquire+0x117/0x160 mm/page_alloc.c:4566
 prepare_alloc_pages+0x15c/0x580 mm/page_alloc.c:5164
 __alloc_pages+0x12f/0x500 mm/page_alloc.c:5363
 alloc_pages+0x18c/0x2a0 mm/mempolicy.c:2244
 stack_depot_save+0x39d/0x4e0 lib/stackdepot.c:303
 save_stack+0x15e/0x1e0 mm/page_owner.c:120
 __set_page_owner+0x50/0x290 mm/page_owner.c:181
 prep_new_page mm/page_alloc.c:2433 [inline]
 __alloc_pages_bulk+0x8b9/0x1870 mm/page_alloc.c:5301
 alloc_pages_bulk_array_node include/linux/gfp.h:557 [inline]
 vm_area_alloc_pages mm/vmalloc.c:2793 [inline]
 __vmalloc_area_node mm/vmalloc.c:2863 [inline]
 __vmalloc_node_range+0x39d/0x960 mm/vmalloc.c:2966
 vmalloc_user+0x67/0x80 mm/vmalloc.c:3101
 kcov_mmap+0x2b/0x140 kernel/kcov.c:465
 call_mmap include/linux/fs.h:2119 [inline]
 mmap_region+0xcde/0x1760 mm/mmap.c:1808
 do_mmap+0x86e/0x1180 mm/mmap.c:1584
 vm_mmap_pgoff+0x1b7/0x290 mm/util.c:519
 ksys_mmap_pgoff+0x4a8/0x620 mm/mmap.c:1635
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x466632
Code: 00 00 00 00 00 0f 1f 00 41 f7 c1 ff 0f 00 00 75 27 55 48 89 fd 53 89 cb 48 85 ff 74 3b 41 89 da 48 89 ef b8 09 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 66 5b 5d c3 0f 1f 00 48 c7 c0 bc ff ff ff 64
RSP: 002b:00007ffe7f5a1018 EFLAGS: 00000246 ORIG_RAX: 0000000000000009
RAX: ffffffffffffffda RBX: 0000000000000001 RCX: 0000000000466632
RDX: 0000000000000003 RSI: 0000000000200000 RDI: 0000000000000000
RBP: 0000000000000000 R08: 00000000000000e8 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000246 R12: 000000000056c0c0
R13: 000000000056cb88 R14: 0000000000000000 R15: 00000000000000e9
BUG: sleeping function called from invalid context at mm/page_alloc.c:5167
in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 8466, name: syz-executor.0
INFO: lockdep is turned off.
irq event stamp: 3510
hardirqs last enabled at (3509): [] __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:160 [inline]
hardirqs last enabled at (3509): [] _raw_spin_unlock_irqrestore+0x50/0x70 kernel/locking/spinlock.c:191
hardirqs last disabled at (3510): [] __alloc_pages_bulk+0x1017/0x1870 mm/page_alloc.c:5279
softirqs last enabled at (2652): [] invoke_softirq kernel/softirq.c:432 [inline]
softirqs last enabled at (2652): [] __irq_exit_rcu+0x16e/0x1c0 kernel/softirq.c:636
softirqs last disabled at (2641): [] invoke_softirq kernel/softirq.c:432 [inline]
softirqs last disabled at (2641): [] __irq_exit_rcu+0x16e/0x1c0 kernel/softirq.c:636
CPU: 0 PID: 8466 Comm: syz-executor.0 Not tainted 5.14.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:105
 ___might_sleep.cold+0x1f1/0x237 kernel/sched/core.c:9154
 prepare_alloc_pages+0x3da/0x580 mm/page_alloc.c:5167
 __alloc_pages+0x12f/0x500 mm/page_alloc.c:5363
 alloc_pages+0x18c/0x2a0 mm/mempolicy.c:2244
 stack_depot_save+0x39d/0x4e0 lib/stackdepot.c:303
 save_stack+0x15e/0x1e0 mm/page_owner.c:120
 __set_page_owner+0x50/0x290 mm/page_owner.c:181
 prep_new_page mm/page_alloc.c:2433 [inline]
 __alloc_pages_bulk+0x8b9/0x1870 mm/page_alloc.c:5301
 alloc_pages_bulk_array_node include/linux/gfp.h:557 [inline]
 vm_area_alloc_pages mm/vmalloc.c:2793 [inline]
 __vmalloc_area_node mm/vmalloc.c:2863 [inline]
 __vmalloc_node_range+0x39d/0x960 mm/vmalloc.c:2966
 vmalloc_user+0x67/0x80 mm/vmalloc.c:3101
 kcov_mmap+0x2b/0x140 kernel/kcov.c:465
 call_mmap include/linux/fs.h:2119 [inline]
 mmap_region+0xcde/0x1760 mm/mmap.c:1808
 do_mmap+0x86e/0x1180 mm/mmap.c:1584
 vm_mmap_pgoff+0x1b7/0x290 mm/util.c:519
 ksys_mmap_pgoff+0x4a8/0x620 mm/mmap.c:1635
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x466632
Code: 00 00 00 00 00 0f 1f 00 41 f7 c1 ff 0f 00 00 75 27 55 48 89 fd 53 89 cb 48 85 ff 74 3b 41 89 da 48 89 ef b8 09 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 66 5b 5d c3 0f 1f 00 48 c7 c0 bc ff ff ff 64
RSP: 002b:00007ffe7f5a1018 EFLAGS: 00000246 ORIG_RAX: 0000000000000009
RAX: ffffffffffffffda RBX: 0000000000000001 RCX: 0000000000466632
RDX: 0000000000000003 RSI: 0000000000200000 RDI: 0000000000000000
RBP: 0000000000000000 R08: 00000000000000e8 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000246 R12: 000000000056c0c0
R13: 000000000056cb88 R14: 0000000000000000 R15: 00000000000000e9
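
Note (commentary, not part of the report): the scenario lockdep prints above is the classic AB-BA inversion. The #1 chain records fs_reclaim -> lock#2 (a reclaim-capable slab allocation in vm_area_dup(), then a preemption path that frees a page and, via page_owner's stack_depot_save(), takes the per-cpu pcp lock), while the #0 chain takes the locks in the opposite order: __alloc_pages_bulk() holds the pcp lock with IRQs off and the nested page_owner allocation then calls fs_reclaim_acquire(), which also triggers the might_sleep splat. A minimal userspace sketch of that ordering, assuming hypothetical names (lock_a standing in for lock#2, lock_b for fs_reclaim) and pthreads instead of kernel locks:

/*
 * Illustration only: NOT kernel code and not the fix, just the two
 * acquisition orders lockdep is complaining about.  trylock is used on
 * the second lock so the demo reports the conflict instead of hanging.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* stands in for lock#2 (pcp local lock) */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* stands in for fs_reclaim */

/* Mirrors the #0 chain: pcp lock held, nested allocation wants reclaim. */
static void *path_bulk_alloc(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_a);
	usleep(1000);                       /* widen the race window */
	if (pthread_mutex_trylock(&lock_b)) /* would block -> potential deadlock */
		puts("bulk path: holds A, B unavailable (A -> B)");
	else
		pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

/* Mirrors the #1 chain: reclaim context ends up taking the pcp lock. */
static void *path_reclaim_alloc(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_b);
	usleep(1000);
	if (pthread_mutex_trylock(&lock_a))
		puts("reclaim path: holds B, A unavailable (B -> A)");
	else
		pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, path_bulk_alloc, NULL);
	pthread_create(&t2, NULL, path_reclaim_alloc, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Built with cc -pthread and run a few times, each thread occasionally finds the other lock held, which is the window the lockdep report describes; with real blocking locks the same interleaving would deadlock.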