=====================================================
WARNING: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected
6.8.0-syzkaller-05236-g443574b03387 #0 Not tainted
-----------------------------------------------------
syz-executor280/5061 [HC0[0]:SC0[2]:HE0:SE0] is trying to acquire:
ffff88802050ba18 (&htab->buckets[i].lock){+...}-{2:2}, at: spin_lock_bh include/linux/spinlock.h:356 [inline]
ffff88802050ba18 (&htab->buckets[i].lock){+...}-{2:2}, at: sock_hash_delete_elem+0xb0/0x300 net/core/sock_map.c:939

and this task is already holding:
ffff8880b953e158 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:559
which would create a new lock dependency:
 (&rq->__lock){-.-.}-{2:2} -> (&htab->buckets[i].lock){+...}-{2:2}

but this new dependency connects a HARDIRQ-irq-safe lock:
 (&rq->__lock){-.-.}-{2:2}

... which became HARDIRQ-irq-safe at:
  lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
  _raw_spin_lock_nested+0x31/0x40 kernel/locking/spinlock.c:378
  raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:559
  raw_spin_rq_lock kernel/sched/sched.h:1385 [inline]
  rq_lock kernel/sched/sched.h:1699 [inline]
  scheduler_tick+0xa1/0x6e0 kernel/sched/core.c:5679
  update_process_times+0x202/0x230 kernel/time/timer.c:2481
  tick_periodic+0x190/0x220 kernel/time/tick-common.c:100
  tick_handle_periodic+0x4a/0x160 kernel/time/tick-common.c:112
  timer_interrupt+0x5c/0x70 arch/x86/kernel/time.c:57
  __handle_irq_event_percpu+0x28c/0xa30 kernel/irq/handle.c:158
  handle_irq_event_percpu kernel/irq/handle.c:193 [inline]
  handle_irq_event+0x89/0x1f0 kernel/irq/handle.c:210
  handle_edge_irq+0x25f/0xc20 kernel/irq/chip.c:831
  generic_handle_irq_desc include/linux/irqdesc.h:161 [inline]
  handle_irq arch/x86/kernel/irq.c:238 [inline]
  __common_interrupt+0x13a/0x230 arch/x86/kernel/irq.c:257
  common_interrupt+0xa5/0xd0 arch/x86/kernel/irq.c:247
  asm_common_interrupt+0x26/0x40 arch/x86/include/asm/idtentry.h:693
  __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:152 [inline]
  _raw_spin_unlock_irqrestore+0xd8/0x140 kernel/locking/spinlock.c:194
  __debug_check_no_obj_freed lib/debugobjects.c:998 [inline]
  debug_check_no_obj_freed+0x561/0x580 lib/debugobjects.c:1019
  free_pages_prepare mm/page_alloc.c:1146 [inline]
  free_unref_page_prepare+0x319/0xa90 mm/page_alloc.c:2346
  free_unref_page+0x37/0x3f0 mm/page_alloc.c:2486
  stack_depot_save_flags+0x38f/0x860 lib/stackdepot.c:714
  kasan_save_stack mm/kasan/common.c:48 [inline]
  kasan_save_track+0x51/0x80 mm/kasan/common.c:68
  unpoison_slab_object mm/kasan/common.c:312 [inline]
  __kasan_slab_alloc+0x66/0x80 mm/kasan/common.c:338
  kasan_slab_alloc include/linux/kasan.h:201 [inline]
  slab_post_alloc_hook mm/slub.c:3813 [inline]
  slab_alloc_node mm/slub.c:3860 [inline]
  kmem_cache_alloc_node+0x192/0x380 mm/slub.c:3903
  preload_this_cpu_lock mm/vmalloc.c:1642 [inline]
  alloc_vmap_area+0x6aa/0x1c10 mm/vmalloc.c:1686
  __get_vm_area_node+0x16e/0x370 mm/vmalloc.c:2667
  __vmalloc_node_range+0x3df/0x14a0 mm/vmalloc.c:3352
  __vmalloc_node mm/vmalloc.c:3457 [inline]
  vzalloc_node+0x7e/0x90 mm/vmalloc.c:3586
  gen_pool_add_owner+0x8b/0x290 lib/genalloc.c:192
  gen_pool_add_virt include/linux/genalloc.h:104 [inline]
  gen_pool_add include/linux/genalloc.h:122 [inline]
  mce_gen_pool_create arch/x86/kernel/cpu/mce/genpool.c:128 [inline]
  mce_gen_pool_init+0x66/0xb0 arch/x86/kernel/cpu/mce/genpool.c:146
  mcheck_cpu_init+0xb7f/0x1200 arch/x86/kernel/cpu/mce/core.c:2232
  identify_cpu+0x1939/0x3280 arch/x86/kernel/cpu/common.c:1889
  identify_boot_cpu+0xd/0xe0 arch/x86/kernel/cpu/common.c:1928
  arch_cpu_finalize_init+0x9/0xa0 arch/x86/kernel/cpu/common.c:2310
  start_kernel+0x402/0x500 init/main.c:1043
  x86_64_start_reservations+0x2a/0x30 arch/x86/kernel/head64.c:509
  x86_64_start_kernel+0x99/0xa0 arch/x86/kernel/head64.c:490
  common_startup_64+0x13e/0x147

to a HARDIRQ-irq-unsafe lock:
 (&htab->buckets[i].lock){+...}-{2:2}

... which became HARDIRQ-irq-unsafe at:
...
  lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
  __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
  _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
  spin_lock_bh include/linux/spinlock.h:356 [inline]
  sock_hash_free+0x164/0x820 net/core/sock_map.c:1154
  bpf_map_free_deferred+0xe6/0x110 kernel/bpf/syscall.c:734
  process_one_work kernel/workqueue.c:3254 [inline]
  process_scheduled_works+0xa00/0x1770 kernel/workqueue.c:3335
  worker_thread+0x86d/0xd70 kernel/workqueue.c:3416
  kthread+0x2f0/0x390 kernel/kthread.c:388
  ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
  ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243

other info that might help us debug this:

 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&htab->buckets[i].lock);
                               local_irq_disable();
                               lock(&rq->__lock);
                               lock(&htab->buckets[i].lock);
  <Interrupt>
    lock(&rq->__lock);

 *** DEADLOCK ***

3 locks held by syz-executor280/5061:
 #0: ffffffff8e131920 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:298 [inline]
 #0: ffffffff8e131920 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:750 [inline]
 #0: ffffffff8e131920 (rcu_read_lock){....}-{1:2}, at: newidle_balance+0x2a8/0x1080 kernel/sched/fair.c:12314
 #1: ffff8880b953e158 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:559
 #2: ffffffff8e131920 (rcu_read_lock){....}-{1:2}, at: rcu_lock_acquire include/linux/rcupdate.h:298 [inline]
 #2: ffffffff8e131920 (rcu_read_lock){....}-{1:2}, at: rcu_read_lock include/linux/rcupdate.h:750 [inline]
 #2: ffffffff8e131920 (rcu_read_lock){....}-{1:2}, at: __bpf_trace_run kernel/trace/bpf_trace.c:2380 [inline]
 #2: ffffffff8e131920 (rcu_read_lock){....}-{1:2}, at: bpf_trace_run2+0x114/0x420 kernel/trace/bpf_trace.c:2420

the dependencies between HARDIRQ-irq-safe lock and the holding lock:
-> (&rq->__lock){-.-.}-{2:2} {
   IN-HARDIRQ-W at:
      lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
      _raw_spin_lock_nested+0x31/0x40 kernel/locking/spinlock.c:378
      raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:559
      raw_spin_rq_lock kernel/sched/sched.h:1385 [inline]
      rq_lock kernel/sched/sched.h:1699 [inline]
      scheduler_tick+0xa1/0x6e0 kernel/sched/core.c:5679
      update_process_times+0x202/0x230 kernel/time/timer.c:2481
      tick_periodic+0x190/0x220 kernel/time/tick-common.c:100
      tick_handle_periodic+0x4a/0x160 kernel/time/tick-common.c:112
      timer_interrupt+0x5c/0x70 arch/x86/kernel/time.c:57
      __handle_irq_event_percpu+0x28c/0xa30 kernel/irq/handle.c:158
      handle_irq_event_percpu kernel/irq/handle.c:193 [inline]
      handle_irq_event+0x89/0x1f0 kernel/irq/handle.c:210
      handle_edge_irq+0x25f/0xc20 kernel/irq/chip.c:831
      generic_handle_irq_desc include/linux/irqdesc.h:161 [inline]
      handle_irq arch/x86/kernel/irq.c:238 [inline]
      __common_interrupt+0x13a/0x230 arch/x86/kernel/irq.c:257
      common_interrupt+0xa5/0xd0 arch/x86/kernel/irq.c:247
      asm_common_interrupt+0x26/0x40 arch/x86/include/asm/idtentry.h:693
      __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:152 [inline]
      _raw_spin_unlock_irqrestore+0xd8/0x140 kernel/locking/spinlock.c:194
      __debug_check_no_obj_freed lib/debugobjects.c:998 [inline]
      debug_check_no_obj_freed+0x561/0x580 lib/debugobjects.c:1019
      free_pages_prepare mm/page_alloc.c:1146 [inline]
      free_unref_page_prepare+0x319/0xa90 mm/page_alloc.c:2346
      free_unref_page+0x37/0x3f0 mm/page_alloc.c:2486
      stack_depot_save_flags+0x38f/0x860 lib/stackdepot.c:714
      kasan_save_stack mm/kasan/common.c:48 [inline]
      kasan_save_track+0x51/0x80 mm/kasan/common.c:68
      unpoison_slab_object mm/kasan/common.c:312 [inline]
      __kasan_slab_alloc+0x66/0x80 mm/kasan/common.c:338
      kasan_slab_alloc include/linux/kasan.h:201 [inline]
      slab_post_alloc_hook mm/slub.c:3813 [inline]
      slab_alloc_node mm/slub.c:3860 [inline]
      kmem_cache_alloc_node+0x192/0x380 mm/slub.c:3903
      preload_this_cpu_lock mm/vmalloc.c:1642 [inline]
      alloc_vmap_area+0x6aa/0x1c10 mm/vmalloc.c:1686
      __get_vm_area_node+0x16e/0x370 mm/vmalloc.c:2667
      __vmalloc_node_range+0x3df/0x14a0 mm/vmalloc.c:3352
      __vmalloc_node mm/vmalloc.c:3457 [inline]
      vzalloc_node+0x7e/0x90 mm/vmalloc.c:3586
      gen_pool_add_owner+0x8b/0x290 lib/genalloc.c:192
      gen_pool_add_virt include/linux/genalloc.h:104 [inline]
      gen_pool_add include/linux/genalloc.h:122 [inline]
      mce_gen_pool_create arch/x86/kernel/cpu/mce/genpool.c:128 [inline]
      mce_gen_pool_init+0x66/0xb0 arch/x86/kernel/cpu/mce/genpool.c:146
      mcheck_cpu_init+0xb7f/0x1200 arch/x86/kernel/cpu/mce/core.c:2232
      identify_cpu+0x1939/0x3280 arch/x86/kernel/cpu/common.c:1889
      identify_boot_cpu+0xd/0xe0 arch/x86/kernel/cpu/common.c:1928
      arch_cpu_finalize_init+0x9/0xa0 arch/x86/kernel/cpu/common.c:2310
      start_kernel+0x402/0x500 init/main.c:1043
      x86_64_start_reservations+0x2a/0x30 arch/x86/kernel/head64.c:509
      x86_64_start_kernel+0x99/0xa0 arch/x86/kernel/head64.c:490
      common_startup_64+0x13e/0x147
   IN-SOFTIRQ-W at:
      lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
      _raw_spin_lock_nested+0x31/0x40 kernel/locking/spinlock.c:378
      raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:559
      raw_spin_rq_lock kernel/sched/sched.h:1385 [inline]
      rq_lock kernel/sched/sched.h:1699 [inline]
      ttwu_queue kernel/sched/core.c:4055 [inline]
      try_to_wake_up+0x7d3/0x1470 kernel/sched/core.c:4378
      call_timer_fn+0x17e/0x600 kernel/time/timer.c:1792
      expire_timers kernel/time/timer.c:1843 [inline]
      __run_timers kernel/time/timer.c:2408 [inline]
      __run_timer_base+0x66a/0x8e0 kernel/time/timer.c:2419
      run_timer_base kernel/time/timer.c:2428 [inline]
      run_timer_softirq+0xb7/0x170 kernel/time/timer.c:2438
      __do_softirq+0x2bc/0x943 kernel/softirq.c:554
      invoke_softirq kernel/softirq.c:428 [inline]
      __irq_exit_rcu+0xf2/0x1c0 kernel/softirq.c:633
      irq_exit_rcu+0x9/0x30 kernel/softirq.c:645
      instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1043 [inline]
      sysvec_apic_timer_interrupt+0xa6/0xc0 arch/x86/kernel/apic/apic.c:1043
      asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:702
      native_safe_halt arch/x86/include/asm/irqflags.h:48 [inline]
      arch_safe_halt arch/x86/include/asm/irqflags.h:86 [inline]
      default_idle+0x13/0x20 arch/x86/kernel/process.c:742
      default_idle_call+0x74/0xb0 kernel/sched/idle.c:117
      cpuidle_idle_call kernel/sched/idle.c:191 [inline]
      do_idle+0x22f/0x5d0 kernel/sched/idle.c:332
      cpu_startup_entry+0x42/0x60 kernel/sched/idle.c:430
      rest_init+0x2e0/0x300 init/main.c:730
      arch_call_rest_init+0xe/0x10 init/main.c:831
      start_kernel+0x47a/0x500 init/main.c:1077
      x86_64_start_reservations+0x2a/0x30 arch/x86/kernel/head64.c:509
      x86_64_start_kernel+0x99/0xa0 arch/x86/kernel/head64.c:490
      common_startup_64+0x13e/0x147
   INITIAL USE at:
      lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
      _raw_spin_lock_nested+0x31/0x40 kernel/locking/spinlock.c:378
      raw_spin_rq_lock_nested+0x2a/0x140 kernel/sched/core.c:559
      raw_spin_rq_lock kernel/sched/sched.h:1385 [inline]
      _raw_spin_rq_lock_irqsave kernel/sched/sched.h:1404 [inline]
      rq_lock_irqsave kernel/sched/sched.h:1683 [inline]
      rq_attach_root+0xee/0x540 kernel/sched/topology.c:494
      sched_init+0x64e/0xc30 kernel/sched/core.c:10031
      start_kernel+0x1ab/0x500 init/main.c:948
      x86_64_start_reservations+0x2a/0x30 arch/x86/kernel/head64.c:509
      x86_64_start_kernel+0x99/0xa0 arch/x86/kernel/head64.c:490
      common_startup_64+0x13e/0x147
 }
 ... key at: [] sched_init.__key+0x0/0x20

the dependencies between the lock to be acquired and HARDIRQ-irq-unsafe lock:
-> (&htab->buckets[i].lock){+...}-{2:2} {
   HARDIRQ-ON-W at:
      lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
      __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
      _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
      spin_lock_bh include/linux/spinlock.h:356 [inline]
      sock_hash_free+0x164/0x820 net/core/sock_map.c:1154
      bpf_map_free_deferred+0xe6/0x110 kernel/bpf/syscall.c:734
      process_one_work kernel/workqueue.c:3254 [inline]
      process_scheduled_works+0xa00/0x1770 kernel/workqueue.c:3335
      worker_thread+0x86d/0xd70 kernel/workqueue.c:3416
      kthread+0x2f0/0x390 kernel/kthread.c:388
      ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
      ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243
   INITIAL USE at:
      lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
      __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
      _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
      spin_lock_bh include/linux/spinlock.h:356 [inline]
      sock_hash_free+0x164/0x820 net/core/sock_map.c:1154
      bpf_map_free_deferred+0xe6/0x110 kernel/bpf/syscall.c:734
      process_one_work kernel/workqueue.c:3254 [inline]
      process_scheduled_works+0xa00/0x1770 kernel/workqueue.c:3335
      worker_thread+0x86d/0xd70 kernel/workqueue.c:3416
      kthread+0x2f0/0x390 kernel/kthread.c:388
      ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
      ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:243
 }
 ... key at: [] sock_hash_alloc.__key+0x0/0x20
 ... acquired at:
   lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
   __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
   _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
   spin_lock_bh include/linux/spinlock.h:356 [inline]
   sock_hash_delete_elem+0xb0/0x300 net/core/sock_map.c:939
   bpf_prog_2c29ac5cdc6b1842+0x42/0x46
   bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
   __bpf_prog_run include/linux/filter.h:657 [inline]
   bpf_prog_run include/linux/filter.h:664 [inline]
   __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
   bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
   trace_sched_migrate_task include/trace/events/sched.h:274 [inline]
   set_task_cpu+0x53d/0x5b0 kernel/sched/core.c:3390
   detach_task kernel/sched/fair.c:9007 [inline]
   detach_tasks kernel/sched/fair.c:9146 [inline]
   load_balance+0x60f5/0x8840 kernel/sched/fair.c:11318
   newidle_balance+0x6be/0x1080 kernel/sched/fair.c:12345
   pick_next_task_fair+0x27a/0xde0 kernel/sched/fair.c:8508
   __pick_next_task+0xb0/0x2c0 kernel/sched/core.c:6030
   pick_next_task kernel/sched/core.c:6120 [inline]
   __schedule+0x726/0x4a20 kernel/sched/core.c:6700
   __schedule_loop kernel/sched/core.c:6813 [inline]
   schedule+0x14b/0x320 kernel/sched/core.c:6828
   do_nanosleep+0x197/0x600 kernel/time/hrtimer.c:2051
   hrtimer_nanosleep+0x227/0x470 kernel/time/hrtimer.c:2104
   __do_sys_clock_nanosleep kernel/time/posix-timers.c:1396 [inline]
   __se_sys_clock_nanosleep+0x32b/0x3c0 kernel/time/posix-timers.c:1373
   do_syscall_64+0xfb/0x240
   entry_SYSCALL_64_after_hwframe+0x6d/0x75

stack backtrace:
CPU: 0 PID: 5061 Comm: syz-executor280 Not tainted 6.8.0-syzkaller-05236-g443574b03387 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x1e7/0x2e0 lib/dump_stack.c:106
 print_bad_irq_dependency kernel/locking/lockdep.c:2626 [inline]
 check_irq_usage kernel/locking/lockdep.c:2865 [inline]
 check_prev_add kernel/locking/lockdep.c:3138 [inline]
 check_prevs_add kernel/locking/lockdep.c:3253 [inline]
 validate_chain+0x4dc7/0x58e0 kernel/locking/lockdep.c:3869
 __lock_acquire+0x1346/0x1fd0 kernel/locking/lockdep.c:5137
 lock_acquire+0x1e4/0x530 kernel/locking/lockdep.c:5754
 __raw_spin_lock_bh include/linux/spinlock_api_smp.h:126 [inline]
 _raw_spin_lock_bh+0x35/0x50 kernel/locking/spinlock.c:178
 spin_lock_bh include/linux/spinlock.h:356 [inline]
 sock_hash_delete_elem+0xb0/0x300 net/core/sock_map.c:939
 bpf_prog_2c29ac5cdc6b1842+0x42/0x46
 bpf_dispatcher_nop_func include/linux/bpf.h:1234 [inline]
 __bpf_prog_run include/linux/filter.h:657 [inline]
 bpf_prog_run include/linux/filter.h:664 [inline]
 __bpf_trace_run kernel/trace/bpf_trace.c:2381 [inline]
 bpf_trace_run2+0x204/0x420 kernel/trace/bpf_trace.c:2420
 trace_sched_migrate_task include/trace/events/sched.h:274 [inline]
 set_task_cpu+0x53d/0x5b0 kernel/sched/core.c:3390
 detach_task kernel/sched/fair.c:9007 [inline]
 detach_tasks kernel/sched/fair.c:9146 [inline]
 load_balance+0x60f5/0x8840 kernel/sched/fair.c:11318
 newidle_balance+0x6be/0x1080 kernel/sched/fair.c:12345
 pick_next_task_fair+0x27a/0xde0 kernel/sched/fair.c:8508
 __pick_next_task+0xb0/0x2c0 kernel/sched/core.c:6030
 pick_next_task kernel/sched/core.c:6120 [inline]
 __schedule+0x726/0x4a20 kernel/sched/core.c:6700
 __schedule_loop kernel/sched/core.c:6813 [inline]
 schedule+0x14b/0x320 kernel/sched/core.c:6828
 do_nanosleep+0x197/0x600 kernel/time/hrtimer.c:2051
 hrtimer_nanosleep+0x227/0x470 kernel/time/hrtimer.c:2104
 __do_sys_clock_nanosleep kernel/time/posix-timers.c:1396 [inline]
 __se_sys_clock_nanosleep+0x32b/0x3c0 kernel/time/posix-timers.c:1373
 do_syscall_64+0xfb/0x240
 entry_SYSCALL_64_after_hwframe+0x6d/0x75
RIP: 0033:0x7f4d9d3c66b3
Code: 00 00 00 00 0f 1f 00 83 ff 03 74 7b 83 ff 02 b8 fa ff ff ff 49 89 ca 0f 44 f8 80 3d ce e9 03 00 00 74 14 b8 e6 00 00 00 0f 05 d8 c3 66 2e 0f 1f 84 00 00 00 00 00 48 83 ec 28 48 89 54 24 10
RSP: 002b:00007ffe3ac5d8a8 EFLAGS: 00000202 ORIG_RAX: 00000000000000e6
RAX: ffffffffffffffda RBX: 000000000000144c RCX: 00007f4d9d3c66b3
RDX: 00007ffe3ac5d8c0 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 0000000000010881 R08: 0000000000000010 R09: 00007f4d9d34c0b0
R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffe3ac5d8fc
R13: 431bde82d7b634db R14: 0000000000000001 R15: 0000000000000001
------------[ cut here ]------------
raw_local_irq_restore() called with IRQs enabled
WARNING: CPU: 0 PID: 5061 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x29/0x40 kernel/locking/irqflag-debug.c:10
Modules linked in:
CPU: 0 PID: 5061 Comm: syz-executor280 Not tainted 6.8.0-syzkaller-05236-g443574b03387 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/27/2024
RIP: 0010:warn_bogus_irq_restore+0x29/0x40 kernel/locking/irqflag-debug.c:10
Code: 90 f3 0f 1e fa 90 80 3d de 59 01 04 00 74 06 90 c3 cc cc cc cc c6 05 cf 59 01 04 01 90 48 c7 c7 20 ba aa 8b e8 f8 d5 e7 f5 90 <0f> 0b 90 90 90 c3 cc cc cc cc 66 2e 0f 1f 84 00 00 00 00 00 0f 1f
RSP: 0018:ffffc9000418f0d8 EFLAGS: 00010246
RAX: dbab36d8128c7100 RBX: 1ffff92000831ecc RCX: ffff888017bdbc00
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: ffffc9000418f770 R08: ffffffff8157cc12 R09: 1ffff92000831d70
R10: dffffc0000000000 R11: fffff52000831d71 R12: ffffc9000418f698
R13: ffffc9000418f6d8 R14: 0000000000000000 R15: ffff8880b943e140
FS:  000055558e400380(0000) GS:ffff8880b9400000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000055558e400ca8 CR3: 00000000768ba000 CR4: 00000000003506f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 load_balance+0x69c8/0x8840 kernel/sched/fair.c:11335
 newidle_balance+0x6be/0x1080 kernel/sched/fair.c:12345
 pick_next_task_fair+0x27a/0xde0 kernel/sched/fair.c:8508
 __pick_next_task+0xb0/0x2c0 kernel/sched/core.c:6030
 pick_next_task kernel/sched/core.c:6120 [inline]
 __schedule+0x726/0x4a20 kernel/sched/core.c:6700
 __schedule_loop kernel/sched/core.c:6813 [inline]
 schedule+0x14b/0x320 kernel/sched/core.c:6828
 do_nanosleep+0x197/0x600 kernel/time/hrtimer.c:2051
 hrtimer_nanosleep+0x227/0x470 kernel/time/hrtimer.c:2104
 __do_sys_clock_nanosleep kernel/time/posix-timers.c:1396 [inline]
 __se_sys_clock_nanosleep+0x32b/0x3c0 kernel/time/posix-timers.c:1373
 do_syscall_64+0xfb/0x240
 entry_SYSCALL_64_after_hwframe+0x6d/0x75
RIP: 0033:0x7f4d9d3c66b3
Code: 00 00 00 00 0f 1f 00 83 ff 03 74 7b 83 ff 02 b8 fa ff ff ff 49 89 ca 0f 44 f8 80 3d ce e9 03 00 00 74 14 b8 e6 00 00 00 0f 05 d8 c3 66 2e 0f 1f 84 00 00 00 00 00 48 83 ec 28 48 89 54 24 10
RSP: 002b:00007ffe3ac5d8a8 EFLAGS: 00000202 ORIG_RAX: 00000000000000e6
RAX: ffffffffffffffda RBX: 000000000000144c RCX: 00007f4d9d3c66b3
RDX: 00007ffe3ac5d8c0 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 0000000000010881 R08: 0000000000000010 R09: 00007f4d9d34c0b0
R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffe3ac5d8fc
R13: 431bde82d7b634db R14: 0000000000000001 R15: 0000000000000001
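
Editor's illustration: the "Possible interrupt unsafe locking scenario" block above can be restated as a minimal userspace analogy. The sketch below is NOT kernel code and is not the sock_map code path; the names rq_lock and bucket_lock merely stand in for rq->__lock and &htab->buckets[i].lock, and pthread mutexes stand in for the spinlocks. sched_side() plays the tracepoint path that already holds the runqueue lock when the BPF program calls sock_hash_delete_elem(); map_side() plays sock_hash_free(), which holds a bucket lock with only BHs disabled when a hard interrupt needing the runqueue lock lands on top of it (the "interrupt" here is just the same thread wanting the other lock). pthread_mutex_trylock() is used so the demo reports the cycle instead of actually hanging. Build with: cc -pthread demo.c

/*
 * Userspace analogy of the lockdep-reported inversion; names are illustrative,
 * not the kernel's.  Each side acquires the two locks in the opposite order.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t rq_lock     = PTHREAD_MUTEX_INITIALIZER; /* "HARDIRQ-safe" lock in the report   */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER; /* "HARDIRQ-unsafe" lock in the report */

/* Order 1: rq->__lock -> buckets[i].lock (BPF prog on a scheduler tracepoint). */
static void *sched_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&rq_lock);
	usleep(100000);                 /* let map_side() grab bucket_lock first */
	if (pthread_mutex_trylock(&bucket_lock) != 0)
		puts("sched_side: needs bucket_lock while holding rq_lock");
	else
		pthread_mutex_unlock(&bucket_lock);
	pthread_mutex_unlock(&rq_lock);
	return NULL;
}

/* Order 2: buckets[i].lock, then an "interrupt" that needs rq->__lock. */
static void *map_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&bucket_lock);
	usleep(100000);                 /* "interrupt" arrives while bucket_lock is held */
	if (pthread_mutex_trylock(&rq_lock) != 0)
		puts("map_side: interrupt needs rq_lock while bucket_lock is held -> cycle");
	else
		pthread_mutex_unlock(&rq_lock);
	pthread_mutex_unlock(&bucket_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, sched_side, NULL);
	pthread_create(&b, NULL, map_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

When both threads overlap, each ends up wanting the lock the other holds, which is exactly the class-level cycle lockdep flags: with real spinlocks and a real interrupt instead of trylock, neither side could ever make progress.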