 ret_from_fork+0x48/0x80 arch/x86/kernel/process.c:147
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
BUG: workqueue lockup - pool cpus=1 node=0 flags=0x0 nice=0 stuck for 216s!
Showing busy workqueues and worker pools:
workqueue events: flags=0x0
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=19 refcnt=20
    pending: 3*nsim_dev_trap_report_work, 3*nsim_dev_hwstats_traffic_work, vmstat_shepherd, 3*psi_avgs_work, 2*ovs_dp_masks_rebalance, 2*psi_avgs_work, ovs_dp_masks_rebalance, kfree_rcu_monitor, ovs_dp_masks_rebalance, switchdev_deferred_process_work, rht_deferred_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    in-flight: 2748:linkwatch_event
workqueue events_long: flags=0x0
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=8 refcnt=9
    pending: 6*defense_work_handler, br_multicast_gc_work, br_fdb_cleanup
workqueue events_unbound: flags=0x2
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=8 refcnt=11
    in-flight: 50:toggle_allocation_gate
    pending: flush_memcg_stats_dwork, crng_reseed, 2*cfg80211_wiphy_work, idle_cull_fn, 2*macvlan_process_broadcast
    inactive: macvlan_process_broadcast, idle_cull_fn
workqueue events_power_efficient: flags=0x80
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=2 refcnt=3
    pending: reg_check_chans_work, check_lifetime
workqueue netns: flags=0x6000a
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=4
    in-flight: 10:cleanup_net
workqueue mm_percpu_wq: flags=0x8
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: vmstat_update
workqueue writeback: flags=0x4a
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=4 refcnt=5
    pending: 2*wb_update_bandwidth_workfn, 2*wb_workfn
workqueue kblockd: flags=0x18
  pwq 3: cpus=0 node=0 flags=0x0 nice=-20 active=2 refcnt=3
    pending: 2*blk_mq_timeout_work
workqueue dm_bufio_cache: flags=0x8
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: work_fn
workqueue ipv6_addrconf: flags=0x6000a
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=6
    pending: addrconf_verify_work
    inactive: 2*addrconf_verify_work
workqueue bat_events: flags=0x6000a
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=33
    in-flight: 11:batadv_iv_send_outstanding_bat_ogm_packet
    inactive: 6*batadv_nc_worker, batadv_dat_purge, batadv_bla_periodic_work, batadv_mcast_mla_update, 2*batadv_purge_orig, 5*batadv_iv_send_outstanding_bat_ogm_packet, batadv_purge_orig, 2*batadv_iv_send_outstanding_bat_ogm_packet, batadv_tt_purge, 3*batadv_iv_send_outstanding_bat_ogm_packet, batadv_tt_purge, batadv_bla_periodic_work, 2*batadv_dat_purge, 2*batadv_bla_periodic_work
workqueue wg-crypt-wg0: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-kex-wg1: flags=0x6
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: wg_packet_handshake_send_worker
workqueue wg-crypt-wg1: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-kex-wg2: flags=0x6
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: wg_packet_handshake_send_worker
workqueue wg-crypt-wg2: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-kex-wg0: flags=0x6
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: wg_packet_handshake_send_worker
workqueue wg-crypt-wg0: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-kex-wg1: flags=0x6
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: wg_packet_handshake_send_worker
workqueue wg-crypt-wg1: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-crypt-wg2: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue hci4: flags=0x20012
  pwq 9: cpus=0-1 node=0 flags=0x4 nice=-20 active=1 refcnt=4
    pending: hci_conn_timeout
workqueue wg-crypt-wg0: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-crypt-wg1: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-kex-wg2: flags=0x6
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: wg_packet_handshake_send_worker
workqueue wg-crypt-wg2: flags=0x28
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
pool 6: cpus=1 node=0 flags=0x0 nice=0 hung=217s workers=22 idle: 8827 5162 8840 8831 24 5175 8824 8835 8822 8834 44 8860 8830 5307 8991 5167 8815 8842 5164 8823 8993
pool 8: cpus=0-1 flags=0x4 nice=0 hung=218s workers=10 idle: 60 10097 2410 34 2472 74 2793
Showing backtraces of running workers in stalled CPU-bound worker pools: