BUG: workqueue lockup - pool cpus=0 node=0 flags=0x0 nice=0 stuck for 179s!
Showing busy workqueues and worker pools:
workqueue events: flags=0x100
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=11 refcnt=12
    pending: 3*nsim_dev_hwstats_traffic_work, vmstat_shepherd, psi_avgs_work, ovs_dp_masks_rebalance, psi_avgs_work, 3*ovs_dp_masks_rebalance, xfrm_state_gc_task
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=13 refcnt=14
    pending: 5*nsim_dev_hwstats_traffic_work, ovs_dp_masks_rebalance, 2*psi_avgs_work, 5*ovs_dp_masks_rebalance
workqueue events_highpri: flags=0x110
  pwq 3: cpus=0 node=0 flags=0x0 nice=-20 active=1 refcnt=2
    pending: fill_page_cache_func
  pwq 7: cpus=1 node=0 flags=0x0 nice=-20 active=1 refcnt=2
    pending: fill_page_cache_func
workqueue events_long: flags=0x100
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=4 refcnt=5
    pending: 2*defense_work_handler, 2*br_fdb_cleanup
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=9 refcnt=10
    pending: 8*defense_work_handler, br_fdb_cleanup
workqueue events_unbound: flags=0x2
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=2 refcnt=3
    pending: toggle_allocation_gate, flush_memcg_stats_dwork
workqueue events_unbound: flags=0x2
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=9 refcnt=10
    pending: 4*cfg80211_wiphy_work, 3*macvlan_process_broadcast, 2*idle_cull_fn
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=13 refcnt=14
    pending: 7*nsim_dev_trap_report_work, 2*cfg80211_wiphy_work, 2*nsim_dev_trap_report_work, crng_reseed, idle_cull_fn
workqueue events_power_efficient: flags=0x180
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=8 refcnt=9
    pending: fb_flashcursor, wg_ratelimiter_gc_entries, neigh_periodic_work, neigh_managed_work, do_cache_clean, 3*check_lifetime
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=11 refcnt=12
    pending: nf_flow_offload_work_gc, neigh_managed_work, neigh_periodic_work, gc_worker, 7*check_lifetime
workqueue kvfree_rcu_reclaim: flags=0xa
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: kfree_rcu_monitor
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: kfree_rcu_monitor
workqueue netns: flags=0x6000a
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=4
    in-flight: 3637:cleanup_net
workqueue mm_percpu_wq: flags=0x8
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: vmstat_update
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: vmstat_update
workqueue writeback: flags=0x4a
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=3 refcnt=4
    pending: wb_update_bandwidth_workfn, 2*wb_workfn
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=2 refcnt=3
    pending: 2*wb_workfn
workqueue kblockd: flags=0x18
  pwq 3: cpus=0 node=0 flags=0x0 nice=-20 active=2 refcnt=3
    pending: 2*blk_mq_timeout_work
  pwq 7: cpus=1 node=0 flags=0x0 nice=-20 active=1 refcnt=2
    pending: blk_mq_timeout_work
workqueue iou_exit: flags=0x2
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    in-flight: 13:io_ring_exit_work
workqueue ipv6_addrconf: flags=0x6000a
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=13
    pending: addrconf_verify_work
    inactive: 9*addrconf_verify_work
workqueue krxrpcd: flags=0x2001a
  pwq 9: cpus=0-1 node=0 flags=0x4 nice=-20 active=1 refcnt=13
    pending: rxrpc_peer_keepalive_worker
    inactive: 9*rxrpc_peer_keepalive_worker
workqueue bat_events: flags=0x6000a
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=67
    pending: batadv_iv_send_outstanding_bat_ogm_packet
    inactive: 9*batadv_mcast_mla_update, 3*batadv_iv_send_outstanding_bat_ogm_packet, 2*batadv_purge_orig, 2*batadv_iv_send_outstanding_bat_ogm_packet, 2*batadv_purge_orig, 3*batadv_iv_send_outstanding_bat_ogm_packet, batadv_purge_orig, 5*batadv_iv_send_outstanding_bat_ogm_packet, 2*batadv_purge_orig, batadv_iv_send_outstanding_bat_ogm_packet, 2*batadv_purge_orig, 4*batadv_iv_send_outstanding_bat_ogm_packet, 2*batadv_tt_purge, 3*batadv_dat_purge, batadv_bla_periodic_work, batadv_dat_purge, 2*batadv_bla_periodic_work, batadv_dat_purge, 2*batadv_bla_periodic_work, 7*batadv_tt_purge, batadv_dat_purge, 3*batadv_bla_periodic_work, 2*batadv_dat_purge, batadv_bla_periodic_work, batadv_dat_purge
workqueue wg-crypt-wg0: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-crypt-wg0: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-crypt-wg1: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-kex-wg1: flags=0x6
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: wg_packet_handshake_send_worker
workqueue wg-crypt-wg1: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-kex-wg2: flags=0x6
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: wg_packet_handshake_send_worker
workqueue wg-crypt-wg2: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-kex-wg2: flags=0x6
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    pending: wg_packet_handshake_send_worker
workqueue wg-crypt-wg2: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-crypt-wg0: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-crypt-wg1: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue wg-crypt-wg2: flags=0x128
  pwq 2: cpus=0 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
  pwq 6: cpus=1 node=0 flags=0x0 nice=0 active=1 refcnt=2
    pending: wg_packet_encrypt_worker
workqueue hci5: flags=0x20012
  pwq 9: cpus=0-1 node=0 flags=0x4 nice=-20 active=1 refcnt=4
    pending: hci_conn_timeout
workqueue hci3: flags=0x20012
  pwq 9: cpus=0-1 node=0 flags=0x4 nice=-20 active=1 refcnt=4
    pending: hci_conn_timeout
workqueue hci6: flags=0x20012
  pwq 9: cpus=0-1 node=0 flags=0x4 nice=-20 active=1 refcnt=4
    pending: hci_conn_timeout
workqueue btrfs-endio-meta: flags=0xe
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=2
    in-flight: 3441:simple_end_io_work
workqueue btrfs-qgroup-rescan: flags=0x2010e
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=4
    in-flight: 6175:btrfs_work_helper
workqueue btrfs-discard: flags=0x20006
  pwq 8: cpus=0-1 flags=0x4 nice=0 active=1 refcnt=4
    pending: btrfs_discard_workfn
pool 8: cpus=0-1 flags=0x4 nice=0 hung=180s workers=27 idle: 3556 49 7420 6204 6727 3722 3510 37 12 6085 5993 6335 6015 3455 6367 1325 6077 6563 62 1313 6072 6091 702
Showing backtraces of running workers in stalled CPU-bound worker pools: