--- x/net/sched/sch_taprio.c
+++ y/net/sched/sch_taprio.c
@@ -787,6 +787,7 @@ static struct sk_buff *taprio_dequeue_tc
 	int num_tc = netdev_get_num_tc(dev);
 	struct sk_buff *skb;
 	int tc;
+	int loop = 0;
 
 	for (tc = num_tc - 1; tc >= 0; tc--) {
 		int first_txq = q->cur_txq[tc];
@@ -805,6 +806,8 @@ static struct sk_buff *taprio_dequeue_tc
 			if (skb)
 				return skb;
 
+			if (loop++ > 50)
+				return NULL;
 		} while (q->cur_txq[tc] != first_txq);
 	}
 
--- x/net/batman-adv/bridge_loop_avoidance.c
+++ y/net/batman-adv/bridge_loop_avoidance.c
@@ -1224,6 +1224,7 @@ static void batadv_bla_purge_backbone_gw
 	struct batadv_hashtable *hash;
 	spinlock_t *list_lock;	/* protects write access to the hash lists */
 	int i;
+	int rounds = 0;
 
 	hash = bat_priv->bla.backbone_hash;
 	if (!hash)
@@ -1236,6 +1237,10 @@ static void batadv_bla_purge_backbone_gw
 		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
+			if (rounds++ > 50) {
+				spin_unlock_bh(list_lock);
+				return;
+			}
 			if (now)
 				goto purge_now;
 
 			if (!batadv_has_timed_out(backbone_gw->lasttime,
@@ -1278,6 +1283,7 @@ static void batadv_bla_purge_claims(stru
 	struct hlist_head *head;
 	struct batadv_hashtable *hash;
 	int i;
+	int rounds = 0;
 
 	hash = bat_priv->bla.claim_hash;
 	if (!hash)
@@ -1313,6 +1319,10 @@ purge_now:
 			   claim->addr, claim->vid);
 skip:
 		batadv_backbone_gw_put(backbone_gw);
+		if (rounds++ > 50) {
+			rcu_read_unlock();
+			return;
+		}
 	}
 	rcu_read_unlock();
 }
@@ -1433,6 +1443,7 @@ static void batadv_bla_periodic_work(str
 	struct batadv_hard_iface *primary_if;
 	bool send_loopdetect = false;
 	int i;
+	int rounds = 0;
 
 	delayed_work = to_delayed_work(work);
 	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
@@ -1472,6 +1483,10 @@ static void batadv_bla_periodic_work(str
 
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+			if (rounds++ > 50) {
+				rcu_read_unlock();
+				goto out;
+			}
 			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;
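
Every hunk above applies the same debugging idiom: count the iterations of a loop that is suspected of never terminating and bail out, after releasing whatever spinlock or RCU read-side section is still held, once an arbitrary cap of 50 rounds is exceeded. Below is a minimal user-space sketch of that guard pattern, not kernel code; the linked list, the pthread mutex, the find_bounded() helper and the threshold are illustrative assumptions only.

/*
 * Sketch of the bounded-iteration guard used in the hunks above: cap a
 * traversal that might spin forever and release the held lock before
 * bailing out.  All names and the threshold of 50 are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
	int val;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the first value equal to @want, or -1 if not found or capped. */
static int find_bounded(struct node *head, int want)
{
	struct node *cur;
	int rounds = 0;

	pthread_mutex_lock(&list_lock);
	for (cur = head; cur; cur = cur->next) {
		/* Guard: a corrupted (cyclic) list would otherwise loop forever. */
		if (rounds++ > 50) {
			pthread_mutex_unlock(&list_lock);
			return -1;
		}
		if (cur->val == want) {
			pthread_mutex_unlock(&list_lock);
			return cur->val;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return -1;
}

int main(void)
{
	struct node c = { .next = NULL, .val = 3 };
	struct node b = { .next = &c,   .val = 2 };
	struct node a = { .next = &b,   .val = 1 };

	/* Make the list cyclic on purpose; the guard keeps the walk finite. */
	c.next = &a;

	printf("lookup 99 -> %d\n", find_bounded(&a, 99));
	return 0;
}

Unlocking before returning on the capped path mirrors what the hunks do with spin_unlock_bh() and rcu_read_unlock(), so the bail-out never leaks a lock or an RCU read-side critical section.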