--- x/net/core/dev.c
+++ y/net/core/dev.c
@@ -4252,6 +4252,16 @@
 
+/*
+ * Nesting depth of the transmit path on this CPU: non-zero while a
+ * packet handed to a qdisc (__dev_xmit_skb) re-enters __dev_queue_xmit,
+ * in which case HARD_TX_LOCK must not be taken again or we deadlock.
+ *
+ * This must be per-CPU, not a plain 'static int': a single shared
+ * counter would be a data race across CPUs, and a bump by one CPU would
+ * make an unrelated CPU transmit without the txq lock.  Per-CPU access
+ * is safe here because the whole region runs under rcu_read_lock_bh(),
+ * so BH is disabled and the task cannot migrate between CPUs.
+ */
+static DEFINE_PER_CPU(unsigned int, xmit_re_entrance);
+
 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
@@ -4309,7 +4319,9 @@ int __dev_queue_xmit(struct sk_buff *skb
 	trace_net_dev_queue(skb);
 	if (q->enqueue) {
+		__this_cpu_inc(xmit_re_entrance);
 		rc = __dev_xmit_skb(skb, q, dev, txq);
+		__this_cpu_dec(xmit_re_entrance);
 		goto out;
 	}
 
@@ -4332,6 +4344,8 @@ int __dev_queue_xmit(struct sk_buff *skb
 	 * to -1 or to their cpu id, but not to our id.
 	 */
 	if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
+		bool locked = false;
+
 		if (dev_xmit_recursion())
 			goto recursion_alert;
 
@@ -4339,18 +4353,31 @@ int __dev_queue_xmit(struct sk_buff *skb
 		if (!skb)
 			goto out;
 
-		HARD_TX_LOCK(dev, txq, cpu);
+		/*
+		 * Only take the txq lock at the outermost nesting level on
+		 * this CPU: when we are re-entered from inside
+		 * __dev_xmit_skb() the lock may already be held and taking
+		 * it again would deadlock.  Re-check xmit_lock_owner after
+		 * HARD_TX_LOCK because it is a no-op for LLTX devices, in
+		 * which case there is nothing to unlock later.
+		 */
+		if (!__this_cpu_read(xmit_re_entrance)) {
+			HARD_TX_LOCK(dev, txq, cpu);
+			if (READ_ONCE(txq->xmit_lock_owner) == cpu)
+				locked = true;
+		}
 
 		if (!netif_xmit_stopped(txq)) {
 			dev_xmit_recursion_inc();
 			skb = dev_hard_start_xmit(skb, dev, txq, &rc);
 			dev_xmit_recursion_dec();
 			if (dev_xmit_complete(rc)) {
-				HARD_TX_UNLOCK(dev, txq);
+				if (locked)
+					HARD_TX_UNLOCK(dev, txq);
 				goto out;
 			}
 		}
-		HARD_TX_UNLOCK(dev, txq);
+		if (locked)
+			HARD_TX_UNLOCK(dev, txq);
 		net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
 				     dev->name);
 	} else {