--- x/net/core/filter.c
+++ y/net/core/filter.c
@@ -2144,6 +2144,7 @@ static int __bpf_redirect_no_mac(struct
 	unsigned int mlen = skb_network_offset(skb);
 
 	if (unlikely(skb->len <= mlen)) {
+out:
 		kfree_skb(skb);
 		return -ERANGE;
 	}
@@ -2159,6 +2160,8 @@ static int __bpf_redirect_no_mac(struct
 		if (!skb_at_tc_ingress(skb))
 			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
 	}
+	if (skb->len <= sizeof(struct iphdr))
+		goto out;
 	skb_pop_mac_header(skb);
 	skb_reset_mac_len(skb);
 	return flags & BPF_F_INGRESS ?
@@ -2431,7 +2434,7 @@ enum {
 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex,
 	   u64, flags)
 {
 	struct net_device *dev;
-	struct sk_buff *clone;
+	struct sk_buff *buf;
 	int ret;
 	if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
@@ -2441,22 +2444,11 @@ BPF_CALL_3(bpf_clone_redirect, struct sk
 	if (unlikely(!dev))
 		return -EINVAL;
 
-	clone = skb_clone(skb, GFP_ATOMIC);
-	if (unlikely(!clone))
+	buf = skb_copy(skb, GFP_ATOMIC);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	/* For direct write, we need to keep the invariant that the skbs
-	 * we're dealing with need to be uncloned. Should uncloning fail
-	 * here, we need to free the just generated clone to unclone once
-	 * again.
-	 */
-	ret = bpf_try_make_head_writable(skb);
-	if (unlikely(ret)) {
-		kfree_skb(clone);
-		return -ENOMEM;
-	}
-
-	return __bpf_redirect(clone, dev, flags);
+	return __bpf_redirect(buf, dev, flags);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
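
A note on the last hunk: skb_clone() shares the packet data buffer between the original skb and the clone, which is why the deleted block had to make the original's head writable (unclone it) before any direct write, and had to free the just-created clone when that failed. skb_copy() duplicates both the skb head and its data up front, so that step becomes unnecessary, at the cost of a full copy. The following is a minimal sketch of the invariant the deleted code enforced; it is illustrative only, clone_then_redirect() is a hypothetical helper, and skb_ensure_writable() stands in for the static bpf_try_make_head_writable() used in net/core/filter.c.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Illustrative sketch, not part of the patch above. After skb_clone(),
 * skb and clone share skb->data, so skb must be uncloned (head made
 * writable) before any direct write; if that reallocation fails, the
 * fresh clone must be freed again. Assumes it sits in net/core/filter.c
 * next to the static __bpf_redirect().
 */
static int clone_then_redirect(struct sk_buff *skb, struct net_device *dev,
			       u64 flags)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (unlikely(!clone))
		return -ENOMEM;

	/* skb_ensure_writable() reallocates the head only if it is cloned
	 * or otherwise read-only; stand-in for bpf_try_make_head_writable().
	 */
	if (unlikely(skb_ensure_writable(skb, skb_headlen(skb)))) {
		kfree_skb(clone);	/* undo the clone on failure */
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}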