--- x/net/core/skbuff.c
+++ y/net/core/skbuff.c
@@ -594,6 +594,10 @@ static void *kmalloc_reserve(unsigned in
 	 */
 	*size = (unsigned int)obj_size;
 
+	if (obj_size > (PAGE_SIZE << MAX_PAGE_ORDER)) {
+		obj = NULL;
+		goto out;
+	}
 	/*
 	 * Try a regular allocation, when that fails and we're not entitled
 	 * to the reserves, fail.
@@ -657,7 +661,7 @@ struct sk_buff *__alloc_skb(unsigned int
 	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
 		skb = napi_skb_cache_get();
 	else
-		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
+		skb = kmem_cache_alloc_node(cache, gfp_mask & ~(GFP_DMA | __GFP_HIGHMEM), node);
 	if (unlikely(!skb))
 		return NULL;
 	prefetchw(skb);
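
Notes on the two hunks, with a hedged sketch of the size-cap logic. The first hunk makes kmalloc_reserve() bail out early when the rounded-up head size exceeds the largest contiguous block the page allocator can provide (PAGE_SIZE << MAX_PAGE_ORDER), returning NULL through the existing out: path rather than handing an unsatisfiable obj_size to the allocator. The second hunk additionally masks __GFP_HIGHMEM out of gfp_mask for the slab allocation of the sk_buff structure itself; slab objects must be directly addressable by the kernel, so zone modifiers such as GFP_DMA and __GFP_HIGHMEM do not belong in that call. The standalone C sketch below only illustrates the cap arithmetic; the PAGE_SIZE and MAX_PAGE_ORDER values are assumed common x86-64 defaults (4 KiB pages, order-10 maximum) and are architecture- and config-dependent in a real kernel.

/*
 * Illustrative userspace sketch, not kernel code: demonstrates the size
 * cap the new check in kmalloc_reserve() enforces. The two macros below
 * use assumed typical values; in the kernel they are per-arch/per-config.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE      4096UL	/* assumed 4 KiB pages */
#define MAX_PAGE_ORDER 10	/* assumed order-10 buddy maximum */

/* Mirror of the added guard: sizes beyond the largest buddy block
 * (PAGE_SIZE << MAX_PAGE_ORDER) can never be satisfied, so refuse them
 * up front instead of letting the allocation fail later. */
static int head_size_allocatable(size_t obj_size)
{
	return obj_size <= (PAGE_SIZE << MAX_PAGE_ORDER);
}

int main(void)
{
	size_t cap = PAGE_SIZE << MAX_PAGE_ORDER;	/* 4 MiB with these values */

	printf("cap = %zu bytes\n", cap);
	printf("at cap:   %d\n", head_size_allocatable(cap));		/* prints 1 */
	printf("over cap: %d\n", head_size_allocatable(cap + 1));	/* prints 0 */
	return 0;
}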