diff --git a/include/linux/filter.h b/include/linux/filter.h
index b6672ff61407..22691015d175 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -842,15 +842,15 @@ static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_ma
 	if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
 		return;
 
-	lh = &bpf_net_ctx->dev_map_flush_list;
+	lh = this_cpu_ptr(&bpf_net_ctx->dev_map_flush_list);
 	if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
 		*lh_dev = lh;
 
-	lh = &bpf_net_ctx->cpu_map_flush_list;
+	lh = this_cpu_ptr(&bpf_net_ctx->cpu_map_flush_list);
 	if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
 		*lh_map = lh;
 
-	lh = &bpf_net_ctx->xskmap_map_flush_list;
+	lh = this_cpu_ptr(&bpf_net_ctx->xskmap_map_flush_list);
 	if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
 	    kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
 		*lh_xsk = lh;
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index fbdf5a1aabfe..8fccc311397c 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -676,7 +676,7 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
 	struct ptr_ring *q;
 	int i;
 
-	if (unlikely(!bq->count))
+	if (unlikely(!bq->count) || unlikely(bq->count > CPU_MAP_BULK_SIZE))
 		return;
 
 	q = rcpu->queue;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 9e0e3b0a18e4..4b9203deb711 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -378,7 +378,7 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 	int to_send = cnt;
 	int i;
 
-	if (unlikely(!cnt))
+	if (unlikely(!cnt) || unlikely(cnt > DEV_MAP_BULK_SIZE))
 		return;
 
 	for (i = 0; i < cnt; i++) {