diff --git a/mm/slab_common.c b/mm/slab_common.c
index d5a70a831a2a..73f4668d870d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1954,8 +1954,14 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 	if (!head)
 		might_sleep();
 
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kfree_rcu_sheaf(ptr))
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kfree_rcu_sheaf(ptr)) {
+		/*
+		 * The object is now queued for deferred freeing via an RCU
+		 * sheaf. Tell kmemleak to ignore it.
+		 */
+		kmemleak_ignore(ptr);
 		return;
+	}
 
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(ptr)) {
diff --git a/mm/slub.c b/mm/slub.c
index 20cb4f3b636d..2f2228d3e8b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2776,6 +2776,7 @@ static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp,
 	sheaf->cache = s;
 
 	stat(s, SHEAF_ALLOC);
+	kmemleak_ignore(sheaf);
 
 	return sheaf;
 }
@@ -7538,6 +7539,7 @@ static void early_kmem_cache_node_alloc(int node)
 	slab->freelist = get_freepointer(kmem_cache_node, n);
 	slab->inuse = 1;
 	kmem_cache_node->node[node] = n;
+	kmemleak_alloc(n, kmem_cache_node->object_size, 1, GFP_NOWAIT);
 	init_kmem_cache_node(n, NULL);
 	inc_slabs_node(kmem_cache_node, node, slab->objects);
 