author    Alexander Potapenko <glider@google.com>    2019-10-14 17:11:57 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-10-14 18:04:01 -0400
commit    0f181f9fbea8bc7ea2f7e13ae7f8c256b39e254c (patch)
tree      a6d1d1a674b791224d2430bae6d2fa69f10037b0 /mm
parent    3c52b0af059e11a063970aed1ad143b9284a79c7 (diff)
mm/slub.c: init_on_free=1 should wipe freelist ptr for bulk allocations
slab_alloc_node() already zeroed out the freelist pointer if init_on_free
was on.  Thibaut Sautereau noticed that the same needs to be done for
kmem_cache_alloc_bulk(), which performs the allocations separately.

kmem_cache_alloc_bulk() is currently used in two places in the kernel, so
this change is unlikely to have a major performance impact.

SLAB doesn't require a similar change, as auto-initialization makes the
allocator store the freelist pointers off-slab.

Link: http://lkml.kernel.org/r/20191007091605.30530-1-glider@google.com
Fixes: 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and init_on_free=1 boot options")
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Thibaut Sautereau <thibaut@sautereau.fr>
Reported-by: Kees Cook <keescook@chromium.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Laura Abbott <labbott@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
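For illustration only, a simplified userspace-style sketch of the interaction the commit message describes; this is not the real SLUB code, and struct fake_cache, fake_free() and fake_alloc() are hypothetical names. With init_on_free=1 the object is zeroed at free time, but linking it back into the freelist stores the next-free pointer inside the object (at s->offset), so that one word must be wiped again when the object is handed out:

#include <stddef.h>
#include <string.h>

/*
 * Simplified model, not the actual SLUB implementation: shows why an
 * object freed with init_on_free=1 still contains one non-zero word
 * (the freelist pointer) that has to be wiped at allocation time.
 */
struct fake_cache {
        size_t object_size;     /* usable size of each object */
        size_t offset;          /* where the freelist pointer lives in the object */
        void *freelist;         /* head of the freelist */
};

static void fake_free(struct fake_cache *s, void *obj)
{
        memset(obj, 0, s->object_size);                         /* init_on_free wipe */
        *(void **)((char *)obj + s->offset) = s->freelist;      /* re-dirties one word */
        s->freelist = obj;
}

static void *fake_alloc(struct fake_cache *s)
{
        void *obj = s->freelist;

        if (!obj)
                return NULL;
        s->freelist = *(void **)((char *)obj + s->offset);
        /* Without this wipe the caller would see the stale freelist pointer: */
        memset((char *)obj + s->offset, 0, sizeof(void *));
        return obj;
}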
Diffstat (limited to 'mm')
-rw-r--r--    mm/slub.c    22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 442f111d1e98..b25c807a111f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2672,6 +2672,17 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 }
 
 /*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+                                                   void *obj)
+{
+        if (unlikely(slab_want_init_on_free(s)) && obj)
+                memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+}
+
+/*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
  * overhead for requests that can be satisfied on the fastpath.
@@ -2759,12 +2770,8 @@ redo:
                 prefetch_freepointer(s, next_object);
                 stat(s, ALLOC_FASTPATH);
         }
-        /*
-         * If the object has been wiped upon free, make sure it's fully
-         * initialized by zeroing out freelist pointer.
-         */
-        if (unlikely(slab_want_init_on_free(s)) && object)
-                memset(object + s->offset, 0, sizeof(void *));
+
+        maybe_wipe_obj_freeptr(s, object);
 
         if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
                 memset(object, 0, s->object_size);
@@ -3178,10 +3185,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                 goto error;
 
                         c = this_cpu_ptr(s->cpu_slab);
+                        maybe_wipe_obj_freeptr(s, p[i]);
+
                         continue; /* goto for-loop */
                 }
                 c->freelist = get_freepointer(s, object);
                 p[i] = object;
+                maybe_wipe_obj_freeptr(s, p[i]);
         }
         c->tid = next_tid(c->tid);
         local_irq_enable();
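For reference, a minimal sketch of a caller of the bulk API this patch fixes. The function and cache names in the example are hypothetical; kmem_cache_alloc_bulk(), kmem_cache_free_bulk() and GFP_KERNEL are the kernel interfaces visible in the hunk above. With this patch, each object returned by the bulk path has its freelist word wiped under init_on_free=1, just like objects from kmem_cache_alloc():

#include <linux/slab.h>

#define NR_OBJS 16      /* arbitrary batch size for the example */

/* Hypothetical caller: allocate a batch of objects, use them, free them. */
static int bulk_alloc_example(struct kmem_cache *cache)
{
        void *objs[NR_OBJS];

        /* Returns the number of objects allocated, 0 on failure. */
        if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, NR_OBJS, objs))
                return -ENOMEM;

        /*
         * ... use the objects; with init_on_free=1 none of them should
         * still contain a stale freelist pointer after this patch ...
         */

        kmem_cache_free_bulk(cache, NR_OBJS, objs);
        return 0;
}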