author    Jesper Dangaard Brouer <brouer@redhat.com>    2015-11-20 18:57:52 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-11-22 14:58:44 -0500
commit    03ec0ed57ffc77720b811dbb6d44733b58360d9f (patch)
tree      a697617cf6f04b22ebc4cb677c163586f4f349a1 /mm
parent    d0ecd894e3d5f768a84403b34019c4a7daa05882 (diff)
slub: fix kmem cgroup bug in kmem_cache_alloc_bulk
The call slab_pre_alloc_hook() interacts with memcg and is not allowed to be
called several times inside the bulk alloc for-loop, due to the call to
memcg_kmem_get_cache().  Doing so hits the VM_BUG_ON in
__memcg_kmem_get_cache.

As suggested by Vladimir Davydov, change slab_post_alloc_hook() so that it
can handle an array of objects and be called once after the loop.

A subtle detail: the loop iterator "i" in slab_post_alloc_hook() must have
the same type (size_t) as the size argument.  This makes it easier for the
compiler to see that it can remove the loop when all debug statements inside
the loop evaluate to nothing.  Note, this is only an issue because the kernel
is compiled with the GCC option -fno-strict-overflow.

In slab_alloc_node() the compiler inlines and optimizes the invocation
slab_post_alloc_hook(s, flags, 1, &object) by removing the loop and accessing
the object directly.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reported-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
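For illustration only, a minimal userspace sketch of the pattern described above, not the kernel code itself; debug_object_alloc() is a placeholder for the kmemcheck/kmemleak/kasan calls, which compile to nothing in non-debug builds.  With a size_t iterator and an empty loop body, the single-object call site passes (1, &object) and the loop disappears after inlining.

/*
 * Sketch (assumed names): array-based post-alloc hook plus the
 * single-object fast path that collapses to direct access.
 */
#include <stddef.h>
#include <stdlib.h>

static inline void debug_object_alloc(void *object)
{
        /* stand-in for per-object debug hooks; no-op when debugging is off */
        (void)object;
}

static inline void post_alloc_hook(size_t size, void **p)
{
        size_t i;       /* same type as 'size', so the compiler can drop the loop */

        for (i = 0; i < size; i++)
                debug_object_alloc(p[i]);
}

void *alloc_one(size_t bytes)
{
        void *object = malloc(bytes);

        /* Inlined with a constant size of 1, the loop is optimized away. */
        post_alloc_hook(1, &object);
        return object;
}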
Diffstat (limited to 'mm')
-rw-r--r--   mm/slub.c   40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c17c5202864d..ce1797623391 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1292,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	return memcg_kmem_get_cache(s, flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+					size_t size, void **p)
 {
+	size_t i;
+
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+		kmemleak_alloc_recursive(object, s->object_size, 1,
+					 s->flags, flags);
+		kasan_slab_alloc(s, object);
+	}
 	memcg_kmem_put_cache(s);
-	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -2475,7 +2482,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
-	void **object;
+	void *object;
 	struct kmem_cache_cpu *c;
 	struct page *page;
 	unsigned long tid;
@@ -2554,7 +2561,7 @@ redo:
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->object_size);
 
-	slab_post_alloc_hook(s, gfpflags, object);
+	slab_post_alloc_hook(s, gfpflags, 1, &object);
 
 	return object;
 }
@@ -2904,6 +2911,10 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	struct kmem_cache_cpu *c;
 	int i;
 
+	/* memcg and kmem_cache debug support */
+	s = slab_pre_alloc_hook(s, flags);
+	if (unlikely(!s))
+		return false;
 	/*
 	 * Drain objects in the per cpu slab, while disabling local
 	 * IRQs, which protects against PREEMPT and interrupts
@@ -2928,17 +2939,8 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
-
-		/* kmem_cache debug support */
-		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s))
-			goto error;
-
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
-
-		/* kmem_cache debug support */
-		slab_post_alloc_hook(s, flags, object);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
@@ -2951,11 +2953,13 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			memset(p[j], 0, s->object_size);
 	}
 
+	/* memcg and kmem_cache debug support */
+	slab_post_alloc_hook(s, flags, size, p);
 	return true;
-
 error:
-	__kmem_cache_free_bulk(s, i, p);
 	local_irq_enable();
+	slab_post_alloc_hook(s, flags, i, p);
+	__kmem_cache_free_bulk(s, i, p);
 	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
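For context, a hedged usage sketch of the bulk API as it stands after this patch; my_cache and NR_OBJS are placeholders and not part of the patch.  At this point kmem_cache_alloc_bulk() returns bool and cleans up any partially allocated objects itself on failure, so a caller only checks the return value.

#include <linux/errno.h>
#include <linux/slab.h>

#define NR_OBJS 16	/* arbitrary batch size for illustration */

static int use_bulk_api(struct kmem_cache *my_cache)
{
	void *objs[NR_OBJS];

	/* One pre/post hook pair now covers the whole batch internally. */
	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, NR_OBJS, objs))
		return -ENOMEM;

	/* ... use objs[0..NR_OBJS-1] ... */

	kmem_cache_free_bulk(my_cache, NR_OBJS, objs);
	return 0;
}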