about summary refs log tree commit diff stats
path: root/mm/slub.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 548d78df81e1..479eb5c01917 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1077,7 +1077,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *last;
 	void *p;
 
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
@@ -1540,7 +1540,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static void __always_inline *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, void *addr, int length)
 {
 	struct page *page;
 	void **object;
@@ -1558,19 +1558,25 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 		page->lockless_freelist = object[page->offset];
 	}
 	local_irq_restore(flags);
+
+	if (unlikely((gfpflags & __GFP_ZERO) && object))
+		memset(object, 0, length);
+
 	return object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1,
+			__builtin_return_address(0), s->objsize);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node,
+			__builtin_return_address(0), s->objsize);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -2318,7 +2324,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, __builtin_return_address(0), size);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2330,7 +2336,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, __builtin_return_address(0), size);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2643,7 +2649,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
 	void *x;
 
-	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0), 0);
 	if (x)
 		memset(x, 0, s->objsize);
 	return x;
@@ -2693,7 +2699,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	return slab_alloc(s, gfpflags, -1, caller, size);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -2704,7 +2710,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	return slab_alloc(s, gfpflags, node, caller, size);
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)