author	Christoph Lameter <clameter@sgi.com>	2007-07-17 07:03:23 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-17 13:23:01 -0400
commit	d07dbea46405b37d59495eb4de9d1056dcfb7c6d (patch)
tree	221376c8c5509a88f8942246180685d5c01baf46 /mm
parent	6cb8f91320d3e720351c21741da795fed580b21b (diff)
Slab allocators: support __GFP_ZERO in all allocators
A kernel convention for many allocators is that if __GFP_ZERO is passed to an allocator then the allocated memory should be zeroed.

This is currently not supported by the slab allocators. The inconsistency makes it difficult to implement in derived allocators such as the uncached allocator and the pool allocators.

In addition, the support for zeroed allocations in the slab allocators does not have a consistent API. There are no zeroing allocator functions for NUMA node placement (kmalloc_node, kmem_cache_alloc_node); zeroing variants are only provided for the default allocators (kzalloc, kmem_cache_zalloc). __GFP_ZERO makes zeroing universally available and does not require any additional functions.

So add the necessary logic to all slab allocators to support __GFP_ZERO. The code is added to the hot path, but the gfp flags are already on the stack, so the cacheline is readily available for checking whether a zeroed object was requested.

Zeroing while allocating is now a frequent operation and we seem to be gradually approaching 1:1 parity between zeroing and non-zeroing allocations: the current tree has 3476 uses of kmalloc vs 2731 uses of kzalloc.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
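To illustrate the convention from the caller side (a minimal, hypothetical sketch only; foo_state and foo_alloc_on_node() are made-up names and not part of this patch), a zeroed node-local object can now be requested directly via the gfp flags rather than through a dedicated kzalloc_node()-style wrapper:

	/* Hypothetical caller, for illustration only. */
	struct foo_state {
		int refcount;
		void *buf;
	};

	static struct foo_state *foo_alloc_on_node(int node)
	{
		/*
		 * With __GFP_ZERO honoured by the slab allocators, the
		 * returned object is already cleared to zero.
		 */
		return kmalloc_node(sizeof(struct foo_state),
				    GFP_KERNEL | __GFP_ZERO, node);
	}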
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	8
-rw-r--r--	mm/slob.c	2
-rw-r--r--	mm/slub.c	24
3 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index d2cd304fd8af..1a88fded7f19 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2746,7 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
@@ -3392,6 +3392,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
+	if (unlikely((flags & __GFP_ZERO) && ptr))
+		memset(ptr, 0, obj_size(cachep));
+
 	return ptr;
 }
 
@@ -3443,6 +3446,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
 
+	if (unlikely((flags & __GFP_ZERO) && objp))
+		memset(objp, 0, obj_size(cachep));
+
 	return objp;
 }
 
diff --git a/mm/slob.c b/mm/slob.c
index 41d32c3c0be4..b3a45588fc46 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -334,6 +334,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		BUG_ON(!b);
 		spin_unlock_irqrestore(&slob_lock, flags);
 	}
+	if (unlikely((gfp & __GFP_ZERO) && b))
+		memset(b, 0, size);
 	return b;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 548d78df81e1..479eb5c01917 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1077,7 +1077,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *last;
 	void *p;
 
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
 
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
@@ -1540,7 +1540,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static void __always_inline *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, void *addr, int length)
 {
 	struct page *page;
 	void **object;
@@ -1558,19 +1558,25 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 		page->lockless_freelist = object[page->offset];
 	}
 	local_irq_restore(flags);
+
+	if (unlikely((gfpflags & __GFP_ZERO) && object))
+		memset(object, 0, length);
+
 	return object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1,
+				__builtin_return_address(0), s->objsize);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node,
+				__builtin_return_address(0), s->objsize);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -2318,7 +2324,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, __builtin_return_address(0), size);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2330,7 +2336,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, __builtin_return_address(0), size);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2643,7 +2649,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
 	void *x;
 
-	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0), 0);
 	if (x)
 		memset(x, 0, s->objsize);
 	return x;
@@ -2693,7 +2699,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	return slab_alloc(s, gfpflags, -1, caller, size);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -2704,7 +2710,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	return slab_alloc(s, gfpflags, node, caller, size);
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)