From 5b74ada7eea1b0064d2b72384827853f349d803a Mon Sep 17 00:00:00 2001
From: Ravikiran G Thirumalai
Date: Mon, 10 Apr 2006 22:52:53 -0700
Subject: [PATCH] slab: allocate node local memory for off-slab slabmanagement

Allocate off-slab slab descriptors from node local memory.

Signed-off-by: Alok N Kataria
Signed-off-by: Ravikiran Thirumalai
Signed-off-by: Shai Fultheim
Acked-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index f055c14202..afabad54c4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2318,13 +2318,15 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
-				   int colour_off, gfp_t local_flags)
+				   int colour_off, gfp_t local_flags,
+				   int nodeid)
 {
 	struct slab *slabp;
 
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
-		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
+		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2334,6 +2336,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	slabp->inuse = 0;
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
+	slabp->nodeid = nodeid;
 	return slabp;
 }
 
@@ -2519,7 +2522,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		goto failed;
 
 	/* Get slab management. */
-	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags);
+	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
 	if (!slabp)
 		goto opps1;
 
--
cgit v1.2.2
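For readers not familiar with the node-aware slab API, here is a minimal sketch
(not taken from the patch) of the allocation pattern the change above adopts:
kmem_cache_alloc() returns memory from whichever node the calling CPU happens to
be running on, while kmem_cache_alloc_node() lets the caller pin the allocation
to the node that owns the data it describes.  The cache, structure, and function
names below are illustrative only.

/* Illustrative kernel-style fragment; foo_desc_cache is assumed to exist. */
#include <linux/slab.h>

struct foo_desc {
	unsigned int inuse;
	void *mem;
};

static struct kmem_cache *foo_desc_cache;	/* hypothetical cache */

static struct foo_desc *foo_desc_alloc(gfp_t flags, int nodeid)
{
	/*
	 * Allocate the descriptor on the node that will own the data it
	 * manages, instead of on the node local to the running CPU.
	 */
	return kmem_cache_alloc_node(foo_desc_cache, flags, nodeid);
}

Keeping the off-slab descriptor on the same node as the slab it manages avoids
cross-node memory traffic every time that descriptor is touched.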
From fb7faf3313d527bf68ba2e7ff3a2b6ebf201af73 Mon Sep 17 00:00:00 2001
From: Ravikiran G Thirumalai
Date: Mon, 10 Apr 2006 22:52:54 -0700
Subject: [PATCH] slab: add statistics for alien cache overflows

Add a statistics counter which is incremented every time the alien cache
overflows.  The alien_cache limit is hardcoded to 12 right now.  We can use
these statistics to tune the alien cache if needed in the future.

Signed-off-by: Alok N Kataria
Signed-off-by: Ravikiran Thirumalai
Signed-off-by: Shai Fultheim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index afabad54c4..752c5570f2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -420,6 +420,7 @@ struct kmem_cache {
 	unsigned long max_freeable;
 	unsigned long node_allocs;
 	unsigned long node_frees;
+	unsigned long node_overflow;
 	atomic_t allochit;
 	atomic_t allocmiss;
 	atomic_t freehit;
@@ -465,6 +466,7 @@ struct kmem_cache {
 #define	STATS_INC_ERR(x)	((x)->errors++)
 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
+#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
 #define	STATS_SET_FREEABLE(x, i)					\
 	do {								\
 		if ((x)->max_freeable < i)				\
@@ -484,6 +486,7 @@ struct kmem_cache {
 #define	STATS_INC_ERR(x)	do { } while (0)
 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
 #define	STATS_INC_NODEFREES(x)	do { } while (0)
+#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
@@ -3083,9 +3086,11 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 		if (l3->alien && l3->alien[nodeid]) {
 			alien = l3->alien[nodeid];
 			spin_lock(&alien->lock);
-			if (unlikely(alien->avail == alien->limit))
+			if (unlikely(alien->avail == alien->limit)) {
+				STATS_INC_ACOVERFLOW(cachep);
 				__drain_alien_cache(cachep, alien, nodeid);
+			}
 			alien->entry[alien->avail++] = objp;
 			spin_unlock(&alien->lock);
 		} else {
@@ -3763,7 +3768,7 @@ static void print_slabinfo_header(struct seq_file *m)
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #if STATS
 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-		 "<error> <maxfreeable> <nodeallocs> <remotefrees>");
+		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
 	seq_putc(m, '\n');
@@ -3877,11 +3882,12 @@ static int s_show(struct seq_file *m, void *p)
 		unsigned long max_freeable = cachep->max_freeable;
 		unsigned long node_allocs = cachep->node_allocs;
 		unsigned long node_frees = cachep->node_frees;
+		unsigned long overflows = cachep->node_overflow;
 
 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-				%4lu %4lu %4lu %4lu", allocs, high, grown,
+				%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
 				reaped, errors, max_freeable, node_allocs,
-				node_frees);
+				node_frees, overflows);
 	}
 	/* cpu stats */
 	{
--
cgit v1.2.2
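The counter added above follows the slab allocator's compile-time statistics
idiom: when STATS is 0, the increment macro expands to an empty statement and
costs nothing in production builds.  Below is a small, self-contained userspace
sketch of that idiom; the structure and macro names mirror the patch, but the
program itself is illustrative, not kernel code.

/* Standalone C sketch of a conditionally compiled statistics counter. */
#include <stdio.h>

#define STATS 1

struct cache_stats {
	unsigned long node_overflow;	/* alien cache overflow count */
};

#if STATS
#define STATS_INC_ACOVERFLOW(s)	((s)->node_overflow++)
#else
#define STATS_INC_ACOVERFLOW(s)	do { } while (0)
#endif

int main(void)
{
	struct cache_stats stats = { 0 };
	int i;

	/* Pretend the per-node alien cache hit its limit three times. */
	for (i = 0; i < 3; i++)
		STATS_INC_ACOVERFLOW(&stats);

	printf("alien overflows: %lu\n", stats.node_overflow);
	return 0;
}

With statistics enabled, the new value appears as the extra <alienoverflow>
column of the globalstat section in /proc/slabinfo.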
From d6fef9da19b7acd46e04b7dbbba726b3febeca94 Mon Sep 17 00:00:00 2001
From: Luke Yang
Date: Mon, 10 Apr 2006 22:52:56 -0700
Subject: [PATCH] nommu: use compound page in slab allocator

The earlier patch to consolidate mmu and nommu page allocation and
refcounting by using compound pages for nommu allocations had a bug:
kmalloc slabs whose pages were initially allocated by a non-__GFP_COMP
allocator could be passed into mm/nommu.c kmalloc allocations which
really wanted __GFP_COMP underlying pages.  Fix that by having nommu
pass __GFP_COMP to all higher order slab allocations.

Signed-off-by: Luke Yang
Acked-by: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 752c5570f2..e6ef9bd523 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1456,7 +1456,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	int i;
 
 	flags |= cachep->gfpflags;
+#ifndef CONFIG_MMU
+	/* nommu uses slab's for process anonymous memory allocations, so
+	 * requires __GFP_COMP to properly refcount higher order allocations"
+	 */
+	page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
+#else
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+#endif
 	if (!page)
 		return NULL;
 	addr = page_address(page);
--
cgit v1.2.2
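A final note on the __GFP_COMP change: a compound page links the tail pages of
a higher-order allocation to its head page, so per-page reference counting
operates on the block as a whole.  On nommu, slab-backed kmalloc memory can end
up handed out as process anonymous memory and refcounted page by page, which is
why the backing pages must be compound.  The fragment below is a minimal
kernel-style sketch of the flag's use; the function name and the way the order
is passed in are illustrative, not part of the patch.

/* Illustrative kernel-style fragment, not from the patch. */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *grab_compound_block(int nodeid, gfp_t flags, unsigned int order)
{
	struct page *page;

	/* __GFP_COMP marks the whole 2^order block as one compound page. */
	page = alloc_pages_node(nodeid, flags | __GFP_COMP, order);
	if (!page)
		return NULL;
	return page_address(page);
}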