author		Christoph Lameter <clameter@engr.sgi.com>	2006-03-22 03:08:15 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:53:59 -0500
commit		ac2b898ca6fb06196a26869c23b66afe7944e52e (patch)
tree		e82e7bebd89b02813ce23f76fec4aeb5626da655	/mm/slab.c
parent		911851e6ee6ac4e26f07be342a89632f78494fef (diff)
[PATCH] slab: Remove SLAB_NO_REAP option
SLAB_NO_REAP is documented as an option that will cause this slab not to be reaped under memory pressure. However, that is not what happens. The only thing SLAB_NO_REAP controls at the moment is the reclaim of the unused slab elements that were allocated in batch in cache_reap(). cache_reap() is run every few seconds independently of memory pressure.

Could we remove the whole thing? It's only used by three slabs anyway and I cannot find a reason for having this option.

There is an additional problem with SLAB_NO_REAP. If it is set, the recovery of objects from alien caches is switched off. Objects not freed on the same node where they were initially allocated will only be reused if a certain amount of objects accumulates from one alien node (not very likely) or if the cache is explicitly shrunk. (Strangely, __cache_shrink() does not check for SLAB_NO_REAP.)

Getting rid of SLAB_NO_REAP fixes the problems with alien cache freeing.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
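For the few caches that passed the flag, the caller-side effect is simply dropping SLAB_NO_REAP from the flags argument of kmem_cache_create(). A minimal sketch of that change, assuming a hypothetical cache ("foo_cache" and struct foo are illustrative only, not part of this patch) and the six-argument kmem_cache_create() with ctor/dtor pointers used in kernels of this era:

    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Hypothetical object type and cache, for illustration only. */
    struct foo {
    	int bar;
    };

    static struct kmem_cache *foo_cachep;

    static int __init foo_cache_init(void)
    {
    	/* Before this patch a caller could OR SLAB_NO_REAP into the flags
    	 * mask here; after it, the flag no longer exists and is simply
    	 * dropped, leaving the remaining flags unchanged. */
    	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
    				       SLAB_HWCACHE_ALIGN,
    				       NULL, NULL);
    	return foo_cachep ? 0 : -ENOMEM;
    }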
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	13
1 files changed, 2 insertions, 11 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 5c2574989834..24235506b2a0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -170,12 +170,12 @@
 #if DEBUG
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
+			 SLAB_CACHE_DMA | \
 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU)
 #else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU)
@@ -662,7 +662,6 @@ static struct kmem_cache cache_cache = {
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
 	.buffer_size = sizeof(struct kmem_cache),
-	.flags = SLAB_NO_REAP,
 	.name = "kmem_cache",
 #if DEBUG
 	.obj_size = sizeof(struct kmem_cache),
@@ -1848,9 +1847,6 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
  * for buffer overruns.
  *
- * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
- * memory pressure.
- *
  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
  * cacheline.  This can be beneficial if you're counting cycles as closely
  * as davem.
@@ -3584,10 +3580,6 @@ static void cache_reap(void *unused)
 		struct slab *slabp;
 
 		searchp = list_entry(walk, struct kmem_cache, next);
-
-		if (searchp->flags & SLAB_NO_REAP)
-			goto next;
-
 		check_irq_on();
 
 		l3 = searchp->nodelists[numa_node_id()];
@@ -3635,7 +3627,6 @@ static void cache_reap(void *unused)
 		} while (--tofree > 0);
 next_unlock:
 		spin_unlock_irq(&l3->list_lock);
-next:
 		cond_resched();
 	}
 	check_irq_on();