author	Christoph Lameter <clameter@engr.sgi.com>	2006-03-22 03:08:15 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:53:59 -0500
commit	ac2b898ca6fb06196a26869c23b66afe7944e52e (patch)
tree	e82e7bebd89b02813ce23f76fec4aeb5626da655
parent	911851e6ee6ac4e26f07be342a89632f78494fef (diff)
[PATCH] slab: Remove SLAB_NO_REAP option
SLAB_NO_REAP is documented as an option that will cause this slab not to be reaped under memory pressure. However, that is not what happens. The only thing that SLAB_NO_REAP controls at the moment is the reclaim of the unused slab elements that were allocated in batch in cache_reap(). cache_reap() is run every few seconds, independently of memory pressure.

Could we remove the whole thing? It's only used by three slabs anyway, and I cannot find a reason for having this option.

There is an additional problem with SLAB_NO_REAP. If set, the recovery of objects from alien caches is switched off. Objects not freed on the same node where they were initially allocated will only be reused if a certain amount of objects accumulates from one alien node (not very likely) or if the cache is explicitly shrunk. (Strangely, __cache_shrink() does not check for SLAB_NO_REAP.)

Getting rid of SLAB_NO_REAP fixes the problems with alien cache freeing.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
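For callers, the migration is purely mechanical: drop SLAB_NO_REAP from the flags argument of kmem_cache_create() and keep everything else. A minimal sketch against the six-argument 2.6-era kmem_cache_create() signature follows; example_obj, example_cache, and example_init() are hypothetical names for illustration, not part of this patch:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	/* Hypothetical object type, stand-in for a real driver structure. */
	struct example_obj {
		int payload;
	};

	static kmem_cache_t *example_cache;

	static int __init example_init(void)
	{
		/*
		 * Before this patch, a caller might have passed
		 * SLAB_HWCACHE_ALIGN | SLAB_NO_REAP here.  With
		 * SLAB_NO_REAP removed, only the remaining flags are
		 * passed; the cache is now visited by the periodic
		 * cache_reap() pass like every other cache, which also
		 * drains objects parked in alien (off-node) caches.
		 */
		example_cache = kmem_cache_create("example_cache",
						  sizeof(struct example_obj),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL, NULL);
		if (!example_cache)
			return -ENOMEM;
		return 0;
	}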
 drivers/scsi/iscsi_tcp.c |  2 +-
 fs/ocfs2/super.c         |  2 +-
 include/linux/slab.h     |  1 -
 mm/slab.c                | 13 ++-----------
 4 files changed, 4 insertions(+), 14 deletions(-)
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index ff79e68b347c..7b82ff090d42 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -3639,7 +3639,7 @@ iscsi_tcp_init(void)
 
 	taskcache = kmem_cache_create("iscsi_taskcache",
 				      sizeof(struct iscsi_data_task), 0,
-				      SLAB_HWCACHE_ALIGN | SLAB_NO_REAP, NULL, NULL);
+				      SLAB_HWCACHE_ALIGN, NULL, NULL);
 	if (!taskcache)
 		return -ENOMEM;
 
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8dd3aafec499..09e1c57a86a0 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -959,7 +959,7 @@ static int ocfs2_initialize_mem_caches(void)
 	ocfs2_lock_cache = kmem_cache_create("ocfs2_lock",
 					     sizeof(struct ocfs2_journal_lock),
 					     0,
-					     SLAB_NO_REAP|SLAB_HWCACHE_ALIGN,
+					     SLAB_HWCACHE_ALIGN,
 					     NULL, NULL);
 	if (!ocfs2_lock_cache)
 		return -ENOMEM;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 38bed95dda7a..2b28c849d75a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -38,7 +38,6 @@ typedef struct kmem_cache kmem_cache_t;
 #define SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
 #define SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* Poison objects */
-#define SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
diff --git a/mm/slab.c b/mm/slab.c
index 5c2574989834..24235506b2a0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -170,12 +170,12 @@
 #if DEBUG
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
+			 SLAB_CACHE_DMA | \
 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU)
 #else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU)
@@ -662,7 +662,6 @@ static struct kmem_cache cache_cache = {
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
 	.buffer_size = sizeof(struct kmem_cache),
-	.flags = SLAB_NO_REAP,
 	.name = "kmem_cache",
 #if DEBUG
 	.obj_size = sizeof(struct kmem_cache),
@@ -1848,9 +1847,6 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
  * for buffer overruns.
  *
- * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
- * memory pressure.
- *
  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
@@ -3584,10 +3580,6 @@ static void cache_reap(void *unused)
 		struct slab *slabp;
 
 		searchp = list_entry(walk, struct kmem_cache, next);
-
-		if (searchp->flags & SLAB_NO_REAP)
-			goto next;
-
 		check_irq_on();
 
 		l3 = searchp->nodelists[numa_node_id()];
@@ -3635,7 +3627,6 @@ static void cache_reap(void *unused)
 		} while (--tofree > 0);
 next_unlock:
 		spin_unlock_irq(&l3->list_lock);
-next:
 		cond_resched();
 	}
 	check_irq_on();