Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c | 13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)
@@ -170,12 +170,12 @@
 #if DEBUG
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
+			 SLAB_CACHE_DMA | \
 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU)
 #else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU)
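
CREATE_MASK is the whitelist of flags that kmem_cache_create() will accept; removing SLAB_NO_REAP from both the DEBUG and non-DEBUG variants makes the flag invalid for any newly created cache. Slab code of this era rejects stray bits with BUG(); the following is a minimal standalone sketch of that whitelist idiom, with mock flag values and an assert standing in for BUG(), not the kernel's code:

/* Standalone sketch of the CREATE_MASK whitelist idiom; the flag
 * values and check_flags() helper are mocks for illustration. */
#include <assert.h>

#define MOCK_HWCACHE_ALIGN	0x1UL
#define MOCK_CACHE_DMA		0x2UL
#define MOCK_CREATE_MASK	(MOCK_HWCACHE_ALIGN | MOCK_CACHE_DMA)

static void check_flags(unsigned long flags)
{
	/* Any bit outside the whitelist is a caller error; slab.c
	 * reacts with BUG(), this sketch asserts instead. */
	assert(!(flags & ~MOCK_CREATE_MASK));
}
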
@@ -662,7 +662,6 @@ static struct kmem_cache cache_cache = {
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
 	.buffer_size = sizeof(struct kmem_cache),
-	.flags = SLAB_NO_REAP,
 	.name = "kmem_cache",
 #if DEBUG
 	.obj_size = sizeof(struct kmem_cache),
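
With the flag gone, the static initializer for cache_cache simply drops its .flags line: C designated initializers zero-fill omitted members, so no replacement assignment is needed. A runnable illustration with a mock struct (names hypothetical):

/* Designated initializers zero-fill omitted members, which is why
 * deleting ".flags = SLAB_NO_REAP" needs no replacement line.
 * Standalone mock, not the kernel's struct: */
#include <stdio.h>

struct mock_cache {
	unsigned int limit;
	unsigned long flags;	/* omitted below, so it becomes 0 */
	const char *name;
};

static struct mock_cache cache_cache = {
	.limit = 1,
	.name  = "kmem_cache",
};

int main(void)
{
	printf("flags = %lu\n", cache_cache.flags);	/* prints 0 */
	return 0;
}
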
@@ -1848,9 +1847,6 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
  * for buffer overruns.
  *
- * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
- * memory pressure.
- *
  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
  * cacheline.  This can be beneficial if you're counting cycles as closely
  * as davem.
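
The deleted paragraph was the caller-facing documentation of SLAB_NO_REAP in the kmem_cache_create() comment block. For context, a hedged sketch of how a caller would have requested the now-removed behaviour before this patch, assuming the six-argument kmem_cache_create() of this kernel generation; "example_cache" and struct example_obj are hypothetical, not from the tree:

/* Hypothetical pre-patch caller of the 2.6.16-era API. */
struct kmem_cache *cp;

cp = kmem_cache_create("example_cache", sizeof(struct example_obj),
		       0 /* align */, SLAB_NO_REAP,
		       NULL /* ctor */, NULL /* dtor */);

After this patch such a call would trip the CREATE_MASK check above, since SLAB_NO_REAP is no longer a recognized bit.
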
@@ -3584,10 +3580,6 @@ static void cache_reap(void *unused)
 		struct slab *slabp;
 
 		searchp = list_entry(walk, struct kmem_cache, next);
-
-		if (searchp->flags & SLAB_NO_REAP)
-			goto next;
-
 		check_irq_on();
 
 		l3 = searchp->nodelists[numa_node_id()];
@@ -3635,7 +3627,6 @@ static void cache_reap(void *unused)
 		} while (--tofree > 0);
 next_unlock:
 		spin_unlock_irq(&l3->list_lock);
-next:
 		cond_resched();
 	}
 	check_irq_on();
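
The two cache_reap() hunks belong together: once no cache can carry SLAB_NO_REAP, the early-out at the top of the walk is dead code, and the next: label it jumped to goes with it (next_unlock: stays, since the lock-holding path still uses it). A standalone sketch of the removed goto-skip idiom, using a mock list, flag, and reap helper rather than the kernel's types:

/* Standalone sketch of the goto-skip pattern deleted above; the
 * item type, flag, and reap_one() are mocks for illustration. */
#include <stddef.h>

#define MOCK_NO_REAP 0x1UL

struct mock_item {
	unsigned long flags;
	struct mock_item *next;
};

static void reap_one(struct mock_item *p)
{
	(void)p;	/* reap work would go here */
}

static void walk(struct mock_item *head)
{
	struct mock_item *p;

	for (p = head; p; p = p->next) {
		if (p->flags & MOCK_NO_REAP)
			goto next;	/* pre-patch: skip exempted items */
		reap_one(p);
next:
		;	/* post-patch, the test and this label disappear */
	}
}
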