author:    Christoph Lameter <clameter@sgi.com>      2006-06-30 04:55:45 -0400
committer: Linus Torvalds <torvalds@g5.osdl.org>     2006-06-30 14:25:36 -0400
commit:    ed11d9eb2228acc483c819ab353e3c41bcb158fa
tree:      fa6efec62228835be260ac55d9dd0480a9064c8a   /mm/slab.c
parent:    f8891e5e1f93a128c3900f82035e8541357896a7
[PATCH] slab: consolidate code to free slabs from freelist
Post and discussion:
http://marc.theaimsgroup.com/?t=115074342800003&r=1&w=2
Code in __node_shrink() duplicates code in cache_reap().
Add a new function, drain_freelist(), that removes slabs whose objects are
already free, and use it in the various places that need this.
This eliminates the __node_shrink() function and brings the same interrupt
holdoff reduction that slab_free already has to the code that used to call
__node_shrink().
[akpm@osdl.org: build fixes]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--   mm/slab.c   119
1 file changed, 51 insertions(+), 68 deletions(-)
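
In short, the patch replaces two hand-rolled loops over l3->slabs_free with a
single helper that pops at most tofree slabs off the free list and reports how
many it actually released: __cache_shrink() and the CPU-hotplug teardown path
drain everything by passing l3->free_objects, while cache_reap() drains roughly
a fifth of free_limit (rounded up) per pass. The stand-alone C sketch below
mirrors that pattern on a toy singly-linked free list; the names (toy_slab,
toy_list, toy_drain_freelist) are invented for the illustration and are not
part of the kernel code.

/*
 * Toy, user-space illustration of the drain_freelist() pattern this patch
 * introduces: remove at most 'tofree' entries from a free list and return
 * the number actually released.  All names here are invented for the demo.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_slab {
	struct toy_slab *next;
};

struct toy_list {
	struct toy_slab *slabs_free;	/* head of the free-slab list */
	int free_objects;
	int objs_per_slab;
};

static int toy_drain_freelist(struct toy_list *l3, int tofree)
{
	int nr_freed = 0;

	while (nr_freed < tofree && l3->slabs_free) {
		struct toy_slab *slabp = l3->slabs_free;

		l3->slabs_free = slabp->next;		/* list_del() analogue */
		l3->free_objects -= l3->objs_per_slab;
		free(slabp);				/* slab_destroy() analogue */
		nr_freed++;
	}
	return nr_freed;
}

int main(void)
{
	struct toy_list l3 = { NULL, 0, 4 };
	int free_limit = 32;
	int i;

	/* Build a free list of 10 "slabs" of 4 objects each. */
	for (i = 0; i < 10; i++) {
		struct toy_slab *s = malloc(sizeof(*s));

		s->next = l3.slabs_free;
		l3.slabs_free = s;
		l3.free_objects += l3.objs_per_slab;
	}

	/* cache_reap()-style call: drain ~1/5 of free_limit, rounded up. */
	printf("reaped %d slabs\n",
	       toy_drain_freelist(&l3, (free_limit + 5 * l3.objs_per_slab - 1) /
				       (5 * l3.objs_per_slab)));

	/* __cache_shrink()-style call: drain every slab that is fully free. */
	printf("shrunk %d slabs\n", toy_drain_freelist(&l3, l3.free_objects));
	return 0;
}

Note that, unlike the old __node_shrink(), the helper returns a count rather
than a "still in use" flag, which is why __cache_shrink() now checks
slabs_full/slabs_partial itself and cache_reap() feeds the returned count into
STATS_ADD_REAPED().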
@@ -309,6 +309,13 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define SIZE_AC 1
 #define SIZE_L3 (1 + MAX_NUMNODES)
 
+static int drain_freelist(struct kmem_cache *cache,
+			struct kmem_list3 *l3, int tofree);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len,
+			int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+
 /*
  * This function must be completely optimized away if a constant is passed to
  * it. Mostly the same as what is in linux/slab.h except it returns an index.
@@ -456,7 +463,7 @@ struct kmem_cache {
 #define STATS_DEC_ACTIVE(x)	((x)->num_active--)
 #define STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 #define STATS_INC_GROWN(x)	((x)->grown++)
-#define STATS_INC_REAPED(x)	((x)->reaped++)
+#define STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
 #define STATS_SET_HIGH(x)	\
 	do {	\
 		if ((x)->num_active > (x)->high_mark)	\
@@ -480,7 +487,7 @@ struct kmem_cache {
 #define STATS_DEC_ACTIVE(x)	do { } while (0)
 #define STATS_INC_ALLOCED(x)	do { } while (0)
 #define STATS_INC_GROWN(x)	do { } while (0)
-#define STATS_INC_REAPED(x)	do { } while (0)
+#define STATS_ADD_REAPED(x,y)	do { } while (0)
 #define STATS_SET_HIGH(x)	do { } while (0)
 #define STATS_INC_ERR(x)	do { } while (0)
 #define STATS_INC_NODEALLOCS(x)	do { } while (0)
@@ -700,12 +707,6 @@ int slab_is_available(void)
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(struct kmem_cache *cachep, void **objpp, int len,
-			int node);
-static void enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
-static int __node_shrink(struct kmem_cache *cachep, int node);
-
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
 	return cachep->array[smp_processor_id()];
@@ -1241,10 +1242,7 @@ free_array_cache:
 		l3 = cachep->nodelists[node];
 		if (!l3)
 			continue;
-		spin_lock_irq(&l3->list_lock);
-		/* free slabs belonging to this node */
-		__node_shrink(cachep, node);
-		spin_unlock_irq(&l3->list_lock);
+		drain_freelist(cachep, l3, l3->free_objects);
 	}
 	mutex_unlock(&cache_chain_mutex);
 	break;
@@ -2248,32 +2246,45 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	}
 }
 
-static int __node_shrink(struct kmem_cache *cachep, int node)
+/*
+ * Remove slabs from the list of free slabs.
+ * Specify the number of slabs to drain in tofree.
+ *
+ * Returns the actual number of slabs released.
+ */
+static int drain_freelist(struct kmem_cache *cache,
+			struct kmem_list3 *l3, int tofree)
 {
+	struct list_head *p;
+	int nr_freed;
 	struct slab *slabp;
-	struct kmem_list3 *l3 = cachep->nodelists[node];
-	int ret;
 
-	for (;;) {
-		struct list_head *p;
+	nr_freed = 0;
+	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
 
+		spin_lock_irq(&l3->list_lock);
 		p = l3->slabs_free.prev;
-		if (p == &l3->slabs_free)
-			break;
+		if (p == &l3->slabs_free) {
+			spin_unlock_irq(&l3->list_lock);
+			goto out;
+		}
 
-		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
+		slabp = list_entry(p, struct slab, list);
 #if DEBUG
 		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
-
-		l3->free_objects -= cachep->num;
+		/*
+		 * Safe to drop the lock. The slab is no longer linked
+		 * to the cache.
+		 */
+		l3->free_objects -= cache->num;
 		spin_unlock_irq(&l3->list_lock);
-		slab_destroy(cachep, slabp);
-		spin_lock_irq(&l3->list_lock);
-	}
-	ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
-	return ret;
+		slab_destroy(cache, slabp);
+		nr_freed++;
+	}
+out:
+	return nr_freed;
 }
 
 static int __cache_shrink(struct kmem_cache *cachep)
@@ -2286,11 +2297,13 @@ static int __cache_shrink(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(i) {
 		l3 = cachep->nodelists[i];
-		if (l3) {
-			spin_lock_irq(&l3->list_lock);
-			ret += __node_shrink(cachep, i);
-			spin_unlock_irq(&l3->list_lock);
-		}
+		if (!l3)
+			continue;
+
+		drain_freelist(cachep, l3, l3->free_objects);
+
+		ret += !list_empty(&l3->slabs_full) ||
+			!list_empty(&l3->slabs_partial);
 	}
 	return (ret ? 1 : 0);
 }
@@ -3694,10 +3707,6 @@ static void cache_reap(void *unused)
 	}
 
 	list_for_each_entry(searchp, &cache_chain, next) {
-		struct list_head *p;
-		int tofree;
-		struct slab *slabp;
-
 		check_irq_on();
 
 		/*
@@ -3722,41 +3731,15 @@
 
 		drain_array(searchp, l3, l3->shared, 0, node);
 
-		if (l3->free_touched) {
+		if (l3->free_touched)
 			l3->free_touched = 0;
-			goto next;
-		}
-
-		tofree = (l3->free_limit + 5 * searchp->num - 1) /
-			(5 * searchp->num);
-		do {
-			/*
-			 * Do not lock if there are no free blocks.
-			 */
-			if (list_empty(&l3->slabs_free))
-				break;
-
-			spin_lock_irq(&l3->list_lock);
-			p = l3->slabs_free.next;
-			if (p == &(l3->slabs_free)) {
-				spin_unlock_irq(&l3->list_lock);
-				break;
-			}
+		else {
+			int freed;
 
-			slabp = list_entry(p, struct slab, list);
-			BUG_ON(slabp->inuse);
-			list_del(&slabp->list);
-			STATS_INC_REAPED(searchp);
-
-			/*
-			 * Safe to drop the lock. The slab is no longer linked
-			 * to the cache. searchp cannot disappear, we hold
-			 * cache_chain_lock
-			 */
-			l3->free_objects -= searchp->num;
-			spin_unlock_irq(&l3->list_lock);
-			slab_destroy(searchp, slabp);
-		} while (--tofree > 0);
+			freed = drain_freelist(searchp, l3, (l3->free_limit +
+				5 * searchp->num - 1) / (5 * searchp->num));
+			STATS_ADD_REAPED(searchp, freed);
+		}
 next:
 		cond_resched();
 	}