aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@engr.sgi.com>2006-03-22 03:09:07 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-03-22 10:54:05 -0500
commit1b55253a7f95adc82eb20937b57b3e3e32ba65df (patch)
tree5ab3f9e6efa5e2ed2a39843b95dcf95ddb89592a
parentaab2207cf8d9c343b6b5f0e4d27e1732f8618d14 (diff)
[PATCH] slab: remove drain_array_locked
Remove drain_array_locked and use that opportunity to limit the time the l3 lock is taken further. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- mm/slab.c | 31
1 files changed, 10 insertions, 21 deletions
diff --git a/mm/slab.c b/mm/slab.c
index d73b38e7d7e8..3274144c0d16 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2123,9 +2123,6 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2123#define check_spinlock_acquired_node(x, y) do { } while(0) 2123#define check_spinlock_acquired_node(x, y) do { } while(0)
2124#endif 2124#endif
2125 2125
2126static void drain_array_locked(struct kmem_cache *cachep,
2127 struct array_cache *ac, int force, int node);
2128
2129static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2126static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2130 struct array_cache *ac, 2127 struct array_cache *ac,
2131 int force, int node); 2128 int force, int node);
@@ -3522,40 +3519,32 @@ static void enable_cpucache(struct kmem_cache *cachep)
3522 cachep->name, -err); 3519 cachep->name, -err);
3523} 3520}
3524 3521
3525static void drain_array_locked(struct kmem_cache *cachep, 3522/*
3526 struct array_cache *ac, int force, int node) 3523 * Drain an array if it contains any elements taking the l3 lock only if
3524 * necessary.
3525 */
3526void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3527 struct array_cache *ac, int force, int node)
3527{ 3528{
3528 int tofree; 3529 int tofree;
3529 3530
3530 check_spinlock_acquired_node(cachep, node); 3531 if (!ac || !ac->avail)
3532 return;
3531 if (ac->touched && !force) { 3533 if (ac->touched && !force) {
3532 ac->touched = 0; 3534 ac->touched = 0;
3533 } else if (ac->avail) { 3535 } else if (ac->avail) {
3534 tofree = force ? ac->avail : (ac->limit + 4) / 5; 3536 tofree = force ? ac->avail : (ac->limit + 4) / 5;
3535 if (tofree > ac->avail) 3537 if (tofree > ac->avail)
3536 tofree = (ac->avail + 1) / 2; 3538 tofree = (ac->avail + 1) / 2;
3539 spin_lock_irq(&l3->list_lock);
3537 free_block(cachep, ac->entry, tofree, node); 3540 free_block(cachep, ac->entry, tofree, node);
3541 spin_unlock_irq(&l3->list_lock);
3538 ac->avail -= tofree; 3542 ac->avail -= tofree;
3539 memmove(ac->entry, &(ac->entry[tofree]), 3543 memmove(ac->entry, &(ac->entry[tofree]),
3540 sizeof(void *) * ac->avail); 3544 sizeof(void *) * ac->avail);
3541 } 3545 }
3542} 3546}
3543 3547
3544
3545/*
3546 * Drain an array if it contains any elements taking the l3 lock only if
3547 * necessary.
3548 */
3549static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3,
3550 struct array_cache *ac, int force, int node)
3551{
3552 if (ac && ac->avail) {
3553 spin_lock_irq(&l3->list_lock);
3554 drain_array_locked(searchp, ac, force, node);
3555 spin_unlock_irq(&l3->list_lock);
3556 }
3557}
3558
3559/** 3548/**
3560 * cache_reap - Reclaim memory from caches. 3549 * cache_reap - Reclaim memory from caches.
3561 * @unused: unused parameter 3550 * @unused: unused parameter