author     Christoph Lameter <clameter@engr.sgi.com>  2006-03-22 03:09:05 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-03-22 10:54:05 -0500
commit     35386e3b0f876bf194982f48f027af0c216499ce
tree       9fb79545f6797422f5cf666b0e65a4bb1882ee1d  /mm/slab.c
parent     248a0301e703cbf781aa02a91bcfc6da75870dd7
[PATCH] slab: cache_reap(): further reduction in interrupt holdoff
cache_reap() currently takes the l3->list_lock (disabling interrupts)
unconditionally, then performs a few checks and possibly some cleanup.  This
patch makes cache_reap() take the lock only if there is work to do, and the
lock is then taken and released separately for each cleaning action.
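
In outline, the change to the per-array drain (condensed from the diff
below; this is kernel-internal code, not a standalone example) replaces an
unconditional lock with an unlocked peek at ac->avail:

	/* Before: take the node lock no matter what. */
	spin_lock_irq(&l3->list_lock);
	drain_array_locked(searchp, cpu_cache_get(searchp), 0, numa_node_id());
	spin_unlock_irq(&l3->list_lock);

	/* After: peek without the lock; lock only when there is work to drain. */
	if (ac && ac->avail) {
		spin_lock_irq(&l3->list_lock);
		drain_array_locked(searchp, ac, 0, numa_node_id());
		spin_unlock_irq(&l3->list_lock);
	}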
The check for when to do the next reaping is now done without any locking
and is therefore racy.  This should not matter, since reaping can already be
skipped entirely when the slab mutex cannot be acquired.
The same is true for the free_touched processing.  If we get it wrong once
in a while, we will mistakenly clean, or fail to clean, the shared cache.
This will impact performance only slightly.
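
For reference, the two unlocked checks (again condensed from the diff; the
worst case is one skipped pass or one extra scan):

	/* Racy read of next_reap: a stale value only delays or repeats a reap. */
	if (time_after(l3->next_reap, jiffies))
		goto next;
	l3->next_reap = jiffies + REAPTIMEOUT_LIST3;

	/* Racy read/clear of free_touched: getting it wrong once merely cleans,
	 * or fails to clean, the shared cache on this pass. */
	if (l3->free_touched) {
		l3->free_touched = 0;
		goto next;
	}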
Note that the drain_array() function introduced here will be folded away in
a subsequent patch, since array cleaning is now very similar for all
callers.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  57
1 file changed, 43 insertions, 14 deletions
@@ -292,13 +292,13 @@ struct kmem_list3 {
 	struct list_head slabs_full;
 	struct list_head slabs_free;
 	unsigned long free_objects;
-	unsigned long next_reap;
-	int free_touched;
 	unsigned int free_limit;
 	unsigned int colour_next;	/* Per-node cache coloring */
 	spinlock_t list_lock;
 	struct array_cache *shared;	/* shared per node */
 	struct array_cache **alien;	/* on other nodes */
+	unsigned long next_reap;	/* updated without locking */
+	int free_touched;		/* updated without locking */
 };
 
 /*
@@ -3539,6 +3539,22 @@ static void drain_array_locked(struct kmem_cache *cachep,
 	}
 }
 
+
+/*
+ * Drain an array if it contains any elements taking the l3 lock only if
+ * necessary.
+ */
+static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3,
+			 struct array_cache *ac)
+{
+	if (ac && ac->avail) {
+		spin_lock_irq(&l3->list_lock);
+		drain_array_locked(searchp, ac, 0,
+			numa_node_id());
+		spin_unlock_irq(&l3->list_lock);
+	}
+}
+
 /**
  * cache_reap - Reclaim memory from caches.
  * @unused: unused parameter
@@ -3572,33 +3588,48 @@ static void cache_reap(void *unused)
 		searchp = list_entry(walk, struct kmem_cache, next);
 		check_irq_on();
 
+		/*
+		 * We only take the l3 lock if absolutely necessary and we
+		 * have established with reasonable certainty that
+		 * we can do some work if the lock was obtained.
+		 */
 		l3 = searchp->nodelists[numa_node_id()];
+
 		reap_alien(searchp, l3);
-		spin_lock_irq(&l3->list_lock);
 
-		drain_array_locked(searchp, cpu_cache_get(searchp), 0,
-				numa_node_id());
+		drain_array(searchp, l3, cpu_cache_get(searchp));
 
+		/*
+		 * These are racy checks but it does not matter
+		 * if we skip one check or scan twice.
+		 */
 		if (time_after(l3->next_reap, jiffies))
-			goto next_unlock;
+			goto next;
 
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
 
-		if (l3->shared)
-			drain_array_locked(searchp, l3->shared, 0,
-				numa_node_id());
+		drain_array(searchp, l3, l3->shared);
 
 		if (l3->free_touched) {
 			l3->free_touched = 0;
-			goto next_unlock;
+			goto next;
 		}
 
 		tofree = (l3->free_limit + 5 * searchp->num - 1) /
 			(5 * searchp->num);
 		do {
+			/*
+			 * Do not lock if there are no free blocks.
+			 */
+			if (list_empty(&l3->slabs_free))
+				break;
+
+			spin_lock_irq(&l3->list_lock);
 			p = l3->slabs_free.next;
-			if (p == &(l3->slabs_free))
+			if (p == &(l3->slabs_free)) {
+				spin_unlock_irq(&l3->list_lock);
 				break;
+			}
 
 			slabp = list_entry(p, struct slab, list);
 			BUG_ON(slabp->inuse);
@@ -3613,10 +3644,8 @@ static void cache_reap(void *unused)
 			l3->free_objects -= searchp->num;
 			spin_unlock_irq(&l3->list_lock);
 			slab_destroy(searchp, slabp);
-			spin_lock_irq(&l3->list_lock);
 		} while (--tofree > 0);
-next_unlock:
-		spin_unlock_irq(&l3->list_lock);
+next:
 		cond_resched();
 	}
 	check_irq_on();