diff options
author | Christoph Lameter <clameter@engr.sgi.com> | 2006-03-22 03:09:07 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-22 10:54:06 -0500 |
commit | b18e7e654d7af741d2bf34a90dc34128d0217fea (patch) | |
tree | 04cbc067da970b8b24c8b5d48a1e7d9520f92b06 | |
parent | 1b55253a7f95adc82eb20937b57b3e3e32ba65df (diff) |
[PATCH] slab: fix drain_array() so that it works correctly with the shared_array
The list_lock also protects the shared array and we call drain_array() with
the shared array. Therefore we cannot go as far as I wanted to but have to
take the lock in a way that it also protects the array_cache in
drain_pages.
(Note: maybe we should make the array_cache locking more consistent? I.e.
always take the array cache lock for shared arrays and disable interrupts
for the per cpu arrays?)
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | mm/slab.c | 21 |
1 file changed, 12 insertions, 9 deletions
@@ -3521,7 +3521,8 @@ static void enable_cpucache(struct kmem_cache *cachep) | |||
3521 | 3521 | ||
3522 | /* | 3522 | /* |
3523 | * Drain an array if it contains any elements taking the l3 lock only if | 3523 | * Drain an array if it contains any elements taking the l3 lock only if |
3524 | * necessary. | 3524 | * necessary. Note that the l3 listlock also protects the array_cache |
3525 | * if drain_array() is used on the shared array. | ||
3525 | */ | 3526 | */ |
3526 | void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | 3527 | void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, |
3527 | struct array_cache *ac, int force, int node) | 3528 | struct array_cache *ac, int force, int node) |
@@ -3532,16 +3533,18 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | |||
3532 | return; | 3533 | return; |
3533 | if (ac->touched && !force) { | 3534 | if (ac->touched && !force) { |
3534 | ac->touched = 0; | 3535 | ac->touched = 0; |
3535 | } else if (ac->avail) { | 3536 | } else { |
3536 | tofree = force ? ac->avail : (ac->limit + 4) / 5; | ||
3537 | if (tofree > ac->avail) | ||
3538 | tofree = (ac->avail + 1) / 2; | ||
3539 | spin_lock_irq(&l3->list_lock); | 3537 | spin_lock_irq(&l3->list_lock); |
3540 | free_block(cachep, ac->entry, tofree, node); | 3538 | if (ac->avail) { |
3539 | tofree = force ? ac->avail : (ac->limit + 4) / 5; | ||
3540 | if (tofree > ac->avail) | ||
3541 | tofree = (ac->avail + 1) / 2; | ||
3542 | free_block(cachep, ac->entry, tofree, node); | ||
3543 | ac->avail -= tofree; | ||
3544 | memmove(ac->entry, &(ac->entry[tofree]), | ||
3545 | sizeof(void *) * ac->avail); | ||
3546 | } | ||
3541 | spin_unlock_irq(&l3->list_lock); | 3547 | spin_unlock_irq(&l3->list_lock); |
3542 | ac->avail -= tofree; | ||
3543 | memmove(ac->entry, &(ac->entry[tofree]), | ||
3544 | sizeof(void *) * ac->avail); | ||
3545 | } | 3548 | } |
3546 | } | 3549 | } |
3547 | 3550 | ||