aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--mm/slab.c21
1 file changed, 12 insertions, 9 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 3274144c0d16..6b691ecbac44 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3521,7 +3521,8 @@ static void enable_cpucache(struct kmem_cache *cachep)
 
 /*
  * Drain an array if it contains any elements taking the l3 lock only if
- * necessary.
+ * necessary. Note that the l3 listlock also protects the array_cache
+ * if drain_array() is used on the shared array.
  */
 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 			 struct array_cache *ac, int force, int node)
@@ -3532,16 +3533,18 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 		return;
 	if (ac->touched && !force) {
 		ac->touched = 0;
-	} else if (ac->avail) {
-		tofree = force ? ac->avail : (ac->limit + 4) / 5;
-		if (tofree > ac->avail)
-			tofree = (ac->avail + 1) / 2;
+	} else {
 		spin_lock_irq(&l3->list_lock);
-		free_block(cachep, ac->entry, tofree, node);
+		if (ac->avail) {
+			tofree = force ? ac->avail : (ac->limit + 4) / 5;
+			if (tofree > ac->avail)
+				tofree = (ac->avail + 1) / 2;
+			free_block(cachep, ac->entry, tofree, node);
+			ac->avail -= tofree;
+			memmove(ac->entry, &(ac->entry[tofree]),
+				sizeof(void *) * ac->avail);
+		}
 		spin_unlock_irq(&l3->list_lock);
-		ac->avail -= tofree;
-		memmove(ac->entry, &(ac->entry[tofree]),
-			sizeof(void *) * ac->avail);
 	}
 }
 