about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  mm/slab.c  11
1 file changed, 9 insertions, 2 deletions
diff --git a/mm/slab.c b/mm/slab.c
index dee857a8680b..351aa6c587f7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -971,6 +971,13 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
971 971
972 if (ac->avail) { 972 if (ac->avail) {
973 spin_lock(&rl3->list_lock); 973 spin_lock(&rl3->list_lock);
974 /*
975 * Stuff objects into the remote nodes shared array first.
976 * That way we could avoid the overhead of putting the objects
977 * into the free lists and getting them back later.
978 */
979 transfer_objects(rl3->shared, ac, ac->limit);
980
974 free_block(cachep, ac->entry, ac->avail, node); 981 free_block(cachep, ac->entry, ac->avail, node);
975 ac->avail = 0; 982 ac->avail = 0;
976 spin_unlock(&rl3->list_lock); 983 spin_unlock(&rl3->list_lock);
@@ -986,8 +993,8 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
986 993
987 if (l3->alien) { 994 if (l3->alien) {
988 struct array_cache *ac = l3->alien[node]; 995 struct array_cache *ac = l3->alien[node];
989 if (ac && ac->avail) { 996
990 spin_lock_irq(&ac->lock); 997 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
991 __drain_alien_cache(cachep, ac, node); 998 __drain_alien_cache(cachep, ac, node);
992 spin_unlock_irq(&ac->lock); 999 spin_unlock_irq(&ac->lock);
993 } 1000 }