aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--mm/slab.c20
1 files changed, 11 insertions, 9 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 1845c0127394..d73b38e7d7e8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2126,6 +2126,10 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 static void drain_array_locked(struct kmem_cache *cachep,
 			struct array_cache *ac, int force, int node);
 
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+			struct array_cache *ac,
+			int force, int node);
+
 static void do_drain(void *arg)
 {
 	struct kmem_cache *cachep = arg;
@@ -2150,9 +2154,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
 		if (l3) {
-			spin_lock_irq(&l3->list_lock);
-			drain_array_locked(cachep, l3->shared, 1, node);
-			spin_unlock_irq(&l3->list_lock);
+			drain_array(cachep, l3, l3->shared, 1, node);
 			if (l3->alien)
 				drain_alien_cache(cachep, l3->alien);
 		}
@@ -3545,12 +3547,11 @@ static void drain_array_locked(struct kmem_cache *cachep,
  * necessary.
  */
 static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3,
-			struct array_cache *ac)
+			struct array_cache *ac, int force, int node)
 {
 	if (ac && ac->avail) {
 		spin_lock_irq(&l3->list_lock);
-		drain_array_locked(searchp, ac, 0,
-				numa_node_id());
+		drain_array_locked(searchp, ac, force, node);
 		spin_unlock_irq(&l3->list_lock);
 	}
 }
@@ -3571,6 +3572,7 @@ static void cache_reap(void *unused)
 {
 	struct list_head *walk;
 	struct kmem_list3 *l3;
+	int node = numa_node_id();
 
 	if (!mutex_trylock(&cache_chain_mutex)) {
 		/* Give up. Setup the next iteration. */
@@ -3593,11 +3595,11 @@ static void cache_reap(void *unused)
 		 * have established with reasonable certainty that
 		 * we can do some work if the lock was obtained.
 		 */
-		l3 = searchp->nodelists[numa_node_id()];
+		l3 = searchp->nodelists[node];
 
 		reap_alien(searchp, l3);
 
-		drain_array(searchp, l3, cpu_cache_get(searchp));
+		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
 
 		/*
 		 * These are racy checks but it does not matter
@@ -3608,7 +3610,7 @@ static void cache_reap(void *unused)
 
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
 
-		drain_array(searchp, l3, l3->shared);
+		drain_array(searchp, l3, l3->shared, 0, node);
 
 		if (l3->free_touched) {
 			l3->free_touched = 0;