author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-08-06 19:04:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:14 -0400
commit		49dfc304ba241b315068023962004542c5118103
tree		cd8563b6b7fc120bf43cfbbe3ab63a5f859fb583 /mm
parent		c8522a3a5832b843570a3315674f5a3575958a51
slab: use the lock on alien_cache, instead of the lock on array_cache
Now that we have a separate alien_cache structure, it is better to hold
the alien_cache's own lock while manipulating it. The lock on array_cache
is then no longer needed, so remove it.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	25
1 file changed, 8 insertions(+), 17 deletions(-)
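For context before the hunks: a rough sketch of the two structures involved, assuming the alien_cache layout introduced by the parent commit (field names beyond those visible in the hunks are abridged, not a verbatim copy of mm/slab.c). The per-node alien cache is an array_cache wrapped in an alien_cache that carries its own spinlock, which is what makes the lock inside array_cache redundant:

#include <linux/spinlock.h>	/* spinlock_t, spin_lock_init() */

struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	/* spinlock_t lock;  <-- removed by this patch */
	void *entry[];		/* cached object pointers */
};

struct alien_cache {
	spinlock_t lock;	/* now the only lock serializing access to 'ac' */
	struct array_cache ac;
};

__alloc_alien_cache() initializes alc->lock once at allocation time, and every path that drains or fills the embedded ac (reap_alien(), drain_alien_cache(), cache_free_alien()) now takes alc->lock instead of ac->lock, as the hunks below show.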
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -191,7 +191,6 @@ struct array_cache {
 	unsigned int limit;
 	unsigned int batchcount;
 	unsigned int touched;
-	spinlock_t lock;
 	void *entry[];	/*
 			 * Must have this definition in here for the proper
 			 * alignment of array_cache. Also simplifies accessing
@@ -512,7 +511,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 		return;
 	for_each_node(r) {
 		if (alc[r])
-			lockdep_set_class(&(alc[r]->ac.lock), alc_key);
+			lockdep_set_class(&(alc[r]->lock), alc_key);
 	}
 }
 
@@ -811,7 +810,6 @@ static void init_arraycache(struct array_cache *ac, int limit, int batch)
 		ac->limit = limit;
 		ac->batchcount = batch;
 		ac->touched = 0;
-		spin_lock_init(&ac->lock);
 	}
 }
 
@@ -1010,6 +1008,7 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
 
 	alc = kmalloc_node(memsize, gfp, node);
 	init_arraycache(&alc->ac, entries, batch);
+	spin_lock_init(&alc->lock);
 	return alc;
 }
 
@@ -1086,9 +1085,9 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 
 		if (alc) {
 			ac = &alc->ac;
-			if (ac->avail && spin_trylock_irq(&ac->lock)) {
+			if (ac->avail && spin_trylock_irq(&alc->lock)) {
 				__drain_alien_cache(cachep, ac, node);
-				spin_unlock_irq(&ac->lock);
+				spin_unlock_irq(&alc->lock);
 			}
 		}
 	}
@@ -1106,9 +1105,9 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 		alc = alien[i];
 		if (alc) {
 			ac = &alc->ac;
-			spin_lock_irqsave(&ac->lock, flags);
+			spin_lock_irqsave(&alc->lock, flags);
 			__drain_alien_cache(cachep, ac, i);
-			spin_unlock_irqrestore(&ac->lock, flags);
+			spin_unlock_irqrestore(&alc->lock, flags);
 		}
 	}
 }
@@ -1136,13 +1135,13 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	if (n->alien && n->alien[nodeid]) {
 		alien = n->alien[nodeid];
 		ac = &alien->ac;
-		spin_lock(&ac->lock);
+		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
 			__drain_alien_cache(cachep, ac, nodeid);
 		}
 		ac_put_obj(cachep, ac, objp);
-		spin_unlock(&ac->lock);
+		spin_unlock(&alien->lock);
 	} else {
 		n = get_node(cachep, nodeid);
 		spin_lock(&n->list_lock);
@@ -1613,10 +1612,6 @@ void __init kmem_cache_init(void)
 
 		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
-		/*
-		 * Do not assume that spinlocks can be initialized via memcpy:
-		 */
-		spin_lock_init(&ptr->lock);
 
 		kmem_cache->array[smp_processor_id()] = ptr;
 
@@ -1626,10 +1621,6 @@ void __init kmem_cache_init(void)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
 		       sizeof(struct arraycache_init));
-		/*
-		 * Do not assume that spinlocks can be initialized via memcpy:
-		 */
-		spin_lock_init(&ptr->lock);
 
 		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
 	}
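The last two hunks follow directly from the struct change: since array_cache no longer embeds a spinlock, the boot-time copy of the static arraycache_init needs no lock re-initialization afterwards. Roughly (a sketch only; the allocation flag and the surrounding setup in kmem_cache_init() are abridged, not quoted verbatim):

	struct array_cache *ptr;

	/* Replace the static boot-time per-CPU cache with a kmalloc'd copy. */
	ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
	memcpy(ptr, cpu_cache_get(kmem_cache), sizeof(struct arraycache_init));
	/*
	 * Before this patch the spinlock copied by memcpy() had to be
	 * re-initialized here; with no lock left inside array_cache,
	 * the memcpy() alone is sufficient.
	 */
	kmem_cache->array[smp_processor_id()] = ptr;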