author    Joonsoo Kim <iamjoonsoo.kim@lge.com>  2014-08-06 19:04:33 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 21:01:14 -0400
commit    833b706cc8b7b555e18d3426e9616bd066883a7a (patch)
tree      d852d7f0fa39e58bcc4d56004b6b97a156ab7320 /mm
parent    49dfc304ba241b315068023962004542c5118103 (diff)
slab: destroy a slab without holding any alien cache lock
I haven't heard that this alien cache lock is contended, but reducing the
chance of contention is generally better. This change also lets us simplify
the complex lockdep annotation in the slab code; that simplification is
implemented in the following patch.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
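The pattern the patch applies is worth seeing in isolation: collect the
objects to be destroyed on a stack-local list while the lock is held, then
do the actual destruction after the lock is dropped, so the critical
section performs only pointer manipulation. Below is a minimal stand-alone
C sketch of that pattern, not the kernel code itself; the names (struct
cache, drain(), destroy_list()) are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

struct cache {
	pthread_mutex_t lock;	/* protects head */
	struct node *head;	/* objects shared between threads */
};

/* Called under c->lock: just detach everything onto the caller's list. */
static void drain(struct cache *c, struct node **list)
{
	*list = c->head;
	c->head = NULL;
}

/* Called without the lock: the list is private to the caller by now. */
static void destroy_list(struct node *list)
{
	while (list) {
		struct node *next = list->next;
		printf("destroying %d outside the lock\n", list->val);
		free(list);
		list = next;
	}
}

int main(void)
{
	struct cache c = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct node *list = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = c.head;
		c.head = n;
	}

	pthread_mutex_lock(&c.lock);
	drain(&c, &list);	/* cheap: pointer moves only */
	pthread_mutex_unlock(&c.lock);
	destroy_list(list);	/* the slow part runs lock-free */
	return 0;
}

This mirrors what the hunks below do: __drain_alien_cache() now fills a
caller-provided list under the alien cache lock, and each caller invokes
slabs_destroy() on that list only after unlocking.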
Diffstat (limited to 'mm')
 mm/slab.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index e4ce73c32a7a..e4dc0896b891 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1050,10 +1050,10 @@ static void free_alien_cache(struct alien_cache **alc_ptr)
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
-				struct array_cache *ac, int node)
+				struct array_cache *ac, int node,
+				struct list_head *list)
 {
 	struct kmem_cache_node *n = get_node(cachep, node);
-	LIST_HEAD(list);
 
 	if (ac->avail) {
 		spin_lock(&n->list_lock);
@@ -1065,10 +1065,9 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		if (n->shared)
 			transfer_objects(n->shared, ac, ac->limit);
 
-		free_block(cachep, ac->entry, ac->avail, node, &list);
+		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
 		spin_unlock(&n->list_lock);
-		slabs_destroy(cachep, &list);
 	}
 }
 
@@ -1086,8 +1085,11 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 		if (alc) {
 			ac = &alc->ac;
 			if (ac->avail && spin_trylock_irq(&alc->lock)) {
-				__drain_alien_cache(cachep, ac, node);
+				LIST_HEAD(list);
+
+				__drain_alien_cache(cachep, ac, node, &list);
 				spin_unlock_irq(&alc->lock);
+				slabs_destroy(cachep, &list);
 			}
 		}
 	}
@@ -1104,10 +1106,13 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	for_each_online_node(i) {
 		alc = alien[i];
 		if (alc) {
+			LIST_HEAD(list);
+
 			ac = &alc->ac;
 			spin_lock_irqsave(&alc->lock, flags);
-			__drain_alien_cache(cachep, ac, i);
+			__drain_alien_cache(cachep, ac, i, &list);
 			spin_unlock_irqrestore(&alc->lock, flags);
+			slabs_destroy(cachep, &list);
 		}
 	}
 }
@@ -1138,10 +1143,11 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid);
+			__drain_alien_cache(cachep, ac, nodeid, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
+		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, nodeid);
 		spin_lock(&n->list_lock);