diff options
-rw-r--r-- | mm/slab.c | 20 |
1 file changed, 13 insertions(+), 7 deletions(-)
@@ -1050,10 +1050,10 @@ static void free_alien_cache(struct alien_cache **alc_ptr) | |||
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | static void __drain_alien_cache(struct kmem_cache *cachep, | 1052 | static void __drain_alien_cache(struct kmem_cache *cachep, |
1053 | struct array_cache *ac, int node) | 1053 | struct array_cache *ac, int node, |
1054 | struct list_head *list) | ||
1054 | { | 1055 | { |
1055 | struct kmem_cache_node *n = get_node(cachep, node); | 1056 | struct kmem_cache_node *n = get_node(cachep, node); |
1056 | LIST_HEAD(list); | ||
1057 | 1057 | ||
1058 | if (ac->avail) { | 1058 | if (ac->avail) { |
1059 | spin_lock(&n->list_lock); | 1059 | spin_lock(&n->list_lock); |
@@ -1065,10 +1065,9 @@ static void __drain_alien_cache(struct kmem_cache *cachep, | |||
1065 | if (n->shared) | 1065 | if (n->shared) |
1066 | transfer_objects(n->shared, ac, ac->limit); | 1066 | transfer_objects(n->shared, ac, ac->limit); |
1067 | 1067 | ||
1068 | free_block(cachep, ac->entry, ac->avail, node, &list); | 1068 | free_block(cachep, ac->entry, ac->avail, node, list); |
1069 | ac->avail = 0; | 1069 | ac->avail = 0; |
1070 | spin_unlock(&n->list_lock); | 1070 | spin_unlock(&n->list_lock); |
1071 | slabs_destroy(cachep, &list); | ||
1072 | } | 1071 | } |
1073 | } | 1072 | } |
1074 | 1073 | ||
@@ -1086,8 +1085,11 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) | |||
1086 | if (alc) { | 1085 | if (alc) { |
1087 | ac = &alc->ac; | 1086 | ac = &alc->ac; |
1088 | if (ac->avail && spin_trylock_irq(&alc->lock)) { | 1087 | if (ac->avail && spin_trylock_irq(&alc->lock)) { |
1089 | __drain_alien_cache(cachep, ac, node); | 1088 | LIST_HEAD(list); |
1089 | |||
1090 | __drain_alien_cache(cachep, ac, node, &list); | ||
1090 | spin_unlock_irq(&alc->lock); | 1091 | spin_unlock_irq(&alc->lock); |
1092 | slabs_destroy(cachep, &list); | ||
1091 | } | 1093 | } |
1092 | } | 1094 | } |
1093 | } | 1095 | } |
@@ -1104,10 +1106,13 @@ static void drain_alien_cache(struct kmem_cache *cachep, | |||
1104 | for_each_online_node(i) { | 1106 | for_each_online_node(i) { |
1105 | alc = alien[i]; | 1107 | alc = alien[i]; |
1106 | if (alc) { | 1108 | if (alc) { |
1109 | LIST_HEAD(list); | ||
1110 | |||
1107 | ac = &alc->ac; | 1111 | ac = &alc->ac; |
1108 | spin_lock_irqsave(&alc->lock, flags); | 1112 | spin_lock_irqsave(&alc->lock, flags); |
1109 | __drain_alien_cache(cachep, ac, i); | 1113 | __drain_alien_cache(cachep, ac, i, &list); |
1110 | spin_unlock_irqrestore(&alc->lock, flags); | 1114 | spin_unlock_irqrestore(&alc->lock, flags); |
1115 | slabs_destroy(cachep, &list); | ||
1111 | } | 1116 | } |
1112 | } | 1117 | } |
1113 | } | 1118 | } |
@@ -1138,10 +1143,11 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1138 | spin_lock(&alien->lock); | 1143 | spin_lock(&alien->lock); |
1139 | if (unlikely(ac->avail == ac->limit)) { | 1144 | if (unlikely(ac->avail == ac->limit)) { |
1140 | STATS_INC_ACOVERFLOW(cachep); | 1145 | STATS_INC_ACOVERFLOW(cachep); |
1141 | __drain_alien_cache(cachep, ac, nodeid); | 1146 | __drain_alien_cache(cachep, ac, nodeid, &list); |
1142 | } | 1147 | } |
1143 | ac_put_obj(cachep, ac, objp); | 1148 | ac_put_obj(cachep, ac, objp); |
1144 | spin_unlock(&alien->lock); | 1149 | spin_unlock(&alien->lock); |
1150 | slabs_destroy(cachep, &list); | ||
1145 | } else { | 1151 | } else { |
1146 | n = get_node(cachep, nodeid); | 1152 | n = get_node(cachep, nodeid); |
1147 | spin_lock(&n->list_lock); | 1153 | spin_lock(&n->list_lock); |