author    | Christoph Lameter <cl@linux.com>  | 2013-01-10 14:14:19 -0500
committer | Pekka Enberg <penberg@kernel.org> | 2013-02-01 05:32:09 -0500
commit    | ce8eb6c424c794d7fb4d1a6667d267990ca28072
tree      | 354f6bd0aee939afc82fee6ed6f049d6a96bbfc3 /mm
parent    | 2c59dd6544212faa5ce761920d2251f4152f408d
slab: Rename list3/l3 to node
The list3 or l3 pointers point to per-node structures. Reflect
that in the names of the variables used.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
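
For orientation before reading the diff: the sketch below is not taken from the patch; the struct definitions and the node_free_objects() helper are simplified stand-ins invented purely for illustration. It only shows the naming convention the commit adopts, where per-node state lives in struct kmem_cache_node, is reached via cachep->node[nodeid], and is held in a local called n rather than the old l3.

```c
/*
 * Illustrative sketch only -- these are simplified stand-in types, not the
 * real definitions from mm/slab.h. It just shows the naming convention the
 * patch settles on: the per-node structure is struct kmem_cache_node, it is
 * reached through cachep->node[nodeid], and locals are called "n" (formerly
 * "l3", a leftover from the old "list3" name).
 */
#include <stdio.h>

#define MAX_NUMNODES 64			/* stand-in for the kernel config value */

struct kmem_cache_node {
	unsigned long free_objects;	/* objects sitting on this node's free lists */
};

struct kmem_cache {
	struct kmem_cache_node *node[MAX_NUMNODES];	/* one entry per NUMA node */
};

/* After the rename: the local is "n" and the type name says "node". */
static unsigned long node_free_objects(const struct kmem_cache *cachep, int nodeid)
{
	const struct kmem_cache_node *n = cachep->node[nodeid];

	if (!n)				/* node not set up (e.g. not online) */
		return 0;
	return n->free_objects;
}

int main(void)
{
	static struct kmem_cache_node node0 = { .free_objects = 42 };
	struct kmem_cache cache = { .node = { [0] = &node0 } };

	printf("free objects on node 0: %lu\n", node_free_objects(&cache, 0));
	return 0;
}
```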
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slab.c | 516 |
-rw-r--r-- | mm/slab.h | 2 |
2 files changed, 259 insertions, 259 deletions
@@ -306,13 +306,13 @@ struct kmem_cache_node { | |||
306 | * Need this for bootstrapping a per node allocator. | 306 | * Need this for bootstrapping a per node allocator. |
307 | */ | 307 | */ |
308 | #define NUM_INIT_LISTS (3 * MAX_NUMNODES) | 308 | #define NUM_INIT_LISTS (3 * MAX_NUMNODES) |
309 | static struct kmem_cache_node __initdata initkmem_list3[NUM_INIT_LISTS]; | 309 | static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS]; |
310 | #define CACHE_CACHE 0 | 310 | #define CACHE_CACHE 0 |
311 | #define SIZE_AC MAX_NUMNODES | 311 | #define SIZE_AC MAX_NUMNODES |
312 | #define SIZE_L3 (2 * MAX_NUMNODES) | 312 | #define SIZE_NODE (2 * MAX_NUMNODES) |
313 | 313 | ||
314 | static int drain_freelist(struct kmem_cache *cache, | 314 | static int drain_freelist(struct kmem_cache *cache, |
315 | struct kmem_cache_node *l3, int tofree); | 315 | struct kmem_cache_node *n, int tofree); |
316 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, | 316 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, |
317 | int node); | 317 | int node); |
318 | static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); | 318 | static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); |
@@ -321,9 +321,9 @@ static void cache_reap(struct work_struct *unused); | |||
321 | static int slab_early_init = 1; | 321 | static int slab_early_init = 1; |
322 | 322 | ||
323 | #define INDEX_AC kmalloc_index(sizeof(struct arraycache_init)) | 323 | #define INDEX_AC kmalloc_index(sizeof(struct arraycache_init)) |
324 | #define INDEX_L3 kmalloc_index(sizeof(struct kmem_cache_node)) | 324 | #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node)) |
325 | 325 | ||
326 | static void kmem_list3_init(struct kmem_cache_node *parent) | 326 | static void kmem_cache_node_init(struct kmem_cache_node *parent) |
327 | { | 327 | { |
328 | INIT_LIST_HEAD(&parent->slabs_full); | 328 | INIT_LIST_HEAD(&parent->slabs_full); |
329 | INIT_LIST_HEAD(&parent->slabs_partial); | 329 | INIT_LIST_HEAD(&parent->slabs_partial); |
@@ -538,15 +538,15 @@ static void slab_set_lock_classes(struct kmem_cache *cachep, | |||
538 | int q) | 538 | int q) |
539 | { | 539 | { |
540 | struct array_cache **alc; | 540 | struct array_cache **alc; |
541 | struct kmem_cache_node *l3; | 541 | struct kmem_cache_node *n; |
542 | int r; | 542 | int r; |
543 | 543 | ||
544 | l3 = cachep->node[q]; | 544 | n = cachep->node[q]; |
545 | if (!l3) | 545 | if (!n) |
546 | return; | 546 | return; |
547 | 547 | ||
548 | lockdep_set_class(&l3->list_lock, l3_key); | 548 | lockdep_set_class(&n->list_lock, l3_key); |
549 | alc = l3->alien; | 549 | alc = n->alien; |
550 | /* | 550 | /* |
551 | * FIXME: This check for BAD_ALIEN_MAGIC | 551 | * FIXME: This check for BAD_ALIEN_MAGIC |
552 | * should go away when common slab code is taught to | 552 | * should go away when common slab code is taught to |
@@ -583,14 +583,14 @@ static void init_node_lock_keys(int q) | |||
583 | return; | 583 | return; |
584 | 584 | ||
585 | for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) { | 585 | for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) { |
586 | struct kmem_cache_node *l3; | 586 | struct kmem_cache_node *n; |
587 | struct kmem_cache *cache = kmalloc_caches[i]; | 587 | struct kmem_cache *cache = kmalloc_caches[i]; |
588 | 588 | ||
589 | if (!cache) | 589 | if (!cache) |
590 | continue; | 590 | continue; |
591 | 591 | ||
592 | l3 = cache->node[q]; | 592 | n = cache->node[q]; |
593 | if (!l3 || OFF_SLAB(cache)) | 593 | if (!n || OFF_SLAB(cache)) |
594 | continue; | 594 | continue; |
595 | 595 | ||
596 | slab_set_lock_classes(cache, &on_slab_l3_key, | 596 | slab_set_lock_classes(cache, &on_slab_l3_key, |
@@ -857,29 +857,29 @@ static inline bool is_slab_pfmemalloc(struct slab *slabp) | |||
857 | static void recheck_pfmemalloc_active(struct kmem_cache *cachep, | 857 | static void recheck_pfmemalloc_active(struct kmem_cache *cachep, |
858 | struct array_cache *ac) | 858 | struct array_cache *ac) |
859 | { | 859 | { |
860 | struct kmem_cache_node *l3 = cachep->node[numa_mem_id()]; | 860 | struct kmem_cache_node *n = cachep->node[numa_mem_id()]; |
861 | struct slab *slabp; | 861 | struct slab *slabp; |
862 | unsigned long flags; | 862 | unsigned long flags; |
863 | 863 | ||
864 | if (!pfmemalloc_active) | 864 | if (!pfmemalloc_active) |
865 | return; | 865 | return; |
866 | 866 | ||
867 | spin_lock_irqsave(&l3->list_lock, flags); | 867 | spin_lock_irqsave(&n->list_lock, flags); |
868 | list_for_each_entry(slabp, &l3->slabs_full, list) | 868 | list_for_each_entry(slabp, &n->slabs_full, list) |
869 | if (is_slab_pfmemalloc(slabp)) | 869 | if (is_slab_pfmemalloc(slabp)) |
870 | goto out; | 870 | goto out; |
871 | 871 | ||
872 | list_for_each_entry(slabp, &l3->slabs_partial, list) | 872 | list_for_each_entry(slabp, &n->slabs_partial, list) |
873 | if (is_slab_pfmemalloc(slabp)) | 873 | if (is_slab_pfmemalloc(slabp)) |
874 | goto out; | 874 | goto out; |
875 | 875 | ||
876 | list_for_each_entry(slabp, &l3->slabs_free, list) | 876 | list_for_each_entry(slabp, &n->slabs_free, list) |
877 | if (is_slab_pfmemalloc(slabp)) | 877 | if (is_slab_pfmemalloc(slabp)) |
878 | goto out; | 878 | goto out; |
879 | 879 | ||
880 | pfmemalloc_active = false; | 880 | pfmemalloc_active = false; |
881 | out: | 881 | out: |
882 | spin_unlock_irqrestore(&l3->list_lock, flags); | 882 | spin_unlock_irqrestore(&n->list_lock, flags); |
883 | } | 883 | } |
884 | 884 | ||
885 | static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, | 885 | static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, |
@@ -890,7 +890,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, | |||
890 | 890 | ||
891 | /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */ | 891 | /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */ |
892 | if (unlikely(is_obj_pfmemalloc(objp))) { | 892 | if (unlikely(is_obj_pfmemalloc(objp))) { |
893 | struct kmem_cache_node *l3; | 893 | struct kmem_cache_node *n; |
894 | 894 | ||
895 | if (gfp_pfmemalloc_allowed(flags)) { | 895 | if (gfp_pfmemalloc_allowed(flags)) { |
896 | clear_obj_pfmemalloc(&objp); | 896 | clear_obj_pfmemalloc(&objp); |
@@ -912,8 +912,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, | |||
912 | * If there are empty slabs on the slabs_free list and we are | 912 | * If there are empty slabs on the slabs_free list and we are |
913 | * being forced to refill the cache, mark this one !pfmemalloc. | 913 | * being forced to refill the cache, mark this one !pfmemalloc. |
914 | */ | 914 | */ |
915 | l3 = cachep->node[numa_mem_id()]; | 915 | n = cachep->node[numa_mem_id()]; |
916 | if (!list_empty(&l3->slabs_free) && force_refill) { | 916 | if (!list_empty(&n->slabs_free) && force_refill) { |
917 | struct slab *slabp = virt_to_slab(objp); | 917 | struct slab *slabp = virt_to_slab(objp); |
918 | ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem)); | 918 | ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem)); |
919 | clear_obj_pfmemalloc(&objp); | 919 | clear_obj_pfmemalloc(&objp); |
@@ -990,7 +990,7 @@ static int transfer_objects(struct array_cache *to, | |||
990 | #ifndef CONFIG_NUMA | 990 | #ifndef CONFIG_NUMA |
991 | 991 | ||
992 | #define drain_alien_cache(cachep, alien) do { } while (0) | 992 | #define drain_alien_cache(cachep, alien) do { } while (0) |
993 | #define reap_alien(cachep, l3) do { } while (0) | 993 | #define reap_alien(cachep, n) do { } while (0) |
994 | 994 | ||
995 | static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) | 995 | static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) |
996 | { | 996 | { |
@@ -1062,33 +1062,33 @@ static void free_alien_cache(struct array_cache **ac_ptr) | |||
1062 | static void __drain_alien_cache(struct kmem_cache *cachep, | 1062 | static void __drain_alien_cache(struct kmem_cache *cachep, |
1063 | struct array_cache *ac, int node) | 1063 | struct array_cache *ac, int node) |
1064 | { | 1064 | { |
1065 | struct kmem_cache_node *rl3 = cachep->node[node]; | 1065 | struct kmem_cache_node *n = cachep->node[node]; |
1066 | 1066 | ||
1067 | if (ac->avail) { | 1067 | if (ac->avail) { |
1068 | spin_lock(&rl3->list_lock); | 1068 | spin_lock(&n->list_lock); |
1069 | /* | 1069 | /* |
1070 | * Stuff objects into the remote nodes shared array first. | 1070 | * Stuff objects into the remote nodes shared array first. |
1071 | * That way we could avoid the overhead of putting the objects | 1071 | * That way we could avoid the overhead of putting the objects |
1072 | * into the free lists and getting them back later. | 1072 | * into the free lists and getting them back later. |
1073 | */ | 1073 | */ |
1074 | if (rl3->shared) | 1074 | if (n->shared) |
1075 | transfer_objects(rl3->shared, ac, ac->limit); | 1075 | transfer_objects(n->shared, ac, ac->limit); |
1076 | 1076 | ||
1077 | free_block(cachep, ac->entry, ac->avail, node); | 1077 | free_block(cachep, ac->entry, ac->avail, node); |
1078 | ac->avail = 0; | 1078 | ac->avail = 0; |
1079 | spin_unlock(&rl3->list_lock); | 1079 | spin_unlock(&n->list_lock); |
1080 | } | 1080 | } |
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | /* | 1083 | /* |
1084 | * Called from cache_reap() to regularly drain alien caches round robin. | 1084 | * Called from cache_reap() to regularly drain alien caches round robin. |
1085 | */ | 1085 | */ |
1086 | static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *l3) | 1086 | static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) |
1087 | { | 1087 | { |
1088 | int node = __this_cpu_read(slab_reap_node); | 1088 | int node = __this_cpu_read(slab_reap_node); |
1089 | 1089 | ||
1090 | if (l3->alien) { | 1090 | if (n->alien) { |
1091 | struct array_cache *ac = l3->alien[node]; | 1091 | struct array_cache *ac = n->alien[node]; |
1092 | 1092 | ||
1093 | if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { | 1093 | if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { |
1094 | __drain_alien_cache(cachep, ac, node); | 1094 | __drain_alien_cache(cachep, ac, node); |
@@ -1118,7 +1118,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1118 | { | 1118 | { |
1119 | struct slab *slabp = virt_to_slab(objp); | 1119 | struct slab *slabp = virt_to_slab(objp); |
1120 | int nodeid = slabp->nodeid; | 1120 | int nodeid = slabp->nodeid; |
1121 | struct kmem_cache_node *l3; | 1121 | struct kmem_cache_node *n; |
1122 | struct array_cache *alien = NULL; | 1122 | struct array_cache *alien = NULL; |
1123 | int node; | 1123 | int node; |
1124 | 1124 | ||
@@ -1131,10 +1131,10 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1131 | if (likely(slabp->nodeid == node)) | 1131 | if (likely(slabp->nodeid == node)) |
1132 | return 0; | 1132 | return 0; |
1133 | 1133 | ||
1134 | l3 = cachep->node[node]; | 1134 | n = cachep->node[node]; |
1135 | STATS_INC_NODEFREES(cachep); | 1135 | STATS_INC_NODEFREES(cachep); |
1136 | if (l3->alien && l3->alien[nodeid]) { | 1136 | if (n->alien && n->alien[nodeid]) { |
1137 | alien = l3->alien[nodeid]; | 1137 | alien = n->alien[nodeid]; |
1138 | spin_lock(&alien->lock); | 1138 | spin_lock(&alien->lock); |
1139 | if (unlikely(alien->avail == alien->limit)) { | 1139 | if (unlikely(alien->avail == alien->limit)) { |
1140 | STATS_INC_ACOVERFLOW(cachep); | 1140 | STATS_INC_ACOVERFLOW(cachep); |
@@ -1153,7 +1153,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1153 | 1153 | ||
1154 | /* | 1154 | /* |
1155 | * Allocates and initializes node for a node on each slab cache, used for | 1155 | * Allocates and initializes node for a node on each slab cache, used for |
1156 | * either memory or cpu hotplug. If memory is being hot-added, the kmem_list3 | 1156 | * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node |
1157 | * will be allocated off-node since memory is not yet online for the new node. | 1157 | * will be allocated off-node since memory is not yet online for the new node. |
1158 | * When hotplugging memory or a cpu, existing node are not replaced if | 1158 | * When hotplugging memory or a cpu, existing node are not replaced if |
1159 | * already in use. | 1159 | * already in use. |
@@ -1163,7 +1163,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1163 | static int init_cache_node_node(int node) | 1163 | static int init_cache_node_node(int node) |
1164 | { | 1164 | { |
1165 | struct kmem_cache *cachep; | 1165 | struct kmem_cache *cachep; |
1166 | struct kmem_cache_node *l3; | 1166 | struct kmem_cache_node *n; |
1167 | const int memsize = sizeof(struct kmem_cache_node); | 1167 | const int memsize = sizeof(struct kmem_cache_node); |
1168 | 1168 | ||
1169 | list_for_each_entry(cachep, &slab_caches, list) { | 1169 | list_for_each_entry(cachep, &slab_caches, list) { |
@@ -1173,11 +1173,11 @@ static int init_cache_node_node(int node) | |||
1173 | * node has not already allocated this | 1173 | * node has not already allocated this |
1174 | */ | 1174 | */ |
1175 | if (!cachep->node[node]) { | 1175 | if (!cachep->node[node]) { |
1176 | l3 = kmalloc_node(memsize, GFP_KERNEL, node); | 1176 | n = kmalloc_node(memsize, GFP_KERNEL, node); |
1177 | if (!l3) | 1177 | if (!n) |
1178 | return -ENOMEM; | 1178 | return -ENOMEM; |
1179 | kmem_list3_init(l3); | 1179 | kmem_cache_node_init(n); |
1180 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 1180 | n->next_reap = jiffies + REAPTIMEOUT_LIST3 + |
1181 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 1181 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; |
1182 | 1182 | ||
1183 | /* | 1183 | /* |
@@ -1185,7 +1185,7 @@ static int init_cache_node_node(int node) | |||
1185 | * go. slab_mutex is sufficient | 1185 | * go. slab_mutex is sufficient |
1186 | * protection here. | 1186 | * protection here. |
1187 | */ | 1187 | */ |
1188 | cachep->node[node] = l3; | 1188 | cachep->node[node] = n; |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | spin_lock_irq(&cachep->node[node]->list_lock); | 1191 | spin_lock_irq(&cachep->node[node]->list_lock); |
@@ -1200,7 +1200,7 @@ static int init_cache_node_node(int node) | |||
1200 | static void __cpuinit cpuup_canceled(long cpu) | 1200 | static void __cpuinit cpuup_canceled(long cpu) |
1201 | { | 1201 | { |
1202 | struct kmem_cache *cachep; | 1202 | struct kmem_cache *cachep; |
1203 | struct kmem_cache_node *l3 = NULL; | 1203 | struct kmem_cache_node *n = NULL; |
1204 | int node = cpu_to_mem(cpu); | 1204 | int node = cpu_to_mem(cpu); |
1205 | const struct cpumask *mask = cpumask_of_node(node); | 1205 | const struct cpumask *mask = cpumask_of_node(node); |
1206 | 1206 | ||
@@ -1212,34 +1212,34 @@ static void __cpuinit cpuup_canceled(long cpu) | |||
1212 | /* cpu is dead; no one can alloc from it. */ | 1212 | /* cpu is dead; no one can alloc from it. */ |
1213 | nc = cachep->array[cpu]; | 1213 | nc = cachep->array[cpu]; |
1214 | cachep->array[cpu] = NULL; | 1214 | cachep->array[cpu] = NULL; |
1215 | l3 = cachep->node[node]; | 1215 | n = cachep->node[node]; |
1216 | 1216 | ||
1217 | if (!l3) | 1217 | if (!n) |
1218 | goto free_array_cache; | 1218 | goto free_array_cache; |
1219 | 1219 | ||
1220 | spin_lock_irq(&l3->list_lock); | 1220 | spin_lock_irq(&n->list_lock); |
1221 | 1221 | ||
1222 | /* Free limit for this kmem_list3 */ | 1222 | /* Free limit for this kmem_cache_node */ |
1223 | l3->free_limit -= cachep->batchcount; | 1223 | n->free_limit -= cachep->batchcount; |
1224 | if (nc) | 1224 | if (nc) |
1225 | free_block(cachep, nc->entry, nc->avail, node); | 1225 | free_block(cachep, nc->entry, nc->avail, node); |
1226 | 1226 | ||
1227 | if (!cpumask_empty(mask)) { | 1227 | if (!cpumask_empty(mask)) { |
1228 | spin_unlock_irq(&l3->list_lock); | 1228 | spin_unlock_irq(&n->list_lock); |
1229 | goto free_array_cache; | 1229 | goto free_array_cache; |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | shared = l3->shared; | 1232 | shared = n->shared; |
1233 | if (shared) { | 1233 | if (shared) { |
1234 | free_block(cachep, shared->entry, | 1234 | free_block(cachep, shared->entry, |
1235 | shared->avail, node); | 1235 | shared->avail, node); |
1236 | l3->shared = NULL; | 1236 | n->shared = NULL; |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | alien = l3->alien; | 1239 | alien = n->alien; |
1240 | l3->alien = NULL; | 1240 | n->alien = NULL; |
1241 | 1241 | ||
1242 | spin_unlock_irq(&l3->list_lock); | 1242 | spin_unlock_irq(&n->list_lock); |
1243 | 1243 | ||
1244 | kfree(shared); | 1244 | kfree(shared); |
1245 | if (alien) { | 1245 | if (alien) { |
@@ -1255,17 +1255,17 @@ free_array_cache: | |||
1255 | * shrink each nodelist to its limit. | 1255 | * shrink each nodelist to its limit. |
1256 | */ | 1256 | */ |
1257 | list_for_each_entry(cachep, &slab_caches, list) { | 1257 | list_for_each_entry(cachep, &slab_caches, list) { |
1258 | l3 = cachep->node[node]; | 1258 | n = cachep->node[node]; |
1259 | if (!l3) | 1259 | if (!n) |
1260 | continue; | 1260 | continue; |
1261 | drain_freelist(cachep, l3, l3->free_objects); | 1261 | drain_freelist(cachep, n, n->free_objects); |
1262 | } | 1262 | } |
1263 | } | 1263 | } |
1264 | 1264 | ||
1265 | static int __cpuinit cpuup_prepare(long cpu) | 1265 | static int __cpuinit cpuup_prepare(long cpu) |
1266 | { | 1266 | { |
1267 | struct kmem_cache *cachep; | 1267 | struct kmem_cache *cachep; |
1268 | struct kmem_cache_node *l3 = NULL; | 1268 | struct kmem_cache_node *n = NULL; |
1269 | int node = cpu_to_mem(cpu); | 1269 | int node = cpu_to_mem(cpu); |
1270 | int err; | 1270 | int err; |
1271 | 1271 | ||
@@ -1273,7 +1273,7 @@ static int __cpuinit cpuup_prepare(long cpu) | |||
1273 | * We need to do this right in the beginning since | 1273 | * We need to do this right in the beginning since |
1274 | * alloc_arraycache's are going to use this list. | 1274 | * alloc_arraycache's are going to use this list. |
1275 | * kmalloc_node allows us to add the slab to the right | 1275 | * kmalloc_node allows us to add the slab to the right |
1276 | * kmem_list3 and not this cpu's kmem_list3 | 1276 | * kmem_cache_node and not this cpu's kmem_cache_node |
1277 | */ | 1277 | */ |
1278 | err = init_cache_node_node(node); | 1278 | err = init_cache_node_node(node); |
1279 | if (err < 0) | 1279 | if (err < 0) |
@@ -1310,25 +1310,25 @@ static int __cpuinit cpuup_prepare(long cpu) | |||
1310 | } | 1310 | } |
1311 | } | 1311 | } |
1312 | cachep->array[cpu] = nc; | 1312 | cachep->array[cpu] = nc; |
1313 | l3 = cachep->node[node]; | 1313 | n = cachep->node[node]; |
1314 | BUG_ON(!l3); | 1314 | BUG_ON(!n); |
1315 | 1315 | ||
1316 | spin_lock_irq(&l3->list_lock); | 1316 | spin_lock_irq(&n->list_lock); |
1317 | if (!l3->shared) { | 1317 | if (!n->shared) { |
1318 | /* | 1318 | /* |
1319 | * We are serialised from CPU_DEAD or | 1319 | * We are serialised from CPU_DEAD or |
1320 | * CPU_UP_CANCELLED by the cpucontrol lock | 1320 | * CPU_UP_CANCELLED by the cpucontrol lock |
1321 | */ | 1321 | */ |
1322 | l3->shared = shared; | 1322 | n->shared = shared; |
1323 | shared = NULL; | 1323 | shared = NULL; |
1324 | } | 1324 | } |
1325 | #ifdef CONFIG_NUMA | 1325 | #ifdef CONFIG_NUMA |
1326 | if (!l3->alien) { | 1326 | if (!n->alien) { |
1327 | l3->alien = alien; | 1327 | n->alien = alien; |
1328 | alien = NULL; | 1328 | alien = NULL; |
1329 | } | 1329 | } |
1330 | #endif | 1330 | #endif |
1331 | spin_unlock_irq(&l3->list_lock); | 1331 | spin_unlock_irq(&n->list_lock); |
1332 | kfree(shared); | 1332 | kfree(shared); |
1333 | free_alien_cache(alien); | 1333 | free_alien_cache(alien); |
1334 | if (cachep->flags & SLAB_DEBUG_OBJECTS) | 1334 | if (cachep->flags & SLAB_DEBUG_OBJECTS) |
@@ -1383,9 +1383,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, | |||
1383 | case CPU_DEAD_FROZEN: | 1383 | case CPU_DEAD_FROZEN: |
1384 | /* | 1384 | /* |
1385 | * Even if all the cpus of a node are down, we don't free the | 1385 | * Even if all the cpus of a node are down, we don't free the |
1386 | * kmem_list3 of any cache. This to avoid a race between | 1386 | * kmem_cache_node of any cache. This to avoid a race between |
1387 | * cpu_down, and a kmalloc allocation from another cpu for | 1387 | * cpu_down, and a kmalloc allocation from another cpu for |
1388 | * memory from the node of the cpu going down. The list3 | 1388 | * memory from the node of the cpu going down. The node |
1389 | * structure is usually allocated from kmem_cache_create() and | 1389 | * structure is usually allocated from kmem_cache_create() and |
1390 | * gets destroyed at kmem_cache_destroy(). | 1390 | * gets destroyed at kmem_cache_destroy(). |
1391 | */ | 1391 | */ |
@@ -1419,16 +1419,16 @@ static int __meminit drain_cache_node_node(int node) | |||
1419 | int ret = 0; | 1419 | int ret = 0; |
1420 | 1420 | ||
1421 | list_for_each_entry(cachep, &slab_caches, list) { | 1421 | list_for_each_entry(cachep, &slab_caches, list) { |
1422 | struct kmem_cache_node *l3; | 1422 | struct kmem_cache_node *n; |
1423 | 1423 | ||
1424 | l3 = cachep->node[node]; | 1424 | n = cachep->node[node]; |
1425 | if (!l3) | 1425 | if (!n) |
1426 | continue; | 1426 | continue; |
1427 | 1427 | ||
1428 | drain_freelist(cachep, l3, l3->free_objects); | 1428 | drain_freelist(cachep, n, n->free_objects); |
1429 | 1429 | ||
1430 | if (!list_empty(&l3->slabs_full) || | 1430 | if (!list_empty(&n->slabs_full) || |
1431 | !list_empty(&l3->slabs_partial)) { | 1431 | !list_empty(&n->slabs_partial)) { |
1432 | ret = -EBUSY; | 1432 | ret = -EBUSY; |
1433 | break; | 1433 | break; |
1434 | } | 1434 | } |
@@ -1470,7 +1470,7 @@ out: | |||
1470 | #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ | 1470 | #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ |
1471 | 1471 | ||
1472 | /* | 1472 | /* |
1473 | * swap the static kmem_list3 with kmalloced memory | 1473 | * swap the static kmem_cache_node with kmalloced memory |
1474 | */ | 1474 | */ |
1475 | static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, | 1475 | static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, |
1476 | int nodeid) | 1476 | int nodeid) |
@@ -1491,15 +1491,15 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node * | |||
1491 | } | 1491 | } |
1492 | 1492 | ||
1493 | /* | 1493 | /* |
1494 | * For setting up all the kmem_list3s for cache whose buffer_size is same as | 1494 | * For setting up all the kmem_cache_node for cache whose buffer_size is same as |
1495 | * size of kmem_list3. | 1495 | * size of kmem_cache_node. |
1496 | */ | 1496 | */ |
1497 | static void __init set_up_list3s(struct kmem_cache *cachep, int index) | 1497 | static void __init set_up_node(struct kmem_cache *cachep, int index) |
1498 | { | 1498 | { |
1499 | int node; | 1499 | int node; |
1500 | 1500 | ||
1501 | for_each_online_node(node) { | 1501 | for_each_online_node(node) { |
1502 | cachep->node[node] = &initkmem_list3[index + node]; | 1502 | cachep->node[node] = &init_kmem_cache_node[index + node]; |
1503 | cachep->node[node]->next_reap = jiffies + | 1503 | cachep->node[node]->next_reap = jiffies + |
1504 | REAPTIMEOUT_LIST3 + | 1504 | REAPTIMEOUT_LIST3 + |
1505 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 1505 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; |
@@ -1530,9 +1530,9 @@ void __init kmem_cache_init(void) | |||
1530 | use_alien_caches = 0; | 1530 | use_alien_caches = 0; |
1531 | 1531 | ||
1532 | for (i = 0; i < NUM_INIT_LISTS; i++) | 1532 | for (i = 0; i < NUM_INIT_LISTS; i++) |
1533 | kmem_list3_init(&initkmem_list3[i]); | 1533 | kmem_cache_node_init(&init_kmem_cache_node[i]); |
1534 | 1534 | ||
1535 | set_up_list3s(kmem_cache, CACHE_CACHE); | 1535 | set_up_node(kmem_cache, CACHE_CACHE); |
1536 | 1536 | ||
1537 | /* | 1537 | /* |
1538 | * Fragmentation resistance on low memory - only use bigger | 1538 | * Fragmentation resistance on low memory - only use bigger |
@@ -1548,7 +1548,7 @@ void __init kmem_cache_init(void) | |||
1548 | * kmem_cache structures of all caches, except kmem_cache itself: | 1548 | * kmem_cache structures of all caches, except kmem_cache itself: |
1549 | * kmem_cache is statically allocated. | 1549 | * kmem_cache is statically allocated. |
1550 | * Initially an __init data area is used for the head array and the | 1550 | * Initially an __init data area is used for the head array and the |
1551 | * kmem_list3 structures, it's replaced with a kmalloc allocated | 1551 | * kmem_cache_node structures, it's replaced with a kmalloc allocated |
1552 | * array at the end of the bootstrap. | 1552 | * array at the end of the bootstrap. |
1553 | * 2) Create the first kmalloc cache. | 1553 | * 2) Create the first kmalloc cache. |
1554 | * The struct kmem_cache for the new cache is allocated normally. | 1554 | * The struct kmem_cache for the new cache is allocated normally. |
@@ -1557,7 +1557,7 @@ void __init kmem_cache_init(void) | |||
1557 | * head arrays. | 1557 | * head arrays. |
1558 | * 4) Replace the __init data head arrays for kmem_cache and the first | 1558 | * 4) Replace the __init data head arrays for kmem_cache and the first |
1559 | * kmalloc cache with kmalloc allocated arrays. | 1559 | * kmalloc cache with kmalloc allocated arrays. |
1560 | * 5) Replace the __init data for kmem_list3 for kmem_cache and | 1560 | * 5) Replace the __init data for kmem_cache_node for kmem_cache and |
1561 | * the other cache's with kmalloc allocated memory. | 1561 | * the other cache's with kmalloc allocated memory. |
1562 | * 6) Resize the head arrays of the kmalloc caches to their final sizes. | 1562 | * 6) Resize the head arrays of the kmalloc caches to their final sizes. |
1563 | */ | 1563 | */ |
@@ -1577,17 +1577,17 @@ void __init kmem_cache_init(void) | |||
1577 | 1577 | ||
1578 | /* | 1578 | /* |
1579 | * Initialize the caches that provide memory for the array cache and the | 1579 | * Initialize the caches that provide memory for the array cache and the |
1580 | * kmem_list3 structures first. Without this, further allocations will | 1580 | * kmem_cache_node structures first. Without this, further allocations will |
1581 | * bug. | 1581 | * bug. |
1582 | */ | 1582 | */ |
1583 | 1583 | ||
1584 | kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac", | 1584 | kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac", |
1585 | kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS); | 1585 | kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS); |
1586 | 1586 | ||
1587 | if (INDEX_AC != INDEX_L3) | 1587 | if (INDEX_AC != INDEX_NODE) |
1588 | kmalloc_caches[INDEX_L3] = | 1588 | kmalloc_caches[INDEX_NODE] = |
1589 | create_kmalloc_cache("kmalloc-l3", | 1589 | create_kmalloc_cache("kmalloc-node", |
1590 | kmalloc_size(INDEX_L3), ARCH_KMALLOC_FLAGS); | 1590 | kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS); |
1591 | 1591 | ||
1592 | slab_early_init = 0; | 1592 | slab_early_init = 0; |
1593 | 1593 | ||
@@ -1619,19 +1619,19 @@ void __init kmem_cache_init(void) | |||
1619 | 1619 | ||
1620 | kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr; | 1620 | kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr; |
1621 | } | 1621 | } |
1622 | /* 5) Replace the bootstrap kmem_list3's */ | 1622 | /* 5) Replace the bootstrap kmem_cache_node */ |
1623 | { | 1623 | { |
1624 | int nid; | 1624 | int nid; |
1625 | 1625 | ||
1626 | for_each_online_node(nid) { | 1626 | for_each_online_node(nid) { |
1627 | init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid); | 1627 | init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); |
1628 | 1628 | ||
1629 | init_list(kmalloc_caches[INDEX_AC], | 1629 | init_list(kmalloc_caches[INDEX_AC], |
1630 | &initkmem_list3[SIZE_AC + nid], nid); | 1630 | &init_kmem_cache_node[SIZE_AC + nid], nid); |
1631 | 1631 | ||
1632 | if (INDEX_AC != INDEX_L3) { | 1632 | if (INDEX_AC != INDEX_NODE) { |
1633 | init_list(kmalloc_caches[INDEX_L3], | 1633 | init_list(kmalloc_caches[INDEX_NODE], |
1634 | &initkmem_list3[SIZE_L3 + nid], nid); | 1634 | &init_kmem_cache_node[SIZE_NODE + nid], nid); |
1635 | } | 1635 | } |
1636 | } | 1636 | } |
1637 | } | 1637 | } |
@@ -1697,7 +1697,7 @@ __initcall(cpucache_init); | |||
1697 | static noinline void | 1697 | static noinline void |
1698 | slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | 1698 | slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) |
1699 | { | 1699 | { |
1700 | struct kmem_cache_node *l3; | 1700 | struct kmem_cache_node *n; |
1701 | struct slab *slabp; | 1701 | struct slab *slabp; |
1702 | unsigned long flags; | 1702 | unsigned long flags; |
1703 | int node; | 1703 | int node; |
@@ -1712,24 +1712,24 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | |||
1712 | unsigned long active_objs = 0, num_objs = 0, free_objects = 0; | 1712 | unsigned long active_objs = 0, num_objs = 0, free_objects = 0; |
1713 | unsigned long active_slabs = 0, num_slabs = 0; | 1713 | unsigned long active_slabs = 0, num_slabs = 0; |
1714 | 1714 | ||
1715 | l3 = cachep->node[node]; | 1715 | n = cachep->node[node]; |
1716 | if (!l3) | 1716 | if (!n) |
1717 | continue; | 1717 | continue; |
1718 | 1718 | ||
1719 | spin_lock_irqsave(&l3->list_lock, flags); | 1719 | spin_lock_irqsave(&n->list_lock, flags); |
1720 | list_for_each_entry(slabp, &l3->slabs_full, list) { | 1720 | list_for_each_entry(slabp, &n->slabs_full, list) { |
1721 | active_objs += cachep->num; | 1721 | active_objs += cachep->num; |
1722 | active_slabs++; | 1722 | active_slabs++; |
1723 | } | 1723 | } |
1724 | list_for_each_entry(slabp, &l3->slabs_partial, list) { | 1724 | list_for_each_entry(slabp, &n->slabs_partial, list) { |
1725 | active_objs += slabp->inuse; | 1725 | active_objs += slabp->inuse; |
1726 | active_slabs++; | 1726 | active_slabs++; |
1727 | } | 1727 | } |
1728 | list_for_each_entry(slabp, &l3->slabs_free, list) | 1728 | list_for_each_entry(slabp, &n->slabs_free, list) |
1729 | num_slabs++; | 1729 | num_slabs++; |
1730 | 1730 | ||
1731 | free_objects += l3->free_objects; | 1731 | free_objects += n->free_objects; |
1732 | spin_unlock_irqrestore(&l3->list_lock, flags); | 1732 | spin_unlock_irqrestore(&n->list_lock, flags); |
1733 | 1733 | ||
1734 | num_slabs += active_slabs; | 1734 | num_slabs += active_slabs; |
1735 | num_objs = num_slabs * cachep->num; | 1735 | num_objs = num_slabs * cachep->num; |
@@ -2154,7 +2154,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2154 | if (slab_state == DOWN) { | 2154 | if (slab_state == DOWN) { |
2155 | /* | 2155 | /* |
2156 | * Note: Creation of first cache (kmem_cache). | 2156 | * Note: Creation of first cache (kmem_cache). |
2157 | * The setup_list3s is taken care | 2157 | * The setup_node is taken care |
2158 | * of by the caller of __kmem_cache_create | 2158 | * of by the caller of __kmem_cache_create |
2159 | */ | 2159 | */ |
2160 | cachep->array[smp_processor_id()] = &initarray_generic.cache; | 2160 | cachep->array[smp_processor_id()] = &initarray_generic.cache; |
@@ -2168,13 +2168,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2168 | cachep->array[smp_processor_id()] = &initarray_generic.cache; | 2168 | cachep->array[smp_processor_id()] = &initarray_generic.cache; |
2169 | 2169 | ||
2170 | /* | 2170 | /* |
2171 | * If the cache that's used by kmalloc(sizeof(kmem_list3)) is | 2171 | * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is |
2172 | * the second cache, then we need to set up all its list3s, | 2172 | * the second cache, then we need to set up all its node/, |
2173 | * otherwise the creation of further caches will BUG(). | 2173 | * otherwise the creation of further caches will BUG(). |
2174 | */ | 2174 | */ |
2175 | set_up_list3s(cachep, SIZE_AC); | 2175 | set_up_node(cachep, SIZE_AC); |
2176 | if (INDEX_AC == INDEX_L3) | 2176 | if (INDEX_AC == INDEX_NODE) |
2177 | slab_state = PARTIAL_L3; | 2177 | slab_state = PARTIAL_NODE; |
2178 | else | 2178 | else |
2179 | slab_state = PARTIAL_ARRAYCACHE; | 2179 | slab_state = PARTIAL_ARRAYCACHE; |
2180 | } else { | 2180 | } else { |
@@ -2183,8 +2183,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2183 | kmalloc(sizeof(struct arraycache_init), gfp); | 2183 | kmalloc(sizeof(struct arraycache_init), gfp); |
2184 | 2184 | ||
2185 | if (slab_state == PARTIAL_ARRAYCACHE) { | 2185 | if (slab_state == PARTIAL_ARRAYCACHE) { |
2186 | set_up_list3s(cachep, SIZE_L3); | 2186 | set_up_node(cachep, SIZE_NODE); |
2187 | slab_state = PARTIAL_L3; | 2187 | slab_state = PARTIAL_NODE; |
2188 | } else { | 2188 | } else { |
2189 | int node; | 2189 | int node; |
2190 | for_each_online_node(node) { | 2190 | for_each_online_node(node) { |
@@ -2192,7 +2192,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
2192 | kmalloc_node(sizeof(struct kmem_cache_node), | 2192 | kmalloc_node(sizeof(struct kmem_cache_node), |
2193 | gfp, node); | 2193 | gfp, node); |
2194 | BUG_ON(!cachep->node[node]); | 2194 | BUG_ON(!cachep->node[node]); |
2195 | kmem_list3_init(cachep->node[node]); | 2195 | kmem_cache_node_init(cachep->node[node]); |
2196 | } | 2196 | } |
2197 | } | 2197 | } |
2198 | } | 2198 | } |
@@ -2322,7 +2322,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
2322 | size += BYTES_PER_WORD; | 2322 | size += BYTES_PER_WORD; |
2323 | } | 2323 | } |
2324 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 2324 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
2325 | if (size >= kmalloc_size(INDEX_L3 + 1) | 2325 | if (size >= kmalloc_size(INDEX_NODE + 1) |
2326 | && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { | 2326 | && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { |
2327 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); | 2327 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); |
2328 | size = PAGE_SIZE; | 2328 | size = PAGE_SIZE; |
@@ -2457,7 +2457,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | |||
2457 | #define check_spinlock_acquired_node(x, y) do { } while(0) | 2457 | #define check_spinlock_acquired_node(x, y) do { } while(0) |
2458 | #endif | 2458 | #endif |
2459 | 2459 | ||
2460 | static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, | 2460 | static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, |
2461 | struct array_cache *ac, | 2461 | struct array_cache *ac, |
2462 | int force, int node); | 2462 | int force, int node); |
2463 | 2463 | ||
@@ -2477,21 +2477,21 @@ static void do_drain(void *arg) | |||
2477 | 2477 | ||
2478 | static void drain_cpu_caches(struct kmem_cache *cachep) | 2478 | static void drain_cpu_caches(struct kmem_cache *cachep) |
2479 | { | 2479 | { |
2480 | struct kmem_cache_node *l3; | 2480 | struct kmem_cache_node *n; |
2481 | int node; | 2481 | int node; |
2482 | 2482 | ||
2483 | on_each_cpu(do_drain, cachep, 1); | 2483 | on_each_cpu(do_drain, cachep, 1); |
2484 | check_irq_on(); | 2484 | check_irq_on(); |
2485 | for_each_online_node(node) { | 2485 | for_each_online_node(node) { |
2486 | l3 = cachep->node[node]; | 2486 | n = cachep->node[node]; |
2487 | if (l3 && l3->alien) | 2487 | if (n && n->alien) |
2488 | drain_alien_cache(cachep, l3->alien); | 2488 | drain_alien_cache(cachep, n->alien); |
2489 | } | 2489 | } |
2490 | 2490 | ||
2491 | for_each_online_node(node) { | 2491 | for_each_online_node(node) { |
2492 | l3 = cachep->node[node]; | 2492 | n = cachep->node[node]; |
2493 | if (l3) | 2493 | if (n) |
2494 | drain_array(cachep, l3, l3->shared, 1, node); | 2494 | drain_array(cachep, n, n->shared, 1, node); |
2495 | } | 2495 | } |
2496 | } | 2496 | } |
2497 | 2497 | ||
@@ -2502,19 +2502,19 @@ static void drain_cpu_caches(struct kmem_cache *cachep) | |||
2502 | * Returns the actual number of slabs released. | 2502 | * Returns the actual number of slabs released. |
2503 | */ | 2503 | */ |
2504 | static int drain_freelist(struct kmem_cache *cache, | 2504 | static int drain_freelist(struct kmem_cache *cache, |
2505 | struct kmem_cache_node *l3, int tofree) | 2505 | struct kmem_cache_node *n, int tofree) |
2506 | { | 2506 | { |
2507 | struct list_head *p; | 2507 | struct list_head *p; |
2508 | int nr_freed; | 2508 | int nr_freed; |
2509 | struct slab *slabp; | 2509 | struct slab *slabp; |
2510 | 2510 | ||
2511 | nr_freed = 0; | 2511 | nr_freed = 0; |
2512 | while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { | 2512 | while (nr_freed < tofree && !list_empty(&n->slabs_free)) { |
2513 | 2513 | ||
2514 | spin_lock_irq(&l3->list_lock); | 2514 | spin_lock_irq(&n->list_lock); |
2515 | p = l3->slabs_free.prev; | 2515 | p = n->slabs_free.prev; |
2516 | if (p == &l3->slabs_free) { | 2516 | if (p == &n->slabs_free) { |
2517 | spin_unlock_irq(&l3->list_lock); | 2517 | spin_unlock_irq(&n->list_lock); |
2518 | goto out; | 2518 | goto out; |
2519 | } | 2519 | } |
2520 | 2520 | ||
@@ -2527,8 +2527,8 @@ static int drain_freelist(struct kmem_cache *cache, | |||
2527 | * Safe to drop the lock. The slab is no longer linked | 2527 | * Safe to drop the lock. The slab is no longer linked |
2528 | * to the cache. | 2528 | * to the cache. |
2529 | */ | 2529 | */ |
2530 | l3->free_objects -= cache->num; | 2530 | n->free_objects -= cache->num; |
2531 | spin_unlock_irq(&l3->list_lock); | 2531 | spin_unlock_irq(&n->list_lock); |
2532 | slab_destroy(cache, slabp); | 2532 | slab_destroy(cache, slabp); |
2533 | nr_freed++; | 2533 | nr_freed++; |
2534 | } | 2534 | } |
@@ -2540,20 +2540,20 @@ out: | |||
2540 | static int __cache_shrink(struct kmem_cache *cachep) | 2540 | static int __cache_shrink(struct kmem_cache *cachep) |
2541 | { | 2541 | { |
2542 | int ret = 0, i = 0; | 2542 | int ret = 0, i = 0; |
2543 | struct kmem_cache_node *l3; | 2543 | struct kmem_cache_node *n; |
2544 | 2544 | ||
2545 | drain_cpu_caches(cachep); | 2545 | drain_cpu_caches(cachep); |
2546 | 2546 | ||
2547 | check_irq_on(); | 2547 | check_irq_on(); |
2548 | for_each_online_node(i) { | 2548 | for_each_online_node(i) { |
2549 | l3 = cachep->node[i]; | 2549 | n = cachep->node[i]; |
2550 | if (!l3) | 2550 | if (!n) |
2551 | continue; | 2551 | continue; |
2552 | 2552 | ||
2553 | drain_freelist(cachep, l3, l3->free_objects); | 2553 | drain_freelist(cachep, n, n->free_objects); |
2554 | 2554 | ||
2555 | ret += !list_empty(&l3->slabs_full) || | 2555 | ret += !list_empty(&n->slabs_full) || |
2556 | !list_empty(&l3->slabs_partial); | 2556 | !list_empty(&n->slabs_partial); |
2557 | } | 2557 | } |
2558 | return (ret ? 1 : 0); | 2558 | return (ret ? 1 : 0); |
2559 | } | 2559 | } |
@@ -2582,7 +2582,7 @@ EXPORT_SYMBOL(kmem_cache_shrink); | |||
2582 | int __kmem_cache_shutdown(struct kmem_cache *cachep) | 2582 | int __kmem_cache_shutdown(struct kmem_cache *cachep) |
2583 | { | 2583 | { |
2584 | int i; | 2584 | int i; |
2585 | struct kmem_cache_node *l3; | 2585 | struct kmem_cache_node *n; |
2586 | int rc = __cache_shrink(cachep); | 2586 | int rc = __cache_shrink(cachep); |
2587 | 2587 | ||
2588 | if (rc) | 2588 | if (rc) |
@@ -2591,13 +2591,13 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) | |||
2591 | for_each_online_cpu(i) | 2591 | for_each_online_cpu(i) |
2592 | kfree(cachep->array[i]); | 2592 | kfree(cachep->array[i]); |
2593 | 2593 | ||
2594 | /* NUMA: free the list3 structures */ | 2594 | /* NUMA: free the node structures */ |
2595 | for_each_online_node(i) { | 2595 | for_each_online_node(i) { |
2596 | l3 = cachep->node[i]; | 2596 | n = cachep->node[i]; |
2597 | if (l3) { | 2597 | if (n) { |
2598 | kfree(l3->shared); | 2598 | kfree(n->shared); |
2599 | free_alien_cache(l3->alien); | 2599 | free_alien_cache(n->alien); |
2600 | kfree(l3); | 2600 | kfree(n); |
2601 | } | 2601 | } |
2602 | } | 2602 | } |
2603 | return 0; | 2603 | return 0; |
@@ -2779,7 +2779,7 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2779 | struct slab *slabp; | 2779 | struct slab *slabp; |
2780 | size_t offset; | 2780 | size_t offset; |
2781 | gfp_t local_flags; | 2781 | gfp_t local_flags; |
2782 | struct kmem_cache_node *l3; | 2782 | struct kmem_cache_node *n; |
2783 | 2783 | ||
2784 | /* | 2784 | /* |
2785 | * Be lazy and only check for valid flags here, keeping it out of the | 2785 | * Be lazy and only check for valid flags here, keeping it out of the |
@@ -2788,17 +2788,17 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2788 | BUG_ON(flags & GFP_SLAB_BUG_MASK); | 2788 | BUG_ON(flags & GFP_SLAB_BUG_MASK); |
2789 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); | 2789 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); |
2790 | 2790 | ||
2791 | /* Take the l3 list lock to change the colour_next on this node */ | 2791 | /* Take the node list lock to change the colour_next on this node */ |
2792 | check_irq_off(); | 2792 | check_irq_off(); |
2793 | l3 = cachep->node[nodeid]; | 2793 | n = cachep->node[nodeid]; |
2794 | spin_lock(&l3->list_lock); | 2794 | spin_lock(&n->list_lock); |
2795 | 2795 | ||
2796 | /* Get colour for the slab, and cal the next value. */ | 2796 | /* Get colour for the slab, and cal the next value. */ |
2797 | offset = l3->colour_next; | 2797 | offset = n->colour_next; |
2798 | l3->colour_next++; | 2798 | n->colour_next++; |
2799 | if (l3->colour_next >= cachep->colour) | 2799 | if (n->colour_next >= cachep->colour) |
2800 | l3->colour_next = 0; | 2800 | n->colour_next = 0; |
2801 | spin_unlock(&l3->list_lock); | 2801 | spin_unlock(&n->list_lock); |
2802 | 2802 | ||
2803 | offset *= cachep->colour_off; | 2803 | offset *= cachep->colour_off; |
2804 | 2804 | ||
@@ -2835,13 +2835,13 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2835 | if (local_flags & __GFP_WAIT) | 2835 | if (local_flags & __GFP_WAIT) |
2836 | local_irq_disable(); | 2836 | local_irq_disable(); |
2837 | check_irq_off(); | 2837 | check_irq_off(); |
2838 | spin_lock(&l3->list_lock); | 2838 | spin_lock(&n->list_lock); |
2839 | 2839 | ||
2840 | /* Make slab active. */ | 2840 | /* Make slab active. */ |
2841 | list_add_tail(&slabp->list, &(l3->slabs_free)); | 2841 | list_add_tail(&slabp->list, &(n->slabs_free)); |
2842 | STATS_INC_GROWN(cachep); | 2842 | STATS_INC_GROWN(cachep); |
2843 | l3->free_objects += cachep->num; | 2843 | n->free_objects += cachep->num; |
2844 | spin_unlock(&l3->list_lock); | 2844 | spin_unlock(&n->list_lock); |
2845 | return 1; | 2845 | return 1; |
2846 | opps1: | 2846 | opps1: |
2847 | kmem_freepages(cachep, objp); | 2847 | kmem_freepages(cachep, objp); |
@@ -2969,7 +2969,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, | |||
2969 | bool force_refill) | 2969 | bool force_refill) |
2970 | { | 2970 | { |
2971 | int batchcount; | 2971 | int batchcount; |
2972 | struct kmem_cache_node *l3; | 2972 | struct kmem_cache_node *n; |
2973 | struct array_cache *ac; | 2973 | struct array_cache *ac; |
2974 | int node; | 2974 | int node; |
2975 | 2975 | ||
@@ -2988,14 +2988,14 @@ retry: | |||
2988 | */ | 2988 | */ |
2989 | batchcount = BATCHREFILL_LIMIT; | 2989 | batchcount = BATCHREFILL_LIMIT; |
2990 | } | 2990 | } |
2991 | l3 = cachep->node[node]; | 2991 | n = cachep->node[node]; |
2992 | 2992 | ||
2993 | BUG_ON(ac->avail > 0 || !l3); | 2993 | BUG_ON(ac->avail > 0 || !n); |
2994 | spin_lock(&l3->list_lock); | 2994 | spin_lock(&n->list_lock); |
2995 | 2995 | ||
2996 | /* See if we can refill from the shared array */ | 2996 | /* See if we can refill from the shared array */ |
2997 | if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) { | 2997 | if (n->shared && transfer_objects(ac, n->shared, batchcount)) { |
2998 | l3->shared->touched = 1; | 2998 | n->shared->touched = 1; |
2999 | goto alloc_done; | 2999 | goto alloc_done; |
3000 | } | 3000 | } |
3001 | 3001 | ||
@@ -3003,11 +3003,11 @@ retry: | |||
3003 | struct list_head *entry; | 3003 | struct list_head *entry; |
3004 | struct slab *slabp; | 3004 | struct slab *slabp; |
3005 | /* Get slab alloc is to come from. */ | 3005 | /* Get slab alloc is to come from. */ |
3006 | entry = l3->slabs_partial.next; | 3006 | entry = n->slabs_partial.next; |
3007 | if (entry == &l3->slabs_partial) { | 3007 | if (entry == &n->slabs_partial) { |
3008 | l3->free_touched = 1; | 3008 | n->free_touched = 1; |
3009 | entry = l3->slabs_free.next; | 3009 | entry = n->slabs_free.next; |
3010 | if (entry == &l3->slabs_free) | 3010 | if (entry == &n->slabs_free) |
3011 | goto must_grow; | 3011 | goto must_grow; |
3012 | } | 3012 | } |
3013 | 3013 | ||
@@ -3035,15 +3035,15 @@ retry: | |||
3035 | /* move slabp to correct slabp list: */ | 3035 | /* move slabp to correct slabp list: */ |
3036 | list_del(&slabp->list); | 3036 | list_del(&slabp->list); |
3037 | if (slabp->free == BUFCTL_END) | 3037 | if (slabp->free == BUFCTL_END) |
3038 | list_add(&slabp->list, &l3->slabs_full); | 3038 | list_add(&slabp->list, &n->slabs_full); |
3039 | else | 3039 | else |
3040 | list_add(&slabp->list, &l3->slabs_partial); | 3040 | list_add(&slabp->list, &n->slabs_partial); |
3041 | } | 3041 | } |
3042 | 3042 | ||
3043 | must_grow: | 3043 | must_grow: |
3044 | l3->free_objects -= ac->avail; | 3044 | n->free_objects -= ac->avail; |
3045 | alloc_done: | 3045 | alloc_done: |
3046 | spin_unlock(&l3->list_lock); | 3046 | spin_unlock(&n->list_lock); |
3047 | 3047 | ||
3048 | if (unlikely(!ac->avail)) { | 3048 | if (unlikely(!ac->avail)) { |
3049 | int x; | 3049 | int x; |
@@ -3301,21 +3301,21 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, | |||
3301 | { | 3301 | { |
3302 | struct list_head *entry; | 3302 | struct list_head *entry; |
3303 | struct slab *slabp; | 3303 | struct slab *slabp; |
3304 | struct kmem_cache_node *l3; | 3304 | struct kmem_cache_node *n; |
3305 | void *obj; | 3305 | void *obj; |
3306 | int x; | 3306 | int x; |
3307 | 3307 | ||
3308 | l3 = cachep->node[nodeid]; | 3308 | n = cachep->node[nodeid]; |
3309 | BUG_ON(!l3); | 3309 | BUG_ON(!n); |
3310 | 3310 | ||
3311 | retry: | 3311 | retry: |
3312 | check_irq_off(); | 3312 | check_irq_off(); |
3313 | spin_lock(&l3->list_lock); | 3313 | spin_lock(&n->list_lock); |
3314 | entry = l3->slabs_partial.next; | 3314 | entry = n->slabs_partial.next; |
3315 | if (entry == &l3->slabs_partial) { | 3315 | if (entry == &n->slabs_partial) { |
3316 | l3->free_touched = 1; | 3316 | n->free_touched = 1; |
3317 | entry = l3->slabs_free.next; | 3317 | entry = n->slabs_free.next; |
3318 | if (entry == &l3->slabs_free) | 3318 | if (entry == &n->slabs_free) |
3319 | goto must_grow; | 3319 | goto must_grow; |
3320 | } | 3320 | } |
3321 | 3321 | ||
@@ -3331,20 +3331,20 @@ retry: | |||
3331 | 3331 | ||
3332 | obj = slab_get_obj(cachep, slabp, nodeid); | 3332 | obj = slab_get_obj(cachep, slabp, nodeid); |
3333 | check_slabp(cachep, slabp); | 3333 | check_slabp(cachep, slabp); |
3334 | l3->free_objects--; | 3334 | n->free_objects--; |
3335 | /* move slabp to correct slabp list: */ | 3335 | /* move slabp to correct slabp list: */ |
3336 | list_del(&slabp->list); | 3336 | list_del(&slabp->list); |
3337 | 3337 | ||
3338 | if (slabp->free == BUFCTL_END) | 3338 | if (slabp->free == BUFCTL_END) |
3339 | list_add(&slabp->list, &l3->slabs_full); | 3339 | list_add(&slabp->list, &n->slabs_full); |
3340 | else | 3340 | else |
3341 | list_add(&slabp->list, &l3->slabs_partial); | 3341 | list_add(&slabp->list, &n->slabs_partial); |
3342 | 3342 | ||
3343 | spin_unlock(&l3->list_lock); | 3343 | spin_unlock(&n->list_lock); |
3344 | goto done; | 3344 | goto done; |
3345 | 3345 | ||
3346 | must_grow: | 3346 | must_grow: |
3347 | spin_unlock(&l3->list_lock); | 3347 | spin_unlock(&n->list_lock); |
3348 | x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); | 3348 | x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); |
3349 | if (x) | 3349 | if (x) |
3350 | goto retry; | 3350 | goto retry; |
@@ -3496,7 +3496,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3496 | int node) | 3496 | int node) |
3497 | { | 3497 | { |
3498 | int i; | 3498 | int i; |
3499 | struct kmem_cache_node *l3; | 3499 | struct kmem_cache_node *n; |
3500 | 3500 | ||
3501 | for (i = 0; i < nr_objects; i++) { | 3501 | for (i = 0; i < nr_objects; i++) { |
3502 | void *objp; | 3502 | void *objp; |
@@ -3506,19 +3506,19 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3506 | objp = objpp[i]; | 3506 | objp = objpp[i]; |
3507 | 3507 | ||
3508 | slabp = virt_to_slab(objp); | 3508 | slabp = virt_to_slab(objp); |
3509 | l3 = cachep->node[node]; | 3509 | n = cachep->node[node]; |
3510 | list_del(&slabp->list); | 3510 | list_del(&slabp->list); |
3511 | check_spinlock_acquired_node(cachep, node); | 3511 | check_spinlock_acquired_node(cachep, node); |
3512 | check_slabp(cachep, slabp); | 3512 | check_slabp(cachep, slabp); |
3513 | slab_put_obj(cachep, slabp, objp, node); | 3513 | slab_put_obj(cachep, slabp, objp, node); |
3514 | STATS_DEC_ACTIVE(cachep); | 3514 | STATS_DEC_ACTIVE(cachep); |
3515 | l3->free_objects++; | 3515 | n->free_objects++; |
3516 | check_slabp(cachep, slabp); | 3516 | check_slabp(cachep, slabp); |
3517 | 3517 | ||
3518 | /* fixup slab chains */ | 3518 | /* fixup slab chains */ |
3519 | if (slabp->inuse == 0) { | 3519 | if (slabp->inuse == 0) { |
3520 | if (l3->free_objects > l3->free_limit) { | 3520 | if (n->free_objects > n->free_limit) { |
3521 | l3->free_objects -= cachep->num; | 3521 | n->free_objects -= cachep->num; |
3522 | /* No need to drop any previously held | 3522 | /* No need to drop any previously held |
3523 | * lock here, even if we have a off-slab slab | 3523 | * lock here, even if we have a off-slab slab |
3524 | * descriptor it is guaranteed to come from | 3524 | * descriptor it is guaranteed to come from |
@@ -3527,14 +3527,14 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3527 | */ | 3527 | */ |
3528 | slab_destroy(cachep, slabp); | 3528 | slab_destroy(cachep, slabp); |
3529 | } else { | 3529 | } else { |
3530 | list_add(&slabp->list, &l3->slabs_free); | 3530 | list_add(&slabp->list, &n->slabs_free); |
3531 | } | 3531 | } |
3532 | } else { | 3532 | } else { |
3533 | /* Unconditionally move a slab to the end of the | 3533 | /* Unconditionally move a slab to the end of the |
3534 | * partial list on free - maximum time for the | 3534 | * partial list on free - maximum time for the |
3535 | * other objects to be freed, too. | 3535 | * other objects to be freed, too. |
3536 | */ | 3536 | */ |
3537 | list_add_tail(&slabp->list, &l3->slabs_partial); | 3537 | list_add_tail(&slabp->list, &n->slabs_partial); |
3538 | } | 3538 | } |
3539 | } | 3539 | } |
3540 | } | 3540 | } |
@@ -3542,7 +3542,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3542 | static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | 3542 | static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) |
3543 | { | 3543 | { |
3544 | int batchcount; | 3544 | int batchcount; |
3545 | struct kmem_cache_node *l3; | 3545 | struct kmem_cache_node *n; |
3546 | int node = numa_mem_id(); | 3546 | int node = numa_mem_id(); |
3547 | 3547 | ||
3548 | batchcount = ac->batchcount; | 3548 | batchcount = ac->batchcount; |
@@ -3550,10 +3550,10 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | |||
3550 | BUG_ON(!batchcount || batchcount > ac->avail); | 3550 | BUG_ON(!batchcount || batchcount > ac->avail); |
3551 | #endif | 3551 | #endif |
3552 | check_irq_off(); | 3552 | check_irq_off(); |
3553 | l3 = cachep->node[node]; | 3553 | n = cachep->node[node]; |
3554 | spin_lock(&l3->list_lock); | 3554 | spin_lock(&n->list_lock); |
3555 | if (l3->shared) { | 3555 | if (n->shared) { |
3556 | struct array_cache *shared_array = l3->shared; | 3556 | struct array_cache *shared_array = n->shared; |
3557 | int max = shared_array->limit - shared_array->avail; | 3557 | int max = shared_array->limit - shared_array->avail; |
3558 | if (max) { | 3558 | if (max) { |
3559 | if (batchcount > max) | 3559 | if (batchcount > max) |
@@ -3572,8 +3572,8 @@ free_done: | |||
3572 | int i = 0; | 3572 | int i = 0; |
3573 | struct list_head *p; | 3573 | struct list_head *p; |
3574 | 3574 | ||
3575 | p = l3->slabs_free.next; | 3575 | p = n->slabs_free.next; |
3576 | while (p != &(l3->slabs_free)) { | 3576 | while (p != &(n->slabs_free)) { |
3577 | struct slab *slabp; | 3577 | struct slab *slabp; |
3578 | 3578 | ||
3579 | slabp = list_entry(p, struct slab, list); | 3579 | slabp = list_entry(p, struct slab, list); |
@@ -3585,7 +3585,7 @@ free_done: | |||
3585 | STATS_SET_FREEABLE(cachep, i); | 3585 | STATS_SET_FREEABLE(cachep, i); |
3586 | } | 3586 | } |
3587 | #endif | 3587 | #endif |
3588 | spin_unlock(&l3->list_lock); | 3588 | spin_unlock(&n->list_lock); |
3589 | ac->avail -= batchcount; | 3589 | ac->avail -= batchcount; |
3590 | memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); | 3590 | memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); |
3591 | } | 3591 | } |
@@ -3829,12 +3829,12 @@ void kfree(const void *objp) | |||
3829 | EXPORT_SYMBOL(kfree); | 3829 | EXPORT_SYMBOL(kfree); |
3830 | 3830 | ||
3831 | /* | 3831 | /* |
3832 | * This initializes kmem_list3 or resizes various caches for all nodes. | 3832 | * This initializes kmem_cache_node or resizes various caches for all nodes. |
3833 | */ | 3833 | */ |
3834 | static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) | 3834 | static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) |
3835 | { | 3835 | { |
3836 | int node; | 3836 | int node; |
3837 | struct kmem_cache_node *l3; | 3837 | struct kmem_cache_node *n; |
3838 | struct array_cache *new_shared; | 3838 | struct array_cache *new_shared; |
3839 | struct array_cache **new_alien = NULL; | 3839 | struct array_cache **new_alien = NULL; |
3840 | 3840 | ||
@@ -3857,43 +3857,43 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) | |||
3857 | } | 3857 | } |
3858 | } | 3858 | } |
3859 | 3859 | ||
3860 | l3 = cachep->node[node]; | 3860 | n = cachep->node[node]; |
3861 | if (l3) { | 3861 | if (n) { |
3862 | struct array_cache *shared = l3->shared; | 3862 | struct array_cache *shared = n->shared; |
3863 | 3863 | ||
3864 | spin_lock_irq(&l3->list_lock); | 3864 | spin_lock_irq(&n->list_lock); |
3865 | 3865 | ||
3866 | if (shared) | 3866 | if (shared) |
3867 | free_block(cachep, shared->entry, | 3867 | free_block(cachep, shared->entry, |
3868 | shared->avail, node); | 3868 | shared->avail, node); |
3869 | 3869 | ||
3870 | l3->shared = new_shared; | 3870 | n->shared = new_shared; |
3871 | if (!l3->alien) { | 3871 | if (!n->alien) { |
3872 | l3->alien = new_alien; | 3872 | n->alien = new_alien; |
3873 | new_alien = NULL; | 3873 | new_alien = NULL; |
3874 | } | 3874 | } |
3875 | l3->free_limit = (1 + nr_cpus_node(node)) * | 3875 | n->free_limit = (1 + nr_cpus_node(node)) * |
3876 | cachep->batchcount + cachep->num; | 3876 | cachep->batchcount + cachep->num; |
3877 | spin_unlock_irq(&l3->list_lock); | 3877 | spin_unlock_irq(&n->list_lock); |
3878 | kfree(shared); | 3878 | kfree(shared); |
3879 | free_alien_cache(new_alien); | 3879 | free_alien_cache(new_alien); |
3880 | continue; | 3880 | continue; |
3881 | } | 3881 | } |
3882 | l3 = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); | 3882 | n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); |
3883 | if (!l3) { | 3883 | if (!n) { |
3884 | free_alien_cache(new_alien); | 3884 | free_alien_cache(new_alien); |
3885 | kfree(new_shared); | 3885 | kfree(new_shared); |
3886 | goto fail; | 3886 | goto fail; |
3887 | } | 3887 | } |
3888 | 3888 | ||
3889 | kmem_list3_init(l3); | 3889 | kmem_cache_node_init(n); |
3890 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 3890 | n->next_reap = jiffies + REAPTIMEOUT_LIST3 + |
3891 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 3891 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; |
3892 | l3->shared = new_shared; | 3892 | n->shared = new_shared; |
3893 | l3->alien = new_alien; | 3893 | n->alien = new_alien; |
3894 | l3->free_limit = (1 + nr_cpus_node(node)) * | 3894 | n->free_limit = (1 + nr_cpus_node(node)) * |
3895 | cachep->batchcount + cachep->num; | 3895 | cachep->batchcount + cachep->num; |
3896 | cachep->node[node] = l3; | 3896 | cachep->node[node] = n; |
3897 | } | 3897 | } |
3898 | return 0; | 3898 | return 0; |
3899 | 3899 | ||
@@ -3903,11 +3903,11 @@ fail: | |||
3903 | node--; | 3903 | node--; |
3904 | while (node >= 0) { | 3904 | while (node >= 0) { |
3905 | if (cachep->node[node]) { | 3905 | if (cachep->node[node]) { |
3906 | l3 = cachep->node[node]; | 3906 | n = cachep->node[node]; |
3907 | 3907 | ||
3908 | kfree(l3->shared); | 3908 | kfree(n->shared); |
3909 | free_alien_cache(l3->alien); | 3909 | free_alien_cache(n->alien); |
3910 | kfree(l3); | 3910 | kfree(n); |
3911 | cachep->node[node] = NULL; | 3911 | cachep->node[node] = NULL; |
3912 | } | 3912 | } |
3913 | node--; | 3913 | node--; |
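Taken together, the alloc_kmemlist() hunks above implement a resize-or-create pass over every node: if a kmem_cache_node already exists, its shared array is swapped under the list_lock and the displaced objects are returned via free_block(); otherwise a fresh structure is allocated with kmalloc_node() and initialised; the fail: path then walks back over the nodes and releases whatever was already set up. Below is a compact userspace sketch of that shape, with the locking and alien caches omitted and every identifier invented for illustration.

```c
#include <stdlib.h>

#define MAX_NODES 4

/* Simplified per-node bookkeeping, loosely modelled on struct kmem_cache_node. */
struct node_state {
	void **shared;           /* shared object cache for this node */
	unsigned int free_limit; /* cap on cached free objects */
};

struct cache_model {
	struct node_state *node[MAX_NODES];
	unsigned int batchcount;
	unsigned int num;
};

/* For each node: swap in a new shared array if the node already exists,
 * otherwise allocate and initialise a fresh node_state.  On allocation
 * failure, undo what was set up, mirroring the fail: path in the hunk above. */
static int resize_all_nodes(struct cache_model *c, unsigned int cpus_per_node)
{
	for (int node = 0; node < MAX_NODES; node++) {
		void **new_shared = calloc(c->batchcount, sizeof(void *));
		if (!new_shared)
			goto fail;

		struct node_state *n = c->node[node];
		if (n) {
			/* Existing node: replace the shared array in place. */
			free(n->shared);
			n->shared = new_shared;
			n->free_limit = (1 + cpus_per_node) * c->batchcount + c->num;
			continue;
		}

		/* No per-node state yet: create and initialise it. */
		n = calloc(1, sizeof(*n));
		if (!n) {
			free(new_shared);
			goto fail;
		}
		n->shared = new_shared;
		n->free_limit = (1 + cpus_per_node) * c->batchcount + c->num;
		c->node[node] = n;
	}
	return 0;

fail:
	/* Unwind: release the per-node state that was already set up. */
	for (int node = 0; node < MAX_NODES; node++) {
		if (c->node[node]) {
			free(c->node[node]->shared);
			free(c->node[node]);
			c->node[node] = NULL;
		}
	}
	return -1;
}

int main(void)
{
	struct cache_model c = { .batchcount = 16, .num = 32 };

	return resize_all_nodes(&c, 2) ? 1 : 0;
}
```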
@@ -4071,11 +4071,11 @@ skip_setup: | |||
4071 | } | 4071 | } |
4072 | 4072 | ||
4073 | /* | 4073 | /* |
4074 | * Drain an array if it contains any elements taking the l3 lock only if | 4074 | * Drain an array if it contains any elements taking the node lock only if |
4075 | * necessary. Note that the l3 listlock also protects the array_cache | 4075 | * necessary. Note that the node listlock also protects the array_cache |
4076 | * if drain_array() is used on the shared array. | 4076 | * if drain_array() is used on the shared array. |
4077 | */ | 4077 | */ |
4078 | static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, | 4078 | static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, |
4079 | struct array_cache *ac, int force, int node) | 4079 | struct array_cache *ac, int force, int node) |
4080 | { | 4080 | { |
4081 | int tofree; | 4081 | int tofree; |
@@ -4085,7 +4085,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, | |||
4085 | if (ac->touched && !force) { | 4085 | if (ac->touched && !force) { |
4086 | ac->touched = 0; | 4086 | ac->touched = 0; |
4087 | } else { | 4087 | } else { |
4088 | spin_lock_irq(&l3->list_lock); | 4088 | spin_lock_irq(&n->list_lock); |
4089 | if (ac->avail) { | 4089 | if (ac->avail) { |
4090 | tofree = force ? ac->avail : (ac->limit + 4) / 5; | 4090 | tofree = force ? ac->avail : (ac->limit + 4) / 5; |
4091 | if (tofree > ac->avail) | 4091 | if (tofree > ac->avail) |
@@ -4095,7 +4095,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, | |||
4095 | memmove(ac->entry, &(ac->entry[tofree]), | 4095 | memmove(ac->entry, &(ac->entry[tofree]), |
4096 | sizeof(void *) * ac->avail); | 4096 | sizeof(void *) * ac->avail); |
4097 | } | 4097 | } |
4098 | spin_unlock_irq(&l3->list_lock); | 4098 | spin_unlock_irq(&n->list_lock); |
4099 | } | 4099 | } |
4100 | } | 4100 | } |
4101 | 4101 | ||
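drain_array(), updated in the three hunks above, is deliberately cheap: a cache that was touched since the last pass gets one round of grace, and the node's list_lock is only taken once there is something to free. Per call it trims roughly a fifth of the cache's limit, never more than about half of what is currently cached, or everything when force is set. A userspace model of that throttled drain, with the locking reduced to a comment and all names invented:

```c
#include <string.h>

/* Illustrative stand-in for struct array_cache. */
struct array_cache_model {
	unsigned int avail;	/* cached object pointers currently held */
	unsigned int limit;	/* capacity of entry[] */
	unsigned int touched;	/* set when the cache was used recently */
	void *entry[64];
};

/* Throttled drain: a recently touched cache gets one round of grace;
 * otherwise free roughly limit/5 entries (everything when force is set),
 * never more than about half of what is cached, then compact the rest. */
static void drain_array_model(struct array_cache_model *ac, int force)
{
	unsigned int tofree;

	if (!ac || !ac->avail)
		return;

	if (ac->touched && !force) {
		ac->touched = 0;
		return;
	}

	/* In the kernel this part runs under the per-node list_lock. */
	tofree = force ? ac->avail : (ac->limit + 4) / 5;
	if (tofree > ac->avail)
		tofree = (ac->avail + 1) / 2;

	/* free_block() would hand entry[0..tofree) back to the node here. */
	ac->avail -= tofree;
	memmove(ac->entry, &ac->entry[tofree], sizeof(void *) * ac->avail);
}

int main(void)
{
	struct array_cache_model ac = { .avail = 10, .limit = 20 };

	drain_array_model(&ac, 0);	/* trims (20 + 4) / 5 = 4 entries */
	return ac.avail == 6 ? 0 : 1;
}
```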
@@ -4114,7 +4114,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3, | |||
4114 | static void cache_reap(struct work_struct *w) | 4114 | static void cache_reap(struct work_struct *w) |
4115 | { | 4115 | { |
4116 | struct kmem_cache *searchp; | 4116 | struct kmem_cache *searchp; |
4117 | struct kmem_cache_node *l3; | 4117 | struct kmem_cache_node *n; |
4118 | int node = numa_mem_id(); | 4118 | int node = numa_mem_id(); |
4119 | struct delayed_work *work = to_delayed_work(w); | 4119 | struct delayed_work *work = to_delayed_work(w); |
4120 | 4120 | ||
@@ -4126,33 +4126,33 @@ static void cache_reap(struct work_struct *w) | |||
4126 | check_irq_on(); | 4126 | check_irq_on(); |
4127 | 4127 | ||
4128 | /* | 4128 | /* |
4129 | * We only take the l3 lock if absolutely necessary and we | 4129 | * We only take the node lock if absolutely necessary and we |
4130 | * have established with reasonable certainty that | 4130 | * have established with reasonable certainty that |
4131 | * we can do some work if the lock was obtained. | 4131 | * we can do some work if the lock was obtained. |
4132 | */ | 4132 | */ |
4133 | l3 = searchp->node[node]; | 4133 | n = searchp->node[node]; |
4134 | 4134 | ||
4135 | reap_alien(searchp, l3); | 4135 | reap_alien(searchp, n); |
4136 | 4136 | ||
4137 | drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); | 4137 | drain_array(searchp, n, cpu_cache_get(searchp), 0, node); |
4138 | 4138 | ||
4139 | /* | 4139 | /* |
4140 | * These are racy checks but it does not matter | 4140 | * These are racy checks but it does not matter |
4141 | * if we skip one check or scan twice. | 4141 | * if we skip one check or scan twice. |
4142 | */ | 4142 | */ |
4143 | if (time_after(l3->next_reap, jiffies)) | 4143 | if (time_after(n->next_reap, jiffies)) |
4144 | goto next; | 4144 | goto next; |
4145 | 4145 | ||
4146 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3; | 4146 | n->next_reap = jiffies + REAPTIMEOUT_LIST3; |
4147 | 4147 | ||
4148 | drain_array(searchp, l3, l3->shared, 0, node); | 4148 | drain_array(searchp, n, n->shared, 0, node); |
4149 | 4149 | ||
4150 | if (l3->free_touched) | 4150 | if (n->free_touched) |
4151 | l3->free_touched = 0; | 4151 | n->free_touched = 0; |
4152 | else { | 4152 | else { |
4153 | int freed; | 4153 | int freed; |
4154 | 4154 | ||
4155 | freed = drain_freelist(searchp, l3, (l3->free_limit + | 4155 | freed = drain_freelist(searchp, n, (n->free_limit + |
4156 | 5 * searchp->num - 1) / (5 * searchp->num)); | 4156 | 5 * searchp->num - 1) / (5 * searchp->num)); |
4157 | STATS_ADD_REAPED(searchp, freed); | 4157 | STATS_ADD_REAPED(searchp, freed); |
4158 | } | 4158 | } |
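cache_reap() is the periodic reaper behind the renamed pointers in this hunk: for the local node it drains the alien caches, the per-CPU cache and then, if the node's next_reap deadline has passed, the shared cache and part of the free-slab list. The drain_freelist() argument (n->free_limit + 5 * searchp->num - 1) / (5 * searchp->num) is simply free_limit / (5 * num) rounded up, i.e. about a fifth of the node's free limit expressed in whole slabs. A small sketch of that arithmetic, using hypothetical numbers:

```c
#include <stdio.h>

/* Number of free slabs one reap pass asks drain_freelist() to release:
 * a fifth of the node's free_limit, expressed in whole slabs and rounded
 * up (free_limit counts objects, num is objects per slab). */
static unsigned int slabs_to_reap(unsigned int free_limit, unsigned int num)
{
	return (free_limit + 5 * num - 1) / (5 * num);
}

int main(void)
{
	/* Hypothetical cache: 60 objects per slab, free_limit of 360 objects. */
	printf("reap %u slabs per pass\n", slabs_to_reap(360, 60));	/* prints 2 */
	return 0;
}
```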
@@ -4178,25 +4178,25 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4178 | const char *name; | 4178 | const char *name; |
4179 | char *error = NULL; | 4179 | char *error = NULL; |
4180 | int node; | 4180 | int node; |
4181 | struct kmem_cache_node *l3; | 4181 | struct kmem_cache_node *n; |
4182 | 4182 | ||
4183 | active_objs = 0; | 4183 | active_objs = 0; |
4184 | num_slabs = 0; | 4184 | num_slabs = 0; |
4185 | for_each_online_node(node) { | 4185 | for_each_online_node(node) { |
4186 | l3 = cachep->node[node]; | 4186 | n = cachep->node[node]; |
4187 | if (!l3) | 4187 | if (!n) |
4188 | continue; | 4188 | continue; |
4189 | 4189 | ||
4190 | check_irq_on(); | 4190 | check_irq_on(); |
4191 | spin_lock_irq(&l3->list_lock); | 4191 | spin_lock_irq(&n->list_lock); |
4192 | 4192 | ||
4193 | list_for_each_entry(slabp, &l3->slabs_full, list) { | 4193 | list_for_each_entry(slabp, &n->slabs_full, list) { |
4194 | if (slabp->inuse != cachep->num && !error) | 4194 | if (slabp->inuse != cachep->num && !error) |
4195 | error = "slabs_full accounting error"; | 4195 | error = "slabs_full accounting error"; |
4196 | active_objs += cachep->num; | 4196 | active_objs += cachep->num; |
4197 | active_slabs++; | 4197 | active_slabs++; |
4198 | } | 4198 | } |
4199 | list_for_each_entry(slabp, &l3->slabs_partial, list) { | 4199 | list_for_each_entry(slabp, &n->slabs_partial, list) { |
4200 | if (slabp->inuse == cachep->num && !error) | 4200 | if (slabp->inuse == cachep->num && !error) |
4201 | error = "slabs_partial inuse accounting error"; | 4201 | error = "slabs_partial inuse accounting error"; |
4202 | if (!slabp->inuse && !error) | 4202 | if (!slabp->inuse && !error) |
@@ -4204,16 +4204,16 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4204 | active_objs += slabp->inuse; | 4204 | active_objs += slabp->inuse; |
4205 | active_slabs++; | 4205 | active_slabs++; |
4206 | } | 4206 | } |
4207 | list_for_each_entry(slabp, &l3->slabs_free, list) { | 4207 | list_for_each_entry(slabp, &n->slabs_free, list) { |
4208 | if (slabp->inuse && !error) | 4208 | if (slabp->inuse && !error) |
4209 | error = "slabs_free/inuse accounting error"; | 4209 | error = "slabs_free/inuse accounting error"; |
4210 | num_slabs++; | 4210 | num_slabs++; |
4211 | } | 4211 | } |
4212 | free_objects += l3->free_objects; | 4212 | free_objects += n->free_objects; |
4213 | if (l3->shared) | 4213 | if (n->shared) |
4214 | shared_avail += l3->shared->avail; | 4214 | shared_avail += n->shared->avail; |
4215 | 4215 | ||
4216 | spin_unlock_irq(&l3->list_lock); | 4216 | spin_unlock_irq(&n->list_lock); |
4217 | } | 4217 | } |
4218 | num_slabs += active_slabs; | 4218 | num_slabs += active_slabs; |
4219 | num_objs = num_slabs * cachep->num; | 4219 | num_objs = num_slabs * cachep->num; |
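get_slabinfo() walks each node's three slab lists under n->list_lock and accumulates the numbers behind /proc/slabinfo: a full slab contributes cachep->num active objects, a partial slab contributes its inuse count, and free slabs only grow the slab total. A reduced userspace model of that accounting is below; struct slab_model, account_node() and the figures in main() are invented for illustration.

```c
/* Minimal stand-ins for the structures get_slabinfo() walks. */
struct slab_model {
	unsigned int inuse;	/* objects allocated from this slab */
};

struct slabinfo_model {
	unsigned long active_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;	/* free slabs; full/partial are added later */
};

/* Accumulate one node's lists the way get_slabinfo() does; in the kernel
 * this runs under the node's list_lock with interrupts disabled. */
static void account_node(struct slabinfo_model *si, unsigned int objs_per_slab,
			 unsigned int nr_full,
			 const struct slab_model *partial, unsigned int nr_partial,
			 unsigned int nr_free)
{
	for (unsigned int i = 0; i < nr_full; i++) {
		si->active_objs += objs_per_slab;	/* full slab: every object in use */
		si->active_slabs++;
	}
	for (unsigned int i = 0; i < nr_partial; i++) {
		si->active_objs += partial[i].inuse;
		si->active_slabs++;
	}
	si->num_slabs += nr_free;	/* free slabs hold no active objects */
}

int main(void)
{
	struct slabinfo_model si = { 0 };
	struct slab_model partial[2] = { { .inuse = 3 }, { .inuse = 7 } };

	/* Hypothetical node: 4 full slabs of 16 objects, 2 partial, 1 free. */
	account_node(&si, 16, 4, partial, 2, 1);
	return (si.active_objs == 74 && si.active_slabs == 6) ? 0 : 1;
}
```

After the per-node loop the function adds active_slabs into num_slabs and multiplies by cachep->num, as the last two context lines above show, which is how the active/total object columns of slabinfo are derived.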
@@ -4239,7 +4239,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4239 | void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) | 4239 | void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) |
4240 | { | 4240 | { |
4241 | #if STATS | 4241 | #if STATS |
4242 | { /* list3 stats */ | 4242 | { /* node stats */ |
4243 | unsigned long high = cachep->high_mark; | 4243 | unsigned long high = cachep->high_mark; |
4244 | unsigned long allocs = cachep->num_allocations; | 4244 | unsigned long allocs = cachep->num_allocations; |
4245 | unsigned long grown = cachep->grown; | 4245 | unsigned long grown = cachep->grown; |
@@ -4392,7 +4392,7 @@ static int leaks_show(struct seq_file *m, void *p) | |||
4392 | { | 4392 | { |
4393 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); | 4393 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); |
4394 | struct slab *slabp; | 4394 | struct slab *slabp; |
4395 | struct kmem_cache_node *l3; | 4395 | struct kmem_cache_node *n; |
4396 | const char *name; | 4396 | const char *name; |
4397 | unsigned long *n = m->private; | 4397 | unsigned long *n = m->private; |
4398 | int node; | 4398 | int node; |
@@ -4408,18 +4408,18 @@ static int leaks_show(struct seq_file *m, void *p) | |||
4408 | n[1] = 0; | 4408 | n[1] = 0; |
4409 | 4409 | ||
4410 | for_each_online_node(node) { | 4410 | for_each_online_node(node) { |
4411 | l3 = cachep->node[node]; | 4411 | n = cachep->node[node]; |
4412 | if (!l3) | 4412 | if (!n) |
4413 | continue; | 4413 | continue; |
4414 | 4414 | ||
4415 | check_irq_on(); | 4415 | check_irq_on(); |
4416 | spin_lock_irq(&l3->list_lock); | 4416 | spin_lock_irq(&n->list_lock); |
4417 | 4417 | ||
4418 | list_for_each_entry(slabp, &l3->slabs_full, list) | 4418 | list_for_each_entry(slabp, &n->slabs_full, list) |
4419 | handle_slab(n, cachep, slabp); | 4419 | handle_slab(n, cachep, slabp); |
4420 | list_for_each_entry(slabp, &l3->slabs_partial, list) | 4420 | list_for_each_entry(slabp, &n->slabs_partial, list) |
4421 | handle_slab(n, cachep, slabp); | 4421 | handle_slab(n, cachep, slabp); |
4422 | spin_unlock_irq(&l3->list_lock); | 4422 | spin_unlock_irq(&n->list_lock); |
4423 | } | 4423 | } |
4424 | name = cachep->name; | 4424 | name = cachep->name; |
4425 | if (n[0] == n[1]) { | 4425 | if (n[0] == n[1]) { |
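leaks_show(), the /proc/slab_allocators backend built under CONFIG_DEBUG_SLAB_LEAK, scans only the full and partial lists of each node under the list_lock (free slabs hold no live objects, so they cannot contribute allocation records), and handle_slab() folds each object's caller into a bounded table kept in m->private; when that table fills up, the buffer is enlarged and the scan redone. A rough userspace model of such a bounded caller table is sketched below; the layout (capacity in slot 0, count in slot 1, address/count pairs after that) only loosely follows the kernel's scheme, and record_caller() is an invented simplification rather than the kernel's helper.

```c
#include <stdio.h>

/* Bounded table of (caller address, hit count) pairs, loosely modelled on the
 * buffer leaks_show() keeps in m->private: slot 0 holds the capacity, slot 1
 * the number of distinct callers recorded, pairs follow from slot 2 on. */
static int record_caller(unsigned long *tbl, unsigned long caller)
{
	unsigned long *pairs = tbl + 2;

	for (unsigned long i = 0; i < tbl[1]; i++) {
		if (pairs[2 * i] == caller) {
			pairs[2 * i + 1]++;	/* known caller: bump its count */
			return 1;
		}
	}
	if (tbl[1] == tbl[0])
		return 0;	/* table full: retry the scan with a bigger buffer */
	pairs[2 * tbl[1]] = caller;
	pairs[2 * tbl[1] + 1] = 1;
	tbl[1]++;
	return 1;
}

int main(void)
{
	unsigned long tbl[2 + 2 * 4] = { 4, 0 };	/* room for 4 distinct callers */

	record_caller(tbl, 0x1234);
	record_caller(tbl, 0x1234);
	record_caller(tbl, 0x5678);
	printf("%lu distinct callers, first one seen %lu times\n", tbl[1], tbl[3]);
	return 0;
}
```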
@@ -16,7 +16,7 @@ enum slab_state { | |||
16 | DOWN, /* No slab functionality yet */ | 16 | DOWN, /* No slab functionality yet */ |
17 | PARTIAL, /* SLUB: kmem_cache_node available */ | 17 | PARTIAL, /* SLUB: kmem_cache_node available */ |
18 | PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */ | 18 | PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */ |
19 | PARTIAL_L3, /* SLAB: kmalloc size for l3 struct available */ | 19 | PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */ |
20 | UP, /* Slab caches usable but not all extras yet */ | 20 | UP, /* Slab caches usable but not all extras yet */ |
21 | FULL /* Everything is working */ | 21 | FULL /* Everything is working */ |
22 | }; | 22 | }; |
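The final hunk, in mm/slab.h, renames the bootstrap state PARTIAL_L3 to PARTIAL_NODE. The enum records how far slab initialisation has progressed; for SLAB, reaching PARTIAL_NODE means the kmalloc cache sized for struct kmem_cache_node exists, so later boot-time caches can allocate their per-node structures dynamically instead of drawing them from the static bootstrap array. A trimmed-down model of gating on that state is below; the enum mirrors the hunk, while `state` and can_kmalloc_node_struct() are illustrative stand-ins rather than the kernel's symbols.

```c
/* Bootstrap progress, mirroring enum slab_state after the rename. */
enum slab_state_model {
	DOWN,			/* no slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* slab caches usable but not all extras yet */
	FULL			/* everything is working */
};

static enum slab_state_model state = DOWN;

/* Illustrative check: once PARTIAL_NODE is reached, per-node structures for
 * later caches can be allocated dynamically instead of being carved out of
 * the static bootstrap array. */
static int can_kmalloc_node_struct(void)
{
	return state >= PARTIAL_NODE;
}

int main(void)
{
	state = PARTIAL_NODE;
	return can_kmalloc_node_struct() ? 0 : 1;
}
```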