author     Linus Torvalds <torvalds@linux-foundation.org>   2013-05-07 11:42:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-07 11:42:20 -0400
commit     0f47c9423c0fe468d0b5b153f9b9d6e8e20707eb (patch)
tree       9eaec7fb4dc5fbfae07d168d0493a0a0a67c7d47 /mm/slab.c
parent     b9e306e07ed58fc354bbd58124b281dd7dc697b7 (diff)
parent     69df2ac1288b456a95aceadafbf88cd891a577c8 (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab changes from Pekka Enberg:
 "The bulk of the changes are more slab unification from Christoph.
  There's also few fixes from Aaron, Glauber, and Joonsoo thrown into
  the mix."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (24 commits)
  mm, slab_common: Fix bootstrap creation of kmalloc caches
  slab: Return NULL for oversized allocations
  mm: slab: Verify the nodeid passed to ____cache_alloc_node
  slub: tid must be retrieved from the percpu area of the current processor
  slub: Do not dereference NULL pointer in node_match
  slub: add 'likely' macro to inc_slabs_node()
  slub: correct to calculate num of acquired objects in get_partial_node()
  slub: correctly bootstrap boot caches
  mm/sl[au]b: correct allocation type check in kmalloc_slab()
  slab: Fixup CONFIG_PAGE_ALLOC/DEBUG_SLAB_LEAK sections
  slab: Handle ARCH_DMA_MINALIGN correctly
  slab: Common definition for kmem_cache_node
  slab: Rename list3/l3 to node
  slab: Common Kmalloc cache determination
  stat: Use size_t for sizes instead of unsigned
  slab: Common function to create the kmalloc array
  slab: Common definition for the array of kmalloc caches
  slab: Common constants for kmalloc boundaries
  slab: Rename nodelists to node
  slab: Common name for the per node structures
  ...
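The core of the unification is visible throughout the diff below: SLAB's private per-node structure, struct kmem_list3, goes away and its role is taken over by the common struct kmem_cache_node shared with SLUB, so every cachep->nodelists[node] access becomes cachep->node[node]. A rough sketch of the SLAB side of that structure, reconstructed here from the kmem_list3 fields removed in this patch (the unified definition sits in the shared mm/slab.h and also carries SLUB-only members, which are elided; the exact layout may differ):

/*
 * Illustrative sketch only, not copied verbatim from this patch:
 * the SLAB half of the unified per-node structure, mirroring the
 * fields of the removed struct kmem_list3.
 */
struct kmem_cache_node {
	spinlock_t list_lock;
#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif
	/* CONFIG_SLUB members (partial list, object counters) elided in this sketch */
};

Along the same lines, the per-size malloc_sizes[] and cache_names[] tables are dropped in favor of the shared kmalloc_caches[] array, with sizes mapped via kmalloc_index() and kmalloc_size(); that is why INDEX_AC and the former INDEX_L3 (now INDEX_NODE) are computed with kmalloc_index() in the hunks that follow.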
Diffstat (limited to 'mm/slab.c')
-rw-r--r--   mm/slab.c   790
1 file changed, 334 insertions, 456 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 96079244c860..8ccd296c6d9c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -286,68 +286,27 @@ struct arraycache_init {
286}; 286};
287 287
288/* 288/*
289 * The slab lists for all objects.
290 */
291struct kmem_list3 {
292 struct list_head slabs_partial; /* partial list first, better asm code */
293 struct list_head slabs_full;
294 struct list_head slabs_free;
295 unsigned long free_objects;
296 unsigned int free_limit;
297 unsigned int colour_next; /* Per-node cache coloring */
298 spinlock_t list_lock;
299 struct array_cache *shared; /* shared per node */
300 struct array_cache **alien; /* on other nodes */
301 unsigned long next_reap; /* updated without locking */
302 int free_touched; /* updated without locking */
303};
304
305/*
306 * Need this for bootstrapping a per node allocator. 289 * Need this for bootstrapping a per node allocator.
307 */ 290 */
308#define NUM_INIT_LISTS (3 * MAX_NUMNODES) 291#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
309static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; 292static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
310#define CACHE_CACHE 0 293#define CACHE_CACHE 0
311#define SIZE_AC MAX_NUMNODES 294#define SIZE_AC MAX_NUMNODES
312#define SIZE_L3 (2 * MAX_NUMNODES) 295#define SIZE_NODE (2 * MAX_NUMNODES)
313 296
314static int drain_freelist(struct kmem_cache *cache, 297static int drain_freelist(struct kmem_cache *cache,
315 struct kmem_list3 *l3, int tofree); 298 struct kmem_cache_node *n, int tofree);
316static void free_block(struct kmem_cache *cachep, void **objpp, int len, 299static void free_block(struct kmem_cache *cachep, void **objpp, int len,
317 int node); 300 int node);
318static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); 301static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
319static void cache_reap(struct work_struct *unused); 302static void cache_reap(struct work_struct *unused);
320 303
321/*
322 * This function must be completely optimized away if a constant is passed to
323 * it. Mostly the same as what is in linux/slab.h except it returns an index.
324 */
325static __always_inline int index_of(const size_t size)
326{
327 extern void __bad_size(void);
328
329 if (__builtin_constant_p(size)) {
330 int i = 0;
331
332#define CACHE(x) \
333 if (size <=x) \
334 return i; \
335 else \
336 i++;
337#include <linux/kmalloc_sizes.h>
338#undef CACHE
339 __bad_size();
340 } else
341 __bad_size();
342 return 0;
343}
344
345static int slab_early_init = 1; 304static int slab_early_init = 1;
346 305
347#define INDEX_AC index_of(sizeof(struct arraycache_init)) 306#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
348#define INDEX_L3 index_of(sizeof(struct kmem_list3)) 307#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
349 308
350static void kmem_list3_init(struct kmem_list3 *parent) 309static void kmem_cache_node_init(struct kmem_cache_node *parent)
351{ 310{
352 INIT_LIST_HEAD(&parent->slabs_full); 311 INIT_LIST_HEAD(&parent->slabs_full);
353 INIT_LIST_HEAD(&parent->slabs_partial); 312 INIT_LIST_HEAD(&parent->slabs_partial);
@@ -363,7 +322,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
363#define MAKE_LIST(cachep, listp, slab, nodeid) \ 322#define MAKE_LIST(cachep, listp, slab, nodeid) \
364 do { \ 323 do { \
365 INIT_LIST_HEAD(listp); \ 324 INIT_LIST_HEAD(listp); \
366 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ 325 list_splice(&(cachep->node[nodeid]->slab), listp); \
367 } while (0) 326 } while (0)
368 327
369#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 328#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
@@ -524,30 +483,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
524 return reciprocal_divide(offset, cache->reciprocal_buffer_size); 483 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
525} 484}
526 485
527/*
528 * These are the default caches for kmalloc. Custom caches can have other sizes.
529 */
530struct cache_sizes malloc_sizes[] = {
531#define CACHE(x) { .cs_size = (x) },
532#include <linux/kmalloc_sizes.h>
533 CACHE(ULONG_MAX)
534#undef CACHE
535};
536EXPORT_SYMBOL(malloc_sizes);
537
538/* Must match cache_sizes above. Out of line to keep cache footprint low. */
539struct cache_names {
540 char *name;
541 char *name_dma;
542};
543
544static struct cache_names __initdata cache_names[] = {
545#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
546#include <linux/kmalloc_sizes.h>
547 {NULL,}
548#undef CACHE
549};
550
551static struct arraycache_init initarray_generic = 486static struct arraycache_init initarray_generic =
552 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 487 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
553 488
@@ -586,15 +521,15 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
586 int q) 521 int q)
587{ 522{
588 struct array_cache **alc; 523 struct array_cache **alc;
589 struct kmem_list3 *l3; 524 struct kmem_cache_node *n;
590 int r; 525 int r;
591 526
592 l3 = cachep->nodelists[q]; 527 n = cachep->node[q];
593 if (!l3) 528 if (!n)
594 return; 529 return;
595 530
596 lockdep_set_class(&l3->list_lock, l3_key); 531 lockdep_set_class(&n->list_lock, l3_key);
597 alc = l3->alien; 532 alc = n->alien;
598 /* 533 /*
599 * FIXME: This check for BAD_ALIEN_MAGIC 534 * FIXME: This check for BAD_ALIEN_MAGIC
600 * should go away when common slab code is taught to 535 * should go away when common slab code is taught to
@@ -625,28 +560,30 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
625 560
626static void init_node_lock_keys(int q) 561static void init_node_lock_keys(int q)
627{ 562{
628 struct cache_sizes *s = malloc_sizes; 563 int i;
629 564
630 if (slab_state < UP) 565 if (slab_state < UP)
631 return; 566 return;
632 567
633 for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) { 568 for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
634 struct kmem_list3 *l3; 569 struct kmem_cache_node *n;
570 struct kmem_cache *cache = kmalloc_caches[i];
571
572 if (!cache)
573 continue;
635 574
636 l3 = s->cs_cachep->nodelists[q]; 575 n = cache->node[q];
637 if (!l3 || OFF_SLAB(s->cs_cachep)) 576 if (!n || OFF_SLAB(cache))
638 continue; 577 continue;
639 578
640 slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key, 579 slab_set_lock_classes(cache, &on_slab_l3_key,
641 &on_slab_alc_key, q); 580 &on_slab_alc_key, q);
642 } 581 }
643} 582}
644 583
645static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q) 584static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
646{ 585{
647 struct kmem_list3 *l3; 586 if (!cachep->node[q])
648 l3 = cachep->nodelists[q];
649 if (!l3)
650 return; 587 return;
651 588
652 slab_set_lock_classes(cachep, &on_slab_l3_key, 589 slab_set_lock_classes(cachep, &on_slab_l3_key,
@@ -702,41 +639,6 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
702 return cachep->array[smp_processor_id()]; 639 return cachep->array[smp_processor_id()];
703} 640}
704 641
705static inline struct kmem_cache *__find_general_cachep(size_t size,
706 gfp_t gfpflags)
707{
708 struct cache_sizes *csizep = malloc_sizes;
709
710#if DEBUG
711 /* This happens if someone tries to call
712 * kmem_cache_create(), or __kmalloc(), before
713 * the generic caches are initialized.
714 */
715 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
716#endif
717 if (!size)
718 return ZERO_SIZE_PTR;
719
720 while (size > csizep->cs_size)
721 csizep++;
722
723 /*
724 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
725 * has cs_{dma,}cachep==NULL. Thus no special case
726 * for large kmalloc calls required.
727 */
728#ifdef CONFIG_ZONE_DMA
729 if (unlikely(gfpflags & GFP_DMA))
730 return csizep->cs_dmacachep;
731#endif
732 return csizep->cs_cachep;
733}
734
735static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
736{
737 return __find_general_cachep(size, gfpflags);
738}
739
740static size_t slab_mgmt_size(size_t nr_objs, size_t align) 642static size_t slab_mgmt_size(size_t nr_objs, size_t align)
741{ 643{
742 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); 644 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
@@ -938,29 +840,29 @@ static inline bool is_slab_pfmemalloc(struct slab *slabp)
938static void recheck_pfmemalloc_active(struct kmem_cache *cachep, 840static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
939 struct array_cache *ac) 841 struct array_cache *ac)
940{ 842{
941 struct kmem_list3 *l3 = cachep->nodelists[numa_mem_id()]; 843 struct kmem_cache_node *n = cachep->node[numa_mem_id()];
942 struct slab *slabp; 844 struct slab *slabp;
943 unsigned long flags; 845 unsigned long flags;
944 846
945 if (!pfmemalloc_active) 847 if (!pfmemalloc_active)
946 return; 848 return;
947 849
948 spin_lock_irqsave(&l3->list_lock, flags); 850 spin_lock_irqsave(&n->list_lock, flags);
949 list_for_each_entry(slabp, &l3->slabs_full, list) 851 list_for_each_entry(slabp, &n->slabs_full, list)
950 if (is_slab_pfmemalloc(slabp)) 852 if (is_slab_pfmemalloc(slabp))
951 goto out; 853 goto out;
952 854
953 list_for_each_entry(slabp, &l3->slabs_partial, list) 855 list_for_each_entry(slabp, &n->slabs_partial, list)
954 if (is_slab_pfmemalloc(slabp)) 856 if (is_slab_pfmemalloc(slabp))
955 goto out; 857 goto out;
956 858
957 list_for_each_entry(slabp, &l3->slabs_free, list) 859 list_for_each_entry(slabp, &n->slabs_free, list)
958 if (is_slab_pfmemalloc(slabp)) 860 if (is_slab_pfmemalloc(slabp))
959 goto out; 861 goto out;
960 862
961 pfmemalloc_active = false; 863 pfmemalloc_active = false;
962out: 864out:
963 spin_unlock_irqrestore(&l3->list_lock, flags); 865 spin_unlock_irqrestore(&n->list_lock, flags);
964} 866}
965 867
966static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, 868static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
@@ -971,7 +873,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
971 873
972 /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */ 874 /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
973 if (unlikely(is_obj_pfmemalloc(objp))) { 875 if (unlikely(is_obj_pfmemalloc(objp))) {
974 struct kmem_list3 *l3; 876 struct kmem_cache_node *n;
975 877
976 if (gfp_pfmemalloc_allowed(flags)) { 878 if (gfp_pfmemalloc_allowed(flags)) {
977 clear_obj_pfmemalloc(&objp); 879 clear_obj_pfmemalloc(&objp);
@@ -993,8 +895,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
993 * If there are empty slabs on the slabs_free list and we are 895 * If there are empty slabs on the slabs_free list and we are
994 * being forced to refill the cache, mark this one !pfmemalloc. 896 * being forced to refill the cache, mark this one !pfmemalloc.
995 */ 897 */
996 l3 = cachep->nodelists[numa_mem_id()]; 898 n = cachep->node[numa_mem_id()];
997 if (!list_empty(&l3->slabs_free) && force_refill) { 899 if (!list_empty(&n->slabs_free) && force_refill) {
998 struct slab *slabp = virt_to_slab(objp); 900 struct slab *slabp = virt_to_slab(objp);
999 ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem)); 901 ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
1000 clear_obj_pfmemalloc(&objp); 902 clear_obj_pfmemalloc(&objp);
@@ -1071,7 +973,7 @@ static int transfer_objects(struct array_cache *to,
1071#ifndef CONFIG_NUMA 973#ifndef CONFIG_NUMA
1072 974
1073#define drain_alien_cache(cachep, alien) do { } while (0) 975#define drain_alien_cache(cachep, alien) do { } while (0)
1074#define reap_alien(cachep, l3) do { } while (0) 976#define reap_alien(cachep, n) do { } while (0)
1075 977
1076static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) 978static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
1077{ 979{
@@ -1143,33 +1045,33 @@ static void free_alien_cache(struct array_cache **ac_ptr)
1143static void __drain_alien_cache(struct kmem_cache *cachep, 1045static void __drain_alien_cache(struct kmem_cache *cachep,
1144 struct array_cache *ac, int node) 1046 struct array_cache *ac, int node)
1145{ 1047{
1146 struct kmem_list3 *rl3 = cachep->nodelists[node]; 1048 struct kmem_cache_node *n = cachep->node[node];
1147 1049
1148 if (ac->avail) { 1050 if (ac->avail) {
1149 spin_lock(&rl3->list_lock); 1051 spin_lock(&n->list_lock);
1150 /* 1052 /*
1151 * Stuff objects into the remote nodes shared array first. 1053 * Stuff objects into the remote nodes shared array first.
1152 * That way we could avoid the overhead of putting the objects 1054 * That way we could avoid the overhead of putting the objects
1153 * into the free lists and getting them back later. 1055 * into the free lists and getting them back later.
1154 */ 1056 */
1155 if (rl3->shared) 1057 if (n->shared)
1156 transfer_objects(rl3->shared, ac, ac->limit); 1058 transfer_objects(n->shared, ac, ac->limit);
1157 1059
1158 free_block(cachep, ac->entry, ac->avail, node); 1060 free_block(cachep, ac->entry, ac->avail, node);
1159 ac->avail = 0; 1061 ac->avail = 0;
1160 spin_unlock(&rl3->list_lock); 1062 spin_unlock(&n->list_lock);
1161 } 1063 }
1162} 1064}
1163 1065
1164/* 1066/*
1165 * Called from cache_reap() to regularly drain alien caches round robin. 1067 * Called from cache_reap() to regularly drain alien caches round robin.
1166 */ 1068 */
1167static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) 1069static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
1168{ 1070{
1169 int node = __this_cpu_read(slab_reap_node); 1071 int node = __this_cpu_read(slab_reap_node);
1170 1072
1171 if (l3->alien) { 1073 if (n->alien) {
1172 struct array_cache *ac = l3->alien[node]; 1074 struct array_cache *ac = n->alien[node];
1173 1075
1174 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { 1076 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1175 __drain_alien_cache(cachep, ac, node); 1077 __drain_alien_cache(cachep, ac, node);
@@ -1199,7 +1101,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1199{ 1101{
1200 struct slab *slabp = virt_to_slab(objp); 1102 struct slab *slabp = virt_to_slab(objp);
1201 int nodeid = slabp->nodeid; 1103 int nodeid = slabp->nodeid;
1202 struct kmem_list3 *l3; 1104 struct kmem_cache_node *n;
1203 struct array_cache *alien = NULL; 1105 struct array_cache *alien = NULL;
1204 int node; 1106 int node;
1205 1107
@@ -1212,10 +1114,10 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1212 if (likely(slabp->nodeid == node)) 1114 if (likely(slabp->nodeid == node))
1213 return 0; 1115 return 0;
1214 1116
1215 l3 = cachep->nodelists[node]; 1117 n = cachep->node[node];
1216 STATS_INC_NODEFREES(cachep); 1118 STATS_INC_NODEFREES(cachep);
1217 if (l3->alien && l3->alien[nodeid]) { 1119 if (n->alien && n->alien[nodeid]) {
1218 alien = l3->alien[nodeid]; 1120 alien = n->alien[nodeid];
1219 spin_lock(&alien->lock); 1121 spin_lock(&alien->lock);
1220 if (unlikely(alien->avail == alien->limit)) { 1122 if (unlikely(alien->avail == alien->limit)) {
1221 STATS_INC_ACOVERFLOW(cachep); 1123 STATS_INC_ACOVERFLOW(cachep);
@@ -1224,28 +1126,28 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1224 ac_put_obj(cachep, alien, objp); 1126 ac_put_obj(cachep, alien, objp);
1225 spin_unlock(&alien->lock); 1127 spin_unlock(&alien->lock);
1226 } else { 1128 } else {
1227 spin_lock(&(cachep->nodelists[nodeid])->list_lock); 1129 spin_lock(&(cachep->node[nodeid])->list_lock);
1228 free_block(cachep, &objp, 1, nodeid); 1130 free_block(cachep, &objp, 1, nodeid);
1229 spin_unlock(&(cachep->nodelists[nodeid])->list_lock); 1131 spin_unlock(&(cachep->node[nodeid])->list_lock);
1230 } 1132 }
1231 return 1; 1133 return 1;
1232} 1134}
1233#endif 1135#endif
1234 1136
1235/* 1137/*
1236 * Allocates and initializes nodelists for a node on each slab cache, used for 1138 * Allocates and initializes node for a node on each slab cache, used for
1237 * either memory or cpu hotplug. If memory is being hot-added, the kmem_list3 1139 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
1238 * will be allocated off-node since memory is not yet online for the new node. 1140 * will be allocated off-node since memory is not yet online for the new node.
1239 * When hotplugging memory or a cpu, existing nodelists are not replaced if 1141 * When hotplugging memory or a cpu, existing node are not replaced if
1240 * already in use. 1142 * already in use.
1241 * 1143 *
1242 * Must hold slab_mutex. 1144 * Must hold slab_mutex.
1243 */ 1145 */
1244static int init_cache_nodelists_node(int node) 1146static int init_cache_node_node(int node)
1245{ 1147{
1246 struct kmem_cache *cachep; 1148 struct kmem_cache *cachep;
1247 struct kmem_list3 *l3; 1149 struct kmem_cache_node *n;
1248 const int memsize = sizeof(struct kmem_list3); 1150 const int memsize = sizeof(struct kmem_cache_node);
1249 1151
1250 list_for_each_entry(cachep, &slab_caches, list) { 1152 list_for_each_entry(cachep, &slab_caches, list) {
1251 /* 1153 /*
@@ -1253,12 +1155,12 @@ static int init_cache_nodelists_node(int node)
1253 * begin anything. Make sure some other cpu on this 1155 * begin anything. Make sure some other cpu on this
1254 * node has not already allocated this 1156 * node has not already allocated this
1255 */ 1157 */
1256 if (!cachep->nodelists[node]) { 1158 if (!cachep->node[node]) {
1257 l3 = kmalloc_node(memsize, GFP_KERNEL, node); 1159 n = kmalloc_node(memsize, GFP_KERNEL, node);
1258 if (!l3) 1160 if (!n)
1259 return -ENOMEM; 1161 return -ENOMEM;
1260 kmem_list3_init(l3); 1162 kmem_cache_node_init(n);
1261 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 1163 n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1262 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1164 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1263 1165
1264 /* 1166 /*
@@ -1266,14 +1168,14 @@ static int init_cache_nodelists_node(int node)
1266 * go. slab_mutex is sufficient 1168 * go. slab_mutex is sufficient
1267 * protection here. 1169 * protection here.
1268 */ 1170 */
1269 cachep->nodelists[node] = l3; 1171 cachep->node[node] = n;
1270 } 1172 }
1271 1173
1272 spin_lock_irq(&cachep->nodelists[node]->list_lock); 1174 spin_lock_irq(&cachep->node[node]->list_lock);
1273 cachep->nodelists[node]->free_limit = 1175 cachep->node[node]->free_limit =
1274 (1 + nr_cpus_node(node)) * 1176 (1 + nr_cpus_node(node)) *
1275 cachep->batchcount + cachep->num; 1177 cachep->batchcount + cachep->num;
1276 spin_unlock_irq(&cachep->nodelists[node]->list_lock); 1178 spin_unlock_irq(&cachep->node[node]->list_lock);
1277 } 1179 }
1278 return 0; 1180 return 0;
1279} 1181}
@@ -1281,7 +1183,7 @@ static int init_cache_nodelists_node(int node)
1281static void __cpuinit cpuup_canceled(long cpu) 1183static void __cpuinit cpuup_canceled(long cpu)
1282{ 1184{
1283 struct kmem_cache *cachep; 1185 struct kmem_cache *cachep;
1284 struct kmem_list3 *l3 = NULL; 1186 struct kmem_cache_node *n = NULL;
1285 int node = cpu_to_mem(cpu); 1187 int node = cpu_to_mem(cpu);
1286 const struct cpumask *mask = cpumask_of_node(node); 1188 const struct cpumask *mask = cpumask_of_node(node);
1287 1189
@@ -1293,34 +1195,34 @@ static void __cpuinit cpuup_canceled(long cpu)
1293 /* cpu is dead; no one can alloc from it. */ 1195 /* cpu is dead; no one can alloc from it. */
1294 nc = cachep->array[cpu]; 1196 nc = cachep->array[cpu];
1295 cachep->array[cpu] = NULL; 1197 cachep->array[cpu] = NULL;
1296 l3 = cachep->nodelists[node]; 1198 n = cachep->node[node];
1297 1199
1298 if (!l3) 1200 if (!n)
1299 goto free_array_cache; 1201 goto free_array_cache;
1300 1202
1301 spin_lock_irq(&l3->list_lock); 1203 spin_lock_irq(&n->list_lock);
1302 1204
1303 /* Free limit for this kmem_list3 */ 1205 /* Free limit for this kmem_cache_node */
1304 l3->free_limit -= cachep->batchcount; 1206 n->free_limit -= cachep->batchcount;
1305 if (nc) 1207 if (nc)
1306 free_block(cachep, nc->entry, nc->avail, node); 1208 free_block(cachep, nc->entry, nc->avail, node);
1307 1209
1308 if (!cpumask_empty(mask)) { 1210 if (!cpumask_empty(mask)) {
1309 spin_unlock_irq(&l3->list_lock); 1211 spin_unlock_irq(&n->list_lock);
1310 goto free_array_cache; 1212 goto free_array_cache;
1311 } 1213 }
1312 1214
1313 shared = l3->shared; 1215 shared = n->shared;
1314 if (shared) { 1216 if (shared) {
1315 free_block(cachep, shared->entry, 1217 free_block(cachep, shared->entry,
1316 shared->avail, node); 1218 shared->avail, node);
1317 l3->shared = NULL; 1219 n->shared = NULL;
1318 } 1220 }
1319 1221
1320 alien = l3->alien; 1222 alien = n->alien;
1321 l3->alien = NULL; 1223 n->alien = NULL;
1322 1224
1323 spin_unlock_irq(&l3->list_lock); 1225 spin_unlock_irq(&n->list_lock);
1324 1226
1325 kfree(shared); 1227 kfree(shared);
1326 if (alien) { 1228 if (alien) {
@@ -1336,17 +1238,17 @@ free_array_cache:
1336 * shrink each nodelist to its limit. 1238 * shrink each nodelist to its limit.
1337 */ 1239 */
1338 list_for_each_entry(cachep, &slab_caches, list) { 1240 list_for_each_entry(cachep, &slab_caches, list) {
1339 l3 = cachep->nodelists[node]; 1241 n = cachep->node[node];
1340 if (!l3) 1242 if (!n)
1341 continue; 1243 continue;
1342 drain_freelist(cachep, l3, l3->free_objects); 1244 drain_freelist(cachep, n, n->free_objects);
1343 } 1245 }
1344} 1246}
1345 1247
1346static int __cpuinit cpuup_prepare(long cpu) 1248static int __cpuinit cpuup_prepare(long cpu)
1347{ 1249{
1348 struct kmem_cache *cachep; 1250 struct kmem_cache *cachep;
1349 struct kmem_list3 *l3 = NULL; 1251 struct kmem_cache_node *n = NULL;
1350 int node = cpu_to_mem(cpu); 1252 int node = cpu_to_mem(cpu);
1351 int err; 1253 int err;
1352 1254
@@ -1354,9 +1256,9 @@ static int __cpuinit cpuup_prepare(long cpu)
1354 * We need to do this right in the beginning since 1256 * We need to do this right in the beginning since
1355 * alloc_arraycache's are going to use this list. 1257 * alloc_arraycache's are going to use this list.
1356 * kmalloc_node allows us to add the slab to the right 1258 * kmalloc_node allows us to add the slab to the right
1357 * kmem_list3 and not this cpu's kmem_list3 1259 * kmem_cache_node and not this cpu's kmem_cache_node
1358 */ 1260 */
1359 err = init_cache_nodelists_node(node); 1261 err = init_cache_node_node(node);
1360 if (err < 0) 1262 if (err < 0)
1361 goto bad; 1263 goto bad;
1362 1264
@@ -1391,25 +1293,25 @@ static int __cpuinit cpuup_prepare(long cpu)
1391 } 1293 }
1392 } 1294 }
1393 cachep->array[cpu] = nc; 1295 cachep->array[cpu] = nc;
1394 l3 = cachep->nodelists[node]; 1296 n = cachep->node[node];
1395 BUG_ON(!l3); 1297 BUG_ON(!n);
1396 1298
1397 spin_lock_irq(&l3->list_lock); 1299 spin_lock_irq(&n->list_lock);
1398 if (!l3->shared) { 1300 if (!n->shared) {
1399 /* 1301 /*
1400 * We are serialised from CPU_DEAD or 1302 * We are serialised from CPU_DEAD or
1401 * CPU_UP_CANCELLED by the cpucontrol lock 1303 * CPU_UP_CANCELLED by the cpucontrol lock
1402 */ 1304 */
1403 l3->shared = shared; 1305 n->shared = shared;
1404 shared = NULL; 1306 shared = NULL;
1405 } 1307 }
1406#ifdef CONFIG_NUMA 1308#ifdef CONFIG_NUMA
1407 if (!l3->alien) { 1309 if (!n->alien) {
1408 l3->alien = alien; 1310 n->alien = alien;
1409 alien = NULL; 1311 alien = NULL;
1410 } 1312 }
1411#endif 1313#endif
1412 spin_unlock_irq(&l3->list_lock); 1314 spin_unlock_irq(&n->list_lock);
1413 kfree(shared); 1315 kfree(shared);
1414 free_alien_cache(alien); 1316 free_alien_cache(alien);
1415 if (cachep->flags & SLAB_DEBUG_OBJECTS) 1317 if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@ -1464,9 +1366,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1464 case CPU_DEAD_FROZEN: 1366 case CPU_DEAD_FROZEN:
1465 /* 1367 /*
1466 * Even if all the cpus of a node are down, we don't free the 1368 * Even if all the cpus of a node are down, we don't free the
1467 * kmem_list3 of any cache. This to avoid a race between 1369 * kmem_cache_node of any cache. This to avoid a race between
1468 * cpu_down, and a kmalloc allocation from another cpu for 1370 * cpu_down, and a kmalloc allocation from another cpu for
1469 * memory from the node of the cpu going down. The list3 1371 * memory from the node of the cpu going down. The node
1470 * structure is usually allocated from kmem_cache_create() and 1372 * structure is usually allocated from kmem_cache_create() and
1471 * gets destroyed at kmem_cache_destroy(). 1373 * gets destroyed at kmem_cache_destroy().
1472 */ 1374 */
@@ -1494,22 +1396,22 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
1494 * 1396 *
1495 * Must hold slab_mutex. 1397 * Must hold slab_mutex.
1496 */ 1398 */
1497static int __meminit drain_cache_nodelists_node(int node) 1399static int __meminit drain_cache_node_node(int node)
1498{ 1400{
1499 struct kmem_cache *cachep; 1401 struct kmem_cache *cachep;
1500 int ret = 0; 1402 int ret = 0;
1501 1403
1502 list_for_each_entry(cachep, &slab_caches, list) { 1404 list_for_each_entry(cachep, &slab_caches, list) {
1503 struct kmem_list3 *l3; 1405 struct kmem_cache_node *n;
1504 1406
1505 l3 = cachep->nodelists[node]; 1407 n = cachep->node[node];
1506 if (!l3) 1408 if (!n)
1507 continue; 1409 continue;
1508 1410
1509 drain_freelist(cachep, l3, l3->free_objects); 1411 drain_freelist(cachep, n, n->free_objects);
1510 1412
1511 if (!list_empty(&l3->slabs_full) || 1413 if (!list_empty(&n->slabs_full) ||
1512 !list_empty(&l3->slabs_partial)) { 1414 !list_empty(&n->slabs_partial)) {
1513 ret = -EBUSY; 1415 ret = -EBUSY;
1514 break; 1416 break;
1515 } 1417 }
@@ -1531,12 +1433,12 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
1531 switch (action) { 1433 switch (action) {
1532 case MEM_GOING_ONLINE: 1434 case MEM_GOING_ONLINE:
1533 mutex_lock(&slab_mutex); 1435 mutex_lock(&slab_mutex);
1534 ret = init_cache_nodelists_node(nid); 1436 ret = init_cache_node_node(nid);
1535 mutex_unlock(&slab_mutex); 1437 mutex_unlock(&slab_mutex);
1536 break; 1438 break;
1537 case MEM_GOING_OFFLINE: 1439 case MEM_GOING_OFFLINE:
1538 mutex_lock(&slab_mutex); 1440 mutex_lock(&slab_mutex);
1539 ret = drain_cache_nodelists_node(nid); 1441 ret = drain_cache_node_node(nid);
1540 mutex_unlock(&slab_mutex); 1442 mutex_unlock(&slab_mutex);
1541 break; 1443 break;
1542 case MEM_ONLINE: 1444 case MEM_ONLINE:
@@ -1551,37 +1453,37 @@ out:
1551#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ 1453#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1552 1454
1553/* 1455/*
1554 * swap the static kmem_list3 with kmalloced memory 1456 * swap the static kmem_cache_node with kmalloced memory
1555 */ 1457 */
1556static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list, 1458static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1557 int nodeid) 1459 int nodeid)
1558{ 1460{
1559 struct kmem_list3 *ptr; 1461 struct kmem_cache_node *ptr;
1560 1462
1561 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid); 1463 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1562 BUG_ON(!ptr); 1464 BUG_ON(!ptr);
1563 1465
1564 memcpy(ptr, list, sizeof(struct kmem_list3)); 1466 memcpy(ptr, list, sizeof(struct kmem_cache_node));
1565 /* 1467 /*
1566 * Do not assume that spinlocks can be initialized via memcpy: 1468 * Do not assume that spinlocks can be initialized via memcpy:
1567 */ 1469 */
1568 spin_lock_init(&ptr->list_lock); 1470 spin_lock_init(&ptr->list_lock);
1569 1471
1570 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1472 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1571 cachep->nodelists[nodeid] = ptr; 1473 cachep->node[nodeid] = ptr;
1572} 1474}
1573 1475
1574/* 1476/*
1575 * For setting up all the kmem_list3s for cache whose buffer_size is same as 1477 * For setting up all the kmem_cache_node for cache whose buffer_size is same as
1576 * size of kmem_list3. 1478 * size of kmem_cache_node.
1577 */ 1479 */
1578static void __init set_up_list3s(struct kmem_cache *cachep, int index) 1480static void __init set_up_node(struct kmem_cache *cachep, int index)
1579{ 1481{
1580 int node; 1482 int node;
1581 1483
1582 for_each_online_node(node) { 1484 for_each_online_node(node) {
1583 cachep->nodelists[node] = &initkmem_list3[index + node]; 1485 cachep->node[node] = &init_kmem_cache_node[index + node];
1584 cachep->nodelists[node]->next_reap = jiffies + 1486 cachep->node[node]->next_reap = jiffies +
1585 REAPTIMEOUT_LIST3 + 1487 REAPTIMEOUT_LIST3 +
1586 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1488 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1587 } 1489 }
@@ -1589,11 +1491,11 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1589 1491
1590/* 1492/*
1591 * The memory after the last cpu cache pointer is used for the 1493 * The memory after the last cpu cache pointer is used for the
1592 * the nodelists pointer. 1494 * the node pointer.
1593 */ 1495 */
1594static void setup_nodelists_pointer(struct kmem_cache *cachep) 1496static void setup_node_pointer(struct kmem_cache *cachep)
1595{ 1497{
1596 cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; 1498 cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
1597} 1499}
1598 1500
1599/* 1501/*
@@ -1602,20 +1504,18 @@ static void setup_nodelists_pointer(struct kmem_cache *cachep)
1602 */ 1504 */
1603void __init kmem_cache_init(void) 1505void __init kmem_cache_init(void)
1604{ 1506{
1605 struct cache_sizes *sizes;
1606 struct cache_names *names;
1607 int i; 1507 int i;
1608 1508
1609 kmem_cache = &kmem_cache_boot; 1509 kmem_cache = &kmem_cache_boot;
1610 setup_nodelists_pointer(kmem_cache); 1510 setup_node_pointer(kmem_cache);
1611 1511
1612 if (num_possible_nodes() == 1) 1512 if (num_possible_nodes() == 1)
1613 use_alien_caches = 0; 1513 use_alien_caches = 0;
1614 1514
1615 for (i = 0; i < NUM_INIT_LISTS; i++) 1515 for (i = 0; i < NUM_INIT_LISTS; i++)
1616 kmem_list3_init(&initkmem_list3[i]); 1516 kmem_cache_node_init(&init_kmem_cache_node[i]);
1617 1517
1618 set_up_list3s(kmem_cache, CACHE_CACHE); 1518 set_up_node(kmem_cache, CACHE_CACHE);
1619 1519
1620 /* 1520 /*
1621 * Fragmentation resistance on low memory - only use bigger 1521 * Fragmentation resistance on low memory - only use bigger
@@ -1631,7 +1531,7 @@ void __init kmem_cache_init(void)
1631 * kmem_cache structures of all caches, except kmem_cache itself: 1531 * kmem_cache structures of all caches, except kmem_cache itself:
1632 * kmem_cache is statically allocated. 1532 * kmem_cache is statically allocated.
1633 * Initially an __init data area is used for the head array and the 1533 * Initially an __init data area is used for the head array and the
1634 * kmem_list3 structures, it's replaced with a kmalloc allocated 1534 * kmem_cache_node structures, it's replaced with a kmalloc allocated
1635 * array at the end of the bootstrap. 1535 * array at the end of the bootstrap.
1636 * 2) Create the first kmalloc cache. 1536 * 2) Create the first kmalloc cache.
1637 * The struct kmem_cache for the new cache is allocated normally. 1537 * The struct kmem_cache for the new cache is allocated normally.
@@ -1640,7 +1540,7 @@ void __init kmem_cache_init(void)
1640 * head arrays. 1540 * head arrays.
1641 * 4) Replace the __init data head arrays for kmem_cache and the first 1541 * 4) Replace the __init data head arrays for kmem_cache and the first
1642 * kmalloc cache with kmalloc allocated arrays. 1542 * kmalloc cache with kmalloc allocated arrays.
1643 * 5) Replace the __init data for kmem_list3 for kmem_cache and 1543 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1644 * the other cache's with kmalloc allocated memory. 1544 * the other cache's with kmalloc allocated memory.
1645 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1545 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1646 */ 1546 */
@@ -1652,50 +1552,28 @@ void __init kmem_cache_init(void)
1652 */ 1552 */
1653 create_boot_cache(kmem_cache, "kmem_cache", 1553 create_boot_cache(kmem_cache, "kmem_cache",
1654 offsetof(struct kmem_cache, array[nr_cpu_ids]) + 1554 offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1655 nr_node_ids * sizeof(struct kmem_list3 *), 1555 nr_node_ids * sizeof(struct kmem_cache_node *),
1656 SLAB_HWCACHE_ALIGN); 1556 SLAB_HWCACHE_ALIGN);
1657 list_add(&kmem_cache->list, &slab_caches); 1557 list_add(&kmem_cache->list, &slab_caches);
1658 1558
1659 /* 2+3) create the kmalloc caches */ 1559 /* 2+3) create the kmalloc caches */
1660 sizes = malloc_sizes;
1661 names = cache_names;
1662 1560
1663 /* 1561 /*
1664 * Initialize the caches that provide memory for the array cache and the 1562 * Initialize the caches that provide memory for the array cache and the
1665 * kmem_list3 structures first. Without this, further allocations will 1563 * kmem_cache_node structures first. Without this, further allocations will
1666 * bug. 1564 * bug.
1667 */ 1565 */
1668 1566
1669 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name, 1567 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
1670 sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS); 1568 kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
1671 1569
1672 if (INDEX_AC != INDEX_L3) 1570 if (INDEX_AC != INDEX_NODE)
1673 sizes[INDEX_L3].cs_cachep = 1571 kmalloc_caches[INDEX_NODE] =
1674 create_kmalloc_cache(names[INDEX_L3].name, 1572 create_kmalloc_cache("kmalloc-node",
1675 sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS); 1573 kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1676 1574
1677 slab_early_init = 0; 1575 slab_early_init = 0;
1678 1576
1679 while (sizes->cs_size != ULONG_MAX) {
1680 /*
1681 * For performance, all the general caches are L1 aligned.
1682 * This should be particularly beneficial on SMP boxes, as it
1683 * eliminates "false sharing".
1684 * Note for systems short on memory removing the alignment will
1685 * allow tighter packing of the smaller caches.
1686 */
1687 if (!sizes->cs_cachep)
1688 sizes->cs_cachep = create_kmalloc_cache(names->name,
1689 sizes->cs_size, ARCH_KMALLOC_FLAGS);
1690
1691#ifdef CONFIG_ZONE_DMA
1692 sizes->cs_dmacachep = create_kmalloc_cache(
1693 names->name_dma, sizes->cs_size,
1694 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
1695#endif
1696 sizes++;
1697 names++;
1698 }
1699 /* 4) Replace the bootstrap head arrays */ 1577 /* 4) Replace the bootstrap head arrays */
1700 { 1578 {
1701 struct array_cache *ptr; 1579 struct array_cache *ptr;
@@ -1713,36 +1591,35 @@ void __init kmem_cache_init(void)
1713 1591
1714 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); 1592 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1715 1593
1716 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) 1594 BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
1717 != &initarray_generic.cache); 1595 != &initarray_generic.cache);
1718 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), 1596 memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
1719 sizeof(struct arraycache_init)); 1597 sizeof(struct arraycache_init));
1720 /* 1598 /*
1721 * Do not assume that spinlocks can be initialized via memcpy: 1599 * Do not assume that spinlocks can be initialized via memcpy:
1722 */ 1600 */
1723 spin_lock_init(&ptr->lock); 1601 spin_lock_init(&ptr->lock);
1724 1602
1725 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1603 kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
1726 ptr;
1727 } 1604 }
1728 /* 5) Replace the bootstrap kmem_list3's */ 1605 /* 5) Replace the bootstrap kmem_cache_node */
1729 { 1606 {
1730 int nid; 1607 int nid;
1731 1608
1732 for_each_online_node(nid) { 1609 for_each_online_node(nid) {
1733 init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid); 1610 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1734 1611
1735 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1612 init_list(kmalloc_caches[INDEX_AC],
1736 &initkmem_list3[SIZE_AC + nid], nid); 1613 &init_kmem_cache_node[SIZE_AC + nid], nid);
1737 1614
1738 if (INDEX_AC != INDEX_L3) { 1615 if (INDEX_AC != INDEX_NODE) {
1739 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1616 init_list(kmalloc_caches[INDEX_NODE],
1740 &initkmem_list3[SIZE_L3 + nid], nid); 1617 &init_kmem_cache_node[SIZE_NODE + nid], nid);
1741 } 1618 }
1742 } 1619 }
1743 } 1620 }
1744 1621
1745 slab_state = UP; 1622 create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1746} 1623}
1747 1624
1748void __init kmem_cache_init_late(void) 1625void __init kmem_cache_init_late(void)
@@ -1773,7 +1650,7 @@ void __init kmem_cache_init_late(void)
1773#ifdef CONFIG_NUMA 1650#ifdef CONFIG_NUMA
1774 /* 1651 /*
1775 * Register a memory hotplug callback that initializes and frees 1652 * Register a memory hotplug callback that initializes and frees
1776 * nodelists. 1653 * node.
1777 */ 1654 */
1778 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 1655 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1779#endif 1656#endif
@@ -1803,7 +1680,7 @@ __initcall(cpucache_init);
1803static noinline void 1680static noinline void
1804slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) 1681slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1805{ 1682{
1806 struct kmem_list3 *l3; 1683 struct kmem_cache_node *n;
1807 struct slab *slabp; 1684 struct slab *slabp;
1808 unsigned long flags; 1685 unsigned long flags;
1809 int node; 1686 int node;
@@ -1818,24 +1695,24 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1818 unsigned long active_objs = 0, num_objs = 0, free_objects = 0; 1695 unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1819 unsigned long active_slabs = 0, num_slabs = 0; 1696 unsigned long active_slabs = 0, num_slabs = 0;
1820 1697
1821 l3 = cachep->nodelists[node]; 1698 n = cachep->node[node];
1822 if (!l3) 1699 if (!n)
1823 continue; 1700 continue;
1824 1701
1825 spin_lock_irqsave(&l3->list_lock, flags); 1702 spin_lock_irqsave(&n->list_lock, flags);
1826 list_for_each_entry(slabp, &l3->slabs_full, list) { 1703 list_for_each_entry(slabp, &n->slabs_full, list) {
1827 active_objs += cachep->num; 1704 active_objs += cachep->num;
1828 active_slabs++; 1705 active_slabs++;
1829 } 1706 }
1830 list_for_each_entry(slabp, &l3->slabs_partial, list) { 1707 list_for_each_entry(slabp, &n->slabs_partial, list) {
1831 active_objs += slabp->inuse; 1708 active_objs += slabp->inuse;
1832 active_slabs++; 1709 active_slabs++;
1833 } 1710 }
1834 list_for_each_entry(slabp, &l3->slabs_free, list) 1711 list_for_each_entry(slabp, &n->slabs_free, list)
1835 num_slabs++; 1712 num_slabs++;
1836 1713
1837 free_objects += l3->free_objects; 1714 free_objects += n->free_objects;
1838 spin_unlock_irqrestore(&l3->list_lock, flags); 1715 spin_unlock_irqrestore(&n->list_lock, flags);
1839 1716
1840 num_slabs += active_slabs; 1717 num_slabs += active_slabs;
1841 num_objs = num_slabs * cachep->num; 1718 num_objs = num_slabs * cachep->num;
@@ -2258,7 +2135,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2258 if (slab_state == DOWN) { 2135 if (slab_state == DOWN) {
2259 /* 2136 /*
2260 * Note: Creation of first cache (kmem_cache). 2137 * Note: Creation of first cache (kmem_cache).
2261 * The setup_list3s is taken care 2138 * The setup_node is taken care
2262 * of by the caller of __kmem_cache_create 2139 * of by the caller of __kmem_cache_create
2263 */ 2140 */
2264 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2141 cachep->array[smp_processor_id()] = &initarray_generic.cache;
@@ -2272,13 +2149,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2272 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2149 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2273 2150
2274 /* 2151 /*
2275 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is 2152 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
2276 * the second cache, then we need to set up all its list3s, 2153 * the second cache, then we need to set up all its node/,
2277 * otherwise the creation of further caches will BUG(). 2154 * otherwise the creation of further caches will BUG().
2278 */ 2155 */
2279 set_up_list3s(cachep, SIZE_AC); 2156 set_up_node(cachep, SIZE_AC);
2280 if (INDEX_AC == INDEX_L3) 2157 if (INDEX_AC == INDEX_NODE)
2281 slab_state = PARTIAL_L3; 2158 slab_state = PARTIAL_NODE;
2282 else 2159 else
2283 slab_state = PARTIAL_ARRAYCACHE; 2160 slab_state = PARTIAL_ARRAYCACHE;
2284 } else { 2161 } else {
@@ -2287,20 +2164,20 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2287 kmalloc(sizeof(struct arraycache_init), gfp); 2164 kmalloc(sizeof(struct arraycache_init), gfp);
2288 2165
2289 if (slab_state == PARTIAL_ARRAYCACHE) { 2166 if (slab_state == PARTIAL_ARRAYCACHE) {
2290 set_up_list3s(cachep, SIZE_L3); 2167 set_up_node(cachep, SIZE_NODE);
2291 slab_state = PARTIAL_L3; 2168 slab_state = PARTIAL_NODE;
2292 } else { 2169 } else {
2293 int node; 2170 int node;
2294 for_each_online_node(node) { 2171 for_each_online_node(node) {
2295 cachep->nodelists[node] = 2172 cachep->node[node] =
2296 kmalloc_node(sizeof(struct kmem_list3), 2173 kmalloc_node(sizeof(struct kmem_cache_node),
2297 gfp, node); 2174 gfp, node);
2298 BUG_ON(!cachep->nodelists[node]); 2175 BUG_ON(!cachep->node[node]);
2299 kmem_list3_init(cachep->nodelists[node]); 2176 kmem_cache_node_init(cachep->node[node]);
2300 } 2177 }
2301 } 2178 }
2302 } 2179 }
2303 cachep->nodelists[numa_mem_id()]->next_reap = 2180 cachep->node[numa_mem_id()]->next_reap =
2304 jiffies + REAPTIMEOUT_LIST3 + 2181 jiffies + REAPTIMEOUT_LIST3 +
2305 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2182 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2306 2183
@@ -2403,7 +2280,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2403 else 2280 else
2404 gfp = GFP_NOWAIT; 2281 gfp = GFP_NOWAIT;
2405 2282
2406 setup_nodelists_pointer(cachep); 2283 setup_node_pointer(cachep);
2407#if DEBUG 2284#if DEBUG
2408 2285
2409 /* 2286 /*
@@ -2426,7 +2303,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2426 size += BYTES_PER_WORD; 2303 size += BYTES_PER_WORD;
2427 } 2304 }
2428#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2305#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2429 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2306 if (size >= kmalloc_size(INDEX_NODE + 1)
2430 && cachep->object_size > cache_line_size() 2307 && cachep->object_size > cache_line_size()
2431 && ALIGN(size, cachep->align) < PAGE_SIZE) { 2308 && ALIGN(size, cachep->align) < PAGE_SIZE) {
2432 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); 2309 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
@@ -2497,7 +2374,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2497 cachep->reciprocal_buffer_size = reciprocal_value(size); 2374 cachep->reciprocal_buffer_size = reciprocal_value(size);
2498 2375
2499 if (flags & CFLGS_OFF_SLAB) { 2376 if (flags & CFLGS_OFF_SLAB) {
2500 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2377 cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
2501 /* 2378 /*
2502 * This is a possibility for one of the malloc_sizes caches. 2379 * This is a possibility for one of the malloc_sizes caches.
2503 * But since we go off slab only for object size greater than 2380 * But since we go off slab only for object size greater than
@@ -2543,7 +2420,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
2543{ 2420{
2544#ifdef CONFIG_SMP 2421#ifdef CONFIG_SMP
2545 check_irq_off(); 2422 check_irq_off();
2546 assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock); 2423 assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
2547#endif 2424#endif
2548} 2425}
2549 2426
@@ -2551,7 +2428,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2551{ 2428{
2552#ifdef CONFIG_SMP 2429#ifdef CONFIG_SMP
2553 check_irq_off(); 2430 check_irq_off();
2554 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2431 assert_spin_locked(&cachep->node[node]->list_lock);
2555#endif 2432#endif
2556} 2433}
2557 2434
@@ -2562,7 +2439,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2562#define check_spinlock_acquired_node(x, y) do { } while(0) 2439#define check_spinlock_acquired_node(x, y) do { } while(0)
2563#endif 2440#endif
2564 2441
2565static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2442static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2566 struct array_cache *ac, 2443 struct array_cache *ac,
2567 int force, int node); 2444 int force, int node);
2568 2445
@@ -2574,29 +2451,29 @@ static void do_drain(void *arg)
2574 2451
2575 check_irq_off(); 2452 check_irq_off();
2576 ac = cpu_cache_get(cachep); 2453 ac = cpu_cache_get(cachep);
2577 spin_lock(&cachep->nodelists[node]->list_lock); 2454 spin_lock(&cachep->node[node]->list_lock);
2578 free_block(cachep, ac->entry, ac->avail, node); 2455 free_block(cachep, ac->entry, ac->avail, node);
2579 spin_unlock(&cachep->nodelists[node]->list_lock); 2456 spin_unlock(&cachep->node[node]->list_lock);
2580 ac->avail = 0; 2457 ac->avail = 0;
2581} 2458}
2582 2459
2583static void drain_cpu_caches(struct kmem_cache *cachep) 2460static void drain_cpu_caches(struct kmem_cache *cachep)
2584{ 2461{
2585 struct kmem_list3 *l3; 2462 struct kmem_cache_node *n;
2586 int node; 2463 int node;
2587 2464
2588 on_each_cpu(do_drain, cachep, 1); 2465 on_each_cpu(do_drain, cachep, 1);
2589 check_irq_on(); 2466 check_irq_on();
2590 for_each_online_node(node) { 2467 for_each_online_node(node) {
2591 l3 = cachep->nodelists[node]; 2468 n = cachep->node[node];
2592 if (l3 && l3->alien) 2469 if (n && n->alien)
2593 drain_alien_cache(cachep, l3->alien); 2470 drain_alien_cache(cachep, n->alien);
2594 } 2471 }
2595 2472
2596 for_each_online_node(node) { 2473 for_each_online_node(node) {
2597 l3 = cachep->nodelists[node]; 2474 n = cachep->node[node];
2598 if (l3) 2475 if (n)
2599 drain_array(cachep, l3, l3->shared, 1, node); 2476 drain_array(cachep, n, n->shared, 1, node);
2600 } 2477 }
2601} 2478}
2602 2479
@@ -2607,19 +2484,19 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
2607 * Returns the actual number of slabs released. 2484 * Returns the actual number of slabs released.
2608 */ 2485 */
2609static int drain_freelist(struct kmem_cache *cache, 2486static int drain_freelist(struct kmem_cache *cache,
2610 struct kmem_list3 *l3, int tofree) 2487 struct kmem_cache_node *n, int tofree)
2611{ 2488{
2612 struct list_head *p; 2489 struct list_head *p;
2613 int nr_freed; 2490 int nr_freed;
2614 struct slab *slabp; 2491 struct slab *slabp;
2615 2492
2616 nr_freed = 0; 2493 nr_freed = 0;
2617 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { 2494 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2618 2495
2619 spin_lock_irq(&l3->list_lock); 2496 spin_lock_irq(&n->list_lock);
2620 p = l3->slabs_free.prev; 2497 p = n->slabs_free.prev;
2621 if (p == &l3->slabs_free) { 2498 if (p == &n->slabs_free) {
2622 spin_unlock_irq(&l3->list_lock); 2499 spin_unlock_irq(&n->list_lock);
2623 goto out; 2500 goto out;
2624 } 2501 }
2625 2502
@@ -2632,8 +2509,8 @@ static int drain_freelist(struct kmem_cache *cache,
2632 * Safe to drop the lock. The slab is no longer linked 2509 * Safe to drop the lock. The slab is no longer linked
2633 * to the cache. 2510 * to the cache.
2634 */ 2511 */
2635 l3->free_objects -= cache->num; 2512 n->free_objects -= cache->num;
2636 spin_unlock_irq(&l3->list_lock); 2513 spin_unlock_irq(&n->list_lock);
2637 slab_destroy(cache, slabp); 2514 slab_destroy(cache, slabp);
2638 nr_freed++; 2515 nr_freed++;
2639 } 2516 }
@@ -2645,20 +2522,20 @@ out:
2645static int __cache_shrink(struct kmem_cache *cachep) 2522static int __cache_shrink(struct kmem_cache *cachep)
2646{ 2523{
2647 int ret = 0, i = 0; 2524 int ret = 0, i = 0;
2648 struct kmem_list3 *l3; 2525 struct kmem_cache_node *n;
2649 2526
2650 drain_cpu_caches(cachep); 2527 drain_cpu_caches(cachep);
2651 2528
2652 check_irq_on(); 2529 check_irq_on();
2653 for_each_online_node(i) { 2530 for_each_online_node(i) {
2654 l3 = cachep->nodelists[i]; 2531 n = cachep->node[i];
2655 if (!l3) 2532 if (!n)
2656 continue; 2533 continue;
2657 2534
2658 drain_freelist(cachep, l3, l3->free_objects); 2535 drain_freelist(cachep, n, n->free_objects);
2659 2536
2660 ret += !list_empty(&l3->slabs_full) || 2537 ret += !list_empty(&n->slabs_full) ||
2661 !list_empty(&l3->slabs_partial); 2538 !list_empty(&n->slabs_partial);
2662 } 2539 }
2663 return (ret ? 1 : 0); 2540 return (ret ? 1 : 0);
2664} 2541}
@@ -2687,7 +2564,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
2687int __kmem_cache_shutdown(struct kmem_cache *cachep) 2564int __kmem_cache_shutdown(struct kmem_cache *cachep)
2688{ 2565{
2689 int i; 2566 int i;
2690 struct kmem_list3 *l3; 2567 struct kmem_cache_node *n;
2691 int rc = __cache_shrink(cachep); 2568 int rc = __cache_shrink(cachep);
2692 2569
2693 if (rc) 2570 if (rc)
@@ -2696,13 +2573,13 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
2696 for_each_online_cpu(i) 2573 for_each_online_cpu(i)
2697 kfree(cachep->array[i]); 2574 kfree(cachep->array[i]);
2698 2575
2699 /* NUMA: free the list3 structures */ 2576 /* NUMA: free the node structures */
2700 for_each_online_node(i) { 2577 for_each_online_node(i) {
2701 l3 = cachep->nodelists[i]; 2578 n = cachep->node[i];
2702 if (l3) { 2579 if (n) {
2703 kfree(l3->shared); 2580 kfree(n->shared);
2704 free_alien_cache(l3->alien); 2581 free_alien_cache(n->alien);
2705 kfree(l3); 2582 kfree(n);
2706 } 2583 }
2707 } 2584 }
2708 return 0; 2585 return 0;
@@ -2884,7 +2761,7 @@ static int cache_grow(struct kmem_cache *cachep,
2884 struct slab *slabp; 2761 struct slab *slabp;
2885 size_t offset; 2762 size_t offset;
2886 gfp_t local_flags; 2763 gfp_t local_flags;
2887 struct kmem_list3 *l3; 2764 struct kmem_cache_node *n;
2888 2765
2889 /* 2766 /*
2890 * Be lazy and only check for valid flags here, keeping it out of the 2767 * Be lazy and only check for valid flags here, keeping it out of the
@@ -2893,17 +2770,17 @@ static int cache_grow(struct kmem_cache *cachep,
2893 BUG_ON(flags & GFP_SLAB_BUG_MASK); 2770 BUG_ON(flags & GFP_SLAB_BUG_MASK);
2894 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 2771 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2895 2772
2896 /* Take the l3 list lock to change the colour_next on this node */ 2773 /* Take the node list lock to change the colour_next on this node */
2897 check_irq_off(); 2774 check_irq_off();
2898 l3 = cachep->nodelists[nodeid]; 2775 n = cachep->node[nodeid];
2899 spin_lock(&l3->list_lock); 2776 spin_lock(&n->list_lock);
2900 2777
2901 /* Get colour for the slab, and cal the next value. */ 2778 /* Get colour for the slab, and cal the next value. */
2902 offset = l3->colour_next; 2779 offset = n->colour_next;
2903 l3->colour_next++; 2780 n->colour_next++;
2904 if (l3->colour_next >= cachep->colour) 2781 if (n->colour_next >= cachep->colour)
2905 l3->colour_next = 0; 2782 n->colour_next = 0;
2906 spin_unlock(&l3->list_lock); 2783 spin_unlock(&n->list_lock);
2907 2784
2908 offset *= cachep->colour_off; 2785 offset *= cachep->colour_off;
2909 2786
@@ -2940,13 +2817,13 @@ static int cache_grow(struct kmem_cache *cachep,
2940 if (local_flags & __GFP_WAIT) 2817 if (local_flags & __GFP_WAIT)
2941 local_irq_disable(); 2818 local_irq_disable();
2942 check_irq_off(); 2819 check_irq_off();
2943 spin_lock(&l3->list_lock); 2820 spin_lock(&n->list_lock);
2944 2821
2945 /* Make slab active. */ 2822 /* Make slab active. */
2946 list_add_tail(&slabp->list, &(l3->slabs_free)); 2823 list_add_tail(&slabp->list, &(n->slabs_free));
2947 STATS_INC_GROWN(cachep); 2824 STATS_INC_GROWN(cachep);
2948 l3->free_objects += cachep->num; 2825 n->free_objects += cachep->num;
2949 spin_unlock(&l3->list_lock); 2826 spin_unlock(&n->list_lock);
2950 return 1; 2827 return 1;
2951opps1: 2828opps1:
2952 kmem_freepages(cachep, objp); 2829 kmem_freepages(cachep, objp);
@@ -3074,7 +2951,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
3074 bool force_refill) 2951 bool force_refill)
3075{ 2952{
3076 int batchcount; 2953 int batchcount;
3077 struct kmem_list3 *l3; 2954 struct kmem_cache_node *n;
3078 struct array_cache *ac; 2955 struct array_cache *ac;
3079 int node; 2956 int node;
3080 2957
@@ -3093,14 +2970,14 @@ retry:
3093 */ 2970 */
3094 batchcount = BATCHREFILL_LIMIT; 2971 batchcount = BATCHREFILL_LIMIT;
3095 } 2972 }
3096 l3 = cachep->nodelists[node]; 2973 n = cachep->node[node];
3097 2974
3098 BUG_ON(ac->avail > 0 || !l3); 2975 BUG_ON(ac->avail > 0 || !n);
3099 spin_lock(&l3->list_lock); 2976 spin_lock(&n->list_lock);
3100 2977
3101 /* See if we can refill from the shared array */ 2978 /* See if we can refill from the shared array */
3102 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) { 2979 if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
3103 l3->shared->touched = 1; 2980 n->shared->touched = 1;
3104 goto alloc_done; 2981 goto alloc_done;
3105 } 2982 }
3106 2983
@@ -3108,11 +2985,11 @@ retry:
3108 struct list_head *entry; 2985 struct list_head *entry;
3109 struct slab *slabp; 2986 struct slab *slabp;
3110 /* Get slab alloc is to come from. */ 2987 /* Get slab alloc is to come from. */
3111 entry = l3->slabs_partial.next; 2988 entry = n->slabs_partial.next;
3112 if (entry == &l3->slabs_partial) { 2989 if (entry == &n->slabs_partial) {
3113 l3->free_touched = 1; 2990 n->free_touched = 1;
3114 entry = l3->slabs_free.next; 2991 entry = n->slabs_free.next;
3115 if (entry == &l3->slabs_free) 2992 if (entry == &n->slabs_free)
3116 goto must_grow; 2993 goto must_grow;
3117 } 2994 }
3118 2995
@@ -3140,15 +3017,15 @@ retry:
3140 /* move slabp to correct slabp list: */ 3017 /* move slabp to correct slabp list: */
3141 list_del(&slabp->list); 3018 list_del(&slabp->list);
3142 if (slabp->free == BUFCTL_END) 3019 if (slabp->free == BUFCTL_END)
3143 list_add(&slabp->list, &l3->slabs_full); 3020 list_add(&slabp->list, &n->slabs_full);
3144 else 3021 else
3145 list_add(&slabp->list, &l3->slabs_partial); 3022 list_add(&slabp->list, &n->slabs_partial);
3146 } 3023 }
3147 3024
3148must_grow: 3025must_grow:
3149 l3->free_objects -= ac->avail; 3026 n->free_objects -= ac->avail;
3150alloc_done: 3027alloc_done:
3151 spin_unlock(&l3->list_lock); 3028 spin_unlock(&n->list_lock);
3152 3029
3153 if (unlikely(!ac->avail)) { 3030 if (unlikely(!ac->avail)) {
3154 int x; 3031 int x;
@@ -3315,7 +3192,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3315/* 3192/*
3316 * Fallback function if there was no memory available and no objects on a 3193 * Fallback function if there was no memory available and no objects on a
3317 * certain node and fall back is permitted. First we scan all the 3194 * certain node and fall back is permitted. First we scan all the
3318 * available nodelists for available objects. If that fails then we 3195 * available node for available objects. If that fails then we
3319 * perform an allocation without specifying a node. This allows the page 3196 * perform an allocation without specifying a node. This allows the page
3320 * allocator to do its reclaim / fallback magic. We then insert the 3197 * allocator to do its reclaim / fallback magic. We then insert the
3321 * slab into the proper nodelist and then allocate from it. 3198 * slab into the proper nodelist and then allocate from it.
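The comment above survives the rename mostly intact: fallback_alloc() first scans the other nodes' lists for ready objects and only then asks the page allocator for memory on any node. A condensed illustration of the first pass, with hypothetical names and the zonelist/cpuset checks omitted:

#define TOY_MAX_NODES 4

/* Illustrative per-node free-object counts; not cachep->node[]. */
struct toy_cache {
    long free_objects[TOY_MAX_NODES];   /* 0 or negative: nothing usable there */
};

/* First pass of the fallback: find any node that already has free objects.
 * Returns the node id, or -1 meaning "do a node-less allocation and let the
 * page allocator do its reclaim / fallback magic". */
int toy_fallback_scan(const struct toy_cache *cache)
{
    for (int nid = 0; nid < TOY_MAX_NODES; nid++)
        if (cache->free_objects[nid] > 0)
            return nid;
    return -1;
}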
@@ -3349,8 +3226,8 @@ retry:
3349 nid = zone_to_nid(zone); 3226 nid = zone_to_nid(zone);
3350 3227
3351 if (cpuset_zone_allowed_hardwall(zone, flags) && 3228 if (cpuset_zone_allowed_hardwall(zone, flags) &&
3352 cache->nodelists[nid] && 3229 cache->node[nid] &&
3353 cache->nodelists[nid]->free_objects) { 3230 cache->node[nid]->free_objects) {
3354 obj = ____cache_alloc_node(cache, 3231 obj = ____cache_alloc_node(cache,
3355 flags | GFP_THISNODE, nid); 3232 flags | GFP_THISNODE, nid);
3356 if (obj) 3233 if (obj)
@@ -3406,21 +3283,22 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3406{ 3283{
3407 struct list_head *entry; 3284 struct list_head *entry;
3408 struct slab *slabp; 3285 struct slab *slabp;
3409 struct kmem_list3 *l3; 3286 struct kmem_cache_node *n;
3410 void *obj; 3287 void *obj;
3411 int x; 3288 int x;
3412 3289
3413 l3 = cachep->nodelists[nodeid]; 3290 VM_BUG_ON(nodeid > num_online_nodes());
3414 BUG_ON(!l3); 3291 n = cachep->node[nodeid];
3292 BUG_ON(!n);
3415 3293
3416retry: 3294retry:
3417 check_irq_off(); 3295 check_irq_off();
3418 spin_lock(&l3->list_lock); 3296 spin_lock(&n->list_lock);
3419 entry = l3->slabs_partial.next; 3297 entry = n->slabs_partial.next;
3420 if (entry == &l3->slabs_partial) { 3298 if (entry == &n->slabs_partial) {
3421 l3->free_touched = 1; 3299 n->free_touched = 1;
3422 entry = l3->slabs_free.next; 3300 entry = n->slabs_free.next;
3423 if (entry == &l3->slabs_free) 3301 if (entry == &n->slabs_free)
3424 goto must_grow; 3302 goto must_grow;
3425 } 3303 }
3426 3304
@@ -3436,20 +3314,20 @@ retry:
3436 3314
3437 obj = slab_get_obj(cachep, slabp, nodeid); 3315 obj = slab_get_obj(cachep, slabp, nodeid);
3438 check_slabp(cachep, slabp); 3316 check_slabp(cachep, slabp);
3439 l3->free_objects--; 3317 n->free_objects--;
3440 /* move slabp to correct slabp list: */ 3318 /* move slabp to correct slabp list: */
3441 list_del(&slabp->list); 3319 list_del(&slabp->list);
3442 3320
3443 if (slabp->free == BUFCTL_END) 3321 if (slabp->free == BUFCTL_END)
3444 list_add(&slabp->list, &l3->slabs_full); 3322 list_add(&slabp->list, &n->slabs_full);
3445 else 3323 else
3446 list_add(&slabp->list, &l3->slabs_partial); 3324 list_add(&slabp->list, &n->slabs_partial);
3447 3325
3448 spin_unlock(&l3->list_lock); 3326 spin_unlock(&n->list_lock);
3449 goto done; 3327 goto done;
3450 3328
3451must_grow: 3329must_grow:
3452 spin_unlock(&l3->list_lock); 3330 spin_unlock(&n->list_lock);
3453 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); 3331 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3454 if (x) 3332 if (x)
3455 goto retry; 3333 goto retry;
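____cache_alloc_node() above keeps the same retry shape under the new names: take an object from the node's partial or free slabs, and if both lists are empty, grow the cache for that node and try again. A minimal sketch of that loop, with illustrative stand-ins rather than kernel calls:

#include <stdlib.h>

/* Illustrative placeholders, not kernel functions: one node with a counter
 * of immediately available objects, and a "grow" step adding a fresh slab. */
static int node_objs = 0;

static int toy_take_from_lists(void) { return node_objs > 0 ? node_objs-- : 0; }
static int toy_grow_node(void)       { node_objs += 4; return 1; }

/* Shape of ____cache_alloc_node() above: take from the node's partial/free
 * lists if possible, otherwise grow the cache for that node and retry; if
 * growing fails, the caller can still fall back to other nodes. */
void *toy_alloc_on_node(void)
{
    for (;;) {
        if (toy_take_from_lists())
            return malloc(1);       /* stand-in for the real object */
        if (!toy_grow_node())
            return NULL;
    }
}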
@@ -3495,7 +3373,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3495 if (nodeid == NUMA_NO_NODE) 3373 if (nodeid == NUMA_NO_NODE)
3496 nodeid = slab_node; 3374 nodeid = slab_node;
3497 3375
3498 if (unlikely(!cachep->nodelists[nodeid])) { 3376 if (unlikely(!cachep->node[nodeid])) {
3499 /* Node not bootstrapped yet */ 3377 /* Node not bootstrapped yet */
3500 ptr = fallback_alloc(cachep, flags); 3378 ptr = fallback_alloc(cachep, flags);
3501 goto out; 3379 goto out;
@@ -3601,7 +3479,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3601 int node) 3479 int node)
3602{ 3480{
3603 int i; 3481 int i;
3604 struct kmem_list3 *l3; 3482 struct kmem_cache_node *n;
3605 3483
3606 for (i = 0; i < nr_objects; i++) { 3484 for (i = 0; i < nr_objects; i++) {
3607 void *objp; 3485 void *objp;
@@ -3611,19 +3489,19 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3611 objp = objpp[i]; 3489 objp = objpp[i];
3612 3490
3613 slabp = virt_to_slab(objp); 3491 slabp = virt_to_slab(objp);
3614 l3 = cachep->nodelists[node]; 3492 n = cachep->node[node];
3615 list_del(&slabp->list); 3493 list_del(&slabp->list);
3616 check_spinlock_acquired_node(cachep, node); 3494 check_spinlock_acquired_node(cachep, node);
3617 check_slabp(cachep, slabp); 3495 check_slabp(cachep, slabp);
3618 slab_put_obj(cachep, slabp, objp, node); 3496 slab_put_obj(cachep, slabp, objp, node);
3619 STATS_DEC_ACTIVE(cachep); 3497 STATS_DEC_ACTIVE(cachep);
3620 l3->free_objects++; 3498 n->free_objects++;
3621 check_slabp(cachep, slabp); 3499 check_slabp(cachep, slabp);
3622 3500
3623 /* fixup slab chains */ 3501 /* fixup slab chains */
3624 if (slabp->inuse == 0) { 3502 if (slabp->inuse == 0) {
3625 if (l3->free_objects > l3->free_limit) { 3503 if (n->free_objects > n->free_limit) {
3626 l3->free_objects -= cachep->num; 3504 n->free_objects -= cachep->num;
3627 /* No need to drop any previously held 3505 /* No need to drop any previously held
3628 * lock here, even if we have a off-slab slab 3506 * lock here, even if we have a off-slab slab
3629 * descriptor it is guaranteed to come from 3507 * descriptor it is guaranteed to come from
@@ -3632,14 +3510,14 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3632 */ 3510 */
3633 slab_destroy(cachep, slabp); 3511 slab_destroy(cachep, slabp);
3634 } else { 3512 } else {
3635 list_add(&slabp->list, &l3->slabs_free); 3513 list_add(&slabp->list, &n->slabs_free);
3636 } 3514 }
3637 } else { 3515 } else {
3638 /* Unconditionally move a slab to the end of the 3516 /* Unconditionally move a slab to the end of the
3639 * partial list on free - maximum time for the 3517 * partial list on free - maximum time for the
3640 * other objects to be freed, too. 3518 * other objects to be freed, too.
3641 */ 3519 */
3642 list_add_tail(&slabp->list, &l3->slabs_partial); 3520 list_add_tail(&slabp->list, &n->slabs_partial);
3643 } 3521 }
3644 } 3522 }
3645} 3523}
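free_block() above still makes the same per-slab decision once an object has been returned: a slab that became empty is destroyed only when the node already caches more than free_limit free objects, otherwise it stays on slabs_free, and a slab that still holds live objects goes to the tail of the partial list. The decision, reduced to an illustrative helper:

/* Decision free_block() makes above for a slab that just had an object
 * returned to it (illustrative only, not the kernel code). */
enum toy_action { TOY_KEEP_PARTIAL_TAIL, TOY_KEEP_FREE, TOY_DESTROY };

enum toy_action toy_after_free(unsigned inuse, unsigned long node_free_objects,
                               unsigned long node_free_limit)
{
    if (inuse != 0)
        return TOY_KEEP_PARTIAL_TAIL;  /* still has live objects: tail of partial list */
    if (node_free_objects > node_free_limit)
        return TOY_DESTROY;            /* node already caches enough free objects */
    return TOY_KEEP_FREE;              /* keep the empty slab on slabs_free */
}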
@@ -3647,7 +3525,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3647static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3525static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3648{ 3526{
3649 int batchcount; 3527 int batchcount;
3650 struct kmem_list3 *l3; 3528 struct kmem_cache_node *n;
3651 int node = numa_mem_id(); 3529 int node = numa_mem_id();
3652 3530
3653 batchcount = ac->batchcount; 3531 batchcount = ac->batchcount;
@@ -3655,10 +3533,10 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3655 BUG_ON(!batchcount || batchcount > ac->avail); 3533 BUG_ON(!batchcount || batchcount > ac->avail);
3656#endif 3534#endif
3657 check_irq_off(); 3535 check_irq_off();
3658 l3 = cachep->nodelists[node]; 3536 n = cachep->node[node];
3659 spin_lock(&l3->list_lock); 3537 spin_lock(&n->list_lock);
3660 if (l3->shared) { 3538 if (n->shared) {
3661 struct array_cache *shared_array = l3->shared; 3539 struct array_cache *shared_array = n->shared;
3662 int max = shared_array->limit - shared_array->avail; 3540 int max = shared_array->limit - shared_array->avail;
3663 if (max) { 3541 if (max) {
3664 if (batchcount > max) 3542 if (batchcount > max)
@@ -3677,8 +3555,8 @@ free_done:
3677 int i = 0; 3555 int i = 0;
3678 struct list_head *p; 3556 struct list_head *p;
3679 3557
3680 p = l3->slabs_free.next; 3558 p = n->slabs_free.next;
3681 while (p != &(l3->slabs_free)) { 3559 while (p != &(n->slabs_free)) {
3682 struct slab *slabp; 3560 struct slab *slabp;
3683 3561
3684 slabp = list_entry(p, struct slab, list); 3562 slabp = list_entry(p, struct slab, list);
@@ -3690,7 +3568,7 @@ free_done:
3690 STATS_SET_FREEABLE(cachep, i); 3568 STATS_SET_FREEABLE(cachep, i);
3691 } 3569 }
3692#endif 3570#endif
3693 spin_unlock(&l3->list_lock); 3571 spin_unlock(&n->list_lock);
3694 ac->avail -= batchcount; 3572 ac->avail -= batchcount;
3695 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3573 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3696} 3574}
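cache_flusharray() above first tries to park the batch in the shared per-node array, capping batchcount at the remaining room, and only hands the batch to free_block() when that array has no room at all; the per-CPU array is then compacted with memmove(). A small sketch of the shared-array step (illustrative, not the kernel code):

/* If the shared per-node array has room, park as many objects as fit there
 * (capping the batch); otherwise the whole batch goes back to the slab
 * lists via a free_block()-style path. Returns how many were parked. */
int toy_flush_to_shared(int batchcount, int shared_limit, int *shared_avail)
{
    int room = shared_limit - *shared_avail;

    if (room > 0) {
        if (batchcount > room)
            batchcount = room;
        *shared_avail += batchcount;
        return batchcount;
    }
    return 0;               /* no room: caller frees the batch to the lists */
}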
@@ -3800,7 +3678,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3800{ 3678{
3801 struct kmem_cache *cachep; 3679 struct kmem_cache *cachep;
3802 3680
3803 cachep = kmem_find_general_cachep(size, flags); 3681 cachep = kmalloc_slab(size, flags);
3804 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3682 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3805 return cachep; 3683 return cachep;
3806 return kmem_cache_alloc_node_trace(cachep, flags, node, size); 3684 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
@@ -3845,7 +3723,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3845 * Then kmalloc uses the uninlined functions instead of the inline 3723 * Then kmalloc uses the uninlined functions instead of the inline
3846 * functions. 3724 * functions.
3847 */ 3725 */
3848 cachep = __find_general_cachep(size, flags); 3726 cachep = kmalloc_slab(size, flags);
3849 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3727 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3850 return cachep; 3728 return cachep;
3851 ret = slab_alloc(cachep, flags, caller); 3729 ret = slab_alloc(cachep, flags, caller);
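Both kmalloc paths above now resolve the request size through the common kmalloc_slab() helper instead of the slab-local lookups. Conceptually this is a size-to-cache mapping; the sketch below shows only the basic round-up-to-a-cache-size idea with hypothetical names, while the real helper also deals with the odd-sized small caches, DMA variants, zero-sized requests and oversized requests:

#include <stddef.h>

/* Rough idea behind a size -> kmalloc-cache lookup: round the request up to
 * the next power of two and use that as the cache index. Returns (size_t)-1
 * when no cache fits; the smallest cache here is assumed to be 8 bytes. */
size_t toy_kmalloc_index(size_t size, size_t max_order)
{
    size_t order = 3;

    if (size == 0 || size > ((size_t)1 << max_order))
        return (size_t)-1;
    while (((size_t)1 << order) < size)
        order++;
    return order;
}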
@@ -3934,12 +3812,12 @@ void kfree(const void *objp)
3934EXPORT_SYMBOL(kfree); 3812EXPORT_SYMBOL(kfree);
3935 3813
3936/* 3814/*
3937 * This initializes kmem_list3 or resizes various caches for all nodes. 3815 * This initializes kmem_cache_node or resizes various caches for all nodes.
3938 */ 3816 */
3939static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) 3817static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3940{ 3818{
3941 int node; 3819 int node;
3942 struct kmem_list3 *l3; 3820 struct kmem_cache_node *n;
3943 struct array_cache *new_shared; 3821 struct array_cache *new_shared;
3944 struct array_cache **new_alien = NULL; 3822 struct array_cache **new_alien = NULL;
3945 3823
@@ -3962,43 +3840,43 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3962 } 3840 }
3963 } 3841 }
3964 3842
3965 l3 = cachep->nodelists[node]; 3843 n = cachep->node[node];
3966 if (l3) { 3844 if (n) {
3967 struct array_cache *shared = l3->shared; 3845 struct array_cache *shared = n->shared;
3968 3846
3969 spin_lock_irq(&l3->list_lock); 3847 spin_lock_irq(&n->list_lock);
3970 3848
3971 if (shared) 3849 if (shared)
3972 free_block(cachep, shared->entry, 3850 free_block(cachep, shared->entry,
3973 shared->avail, node); 3851 shared->avail, node);
3974 3852
3975 l3->shared = new_shared; 3853 n->shared = new_shared;
3976 if (!l3->alien) { 3854 if (!n->alien) {
3977 l3->alien = new_alien; 3855 n->alien = new_alien;
3978 new_alien = NULL; 3856 new_alien = NULL;
3979 } 3857 }
3980 l3->free_limit = (1 + nr_cpus_node(node)) * 3858 n->free_limit = (1 + nr_cpus_node(node)) *
3981 cachep->batchcount + cachep->num; 3859 cachep->batchcount + cachep->num;
3982 spin_unlock_irq(&l3->list_lock); 3860 spin_unlock_irq(&n->list_lock);
3983 kfree(shared); 3861 kfree(shared);
3984 free_alien_cache(new_alien); 3862 free_alien_cache(new_alien);
3985 continue; 3863 continue;
3986 } 3864 }
3987 l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node); 3865 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3988 if (!l3) { 3866 if (!n) {
3989 free_alien_cache(new_alien); 3867 free_alien_cache(new_alien);
3990 kfree(new_shared); 3868 kfree(new_shared);
3991 goto fail; 3869 goto fail;
3992 } 3870 }
3993 3871
3994 kmem_list3_init(l3); 3872 kmem_cache_node_init(n);
3995 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3873 n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3996 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3874 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3997 l3->shared = new_shared; 3875 n->shared = new_shared;
3998 l3->alien = new_alien; 3876 n->alien = new_alien;
3999 l3->free_limit = (1 + nr_cpus_node(node)) * 3877 n->free_limit = (1 + nr_cpus_node(node)) *
4000 cachep->batchcount + cachep->num; 3878 cachep->batchcount + cachep->num;
4001 cachep->nodelists[node] = l3; 3879 cachep->node[node] = n;
4002 } 3880 }
4003 return 0; 3881 return 0;
4004 3882
@@ -4007,13 +3885,13 @@ fail:
4007 /* Cache is not active yet. Roll back what we did */ 3885 /* Cache is not active yet. Roll back what we did */
4008 node--; 3886 node--;
4009 while (node >= 0) { 3887 while (node >= 0) {
4010 if (cachep->nodelists[node]) { 3888 if (cachep->node[node]) {
4011 l3 = cachep->nodelists[node]; 3889 n = cachep->node[node];
4012 3890
4013 kfree(l3->shared); 3891 kfree(n->shared);
4014 free_alien_cache(l3->alien); 3892 free_alien_cache(n->alien);
4015 kfree(l3); 3893 kfree(n);
4016 cachep->nodelists[node] = NULL; 3894 cachep->node[node] = NULL;
4017 } 3895 }
4018 node--; 3896 node--;
4019 } 3897 }
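alloc_kmemlist() above computes the same free_limit in both branches: one batch per CPU on the node, plus one extra batch, plus one slab's worth of objects, before free_block() starts destroying empty slabs. The formula as a tiny illustrative helper:

/* free_limit used above for each node (illustrative):
 * (1 + cpus_on_node) * batchcount + objects_per_slab. */
unsigned long toy_free_limit(unsigned int cpus_on_node,
                             unsigned int batchcount,
                             unsigned int objs_per_slab)
{
    return (1UL + cpus_on_node) * batchcount + objs_per_slab;
}

For example, with 4 CPUs on the node, a batchcount of 16 and 32 objects per slab, up to (1 + 4) * 16 + 32 = 112 free objects are tolerated on that node before empty slabs get torn down.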
@@ -4073,9 +3951,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
4073 struct array_cache *ccold = new->new[i]; 3951 struct array_cache *ccold = new->new[i];
4074 if (!ccold) 3952 if (!ccold)
4075 continue; 3953 continue;
4076 spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); 3954 spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
4077 free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); 3955 free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
4078 spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); 3956 spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
4079 kfree(ccold); 3957 kfree(ccold);
4080 } 3958 }
4081 kfree(new); 3959 kfree(new);
@@ -4176,11 +4054,11 @@ skip_setup:
4176} 4054}
4177 4055
4178/* 4056/*
4179 * Drain an array if it contains any elements taking the l3 lock only if 4057 * Drain an array if it contains any elements taking the node lock only if
4180 * necessary. Note that the l3 listlock also protects the array_cache 4058 * necessary. Note that the node listlock also protects the array_cache
4181 * if drain_array() is used on the shared array. 4059 * if drain_array() is used on the shared array.
4182 */ 4060 */
4183static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 4061static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
4184 struct array_cache *ac, int force, int node) 4062 struct array_cache *ac, int force, int node)
4185{ 4063{
4186 int tofree; 4064 int tofree;
@@ -4190,7 +4068,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4190 if (ac->touched && !force) { 4068 if (ac->touched && !force) {
4191 ac->touched = 0; 4069 ac->touched = 0;
4192 } else { 4070 } else {
4193 spin_lock_irq(&l3->list_lock); 4071 spin_lock_irq(&n->list_lock);
4194 if (ac->avail) { 4072 if (ac->avail) {
4195 tofree = force ? ac->avail : (ac->limit + 4) / 5; 4073 tofree = force ? ac->avail : (ac->limit + 4) / 5;
4196 if (tofree > ac->avail) 4074 if (tofree > ac->avail)
@@ -4200,7 +4078,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4200 memmove(ac->entry, &(ac->entry[tofree]), 4078 memmove(ac->entry, &(ac->entry[tofree]),
4201 sizeof(void *) * ac->avail); 4079 sizeof(void *) * ac->avail);
4202 } 4080 }
4203 spin_unlock_irq(&l3->list_lock); 4081 spin_unlock_irq(&n->list_lock);
4204 } 4082 }
4205} 4083}
4206 4084
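drain_array() above encodes a gentle trimming policy: an array that was touched since the last pass is left alone unless the drain is forced, and an idle one gives up roughly a fifth of its limit, never more than it actually holds. The same policy as an illustrative helper:

/* How many objects the drain above would free (illustrative only). */
int toy_drain_count(int avail, int limit, int touched, int force)
{
    int tofree;

    if (!avail)
        return 0;
    if (touched && !force)
        return 0;
    tofree = force ? avail : (limit + 4) / 5;
    if (tofree > avail)
        tofree = avail;
    return tofree;
}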
@@ -4219,7 +4097,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4219static void cache_reap(struct work_struct *w) 4097static void cache_reap(struct work_struct *w)
4220{ 4098{
4221 struct kmem_cache *searchp; 4099 struct kmem_cache *searchp;
4222 struct kmem_list3 *l3; 4100 struct kmem_cache_node *n;
4223 int node = numa_mem_id(); 4101 int node = numa_mem_id();
4224 struct delayed_work *work = to_delayed_work(w); 4102 struct delayed_work *work = to_delayed_work(w);
4225 4103
@@ -4231,33 +4109,33 @@ static void cache_reap(struct work_struct *w)
4231 check_irq_on(); 4109 check_irq_on();
4232 4110
4233 /* 4111 /*
4234 * We only take the l3 lock if absolutely necessary and we 4112 * We only take the node lock if absolutely necessary and we
4235 * have established with reasonable certainty that 4113 * have established with reasonable certainty that
4236 * we can do some work if the lock was obtained. 4114 * we can do some work if the lock was obtained.
4237 */ 4115 */
4238 l3 = searchp->nodelists[node]; 4116 n = searchp->node[node];
4239 4117
4240 reap_alien(searchp, l3); 4118 reap_alien(searchp, n);
4241 4119
4242 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); 4120 drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
4243 4121
4244 /* 4122 /*
4245 * These are racy checks but it does not matter 4123 * These are racy checks but it does not matter
4246 * if we skip one check or scan twice. 4124 * if we skip one check or scan twice.
4247 */ 4125 */
4248 if (time_after(l3->next_reap, jiffies)) 4126 if (time_after(n->next_reap, jiffies))
4249 goto next; 4127 goto next;
4250 4128
4251 l3->next_reap = jiffies + REAPTIMEOUT_LIST3; 4129 n->next_reap = jiffies + REAPTIMEOUT_LIST3;
4252 4130
4253 drain_array(searchp, l3, l3->shared, 0, node); 4131 drain_array(searchp, n, n->shared, 0, node);
4254 4132
4255 if (l3->free_touched) 4133 if (n->free_touched)
4256 l3->free_touched = 0; 4134 n->free_touched = 0;
4257 else { 4135 else {
4258 int freed; 4136 int freed;
4259 4137
4260 freed = drain_freelist(searchp, l3, (l3->free_limit + 4138 freed = drain_freelist(searchp, n, (n->free_limit +
4261 5 * searchp->num - 1) / (5 * searchp->num)); 4139 5 * searchp->num - 1) / (5 * searchp->num));
4262 STATS_ADD_REAPED(searchp, freed); 4140 STATS_ADD_REAPED(searchp, freed);
4263 } 4141 }
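cache_reap() above works per node: drain the per-CPU array on every pass, drain the shared array and trim free slabs at most once per REAPTIMEOUT_LIST3 interval, and skip the trim entirely when the free lists were touched since the last run. The number of slabs it hands to drain_freelist() is about a fifth of free_limit, expressed in whole slabs:

/* Slab count cache_reap() asks drain_freelist() to discard above: roughly a
 * fifth of the node's free_limit, rounded up to whole slabs. Illustrative
 * helper, not a kernel API. */
int toy_slabs_to_reap(int free_limit, int objs_per_slab)
{
    return (free_limit + 5 * objs_per_slab - 1) / (5 * objs_per_slab);
}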
@@ -4283,25 +4161,25 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4283 const char *name; 4161 const char *name;
4284 char *error = NULL; 4162 char *error = NULL;
4285 int node; 4163 int node;
4286 struct kmem_list3 *l3; 4164 struct kmem_cache_node *n;
4287 4165
4288 active_objs = 0; 4166 active_objs = 0;
4289 num_slabs = 0; 4167 num_slabs = 0;
4290 for_each_online_node(node) { 4168 for_each_online_node(node) {
4291 l3 = cachep->nodelists[node]; 4169 n = cachep->node[node];
4292 if (!l3) 4170 if (!n)
4293 continue; 4171 continue;
4294 4172
4295 check_irq_on(); 4173 check_irq_on();
4296 spin_lock_irq(&l3->list_lock); 4174 spin_lock_irq(&n->list_lock);
4297 4175
4298 list_for_each_entry(slabp, &l3->slabs_full, list) { 4176 list_for_each_entry(slabp, &n->slabs_full, list) {
4299 if (slabp->inuse != cachep->num && !error) 4177 if (slabp->inuse != cachep->num && !error)
4300 error = "slabs_full accounting error"; 4178 error = "slabs_full accounting error";
4301 active_objs += cachep->num; 4179 active_objs += cachep->num;
4302 active_slabs++; 4180 active_slabs++;
4303 } 4181 }
4304 list_for_each_entry(slabp, &l3->slabs_partial, list) { 4182 list_for_each_entry(slabp, &n->slabs_partial, list) {
4305 if (slabp->inuse == cachep->num && !error) 4183 if (slabp->inuse == cachep->num && !error)
4306 error = "slabs_partial inuse accounting error"; 4184 error = "slabs_partial inuse accounting error";
4307 if (!slabp->inuse && !error) 4185 if (!slabp->inuse && !error)
@@ -4309,16 +4187,16 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4309 active_objs += slabp->inuse; 4187 active_objs += slabp->inuse;
4310 active_slabs++; 4188 active_slabs++;
4311 } 4189 }
4312 list_for_each_entry(slabp, &l3->slabs_free, list) { 4190 list_for_each_entry(slabp, &n->slabs_free, list) {
4313 if (slabp->inuse && !error) 4191 if (slabp->inuse && !error)
4314 error = "slabs_free/inuse accounting error"; 4192 error = "slabs_free/inuse accounting error";
4315 num_slabs++; 4193 num_slabs++;
4316 } 4194 }
4317 free_objects += l3->free_objects; 4195 free_objects += n->free_objects;
4318 if (l3->shared) 4196 if (n->shared)
4319 shared_avail += l3->shared->avail; 4197 shared_avail += n->shared->avail;
4320 4198
4321 spin_unlock_irq(&l3->list_lock); 4199 spin_unlock_irq(&n->list_lock);
4322 } 4200 }
4323 num_slabs += active_slabs; 4201 num_slabs += active_slabs;
4324 num_objs = num_slabs * cachep->num; 4202 num_objs = num_slabs * cachep->num;
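The slabinfo accounting above is unchanged apart from the naming: per node, every full slab counts as cachep->num active objects, every partial slab counts its inuse objects, and free slabs only contribute to the slab total, with the node's free_objects and shared->avail added on top. Reduced to an illustrative helper:

/* Per-node accounting done above (simplified, classifying by inuse rather
 * than by list membership). */
struct toy_counts { unsigned long active_objs, active_slabs, num_slabs; };

void toy_count_slab(struct toy_counts *c, unsigned inuse, unsigned objs_per_slab)
{
    if (inuse == objs_per_slab) {        /* behaves like a slabs_full entry */
        c->active_objs += objs_per_slab;
        c->active_slabs++;
    } else if (inuse != 0) {             /* behaves like a slabs_partial entry */
        c->active_objs += inuse;
        c->active_slabs++;
    } else {                             /* behaves like a slabs_free entry */
        c->num_slabs++;
    }
}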
@@ -4344,7 +4222,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4344void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) 4222void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4345{ 4223{
4346#if STATS 4224#if STATS
4347 { /* list3 stats */ 4225 { /* node stats */
4348 unsigned long high = cachep->high_mark; 4226 unsigned long high = cachep->high_mark;
4349 unsigned long allocs = cachep->num_allocations; 4227 unsigned long allocs = cachep->num_allocations;
4350 unsigned long grown = cachep->grown; 4228 unsigned long grown = cachep->grown;
@@ -4497,9 +4375,9 @@ static int leaks_show(struct seq_file *m, void *p)
4497{ 4375{
4498 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); 4376 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4499 struct slab *slabp; 4377 struct slab *slabp;
4500 struct kmem_list3 *l3; 4378 struct kmem_cache_node *n;
4501 const char *name; 4379 const char *name;
4502 unsigned long *n = m->private; 4380 unsigned long *x = m->private;
4503 int node; 4381 int node;
4504 int i; 4382 int i;
4505 4383
@@ -4510,43 +4388,43 @@ static int leaks_show(struct seq_file *m, void *p)
4510 4388
4511 /* OK, we can do it */ 4389 /* OK, we can do it */
4512 4390
4513 n[1] = 0; 4391 x[1] = 0;
4514 4392
4515 for_each_online_node(node) { 4393 for_each_online_node(node) {
4516 l3 = cachep->nodelists[node]; 4394 n = cachep->node[node];
4517 if (!l3) 4395 if (!n)
4518 continue; 4396 continue;
4519 4397
4520 check_irq_on(); 4398 check_irq_on();
4521 spin_lock_irq(&l3->list_lock); 4399 spin_lock_irq(&n->list_lock);
4522 4400
4523 list_for_each_entry(slabp, &l3->slabs_full, list) 4401 list_for_each_entry(slabp, &n->slabs_full, list)
4524 handle_slab(n, cachep, slabp); 4402 handle_slab(x, cachep, slabp);
4525 list_for_each_entry(slabp, &l3->slabs_partial, list) 4403 list_for_each_entry(slabp, &n->slabs_partial, list)
4526 handle_slab(n, cachep, slabp); 4404 handle_slab(x, cachep, slabp);
4527 spin_unlock_irq(&l3->list_lock); 4405 spin_unlock_irq(&n->list_lock);
4528 } 4406 }
4529 name = cachep->name; 4407 name = cachep->name;
4530 if (n[0] == n[1]) { 4408 if (x[0] == x[1]) {
4531 /* Increase the buffer size */ 4409 /* Increase the buffer size */
4532 mutex_unlock(&slab_mutex); 4410 mutex_unlock(&slab_mutex);
4533 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); 4411 m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4534 if (!m->private) { 4412 if (!m->private) {
4535 /* Too bad, we are really out */ 4413 /* Too bad, we are really out */
4536 m->private = n; 4414 m->private = x;
4537 mutex_lock(&slab_mutex); 4415 mutex_lock(&slab_mutex);
4538 return -ENOMEM; 4416 return -ENOMEM;
4539 } 4417 }
4540 *(unsigned long *)m->private = n[0] * 2; 4418 *(unsigned long *)m->private = x[0] * 2;
4541 kfree(n); 4419 kfree(x);
4542 mutex_lock(&slab_mutex); 4420 mutex_lock(&slab_mutex);
4543 /* Now make sure this entry will be retried */ 4421 /* Now make sure this entry will be retried */
4544 m->count = m->size; 4422 m->count = m->size;
4545 return 0; 4423 return 0;
4546 } 4424 }
4547 for (i = 0; i < n[1]; i++) { 4425 for (i = 0; i < x[1]; i++) {
4548 seq_printf(m, "%s: %lu ", name, n[2*i+3]); 4426 seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4549 show_symbol(m, n[2*i+2]); 4427 show_symbol(m, x[2*i+2]);
4550 seq_putc(m, '\n'); 4428 seq_putc(m, '\n');
4551 } 4429 }
4552 4430