path: root/mm/slab.c
author    Christoph Lameter <cl@linux.com>    2013-01-10 14:14:19 -0500
committer Pekka Enberg <penberg@kernel.org>   2013-02-01 05:32:06 -0500
commit    6a67368c36e2c0c2578ba62f6264ab739af08cce (patch)
tree      ee678d5cbcdebb8207a14eb898352c47f45ac9cf /mm/slab.c
parent    6744f087ba2a49f6d6935d9daa0b20a0f03567b5 (diff)
slab: Rename nodelists to node
Have a common naming between both slab caches for future changes.

Acked-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  135
1 file changed, 67 insertions, 68 deletions
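For orientation: the one non-mechanical piece of this patch is setup_node_pointer() (renamed from setup_nodelists_pointer() in the diff below). The per-node pointer array is not a separately allocated field; it lives in the memory directly after the nr_cpu_ids per-cpu array_cache pointers at the end of struct kmem_cache, and the renamed ->node field (formerly ->nodelists) is simply pointed past array[]. The following stand-alone user-space sketch illustrates that layout trick with simplified, made-up types and constants (NR_CPU_IDS, MAX_NODES); it is not the kernel's actual definitions.

/* Minimal sketch of the ->node layout trick (assumed, simplified types). */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPU_IDS 4   /* stand-in for nr_cpu_ids */
#define MAX_NODES  2   /* stand-in for the number of NUMA nodes */

struct array_cache { unsigned int avail, limit; };
struct kmem_cache_node { unsigned long free_objects; };

/* Heavily simplified: only the fields the layout trick needs. */
struct kmem_cache {
	unsigned int batchcount;
	struct kmem_cache_node **node;          /* called "nodelists" before this patch */
	struct array_cache *array[NR_CPU_IDS];  /* per-cpu caches; node pointers follow */
};

/* Mirrors the renamed setup_node_pointer(): ->node aliases the storage past array[]. */
static void setup_node_pointer(struct kmem_cache *cachep)
{
	cachep->node = (struct kmem_cache_node **)&cachep->array[NR_CPU_IDS];
}

int main(void)
{
	/* Allocate the struct plus trailing room for the per-node pointers. */
	struct kmem_cache *cachep =
		calloc(1, sizeof(*cachep) + MAX_NODES * sizeof(struct kmem_cache_node *));
	struct kmem_cache_node n0 = { .free_objects = 42 };

	if (!cachep)
		return 1;
	setup_node_pointer(cachep);
	cachep->node[0] = &n0;
	printf("node[0]->free_objects = %lu\n", cachep->node[0]->free_objects);
	free(cachep);
	return 0;
}

Because ->node only aliases that trailing storage, the rename does not change the cache layout; every cachep->nodelists[...] access in the hunks below becomes cachep->node[...] with identical semantics.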
diff --git a/mm/slab.c b/mm/slab.c
index 7c0da4c86973..3416f4c544b3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -347,7 +347,7 @@ static void kmem_list3_init(struct kmem_cache_node *parent)
 #define MAKE_LIST(cachep, listp, slab, nodeid) \
 	do { \
 		INIT_LIST_HEAD(listp); \
-		list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
+		list_splice(&(cachep->node[nodeid]->slab), listp); \
 	} while (0)
 
 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
@@ -549,7 +549,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 	struct kmem_cache_node *l3;
 	int r;
 
-	l3 = cachep->nodelists[q];
+	l3 = cachep->node[q];
 	if (!l3)
 		return;
 
@@ -597,7 +597,7 @@ static void init_node_lock_keys(int q)
 		if (!cache)
 			continue;
 
-		l3 = cache->nodelists[q];
+		l3 = cache->node[q];
 		if (!l3 || OFF_SLAB(cache))
 			continue;
 
@@ -608,8 +608,7 @@ static void init_node_lock_keys(int q)
 
 static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
 {
-
-	if (!cachep->nodelists[q])
+	if (!cachep->node[q])
 		return;
 
 	slab_set_lock_classes(cachep, &on_slab_l3_key,
@@ -900,7 +899,7 @@ static inline bool is_slab_pfmemalloc(struct slab *slabp)
 static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
 						struct array_cache *ac)
 {
-	struct kmem_cache_node *l3 = cachep->nodelists[numa_mem_id()];
+	struct kmem_cache_node *l3 = cachep->node[numa_mem_id()];
 	struct slab *slabp;
 	unsigned long flags;
 
@@ -955,7 +954,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 		 * If there are empty slabs on the slabs_free list and we are
 		 * being forced to refill the cache, mark this one !pfmemalloc.
 		 */
-		l3 = cachep->nodelists[numa_mem_id()];
+		l3 = cachep->node[numa_mem_id()];
 		if (!list_empty(&l3->slabs_free) && force_refill) {
 			struct slab *slabp = virt_to_slab(objp);
 			ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
@@ -1105,7 +1104,7 @@ static void free_alien_cache(struct array_cache **ac_ptr)
 static void __drain_alien_cache(struct kmem_cache *cachep,
 				struct array_cache *ac, int node)
 {
-	struct kmem_cache_node *rl3 = cachep->nodelists[node];
+	struct kmem_cache_node *rl3 = cachep->node[node];
 
 	if (ac->avail) {
 		spin_lock(&rl3->list_lock);
@@ -1174,7 +1173,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	if (likely(slabp->nodeid == node))
 		return 0;
 
-	l3 = cachep->nodelists[node];
+	l3 = cachep->node[node];
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
@@ -1186,24 +1185,24 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 		ac_put_obj(cachep, alien, objp);
 		spin_unlock(&alien->lock);
 	} else {
-		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+		spin_lock(&(cachep->node[nodeid])->list_lock);
 		free_block(cachep, &objp, 1, nodeid);
-		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+		spin_unlock(&(cachep->node[nodeid])->list_lock);
 	}
 	return 1;
 }
 #endif
 
 /*
- * Allocates and initializes nodelists for a node on each slab cache, used for
+ * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_list3
  * will be allocated off-node since memory is not yet online for the new node.
- * When hotplugging memory or a cpu, existing nodelists are not replaced if
+ * When hotplugging memory or a cpu, existing node are not replaced if
  * already in use.
  *
  * Must hold slab_mutex.
  */
-static int init_cache_nodelists_node(int node)
+static int init_cache_node_node(int node)
 {
 	struct kmem_cache *cachep;
 	struct kmem_cache_node *l3;
@@ -1215,7 +1214,7 @@ static int init_cache_nodelists_node(int node)
 		 * begin anything. Make sure some other cpu on this
 		 * node has not already allocated this
 		 */
-		if (!cachep->nodelists[node]) {
+		if (!cachep->node[node]) {
 			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
 			if (!l3)
 				return -ENOMEM;
@@ -1228,14 +1227,14 @@ static int init_cache_nodelists_node(int node)
 			 * go. slab_mutex is sufficient
 			 * protection here.
 			 */
-			cachep->nodelists[node] = l3;
+			cachep->node[node] = l3;
 		}
 
-		spin_lock_irq(&cachep->nodelists[node]->list_lock);
-		cachep->nodelists[node]->free_limit =
+		spin_lock_irq(&cachep->node[node]->list_lock);
+		cachep->node[node]->free_limit =
 			(1 + nr_cpus_node(node)) *
 			cachep->batchcount + cachep->num;
-		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+		spin_unlock_irq(&cachep->node[node]->list_lock);
 	}
 	return 0;
 }
@@ -1255,7 +1254,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		/* cpu is dead; no one can alloc from it. */
 		nc = cachep->array[cpu];
 		cachep->array[cpu] = NULL;
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 
 		if (!l3)
 			goto free_array_cache;
@@ -1298,7 +1297,7 @@ free_array_cache:
 	 * shrink each nodelist to its limit.
 	 */
 	list_for_each_entry(cachep, &slab_caches, list) {
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		if (!l3)
 			continue;
 		drain_freelist(cachep, l3, l3->free_objects);
@@ -1318,7 +1317,7 @@ static int __cpuinit cpuup_prepare(long cpu)
 	 * kmalloc_node allows us to add the slab to the right
 	 * kmem_list3 and not this cpu's kmem_list3
 	 */
-	err = init_cache_nodelists_node(node);
+	err = init_cache_node_node(node);
 	if (err < 0)
 		goto bad;
 
@@ -1353,7 +1352,7 @@ static int __cpuinit cpuup_prepare(long cpu)
 		}
 	}
 	cachep->array[cpu] = nc;
-	l3 = cachep->nodelists[node];
+	l3 = cachep->node[node];
 	BUG_ON(!l3);
 
 	spin_lock_irq(&l3->list_lock);
@@ -1456,7 +1455,7 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
  *
  * Must hold slab_mutex.
  */
-static int __meminit drain_cache_nodelists_node(int node)
+static int __meminit drain_cache_node_node(int node)
 {
 	struct kmem_cache *cachep;
 	int ret = 0;
@@ -1464,7 +1463,7 @@ static int __meminit drain_cache_nodelists_node(int node)
 	list_for_each_entry(cachep, &slab_caches, list) {
 		struct kmem_cache_node *l3;
 
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		if (!l3)
 			continue;
 
@@ -1493,12 +1492,12 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 	switch (action) {
 	case MEM_GOING_ONLINE:
 		mutex_lock(&slab_mutex);
-		ret = init_cache_nodelists_node(nid);
+		ret = init_cache_node_node(nid);
 		mutex_unlock(&slab_mutex);
 		break;
 	case MEM_GOING_OFFLINE:
 		mutex_lock(&slab_mutex);
-		ret = drain_cache_nodelists_node(nid);
+		ret = drain_cache_node_node(nid);
 		mutex_unlock(&slab_mutex);
 		break;
 	case MEM_ONLINE:
@@ -1530,7 +1529,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
 	spin_lock_init(&ptr->list_lock);
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
-	cachep->nodelists[nodeid] = ptr;
+	cachep->node[nodeid] = ptr;
 }
 
 /*
@@ -1542,8 +1541,8 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 	int node;
 
 	for_each_online_node(node) {
-		cachep->nodelists[node] = &initkmem_list3[index + node];
-		cachep->nodelists[node]->next_reap = jiffies +
+		cachep->node[node] = &initkmem_list3[index + node];
+		cachep->node[node]->next_reap = jiffies +
 		    REAPTIMEOUT_LIST3 +
 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 	}
@@ -1551,11 +1550,11 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 
 /*
  * The memory after the last cpu cache pointer is used for the
- * the nodelists pointer.
+ * the node pointer.
  */
-static void setup_nodelists_pointer(struct kmem_cache *cachep)
+static void setup_node_pointer(struct kmem_cache *cachep)
 {
-	cachep->nodelists = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
+	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
 }
 
 /*
@@ -1567,7 +1566,7 @@ void __init kmem_cache_init(void)
 	int i;
 
 	kmem_cache = &kmem_cache_boot;
-	setup_nodelists_pointer(kmem_cache);
+	setup_node_pointer(kmem_cache);
 
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
@@ -1756,7 +1755,7 @@ void __init kmem_cache_init_late(void)
 #ifdef CONFIG_NUMA
 	/*
 	 * Register a memory hotplug callback that initializes and frees
-	 * nodelists.
+	 * node.
 	 */
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 #endif
@@ -1801,7 +1800,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
 		unsigned long active_slabs = 0, num_slabs = 0;
 
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		if (!l3)
 			continue;
 
@@ -2277,15 +2276,15 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		} else {
 			int node;
 			for_each_online_node(node) {
-				cachep->nodelists[node] =
+				cachep->node[node] =
 					kmalloc_node(sizeof(struct kmem_cache_node),
 							gfp, node);
-				BUG_ON(!cachep->nodelists[node]);
-				kmem_list3_init(cachep->nodelists[node]);
+				BUG_ON(!cachep->node[node]);
+				kmem_list3_init(cachep->node[node]);
 			}
 		}
 	}
-	cachep->nodelists[numa_mem_id()]->next_reap =
+	cachep->node[numa_mem_id()]->next_reap =
 			jiffies + REAPTIMEOUT_LIST3 +
 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
@@ -2388,7 +2387,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	else
 		gfp = GFP_NOWAIT;
 
-	setup_nodelists_pointer(cachep);
+	setup_node_pointer(cachep);
 #if DEBUG
 
 	/*
@@ -2527,7 +2526,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
+	assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
 #endif
 }
 
@@ -2535,7 +2534,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&cachep->nodelists[node]->list_lock);
+	assert_spin_locked(&cachep->node[node]->list_lock);
 #endif
 }
 
@@ -2558,9 +2557,9 @@ static void do_drain(void *arg)
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
-	spin_lock(&cachep->nodelists[node]->list_lock);
+	spin_lock(&cachep->node[node]->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node);
-	spin_unlock(&cachep->nodelists[node]->list_lock);
+	spin_unlock(&cachep->node[node]->list_lock);
 	ac->avail = 0;
 }
 
@@ -2572,13 +2571,13 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	on_each_cpu(do_drain, cachep, 1);
 	check_irq_on();
 	for_each_online_node(node) {
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		if (l3 && l3->alien)
 			drain_alien_cache(cachep, l3->alien);
 	}
 
 	for_each_online_node(node) {
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		if (l3)
 			drain_array(cachep, l3, l3->shared, 1, node);
 	}
@@ -2635,7 +2634,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
 
 	check_irq_on();
 	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
+		l3 = cachep->node[i];
 		if (!l3)
 			continue;
 
@@ -2682,7 +2681,7 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 
 	/* NUMA: free the list3 structures */
 	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
+		l3 = cachep->node[i];
 		if (l3) {
 			kfree(l3->shared);
 			free_alien_cache(l3->alien);
@@ -2879,7 +2878,7 @@ static int cache_grow(struct kmem_cache *cachep,
 
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
-	l3 = cachep->nodelists[nodeid];
+	l3 = cachep->node[nodeid];
 	spin_lock(&l3->list_lock);
 
 	/* Get colour for the slab, and cal the next value. */
@@ -3077,7 +3076,7 @@ retry:
 		 */
 		batchcount = BATCHREFILL_LIMIT;
 	}
-	l3 = cachep->nodelists[node];
+	l3 = cachep->node[node];
 
 	BUG_ON(ac->avail > 0 || !l3);
 	spin_lock(&l3->list_lock);
@@ -3299,7 +3298,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 /*
  * Fallback function if there was no memory available and no objects on a
  * certain node and fall back is permitted. First we scan all the
- * available nodelists for available objects. If that fails then we
+ * available node for available objects. If that fails then we
  * perform an allocation without specifying a node. This allows the page
  * allocator to do its reclaim / fallback magic. We then insert the
  * slab into the proper nodelist and then allocate from it.
@@ -3333,8 +3332,8 @@ retry:
 		nid = zone_to_nid(zone);
 
 		if (cpuset_zone_allowed_hardwall(zone, flags) &&
-			cache->nodelists[nid] &&
-			cache->nodelists[nid]->free_objects) {
+			cache->node[nid] &&
+			cache->node[nid]->free_objects) {
 				obj = ____cache_alloc_node(cache,
 					flags | GFP_THISNODE, nid);
 				if (obj)
@@ -3394,7 +3393,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	void *obj;
 	int x;
 
-	l3 = cachep->nodelists[nodeid];
+	l3 = cachep->node[nodeid];
 	BUG_ON(!l3);
 
 retry:
@@ -3479,7 +3478,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	if (nodeid == NUMA_NO_NODE)
 		nodeid = slab_node;
 
-	if (unlikely(!cachep->nodelists[nodeid])) {
+	if (unlikely(!cachep->node[nodeid])) {
 		/* Node not bootstrapped yet */
 		ptr = fallback_alloc(cachep, flags);
 		goto out;
@@ -3595,7 +3594,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		objp = objpp[i];
 
 		slabp = virt_to_slab(objp);
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		list_del(&slabp->list);
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
@@ -3639,7 +3638,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 	BUG_ON(!batchcount || batchcount > ac->avail);
 #endif
 	check_irq_off();
-	l3 = cachep->nodelists[node];
+	l3 = cachep->node[node];
 	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
@@ -3946,7 +3945,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 			}
 		}
 
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		if (l3) {
 			struct array_cache *shared = l3->shared;
 
@@ -3982,7 +3981,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 		l3->alien = new_alien;
 		l3->free_limit = (1 + nr_cpus_node(node)) *
 					cachep->batchcount + cachep->num;
-		cachep->nodelists[node] = l3;
+		cachep->node[node] = l3;
 	}
 	return 0;
 
@@ -3991,13 +3990,13 @@ fail:
 	/* Cache is not active yet. Roll back what we did */
 	node--;
 	while (node >= 0) {
-		if (cachep->nodelists[node]) {
-			l3 = cachep->nodelists[node];
+		if (cachep->node[node]) {
+			l3 = cachep->node[node];
 
 			kfree(l3->shared);
 			free_alien_cache(l3->alien);
 			kfree(l3);
-			cachep->nodelists[node] = NULL;
+			cachep->node[node] = NULL;
 		}
 		node--;
 	}
@@ -4057,9 +4056,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		struct array_cache *ccold = new->new[i];
 		if (!ccold)
 			continue;
-		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+		spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
-		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+		spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
 		kfree(ccold);
 	}
 	kfree(new);
@@ -4219,7 +4218,7 @@ static void cache_reap(struct work_struct *w)
 		 * have established with reasonable certainty that
 		 * we can do some work if the lock was obtained.
 		 */
-		l3 = searchp->nodelists[node];
+		l3 = searchp->node[node];
 
 		reap_alien(searchp, l3);
 
@@ -4272,7 +4271,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 	active_objs = 0;
 	num_slabs = 0;
 	for_each_online_node(node) {
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		if (!l3)
 			continue;
 
@@ -4497,7 +4496,7 @@ static int leaks_show(struct seq_file *m, void *p)
 	n[1] = 0;
 
 	for_each_online_node(node) {
-		l3 = cachep->nodelists[node];
+		l3 = cachep->node[node];
 		if (!l3)
 			continue;
 