| author | Christoph Lameter <cl@linux.com> | 2014-08-06 19:04:11 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-06 21:01:13 -0400 |
| commit | 18bf854117c6caa4d0083bd42411895163467cb9 (patch) | |
| tree | 8d411ff12caf678b48232673b6c5db4b61b67428 | |
| parent | fa45dc254bcf740852752effa35387be684947f8 (diff) | |
slab: use get_node() and kmem_cache_node() functions
Use the two functions to simplify the code, avoiding the numerous explicit
checks for whether a given node is online.
Get rid of various repeated calculations of kmem_cache_node structures.
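For context, the conversions in the diff below lean on the common per-node accessors added to mm/slab.h earlier in this series. A minimal sketch of their shape, for orientation only (the definitions in mm/slab.h are authoritative; treat these as an approximation):

```c
/* Return the cache's per-node structure, or NULL if none is allocated. */
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Visit every node that actually has a kmem_cache_node allocated, so the
 * callers converted below no longer need explicit online/NULL checks.
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))
```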
[akpm@linux-foundation.org: fix build]
Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/slab.c | 173 |
1 file changed, 80 insertions(+), 93 deletions(-)
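The hunks that follow repeatedly apply the same transformation; as an illustrative before/after sketch of the pattern (not a verbatim excerpt of any single hunk):

```c
/* Before: walk online nodes and skip caches without a per-node structure. */
for_each_online_node(node) {
	n = cachep->node[node];
	if (!n)
		continue;
	drain_freelist(cachep, n, slabs_tofree(cachep, n));
}

/* After: the iterator only yields nodes with a kmem_cache_node allocated. */
for_each_kmem_cache_node(cachep, node, n)
	drain_freelist(cachep, n, slabs_tofree(cachep, n));
```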
@@ -267,7 +267,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) | |||
267 | #define MAKE_LIST(cachep, listp, slab, nodeid) \ | 267 | #define MAKE_LIST(cachep, listp, slab, nodeid) \ |
268 | do { \ | 268 | do { \ |
269 | INIT_LIST_HEAD(listp); \ | 269 | INIT_LIST_HEAD(listp); \ |
270 | list_splice(&(cachep->node[nodeid]->slab), listp); \ | 270 | list_splice(&get_node(cachep, nodeid)->slab, listp); \ |
271 | } while (0) | 271 | } while (0) |
272 | 272 | ||
273 | #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ | 273 | #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ |
@@ -488,16 +488,11 @@ static struct lock_class_key debugobj_alc_key; | |||
488 | 488 | ||
489 | static void slab_set_lock_classes(struct kmem_cache *cachep, | 489 | static void slab_set_lock_classes(struct kmem_cache *cachep, |
490 | struct lock_class_key *l3_key, struct lock_class_key *alc_key, | 490 | struct lock_class_key *l3_key, struct lock_class_key *alc_key, |
491 | int q) | 491 | struct kmem_cache_node *n) |
492 | { | 492 | { |
493 | struct array_cache **alc; | 493 | struct array_cache **alc; |
494 | struct kmem_cache_node *n; | ||
495 | int r; | 494 | int r; |
496 | 495 | ||
497 | n = cachep->node[q]; | ||
498 | if (!n) | ||
499 | return; | ||
500 | |||
501 | lockdep_set_class(&n->list_lock, l3_key); | 496 | lockdep_set_class(&n->list_lock, l3_key); |
502 | alc = n->alien; | 497 | alc = n->alien; |
503 | /* | 498 | /* |
@@ -515,17 +510,19 @@ static void slab_set_lock_classes(struct kmem_cache *cachep, | |||
515 | } | 510 | } |
516 | } | 511 | } |
517 | 512 | ||
518 | static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) | 513 | static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, |
514 | struct kmem_cache_node *n) | ||
519 | { | 515 | { |
520 | slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node); | 516 | slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, n); |
521 | } | 517 | } |
522 | 518 | ||
523 | static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) | 519 | static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) |
524 | { | 520 | { |
525 | int node; | 521 | int node; |
522 | struct kmem_cache_node *n; | ||
526 | 523 | ||
527 | for_each_online_node(node) | 524 | for_each_kmem_cache_node(cachep, node, n) |
528 | slab_set_debugobj_lock_classes_node(cachep, node); | 525 | slab_set_debugobj_lock_classes_node(cachep, n); |
529 | } | 526 | } |
530 | 527 | ||
531 | static void init_node_lock_keys(int q) | 528 | static void init_node_lock_keys(int q) |
@@ -542,31 +539,30 @@ static void init_node_lock_keys(int q) | |||
542 | if (!cache) | 539 | if (!cache) |
543 | continue; | 540 | continue; |
544 | 541 | ||
545 | n = cache->node[q]; | 542 | n = get_node(cache, q); |
546 | if (!n || OFF_SLAB(cache)) | 543 | if (!n || OFF_SLAB(cache)) |
547 | continue; | 544 | continue; |
548 | 545 | ||
549 | slab_set_lock_classes(cache, &on_slab_l3_key, | 546 | slab_set_lock_classes(cache, &on_slab_l3_key, |
550 | &on_slab_alc_key, q); | 547 | &on_slab_alc_key, n); |
551 | } | 548 | } |
552 | } | 549 | } |
553 | 550 | ||
554 | static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q) | 551 | static void on_slab_lock_classes_node(struct kmem_cache *cachep, |
552 | struct kmem_cache_node *n) | ||
555 | { | 553 | { |
556 | if (!cachep->node[q]) | ||
557 | return; | ||
558 | |||
559 | slab_set_lock_classes(cachep, &on_slab_l3_key, | 554 | slab_set_lock_classes(cachep, &on_slab_l3_key, |
560 | &on_slab_alc_key, q); | 555 | &on_slab_alc_key, n); |
561 | } | 556 | } |
562 | 557 | ||
563 | static inline void on_slab_lock_classes(struct kmem_cache *cachep) | 558 | static inline void on_slab_lock_classes(struct kmem_cache *cachep) |
564 | { | 559 | { |
565 | int node; | 560 | int node; |
561 | struct kmem_cache_node *n; | ||
566 | 562 | ||
567 | VM_BUG_ON(OFF_SLAB(cachep)); | 563 | VM_BUG_ON(OFF_SLAB(cachep)); |
568 | for_each_node(node) | 564 | for_each_kmem_cache_node(cachep, node, n) |
569 | on_slab_lock_classes_node(cachep, node); | 565 | on_slab_lock_classes_node(cachep, n); |
570 | } | 566 | } |
571 | 567 | ||
572 | static inline void __init init_lock_keys(void) | 568 | static inline void __init init_lock_keys(void) |
@@ -589,11 +585,13 @@ static inline void on_slab_lock_classes(struct kmem_cache *cachep) | |||
589 | { | 585 | { |
590 | } | 586 | } |
591 | 587 | ||
592 | static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node) | 588 | static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, |
589 | struct kmem_cache_node *n) | ||
593 | { | 590 | { |
594 | } | 591 | } |
595 | 592 | ||
596 | static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) | 593 | static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, |
594 | struct kmem_cache_node *n) | ||
597 | { | 595 | { |
598 | } | 596 | } |
599 | 597 | ||
@@ -826,7 +824,7 @@ static inline bool is_slab_pfmemalloc(struct page *page) | |||
826 | static void recheck_pfmemalloc_active(struct kmem_cache *cachep, | 824 | static void recheck_pfmemalloc_active(struct kmem_cache *cachep, |
827 | struct array_cache *ac) | 825 | struct array_cache *ac) |
828 | { | 826 | { |
829 | struct kmem_cache_node *n = cachep->node[numa_mem_id()]; | 827 | struct kmem_cache_node *n = get_node(cachep, numa_mem_id()); |
830 | struct page *page; | 828 | struct page *page; |
831 | unsigned long flags; | 829 | unsigned long flags; |
832 | 830 | ||
@@ -881,7 +879,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, | |||
881 | * If there are empty slabs on the slabs_free list and we are | 879 | * If there are empty slabs on the slabs_free list and we are |
882 | * being forced to refill the cache, mark this one !pfmemalloc. | 880 | * being forced to refill the cache, mark this one !pfmemalloc. |
883 | */ | 881 | */ |
884 | n = cachep->node[numa_mem_id()]; | 882 | n = get_node(cachep, numa_mem_id()); |
885 | if (!list_empty(&n->slabs_free) && force_refill) { | 883 | if (!list_empty(&n->slabs_free) && force_refill) { |
886 | struct page *page = virt_to_head_page(objp); | 884 | struct page *page = virt_to_head_page(objp); |
887 | ClearPageSlabPfmemalloc(page); | 885 | ClearPageSlabPfmemalloc(page); |
@@ -1031,7 +1029,7 @@ static void free_alien_cache(struct array_cache **ac_ptr) | |||
1031 | static void __drain_alien_cache(struct kmem_cache *cachep, | 1029 | static void __drain_alien_cache(struct kmem_cache *cachep, |
1032 | struct array_cache *ac, int node) | 1030 | struct array_cache *ac, int node) |
1033 | { | 1031 | { |
1034 | struct kmem_cache_node *n = cachep->node[node]; | 1032 | struct kmem_cache_node *n = get_node(cachep, node); |
1035 | 1033 | ||
1036 | if (ac->avail) { | 1034 | if (ac->avail) { |
1037 | spin_lock(&n->list_lock); | 1035 | spin_lock(&n->list_lock); |
@@ -1099,7 +1097,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1099 | if (likely(nodeid == node)) | 1097 | if (likely(nodeid == node)) |
1100 | return 0; | 1098 | return 0; |
1101 | 1099 | ||
1102 | n = cachep->node[node]; | 1100 | n = get_node(cachep, node); |
1103 | STATS_INC_NODEFREES(cachep); | 1101 | STATS_INC_NODEFREES(cachep); |
1104 | if (n->alien && n->alien[nodeid]) { | 1102 | if (n->alien && n->alien[nodeid]) { |
1105 | alien = n->alien[nodeid]; | 1103 | alien = n->alien[nodeid]; |
@@ -1111,9 +1109,10 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1111 | ac_put_obj(cachep, alien, objp); | 1109 | ac_put_obj(cachep, alien, objp); |
1112 | spin_unlock(&alien->lock); | 1110 | spin_unlock(&alien->lock); |
1113 | } else { | 1111 | } else { |
1114 | spin_lock(&(cachep->node[nodeid])->list_lock); | 1112 | n = get_node(cachep, nodeid); |
1113 | spin_lock(&n->list_lock); | ||
1115 | free_block(cachep, &objp, 1, nodeid); | 1114 | free_block(cachep, &objp, 1, nodeid); |
1116 | spin_unlock(&(cachep->node[nodeid])->list_lock); | 1115 | spin_unlock(&n->list_lock); |
1117 | } | 1116 | } |
1118 | return 1; | 1117 | return 1; |
1119 | } | 1118 | } |
@@ -1140,7 +1139,8 @@ static int init_cache_node_node(int node) | |||
1140 | * begin anything. Make sure some other cpu on this | 1139 | * begin anything. Make sure some other cpu on this |
1141 | * node has not already allocated this | 1140 | * node has not already allocated this |
1142 | */ | 1141 | */ |
1143 | if (!cachep->node[node]) { | 1142 | n = get_node(cachep, node); |
1143 | if (!n) { | ||
1144 | n = kmalloc_node(memsize, GFP_KERNEL, node); | 1144 | n = kmalloc_node(memsize, GFP_KERNEL, node); |
1145 | if (!n) | 1145 | if (!n) |
1146 | return -ENOMEM; | 1146 | return -ENOMEM; |
@@ -1156,11 +1156,11 @@ static int init_cache_node_node(int node) | |||
1156 | cachep->node[node] = n; | 1156 | cachep->node[node] = n; |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | spin_lock_irq(&cachep->node[node]->list_lock); | 1159 | spin_lock_irq(&n->list_lock); |
1160 | cachep->node[node]->free_limit = | 1160 | n->free_limit = |
1161 | (1 + nr_cpus_node(node)) * | 1161 | (1 + nr_cpus_node(node)) * |
1162 | cachep->batchcount + cachep->num; | 1162 | cachep->batchcount + cachep->num; |
1163 | spin_unlock_irq(&cachep->node[node]->list_lock); | 1163 | spin_unlock_irq(&n->list_lock); |
1164 | } | 1164 | } |
1165 | return 0; | 1165 | return 0; |
1166 | } | 1166 | } |
@@ -1186,7 +1186,7 @@ static void cpuup_canceled(long cpu) | |||
1186 | /* cpu is dead; no one can alloc from it. */ | 1186 | /* cpu is dead; no one can alloc from it. */ |
1187 | nc = cachep->array[cpu]; | 1187 | nc = cachep->array[cpu]; |
1188 | cachep->array[cpu] = NULL; | 1188 | cachep->array[cpu] = NULL; |
1189 | n = cachep->node[node]; | 1189 | n = get_node(cachep, node); |
1190 | 1190 | ||
1191 | if (!n) | 1191 | if (!n) |
1192 | goto free_array_cache; | 1192 | goto free_array_cache; |
@@ -1229,7 +1229,7 @@ free_array_cache: | |||
1229 | * shrink each nodelist to its limit. | 1229 | * shrink each nodelist to its limit. |
1230 | */ | 1230 | */ |
1231 | list_for_each_entry(cachep, &slab_caches, list) { | 1231 | list_for_each_entry(cachep, &slab_caches, list) { |
1232 | n = cachep->node[node]; | 1232 | n = get_node(cachep, node); |
1233 | if (!n) | 1233 | if (!n) |
1234 | continue; | 1234 | continue; |
1235 | drain_freelist(cachep, n, slabs_tofree(cachep, n)); | 1235 | drain_freelist(cachep, n, slabs_tofree(cachep, n)); |
@@ -1284,7 +1284,7 @@ static int cpuup_prepare(long cpu) | |||
1284 | } | 1284 | } |
1285 | } | 1285 | } |
1286 | cachep->array[cpu] = nc; | 1286 | cachep->array[cpu] = nc; |
1287 | n = cachep->node[node]; | 1287 | n = get_node(cachep, node); |
1288 | BUG_ON(!n); | 1288 | BUG_ON(!n); |
1289 | 1289 | ||
1290 | spin_lock_irq(&n->list_lock); | 1290 | spin_lock_irq(&n->list_lock); |
@@ -1306,10 +1306,10 @@ static int cpuup_prepare(long cpu) | |||
1306 | kfree(shared); | 1306 | kfree(shared); |
1307 | free_alien_cache(alien); | 1307 | free_alien_cache(alien); |
1308 | if (cachep->flags & SLAB_DEBUG_OBJECTS) | 1308 | if (cachep->flags & SLAB_DEBUG_OBJECTS) |
1309 | slab_set_debugobj_lock_classes_node(cachep, node); | 1309 | slab_set_debugobj_lock_classes_node(cachep, n); |
1310 | else if (!OFF_SLAB(cachep) && | 1310 | else if (!OFF_SLAB(cachep) && |
1311 | !(cachep->flags & SLAB_DESTROY_BY_RCU)) | 1311 | !(cachep->flags & SLAB_DESTROY_BY_RCU)) |
1312 | on_slab_lock_classes_node(cachep, node); | 1312 | on_slab_lock_classes_node(cachep, n); |
1313 | } | 1313 | } |
1314 | init_node_lock_keys(node); | 1314 | init_node_lock_keys(node); |
1315 | 1315 | ||
@@ -1395,7 +1395,7 @@ static int __meminit drain_cache_node_node(int node) | |||
1395 | list_for_each_entry(cachep, &slab_caches, list) { | 1395 | list_for_each_entry(cachep, &slab_caches, list) { |
1396 | struct kmem_cache_node *n; | 1396 | struct kmem_cache_node *n; |
1397 | 1397 | ||
1398 | n = cachep->node[node]; | 1398 | n = get_node(cachep, node); |
1399 | if (!n) | 1399 | if (!n) |
1400 | continue; | 1400 | continue; |
1401 | 1401 | ||
@@ -1690,14 +1690,10 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | |||
1690 | printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n", | 1690 | printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n", |
1691 | cachep->name, cachep->size, cachep->gfporder); | 1691 | cachep->name, cachep->size, cachep->gfporder); |
1692 | 1692 | ||
1693 | for_each_online_node(node) { | 1693 | for_each_kmem_cache_node(cachep, node, n) { |
1694 | unsigned long active_objs = 0, num_objs = 0, free_objects = 0; | 1694 | unsigned long active_objs = 0, num_objs = 0, free_objects = 0; |
1695 | unsigned long active_slabs = 0, num_slabs = 0; | 1695 | unsigned long active_slabs = 0, num_slabs = 0; |
1696 | 1696 | ||
1697 | n = cachep->node[node]; | ||
1698 | if (!n) | ||
1699 | continue; | ||
1700 | |||
1701 | spin_lock_irqsave(&n->list_lock, flags); | 1697 | spin_lock_irqsave(&n->list_lock, flags); |
1702 | list_for_each_entry(page, &n->slabs_full, lru) { | 1698 | list_for_each_entry(page, &n->slabs_full, lru) { |
1703 | active_objs += cachep->num; | 1699 | active_objs += cachep->num; |
@@ -2434,7 +2430,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep) | |||
2434 | { | 2430 | { |
2435 | #ifdef CONFIG_SMP | 2431 | #ifdef CONFIG_SMP |
2436 | check_irq_off(); | 2432 | check_irq_off(); |
2437 | assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock); | 2433 | assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); |
2438 | #endif | 2434 | #endif |
2439 | } | 2435 | } |
2440 | 2436 | ||
@@ -2442,7 +2438,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | |||
2442 | { | 2438 | { |
2443 | #ifdef CONFIG_SMP | 2439 | #ifdef CONFIG_SMP |
2444 | check_irq_off(); | 2440 | check_irq_off(); |
2445 | assert_spin_locked(&cachep->node[node]->list_lock); | 2441 | assert_spin_locked(&get_node(cachep, node)->list_lock); |
2446 | #endif | 2442 | #endif |
2447 | } | 2443 | } |
2448 | 2444 | ||
@@ -2462,12 +2458,14 @@ static void do_drain(void *arg) | |||
2462 | struct kmem_cache *cachep = arg; | 2458 | struct kmem_cache *cachep = arg; |
2463 | struct array_cache *ac; | 2459 | struct array_cache *ac; |
2464 | int node = numa_mem_id(); | 2460 | int node = numa_mem_id(); |
2461 | struct kmem_cache_node *n; | ||
2465 | 2462 | ||
2466 | check_irq_off(); | 2463 | check_irq_off(); |
2467 | ac = cpu_cache_get(cachep); | 2464 | ac = cpu_cache_get(cachep); |
2468 | spin_lock(&cachep->node[node]->list_lock); | 2465 | n = get_node(cachep, node); |
2466 | spin_lock(&n->list_lock); | ||
2469 | free_block(cachep, ac->entry, ac->avail, node); | 2467 | free_block(cachep, ac->entry, ac->avail, node); |
2470 | spin_unlock(&cachep->node[node]->list_lock); | 2468 | spin_unlock(&n->list_lock); |
2471 | ac->avail = 0; | 2469 | ac->avail = 0; |
2472 | } | 2470 | } |
2473 | 2471 | ||
@@ -2478,17 +2476,12 @@ static void drain_cpu_caches(struct kmem_cache *cachep) | |||
2478 | 2476 | ||
2479 | on_each_cpu(do_drain, cachep, 1); | 2477 | on_each_cpu(do_drain, cachep, 1); |
2480 | check_irq_on(); | 2478 | check_irq_on(); |
2481 | for_each_online_node(node) { | 2479 | for_each_kmem_cache_node(cachep, node, n) |
2482 | n = cachep->node[node]; | 2480 | if (n->alien) |
2483 | if (n && n->alien) | ||
2484 | drain_alien_cache(cachep, n->alien); | 2481 | drain_alien_cache(cachep, n->alien); |
2485 | } | ||
2486 | 2482 | ||
2487 | for_each_online_node(node) { | 2483 | for_each_kmem_cache_node(cachep, node, n) |
2488 | n = cachep->node[node]; | 2484 | drain_array(cachep, n, n->shared, 1, node); |
2489 | if (n) | ||
2490 | drain_array(cachep, n, n->shared, 1, node); | ||
2491 | } | ||
2492 | } | 2485 | } |
2493 | 2486 | ||
2494 | /* | 2487 | /* |
@@ -2534,17 +2527,14 @@ out: | |||
2534 | 2527 | ||
2535 | int __kmem_cache_shrink(struct kmem_cache *cachep) | 2528 | int __kmem_cache_shrink(struct kmem_cache *cachep) |
2536 | { | 2529 | { |
2537 | int ret = 0, i = 0; | 2530 | int ret = 0; |
2531 | int node; | ||
2538 | struct kmem_cache_node *n; | 2532 | struct kmem_cache_node *n; |
2539 | 2533 | ||
2540 | drain_cpu_caches(cachep); | 2534 | drain_cpu_caches(cachep); |
2541 | 2535 | ||
2542 | check_irq_on(); | 2536 | check_irq_on(); |
2543 | for_each_online_node(i) { | 2537 | for_each_kmem_cache_node(cachep, node, n) { |
2544 | n = cachep->node[i]; | ||
2545 | if (!n) | ||
2546 | continue; | ||
2547 | |||
2548 | drain_freelist(cachep, n, slabs_tofree(cachep, n)); | 2538 | drain_freelist(cachep, n, slabs_tofree(cachep, n)); |
2549 | 2539 | ||
2550 | ret += !list_empty(&n->slabs_full) || | 2540 | ret += !list_empty(&n->slabs_full) || |
@@ -2566,13 +2556,11 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) | |||
2566 | kfree(cachep->array[i]); | 2556 | kfree(cachep->array[i]); |
2567 | 2557 | ||
2568 | /* NUMA: free the node structures */ | 2558 | /* NUMA: free the node structures */ |
2569 | for_each_online_node(i) { | 2559 | for_each_kmem_cache_node(cachep, i, n) { |
2570 | n = cachep->node[i]; | 2560 | kfree(n->shared); |
2571 | if (n) { | 2561 | free_alien_cache(n->alien); |
2572 | kfree(n->shared); | 2562 | kfree(n); |
2573 | free_alien_cache(n->alien); | 2563 | cachep->node[i] = NULL; |
2574 | kfree(n); | ||
2575 | } | ||
2576 | } | 2564 | } |
2577 | return 0; | 2565 | return 0; |
2578 | } | 2566 | } |
@@ -2751,7 +2739,7 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2751 | 2739 | ||
2752 | /* Take the node list lock to change the colour_next on this node */ | 2740 | /* Take the node list lock to change the colour_next on this node */ |
2753 | check_irq_off(); | 2741 | check_irq_off(); |
2754 | n = cachep->node[nodeid]; | 2742 | n = get_node(cachep, nodeid); |
2755 | spin_lock(&n->list_lock); | 2743 | spin_lock(&n->list_lock); |
2756 | 2744 | ||
2757 | /* Get colour for the slab, and cal the next value. */ | 2745 | /* Get colour for the slab, and cal the next value. */ |
@@ -2920,7 +2908,7 @@ retry: | |||
2920 | */ | 2908 | */ |
2921 | batchcount = BATCHREFILL_LIMIT; | 2909 | batchcount = BATCHREFILL_LIMIT; |
2922 | } | 2910 | } |
2923 | n = cachep->node[node]; | 2911 | n = get_node(cachep, node); |
2924 | 2912 | ||
2925 | BUG_ON(ac->avail > 0 || !n); | 2913 | BUG_ON(ac->avail > 0 || !n); |
2926 | spin_lock(&n->list_lock); | 2914 | spin_lock(&n->list_lock); |
@@ -3169,8 +3157,8 @@ retry: | |||
3169 | nid = zone_to_nid(zone); | 3157 | nid = zone_to_nid(zone); |
3170 | 3158 | ||
3171 | if (cpuset_zone_allowed_hardwall(zone, flags) && | 3159 | if (cpuset_zone_allowed_hardwall(zone, flags) && |
3172 | cache->node[nid] && | 3160 | get_node(cache, nid) && |
3173 | cache->node[nid]->free_objects) { | 3161 | get_node(cache, nid)->free_objects) { |
3174 | obj = ____cache_alloc_node(cache, | 3162 | obj = ____cache_alloc_node(cache, |
3175 | flags | GFP_THISNODE, nid); | 3163 | flags | GFP_THISNODE, nid); |
3176 | if (obj) | 3164 | if (obj) |
@@ -3233,7 +3221,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, | |||
3233 | int x; | 3221 | int x; |
3234 | 3222 | ||
3235 | VM_BUG_ON(nodeid > num_online_nodes()); | 3223 | VM_BUG_ON(nodeid > num_online_nodes()); |
3236 | n = cachep->node[nodeid]; | 3224 | n = get_node(cachep, nodeid); |
3237 | BUG_ON(!n); | 3225 | BUG_ON(!n); |
3238 | 3226 | ||
3239 | retry: | 3227 | retry: |
@@ -3304,7 +3292,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, | |||
3304 | if (nodeid == NUMA_NO_NODE) | 3292 | if (nodeid == NUMA_NO_NODE) |
3305 | nodeid = slab_node; | 3293 | nodeid = slab_node; |
3306 | 3294 | ||
3307 | if (unlikely(!cachep->node[nodeid])) { | 3295 | if (unlikely(!get_node(cachep, nodeid))) { |
3308 | /* Node not bootstrapped yet */ | 3296 | /* Node not bootstrapped yet */ |
3309 | ptr = fallback_alloc(cachep, flags); | 3297 | ptr = fallback_alloc(cachep, flags); |
3310 | goto out; | 3298 | goto out; |
@@ -3420,7 +3408,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3420 | objp = objpp[i]; | 3408 | objp = objpp[i]; |
3421 | 3409 | ||
3422 | page = virt_to_head_page(objp); | 3410 | page = virt_to_head_page(objp); |
3423 | n = cachep->node[node]; | 3411 | n = get_node(cachep, node); |
3424 | list_del(&page->lru); | 3412 | list_del(&page->lru); |
3425 | check_spinlock_acquired_node(cachep, node); | 3413 | check_spinlock_acquired_node(cachep, node); |
3426 | slab_put_obj(cachep, page, objp, node); | 3414 | slab_put_obj(cachep, page, objp, node); |
@@ -3462,7 +3450,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | |||
3462 | BUG_ON(!batchcount || batchcount > ac->avail); | 3450 | BUG_ON(!batchcount || batchcount > ac->avail); |
3463 | #endif | 3451 | #endif |
3464 | check_irq_off(); | 3452 | check_irq_off(); |
3465 | n = cachep->node[node]; | 3453 | n = get_node(cachep, node); |
3466 | spin_lock(&n->list_lock); | 3454 | spin_lock(&n->list_lock); |
3467 | if (n->shared) { | 3455 | if (n->shared) { |
3468 | struct array_cache *shared_array = n->shared; | 3456 | struct array_cache *shared_array = n->shared; |
@@ -3775,7 +3763,7 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp) | |||
3775 | } | 3763 | } |
3776 | } | 3764 | } |
3777 | 3765 | ||
3778 | n = cachep->node[node]; | 3766 | n = get_node(cachep, node); |
3779 | if (n) { | 3767 | if (n) { |
3780 | struct array_cache *shared = n->shared; | 3768 | struct array_cache *shared = n->shared; |
3781 | 3769 | ||
@@ -3820,9 +3808,8 @@ fail: | |||
3820 | /* Cache is not active yet. Roll back what we did */ | 3808 | /* Cache is not active yet. Roll back what we did */ |
3821 | node--; | 3809 | node--; |
3822 | while (node >= 0) { | 3810 | while (node >= 0) { |
3823 | if (cachep->node[node]) { | 3811 | n = get_node(cachep, node); |
3824 | n = cachep->node[node]; | 3812 | if (n) { |
3825 | |||
3826 | kfree(n->shared); | 3813 | kfree(n->shared); |
3827 | free_alien_cache(n->alien); | 3814 | free_alien_cache(n->alien); |
3828 | kfree(n); | 3815 | kfree(n); |
@@ -3884,11 +3871,17 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, | |||
3884 | 3871 | ||
3885 | for_each_online_cpu(i) { | 3872 | for_each_online_cpu(i) { |
3886 | struct array_cache *ccold = new->new[i]; | 3873 | struct array_cache *ccold = new->new[i]; |
3874 | int node; | ||
3875 | struct kmem_cache_node *n; | ||
3876 | |||
3887 | if (!ccold) | 3877 | if (!ccold) |
3888 | continue; | 3878 | continue; |
3889 | spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); | 3879 | |
3890 | free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); | 3880 | node = cpu_to_mem(i); |
3891 | spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); | 3881 | n = get_node(cachep, node); |
3882 | spin_lock_irq(&n->list_lock); | ||
3883 | free_block(cachep, ccold->entry, ccold->avail, node); | ||
3884 | spin_unlock_irq(&n->list_lock); | ||
3892 | kfree(ccold); | 3885 | kfree(ccold); |
3893 | } | 3886 | } |
3894 | kfree(new); | 3887 | kfree(new); |
@@ -4048,7 +4041,7 @@ static void cache_reap(struct work_struct *w) | |||
4048 | * have established with reasonable certainty that | 4041 | * have established with reasonable certainty that |
4049 | * we can do some work if the lock was obtained. | 4042 | * we can do some work if the lock was obtained. |
4050 | */ | 4043 | */ |
4051 | n = searchp->node[node]; | 4044 | n = get_node(searchp, node); |
4052 | 4045 | ||
4053 | reap_alien(searchp, n); | 4046 | reap_alien(searchp, n); |
4054 | 4047 | ||
@@ -4100,10 +4093,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4100 | 4093 | ||
4101 | active_objs = 0; | 4094 | active_objs = 0; |
4102 | num_slabs = 0; | 4095 | num_slabs = 0; |
4103 | for_each_online_node(node) { | 4096 | for_each_kmem_cache_node(cachep, node, n) { |
4104 | n = cachep->node[node]; | ||
4105 | if (!n) | ||
4106 | continue; | ||
4107 | 4097 | ||
4108 | check_irq_on(); | 4098 | check_irq_on(); |
4109 | spin_lock_irq(&n->list_lock); | 4099 | spin_lock_irq(&n->list_lock); |
@@ -4328,10 +4318,7 @@ static int leaks_show(struct seq_file *m, void *p) | |||
4328 | 4318 | ||
4329 | x[1] = 0; | 4319 | x[1] = 0; |
4330 | 4320 | ||
4331 | for_each_online_node(node) { | 4321 | for_each_kmem_cache_node(cachep, node, n) { |
4332 | n = cachep->node[node]; | ||
4333 | if (!n) | ||
4334 | continue; | ||
4335 | 4322 | ||
4336 | check_irq_on(); | 4323 | check_irq_on(); |
4337 | spin_lock_irq(&n->list_lock); | 4324 | spin_lock_irq(&n->list_lock); |