Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)
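Every hunk in this patch applies the same micro-optimization: hoist the repeated numa_node_id() call into a local variable evaluated once per function, then index nodelists[] (or pass the node id onward) through the cached value. Caching is safe on these paths because they run with interrupts off (note the check_irq_off() in cache_alloc_refill) or during single-threaded boot, so the executing CPU, and therefore its node, cannot change between uses. The kmem_cache_init() hunk additionally renames an inner-block node variable to nid so it no longer shadows the new function-scope node. Below is a minimal userspace sketch of the hoisting pattern; the struct layouts and the numa_node_id() stub are simplified stand-ins for illustration, not the real slab internals:

#include <stddef.h>

#define MAX_NUMNODES 64

/* Stand-in for the kernel's numa_node_id(); on real kernels this resolves
 * through a per-CPU lookup, so it is cheap but not free to call repeatedly. */
static int numa_node_id(void)
{
	return 0;	/* pretend we always execute on node 0 */
}

struct kmem_list3 { int free_objects; };

struct kmem_cache {
	struct kmem_list3 *nodelists[MAX_NUMNODES];
};

/* Before: numa_node_id() is re-evaluated at every use site. */
static struct kmem_list3 *local_list_before(struct kmem_cache *cachep)
{
	if (cachep->nodelists[numa_node_id()] == NULL)
		return NULL;
	return cachep->nodelists[numa_node_id()];
}

/* After: evaluate once, reuse the cached value for every lookup. */
static struct kmem_list3 *local_list_after(struct kmem_cache *cachep)
{
	int node = numa_node_id();

	if (cachep->nodelists[node] == NULL)
		return NULL;
	return cachep->nodelists[node];
}

int main(void)
{
	struct kmem_list3 l3 = { .free_objects = 0 };
	struct kmem_cache cache = { .nodelists = { [0] = &l3 } };

	/* Both variants return the same list; the "after" form just
	 * avoids the redundant lookups. */
	return local_list_after(&cache) == local_list_before(&cache) ? 0 : 1;
}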
diff --git a/mm/slab.c b/mm/slab.c
index e9a63b5a7fb9..64fb0d770b06 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1106,15 +1106,18 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	int nodeid = slabp->nodeid;
 	struct kmem_list3 *l3;
 	struct array_cache *alien = NULL;
+	int node;
+
+	node = numa_node_id();
 
 	/*
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == numa_node_id()))
+	if (likely(slabp->nodeid == node))
 		return 0;
 
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
@@ -1352,6 +1355,7 @@ void __init kmem_cache_init(void)
 	struct cache_names *names;
 	int i;
 	int order;
+	int node;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1386,12 +1390,14 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
+	node = numa_node_id();
+
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
@@ -1496,19 +1502,18 @@ void __init kmem_cache_init(void)
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
-		int node;
+		int nid;
+
 		/* Replace the static kmem_list3 structures for the boot cpu */
-		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
-			  numa_node_id());
+		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
 
-		for_each_online_node(node) {
+		for_each_online_node(nid) {
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
-				  &initkmem_list3[SIZE_AC + node], node);
+				  &initkmem_list3[SIZE_AC + nid], nid);
 
 			if (INDEX_AC != INDEX_L3) {
 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
-					  &initkmem_list3[SIZE_L3 + node],
-					  node);
+					  &initkmem_list3[SIZE_L3 + nid], nid);
 			}
 		}
 	}
@@ -2918,6 +2923,9 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	int batchcount;
 	struct kmem_list3 *l3;
 	struct array_cache *ac;
+	int node;
+
+	node = numa_node_id();
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
@@ -2931,7 +2939,7 @@ retry:
 		 */
 		batchcount = BATCHREFILL_LIMIT;
 	}
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 
 	BUG_ON(ac->avail > 0 || !l3);
 	spin_lock(&l3->list_lock);
@@ -2961,7 +2969,7 @@ retry:
 			STATS_SET_HIGH(cachep);
 
 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
-							    numa_node_id());
+							    node);
 		}
 		check_slabp(cachep, slabp);
 
@@ -2980,7 +2988,7 @@ alloc_done:
 
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags, numa_node_id());
+		x = cache_grow(cachep, flags, node);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);