 mm/slab.c | 4 ++--
 mm/slub.c | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index bb4070e1079f..04b308c3bc54 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1481,7 +1481,7 @@ void __init kmem_cache_init(void)
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids, which
@@ -1602,7 +1602,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
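
Context note (not part of the commit): the bootstrap array initkmem_list3[] reserves one kmem_list3 slot per node for each early cache, matching the SIZE_AC + nid indexing already visible in the context lines; the old code always used slot CACHE_CACHE for cache_cache, which is only correct when the boot node happens to be node 0. Below is a minimal stand-alone sketch of that per-node slot layout; the names, node count, and base spacing are illustrative assumptions, not the kernel's actual definitions.

/*
 * Sketch of the per-node bootstrap slot layout the fix above relies on.
 * MAX_NUMNODES and the base offsets are assumptions for this sketch.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_NUMNODES	4			/* assumed node count */
#define CACHE_CACHE	0			/* slots 0..3: cache_cache */
#define SIZE_AC		MAX_NUMNODES		/* slots 4..7: array caches */
#define SIZE_L3		(2 * MAX_NUMNODES)	/* slots 8..11: l3 caches */
#define NUM_INIT_LISTS	(3 * MAX_NUMNODES)

struct list3_sketch {
	int owner_base;		/* which cache group owns the slot */
	int node;		/* which node the slot serves */
} initkmem_list3[NUM_INIT_LISTS];

/* The indexing rule the patch restores: base + node, never just base. */
static struct list3_sketch *slot(int base, int node)
{
	assert(node >= 0 && node < MAX_NUMNODES);
	return &initkmem_list3[base + node];
}

int main(void)
{
	for (int node = 0; node < MAX_NUMNODES; node++) {
		/* without "+ node", every node would share slot CACHE_CACHE */
		struct list3_sketch *l3 = slot(CACHE_CACHE, node);
		l3->owner_base = CACHE_CACHE;
		l3->node = node;
		printf("cache_cache node %d -> slot %td\n",
		       node, l3 - initkmem_list3);
	}
	return 0;
}
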
diff --git a/mm/slub.c b/mm/slub.c
index ca71d5b81e4a..b72bc98e2dc1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2685,6 +2685,7 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2697,6 +2698,7 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+#endif
 
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
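
Context note (not part of the commit): count_partial()'s only callers sit under the same debug/slabinfo conditionals, so with both options disabled the compiler warns about a defined-but-unused static function; the added #if/#endif pair compiles the helper out together with its callers. A minimal stand-alone sketch of the pattern follows; MY_DEBUG and MY_STATS are hypothetical stand-ins for the real config options.

/*
 * Sketch of the guard pattern applied above: a static helper whose only
 * callers are conditionally compiled is compiled out with them, avoiding
 * a "defined but not used" warning. Macro names are hypothetical.
 */
#include <stdio.h>

#if defined(MY_DEBUG) || defined(MY_STATS)
static unsigned long count_items(const unsigned long *v, unsigned int n)
{
	unsigned long x = 0;

	for (unsigned int i = 0; i < n; i++)
		x += v[i];
	return x;
}
#endif

int main(void)
{
#if defined(MY_DEBUG) || defined(MY_STATS)
	unsigned long v[] = { 1, 2, 3 };

	printf("total: %lu\n", count_items(v, 3));
#else
	puts("debug/stats disabled; count_items() is not even compiled");
#endif
	return 0;
}
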