| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-03-26 14:30:17 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-03-26 14:30:17 -0400 |
| commit | 5254149f6c4e938fea3735183434e208097bd188 (patch) | |
| tree | 452fbfbcd405d4d97d520d561a7c0236398e228f | |
| parent | 8f404faa72f4e458e7bd81ac75ce55ae829e953d (diff) | |
| parent | ec1f5eeeb5a79a0d48036de649a3498da42db565 (diff) | |
Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm
* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slab: fix cache_cache bootstrap in kmem_cache_init()
  count_partial() is not used if !SLUB_DEBUG and !CONFIG_SLABINFO
-rw-r--r-- | mm/slab.c | 4
-rw-r--r-- | mm/slub.c | 2

2 files changed, 4 insertions, 2 deletions
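
The first commit in the pull ('slab: fix cache_cache bootstrap in kmem_cache_init()') fixes how cache_cache's bootstrap node lists are picked out of initkmem_list3[]. As the SIZE_AC + nid usage in the second hunk shows, that array is carved into per-node blocks, so cache_cache's entry for a given node has to be taken at CACHE_CACHE + node; indexing every node at plain CACHE_CACHE left all nodes sharing the same bootstrap list. The user-space sketch below only models that indexing: struct kmem_list3 is a stand-in, and MAX_NUMNODES, CACHE_CACHE, SIZE_AC and SIZE_L3 are given illustrative values rather than the kernel's.

```c
/*
 * Standalone sketch (not kernel code) of the bootstrap-list indexing the
 * slab fix corrects.  The three per-node blocks mirror the SIZE_AC + nid /
 * SIZE_L3 scheme visible in the diff; the values below are illustrative.
 */
#include <stdio.h>

#define MAX_NUMNODES   4                    /* assumed small NUMA system    */
#define CACHE_CACHE    0                    /* block for cache_cache        */
#define SIZE_AC        MAX_NUMNODES         /* block for the AC size cache  */
#define SIZE_L3        (2 * MAX_NUMNODES)   /* block for the L3 size cache  */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)

struct kmem_list3 { int owner_node; };      /* stand-in for the real struct */

static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];

int main(void)
{
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		/* Old indexing: every node aliases slot 0 of the block. */
		struct kmem_list3 *buggy = &initkmem_list3[CACHE_CACHE];
		/* Fixed indexing: each node gets its own bootstrap list. */
		struct kmem_list3 *fixed = &initkmem_list3[CACHE_CACHE + node];

		printf("node %d: old slot %td, fixed slot %td\n",
		       node, buggy - initkmem_list3, fixed - initkmem_list3);
	}
	return 0;
}
```

Running the sketch prints the same slot for every node with the old indexing and a distinct slot with the new one, which is exactly what the two one-line changes below achieve for the real bootstrap.
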
```diff
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1481,7 +1481,7 @@ void __init kmem_cache_init(void)
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids, which
@@ -1602,7 +1602,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
```
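
The second commit is a build cleanup in mm/slub.c: count_partial()'s only callers are compiled under SLUB_DEBUG or CONFIG_SLABINFO, so with both disabled the static function is left defined but unused and the compiler warns about it. The patch wraps the function in the same preprocessor condition as its callers. Below is a minimal stand-alone illustration of that guard pattern; FEATURE_STATS and the helper functions are invented for the sketch and are not slub code.

```c
/*
 * Minimal illustration of the guard pattern used for count_partial():
 * a static helper whose only callers are themselves conditional is
 * wrapped in the same #if, so builds without the feature never see a
 * "defined but not used" function.  Build with -DFEATURE_STATS to
 * enable the stats path.
 */
#include <stdio.h>

#if defined(FEATURE_STATS)
/* Helper only needed by the stats code below. */
static unsigned long count_items(const unsigned long *v, int n)
{
	unsigned long sum = 0;
	for (int i = 0; i < n; i++)
		sum += v[i];
	return sum;
}

static void print_stats(void)
{
	unsigned long v[] = { 1, 2, 3 };
	printf("total: %lu\n", count_items(v, 3));
}
#endif

int main(void)
{
#if defined(FEATURE_STATS)
	print_stats();
#else
	puts("stats support not compiled in");
#endif
	return 0;
}
```

Compile it with and without -DFEATURE_STATS: in the disabled build the helper simply does not exist, so -Wall has nothing to warn about, which mirrors what the added #if/#endif pair does for count_partial() in the hunks below.
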
```diff
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2685,6 +2685,7 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2697,6 +2698,7 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+#endif
 
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
```