author	Christoph Lameter <clameter@sgi.com>	2007-07-17 07:03:32 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-17 13:23:02 -0400
commit	8ab1372fac5684de56c68f0da1ddc90e1c4ce740
tree	42594d334c83ff18655731bf4d9f5d023c9c2886
parent	a0e1d1be204612ee83b3afe8aa24c5d27e63d464
SLUB: Fix CONFIG_SLUB_DEBUG use for CONFIG_NUMA
We currently cannot disable CONFIG_SLUB_DEBUG for CONFIG_NUMA. Now that
embedded systems start to use NUMA we may need this.

Put an #ifdef around places where NUMA only code uses fields only valid
for CONFIG_SLUB_DEBUG.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/slub.c	4
1 files changed, 4 insertions, 0 deletions
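
The first hunk is needed because the full list is only a member of struct kmem_cache_node when CONFIG_SLUB_DEBUG is enabled, so initializing it must be compiled out under the same guard. A standalone toy sketch of that situation (simplified types and names, illustrative only, not the kernel definitions):

/* Toy illustration (not kernel code): a field that only exists under
 * CONFIG_SLUB_DEBUG must also be initialized only under that guard,
 * otherwise the build breaks when the option is disabled. */
struct toy_list_head { struct toy_list_head *next, *prev; };

struct toy_kmem_cache_node {
	struct toy_list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	struct toy_list_head full;	/* exists only with debugging enabled */
#endif
};

static void toy_init_node(struct toy_kmem_cache_node *n)
{
	n->partial.next = n->partial.prev = &n->partial;
#ifdef CONFIG_SLUB_DEBUG
	/* Without this guard, 'full' is not a member of the structure at
	 * all and the compiler rejects the reference. */
	n->full.next = n->full.prev = &n->full;
#endif
}

int main(void)
{
	struct toy_kmem_cache_node n;

	toy_init_node(&n);
	return 0;
}

Compiling this toy with and without -DCONFIG_SLUB_DEBUG succeeds in both cases, which is the property the patch below restores for NUMA builds.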
diff --git a/mm/slub.c b/mm/slub.c
index 1c5d6a068556..52a4f44be394 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1853,7 +1853,9 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+#ifdef CONFIG_SLUB_DEBUG
 	INIT_LIST_HEAD(&n->full);
+#endif
 }
 
 #ifdef CONFIG_NUMA
@@ -1881,8 +1883,10 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
 	page->freelist = get_freepointer(kmalloc_caches, n);
 	page->inuse++;
 	kmalloc_caches->node[node] = n;
+#ifdef CONFIG_SLUB_DEBUG
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
+#endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
 	add_partial(n, page);
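
The second hunk guards calls rather than a field: init_object() and init_tracking() are debug-only helpers, so referencing them in the NUMA bootstrap path has to be compiled out together with the rest of the debug support. A standalone toy sketch of that pattern (the names mirror the patch, but the types and bodies are illustrative stand-ins, not the kernel implementations):

/* Toy illustration (not kernel code): helpers that are only defined when
 * debugging is configured must be called under the same #ifdef, or the
 * link/compile fails when the option is off. */
#include <stdio.h>

struct toy_object { int pattern; };

#ifdef CONFIG_SLUB_DEBUG
static void init_object(struct toy_object *obj, int active)
{
	obj->pattern = active ? 0x5a : 0x6b;	/* stand-in poison values */
}

static void init_tracking(struct toy_object *obj)
{
	printf("tracking enabled for %p\n", (void *)obj);
}
#endif

static void early_alloc(struct toy_object *obj)
{
	(void)obj;	/* unused when debugging is compiled out */
#ifdef CONFIG_SLUB_DEBUG
	/* Only reference the debug-only helpers when they are compiled in. */
	init_object(obj, 1);
	init_tracking(obj);
#endif
}

int main(void)
{
	struct toy_object o = { 0 };

	early_alloc(&o);
	return 0;
}

As with the first sketch, this builds cleanly with and without -DCONFIG_SLUB_DEBUG, mirroring what the added #ifdef/#endif pairs achieve in mm/slub.c.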