path: root/mm/slab.c
author	Christoph Lameter <clameter@sgi.com>	2007-10-16 04:25:32 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:58 -0400
commit	04231b3002ac53f8a64a7bd142fde3fa4b6808c6 (patch)
tree	7c55f86dbe697621943176cfa2a341dc0e6760ef /mm/slab.c
parent	9422ffba4adc82b4b67a3ca6ef51516aa61f8248 (diff)
Memoryless nodes: Slab support
Slab should not allocate control structures for nodes without memory.  This
may seem to work right now, but it is unreliable since not all allocations
can fall back due to the use of GFP_THISNODE.  Switching a few
for_each_online_node's to N_NORMAL_MEMORY will allow us to only allocate
for nodes that have regular memory.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Acked-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
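The patch swaps for_each_online_node() for for_each_node_state(node, N_NORMAL_MEMORY), so slab only sets up per-node control structures for nodes whose state bitmap says they have regular memory.  The following is a minimal userspace sketch of that idea only; it is not kernel code, and the node count, mask values, and reimplemented node_state()/for_each_node_state() helpers are illustrative assumptions that merely mirror the kernel names.

/*
 * Userspace model of iterating nodes by state rather than by
 * online status.  A bitmask per node state records which nodes
 * have that state; the loop macro visits only the nodes whose
 * bit is set for the requested state.
 */
#include <stdio.h>

#define MAX_NUMNODES 8			/* assumed small node count */

enum node_state { N_ONLINE, N_NORMAL_MEMORY, NR_NODE_STATES };

/* one bitmask per state; bit n set => node n has that state (assumed values) */
static unsigned int node_states[NR_NODE_STATES] = {
	[N_ONLINE]        = 0x0f,	/* nodes 0-3 are online */
	[N_NORMAL_MEMORY] = 0x05,	/* only nodes 0 and 2 have regular memory */
};

static int node_state(int node, enum node_state st)
{
	return (node_states[st] >> node) & 1;
}

/* loop skeleton mirroring the behaviour of the kernel macro of the same name */
#define for_each_node_state(node, st)				\
	for ((node) = 0; (node) < MAX_NUMNODES; (node)++)	\
		if (node_state((node), (st)))

int main(void)
{
	int nid;

	/* Only nodes 0 and 2 would get per-node slab structures allocated. */
	for_each_node_state(nid, N_NORMAL_MEMORY)
		printf("allocate kmem_list3 for node %d\n", nid);

	return 0;
}

Iterating over N_ONLINE instead would also visit the memoryless nodes 1 and 3, which is exactly what the patch avoids for the slab control structures.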
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1b240a3029d6..368a47d80eaf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1568,7 +1568,7 @@ void __init kmem_cache_init(void)
 	/* Replace the static kmem_list3 structures for the boot cpu */
 	init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
 
-	for_each_online_node(nid) {
+	for_each_node_state(nid, N_NORMAL_MEMORY) {
 		init_list(malloc_sizes[INDEX_AC].cs_cachep,
 			  &initkmem_list3[SIZE_AC + nid], nid);
 
@@ -1944,7 +1944,7 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 {
 	int node;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 		cachep->nodelists[node] = &initkmem_list3[index + node];
 		cachep->nodelists[node]->next_reap = jiffies +
 		    REAPTIMEOUT_LIST3 +
@@ -2075,7 +2075,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 			g_cpucache_up = PARTIAL_L3;
 		} else {
 			int node;
-			for_each_online_node(node) {
+			for_each_node_state(node, N_NORMAL_MEMORY) {
 				cachep->nodelists[node] =
 					kmalloc_node(sizeof(struct kmem_list3),
 						GFP_KERNEL, node);
@@ -3792,7 +3792,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	struct array_cache *new_shared;
 	struct array_cache **new_alien = NULL;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_NORMAL_MEMORY) {
 
 		if (use_alien_caches) {
 			new_alien = alloc_alien_cache(node, cachep->limit);