| author | Christoph Lameter <clameter@sgi.com> | 2007-02-20 16:57:52 -0500 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-02-20 20:10:13 -0500 |
| commit | 8ef8286689c6b5bc76212437b85bdd2ba749ee44 (patch) | |
| tree | 9ef088691bd06699adc6c7875bc1b2e6e96ce066 | |
| parent | 53b8a315b76a3f3c70a5644976c0095460eb13d8 (diff) | |
[PATCH] slab: reduce size of alien cache to cover only possible nodes
The alien cache is a per-cpu, per-node array allocated for every slab on the
system. Currently we size this array for all nodes that the kernel is built
to support. For IA64 this is 1024 nodes, so we allocate an array with 1024
objects even when we boot a system with only 4 nodes.
This patch uses "nr_node_ids" to determine the number of possible nodes in
the hardware configuration, and allocates the alien cache sized only for
those possible nodes.
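To make the savings concrete, here is a minimal userspace sketch (not kernel code); the MAX_NUMNODES and nr_node_ids values are illustrative assumptions taken from the numbers quoted above, a 1024-node compile-time limit versus 4 possible nodes at boot.

```c
/*
 * Minimal userspace sketch of the sizing change, not kernel code.
 * MAX_NUMNODES and nr_node_ids values are illustrative assumptions
 * (IA64-style compile-time limit of 1024 nodes vs. a 4-node machine).
 */
#include <stdio.h>

#define MAX_NUMNODES 1024		/* compile-time node limit (assumed) */

int main(void)
{
	int nr_node_ids = 4;		/* possible nodes found at boot (assumed) */

	/* Before: one pointer slot for every node the kernel could support. */
	size_t old_memsize = sizeof(void *) * MAX_NUMNODES;

	/* After: one pointer slot for every node this machine can have. */
	size_t new_memsize = sizeof(void *) * nr_node_ids;

	printf("alien cache pointer array per slab: %zu -> %zu bytes\n",
	       old_memsize, new_memsize);
	return 0;
}
```

On a 64-bit build that is 8192 bytes reduced to 32 bytes for each such pointer array.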
The initialization of nr_node_ids occurred too late relative to the bootstrap
of the slab allocator, so I moved the setup_nr_node_ids() call into
free_area_init_nodes().
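The ordering matters because the slab bootstrap reads nr_node_ids when sizing these arrays, so the value has to exist by then. Below is a hedged, userspace-only sketch of the underlying idea, deriving the node-ID count from a possible-node bitmap; the bitmap and the test_node() helper are simplified stand-ins for illustration, not the kernel's nodemask API.

```c
/*
 * Hedged sketch of the idea behind setup_nr_node_ids(): derive the number
 * of usable node IDs from a possible-node bitmap so that later allocations
 * (such as the alien caches above) can be sized by it.  The bitmap and the
 * test_node() helper are simplified stand-ins, not the kernel's nodemask API.
 */
#include <stdio.h>

#define MAX_NUMNODES	1024
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long node_possible_map[MAX_NUMNODES / BITS_PER_LONG];
static int nr_node_ids = MAX_NUMNODES;	/* conservative default until computed */

static int test_node(int node)
{
	return (node_possible_map[node / BITS_PER_LONG] >> (node % BITS_PER_LONG)) & 1;
}

/* nr_node_ids becomes (highest possible node ID) + 1. */
static void setup_nr_node_ids(void)
{
	int node, highest = 0;

	for (node = 0; node < MAX_NUMNODES; node++)
		if (test_node(node))
			highest = node;
	nr_node_ids = highest + 1;
}

int main(void)
{
	node_possible_map[0] = 0xf;	/* pretend firmware reports nodes 0-3 */

	setup_nr_node_ids();
	printf("nr_node_ids = %d\n", nr_node_ids);	/* prints 4 */
	return 0;
}
```

The diff below accordingly removes the call from init_per_zone_pages_min(), a module_init() hook that runs after the slab allocator is already up, and places it in free_area_init_nodes(), which runs early enough.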
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/page_alloc.c | 2 |
-rw-r--r-- | mm/slab.c | 2 |
2 files changed, 2 insertions, 2 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f66538b3c31b..41737395bbcc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2964,6 +2964,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 			early_node_map[i].end_pfn);
 
 	/* Initialise every node */
+	setup_nr_node_ids();
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		free_area_init_node(nid, pgdat, NULL,
@@ -3189,7 +3190,6 @@ static int __init init_per_zone_pages_min(void)
 		min_free_kbytes = 65536;
 	setup_per_zone_pages_min();
 	setup_per_zone_lowmem_reserve();
-	setup_nr_node_ids();
 	return 0;
 }
 module_init(init_per_zone_pages_min)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1042,7 +1042,7 @@ static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
 	struct array_cache **ac_ptr;
-	int memsize = sizeof(void *) * MAX_NUMNODES;
+	int memsize = sizeof(void *) * nr_node_ids;
 	int i;
 
 	if (limit > 1)