diff options
author | Tejun Heo <tj@kernel.org> | 2011-07-14 05:43:42 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2011-07-14 14:47:43 -0400 |
commit | 7c0caeb866b0f648d91bb75b8bc6f86af95bb033 (patch) | |
tree | 042804fe716310a4de4effbbaa4461237e2b5d4a /mm/page_alloc.c | |
parent | 67e24bcb725cabd15ef577bf301275d03d6086d7 (diff) |
memblock: Add optional region->nid
From 83103b92f3234ec830852bbc5c45911bd6cbdb20 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Thu, 14 Jul 2011 11:22:16 +0200
Add optional region->nid which can be enabled by arch using
CONFIG_HAVE_MEMBLOCK_NODE_MAP. When enabled, memblock also carries
NUMA node information and replaces early_node_map[].
Newly added memblocks have MAX_NUMNODES as nid. Arch can then call
memblock_set_node() to set node information. memblock takes care of
merging and node-affine allocations w.r.t. node information.
When MEMBLOCK_NODE_MAP is enabled, early_node_map[], related data
structures and functions to manipulate and iterate it are disabled.
memblock version of __next_mem_pfn_range() is provided such that
for_each_mem_pfn_range() behaves the same and its users don't have to
be updated.
-v2: Yinghai spotted section mismatch caused by missing
__init_memblock in memblock_set_node(). Fixed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20110714094342.GF3455@htj.dyndns.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 47 |
1 file changed, 28 insertions, 19 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8ab5e5e7fdad..3c7ea45ffba9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -182,28 +182,31 @@ static unsigned long __meminitdata nr_all_pages; | |||
182 | static unsigned long __meminitdata dma_reserve; | 182 | static unsigned long __meminitdata dma_reserve; |
183 | 183 | ||
184 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP | 184 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP |
185 | /* | 185 | #ifndef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
186 | * MAX_ACTIVE_REGIONS determines the maximum number of distinct | 186 | /* |
187 | * ranges of memory (RAM) that may be registered with add_active_range(). | 187 | * MAX_ACTIVE_REGIONS determines the maximum number of distinct ranges |
188 | * Ranges passed to add_active_range() will be merged if possible | 188 | * of memory (RAM) that may be registered with add_active_range(). |
189 | * so the number of times add_active_range() can be called is | 189 | * Ranges passed to add_active_range() will be merged if possible so |
190 | * related to the number of nodes and the number of holes | 190 | * the number of times add_active_range() can be called is related to |
191 | */ | 191 | * the number of nodes and the number of holes |
192 | #ifdef CONFIG_MAX_ACTIVE_REGIONS | 192 | */ |
193 | /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ | 193 | #ifdef CONFIG_MAX_ACTIVE_REGIONS |
194 | #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS | 194 | /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ |
195 | #else | 195 | #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS |
196 | #if MAX_NUMNODES >= 32 | ||
197 | /* If there can be many nodes, allow up to 50 holes per node */ | ||
198 | #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) | ||
199 | #else | 196 | #else |
200 | /* By default, allow up to 256 distinct regions */ | 197 | #if MAX_NUMNODES >= 32 |
201 | #define MAX_ACTIVE_REGIONS 256 | 198 | /* If there can be many nodes, allow up to 50 holes per node */ |
199 | #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) | ||
200 | #else | ||
201 | /* By default, allow up to 256 distinct regions */ | ||
202 | #define MAX_ACTIVE_REGIONS 256 | ||
203 | #endif | ||
202 | #endif | 204 | #endif |
203 | #endif | ||
204 | 205 | ||
205 | static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; | 206 | static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; |
206 | static int __meminitdata nr_nodemap_entries; | 207 | static int __meminitdata nr_nodemap_entries; |
208 | #endif /* !CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | ||
209 | |||
207 | static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; | 210 | static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; |
208 | static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; | 211 | static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; |
209 | static unsigned long __initdata required_kernelcore; | 212 | static unsigned long __initdata required_kernelcore; |
@@ -4268,6 +4271,7 @@ static inline void setup_nr_node_ids(void) | |||
4268 | } | 4271 | } |
4269 | #endif | 4272 | #endif |
4270 | 4273 | ||
4274 | #ifndef CONFIG_HAVE_MEMBLOCK_NODE_MAP | ||
4271 | /* | 4275 | /* |
4272 | * Common iterator interface used to define for_each_mem_pfn_range(). | 4276 | * Common iterator interface used to define for_each_mem_pfn_range(). |
4273 | */ | 4277 | */ |
@@ -4456,6 +4460,11 @@ void __init sort_node_map(void) | |||
4456 | sizeof(struct node_active_region), | 4460 | sizeof(struct node_active_region), |
4457 | cmp_node_active_region, NULL); | 4461 | cmp_node_active_region, NULL); |
4458 | } | 4462 | } |
4463 | #else /* !CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | ||
4464 | static inline void sort_node_map(void) | ||
4465 | { | ||
4466 | } | ||
4467 | #endif | ||
4459 | 4468 | ||
4460 | /** | 4469 | /** |
4461 | * node_map_pfn_alignment - determine the maximum internode alignment | 4470 | * node_map_pfn_alignment - determine the maximum internode alignment |