aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c65
1 file changed, 58 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0a53728a12f5..ac4f8c6b5c10 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -137,6 +137,7 @@ static unsigned long __meminitdata dma_reserve;
static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
unsigned long __initdata required_kernelcore;
unsigned long __initdata required_movablecore;
unsigned long __initdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
@@ -3219,6 +3220,18 @@ unsigned long __init find_max_pfn_with_active_regions(void)
	return max_pfn;
}

3223unsigned long __init early_calculate_totalpages(void)
3224{
3225 int i;
3226 unsigned long totalpages = 0;
3227
3228 for (i = 0; i < nr_nodemap_entries; i++)
3229 totalpages += early_node_map[i].end_pfn -
3230 early_node_map[i].start_pfn;
3231
3232 return totalpages;
3233}
3234
/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
@@ -3232,6 +3245,29 @@ void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
	unsigned long kernelcore_node, kernelcore_remaining;
	int usable_nodes = num_online_nodes();

3248 /*
3249 * If movablecore was specified, calculate what size of
3250 * kernelcore that corresponds so that memory usable for
3251 * any allocation type is evenly spread. If both kernelcore
3252 * and movablecore are specified, then the value of kernelcore
3253 * will be used for required_kernelcore if it's greater than
3254 * what movablecore would have allowed.
3255 */
3256 if (required_movablecore) {
3257 unsigned long totalpages = early_calculate_totalpages();
3258 unsigned long corepages;
3259
3260 /*
3261 * Round-up so that ZONE_MOVABLE is at least as large as what
3262 * was requested by the user
3263 */
3264 required_movablecore =
3265 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3266 corepages = totalpages - required_movablecore;
3267
3268 required_kernelcore = max(required_kernelcore, corepages);
3269 }
3270
	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
	if (!required_kernelcore)
		return;
@@ -3412,26 +3448,41 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3412 } 3448 }
3413} 3449}
3414 3450
3415/* 3451static int __init cmdline_parse_core(char *p, unsigned long *core)
3416 * kernelcore=size sets the amount of memory for use for allocations that
3417 * cannot be reclaimed or migrated.
3418 */
3419static int __init cmdline_parse_kernelcore(char *p)
3420{ 3452{
3421 unsigned long long coremem; 3453 unsigned long long coremem;
3422 if (!p) 3454 if (!p)
3423 return -EINVAL; 3455 return -EINVAL;
3424 3456
3425 coremem = memparse(p, &p); 3457 coremem = memparse(p, &p);
3426 required_kernelcore = coremem >> PAGE_SHIFT; 3458 *core = coremem >> PAGE_SHIFT;
3427 3459
3428 /* Paranoid check that UL is enough for required_kernelcore */ 3460 /* Paranoid check that UL is enough for the coremem value */
3429 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 3461 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3430 3462
3431 return 0; 3463 return 0;
3432} 3464}
3433 3465
3466/*
3467 * kernelcore=size sets the amount of memory for use for allocations that
3468 * cannot be reclaimed or migrated.
3469 */
3470static int __init cmdline_parse_kernelcore(char *p)
3471{
3472 return cmdline_parse_core(p, &required_kernelcore);
3473}
3474
3475/*
3476 * movablecore=size sets the amount of memory for use for allocations that
3477 * can be reclaimed or migrated.
3478 */
3479static int __init cmdline_parse_movablecore(char *p)
3480{
3481 return cmdline_parse_core(p, &required_movablecore);
3482}
3483
3434early_param("kernelcore", cmdline_parse_kernelcore); 3484early_param("kernelcore", cmdline_parse_kernelcore);
3485early_param("movablecore", cmdline_parse_movablecore);
3435 3486
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3437 3488