author		Tang Chen <tangchen@cn.fujitsu.com>	2013-11-12 18:08:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-12 22:09:08 -0500
commit		0167d7d8b0beb4cf12076b47e4dc73897ae5acb0 (patch)
tree		c7430b0ad44e46fdb1537e1605d85d9e38028095 /arch
parent		79442ed189acb8b949662676e750eda173c06f9b (diff)
x86/mm: factor out top-down direct mapping setup
Create a new function, memory_map_top_down(), to factor out the top-down
direct memory mapping pagetable setup. This also prepares for the following
patch, which will introduce bottom-up memory mapping. With the two ways of
setting up the pagetables in separate functions, init_mem_mapping() can
simply choose which one to use, which makes the code clearer.
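In outline, the patch leaves the code with the following structure (a condensed sketch based on the diff below; the 64-bit handling of memory above 4G at the end of init_mem_mapping() is unchanged and therefore omitted):

static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	/* allocate page tables near map_end, then map RAM downward in
	 * progressively larger steps until map_start is reached */
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	probe_page_size_mask();
	end = max_pfn << PAGE_SHIFT;	/* max_low_pfn on 32-bit */

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);

	/* direct mapping for [ISA_END_ADDRESS, end), set up top-down */
	memory_map_top_down(ISA_END_ADDRESS, end);

	/* ... 64-bit mapping of memory above 4G continues as before ... */
}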
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Toshi Kani <toshi.kani@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Thomas Renninger <trenn@suse.de>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/init.c	59
1 file changed, 39 insertions(+), 20 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ce32017c5e38..742d6d4ad9eb 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -418,27 +418,27 @@ static unsigned long __init get_new_step_size(unsigned long step_size)
 	return step_size << 5;
 }
 
-void __init init_mem_mapping(void)
+/**
+ * memory_map_top_down - Map [map_start, map_end) top down
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will setup direct mapping for memory range
+ * [map_start, map_end) in top-down. That said, the page tables
+ * will be allocated at the end of the memory, and we map the
+ * memory in top-down.
+ */
+static void __init memory_map_top_down(unsigned long map_start,
+				       unsigned long map_end)
 {
-	unsigned long end, real_end, start, last_start;
+	unsigned long real_end, start, last_start;
 	unsigned long step_size;
 	unsigned long addr;
 	unsigned long mapped_ram_size = 0;
 	unsigned long new_mapped_ram_size;
 
-	probe_page_size_mask();
-
-#ifdef CONFIG_X86_64
-	end = max_pfn << PAGE_SHIFT;
-#else
-	end = max_low_pfn << PAGE_SHIFT;
-#endif
-
-	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
-
 	/* xen has big range in reserved near end of ram, skip it at first.*/
-	addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
+	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
 	real_end = addr + PMD_SIZE;
 
 	/* step_size need to be small so pgt_buf from BRK could cover it */
@@ -453,13 +453,13 @@ void __init init_mem_mapping(void)
 	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
 	 * for page table.
 	 */
-	while (last_start > ISA_END_ADDRESS) {
+	while (last_start > map_start) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);
-			if (start < ISA_END_ADDRESS)
-				start = ISA_END_ADDRESS;
+			if (start < map_start)
+				start = map_start;
 		} else
-			start = ISA_END_ADDRESS;
+			start = map_start;
 		new_mapped_ram_size = init_range_memory_mapping(start,
 							last_start);
 		last_start = start;
@@ -470,8 +470,27 @@ void __init init_mem_mapping(void)
 		mapped_ram_size += new_mapped_ram_size;
 	}
 
-	if (real_end < end)
-		init_range_memory_mapping(real_end, end);
+	if (real_end < map_end)
+		init_range_memory_mapping(real_end, map_end);
+}
+
+void __init init_mem_mapping(void)
+{
+	unsigned long end;
+
+	probe_page_size_mask();
+
+#ifdef CONFIG_X86_64
+	end = max_pfn << PAGE_SHIFT;
+#else
+	end = max_low_pfn << PAGE_SHIFT;
+#endif
+
+	/* the ISA range is always mapped regardless of memory holes */
+	init_memory_mapping(0, ISA_END_ADDRESS);
+
+	/* setup direct mapping for range [ISA_END_ADDRESS, end) in top-down*/
+	memory_map_top_down(ISA_END_ADDRESS, end);
 
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
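To see how the stepping in memory_map_top_down() behaves, here is a stand-alone user-space sketch of the loop. It is only an illustration, not kernel code: MAP_START and MAP_END are made-up example addresses, init_range_memory_mapping() is stubbed to print each range and pretend it is all RAM, and round_down_pow2() stands in for the kernel's round_down(); the reserved PMD-sized block near the end of RAM (real_end) and the min_pfn_mapped/max_pfn_mapped bookkeeping are omitted.

/*
 * top_down_sketch.c - user-space illustration of the stepping loop in
 * memory_map_top_down() shown in the diff above.  All constants and the
 * mapping stub below are simplified stand-ins for the kernel's versions.
 */
#include <stdio.h>

#define PMD_SIZE	(2UL << 20)	/* 2 MiB, as on x86 */
#define MAP_START	(1UL << 20)	/* stand-in for ISA_END_ADDRESS */
#define MAP_END		(512UL << 20)	/* stand-in for the end of RAM */

/* Same growth rule as get_new_step_size() in the diff: 32x per step. */
static unsigned long get_new_step_size(unsigned long step_size)
{
	return step_size << 5;
}

/* round_down() for power-of-two alignments, as used by the kernel loop. */
static unsigned long round_down_pow2(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);
}

/* Stub: report the range and pretend the whole of it was mapped RAM. */
static unsigned long init_range_memory_mapping(unsigned long start,
					       unsigned long end)
{
	printf("map [%#010lx - %#010lx)  %4lu MiB\n",
	       start, end, (end - start) >> 20);
	return end - start;
}

int main(void)
{
	unsigned long map_start = MAP_START, map_end = MAP_END;
	unsigned long last_start = map_end, start;
	unsigned long step_size = PMD_SIZE;
	unsigned long mapped_ram_size = 0, new_mapped_ram_size;

	/* Walk downward from map_end toward map_start.  step_size only
	 * grows after a pass maps more than everything mapped so far,
	 * mirroring the loop in memory_map_top_down(). */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down_pow2(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		new_mapped_ram_size = init_range_memory_mapping(start,
								last_start);
		last_start = start;
		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}
	return 0;
}

With these example values the sketch prints three passes of 2 MiB, 62 MiB and 447 MiB: the first chunk is small enough for the BRK-allocated pgt_buf to cover its page tables, and each later pass reuses the memory just mapped for the next, larger chunk's page tables.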