author		Huang, Ying <ying.huang@intel.com>	2008-06-02 02:26:18 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-06-05 09:10:02 -0400
commit		d0ec2c6f2c2f0478b34ae78b3e65f60a561ac807 (patch)
tree		4d4f6d1e11aa45bc4084e733f52d402e9582d1c7 /arch/x86
parent		d3fbe5ea9518b46a68e6b278974e92e2c3acef4a (diff)
x86: reserve highmem pages via reserve_early
This patch makes early reserved highmem pages become reserved pages. This
can be used for highmem pages allocated by the bootloader, such as the EFI
memory map, the linked list of setup_data, etc.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: andi@firstfloor.org
Cc: mingo@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
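For illustration only, here is a minimal sketch (not part of this patch) of how a bootloader-provided highmem region might be reserved early so that the new page_is_reserved_early() check later keeps its pages out of the highmem free path. The helper name and the pa_data/len values are hypothetical, and reserve_early() is assumed to have its contemporaneous signature reserve_early(u64 start, u64 end, char *name):

/*
 * Hypothetical sketch: reserve the pages holding a bootloader-provided
 * setup_data blob during early boot.  Pages covered by reserve_early()
 * satisfy page_is_reserved_early(), so add_one_highpage_init() leaves
 * them PG_reserved instead of freeing them to the buddy allocator.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <asm/e820.h>

static void __init reserve_setup_data_example(u64 pa_data, u32 len)
{
	if (len)
		reserve_early(pa_data, pa_data + len, "SETUP DATA");
}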
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/e820.c	11
-rw-r--r--	arch/x86/mm/init_32.c	3
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ac5e9ebf70e..a706e9057ba 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -612,6 +612,17 @@ void __init free_early(u64 start, u64 end)
 	early_res[j - 1].end = 0;
 }
 
+int __init page_is_reserved_early(unsigned long pagenr)
+{
+	u64 start = (u64)pagenr << PAGE_SHIFT;
+	int i;
+	struct early_res *r;
+
+	i = find_overlapped_early(start, start + PAGE_SIZE);
+	r = &early_res[i];
+	return (i < MAX_EARLY_RES && r->end);
+}
+
 void __init early_res_to_bootmem(u64 start, u64 end)
 {
 	int i;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ec30d10154b..0e7bb5e8167 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -289,7 +289,8 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 
 void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
-	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
+	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn)) &&
+	    !page_is_reserved_early(pfn)) {
 		ClearPageReserved(page);
 		init_page_count(page);
 		__free_page(page);