author     Yinghai Lu <yhlu.kernel@gmail.com>          2008-07-10 23:38:26 -0400
committer  Ingo Molnar <mingo@elte.hu>                 2008-07-11 04:24:04 -0400
commit     f361a450bf1ad14e2b003217dbf3958638631265 (patch)
tree       10c1e4dcc0047f6c37387cada6a0bceba088d2d2 /arch/x86/kernel
parent     f302a5bbe5eb95f3d4227d5bd0e9b92b1b125f4f (diff)
x86: introduce max_low_pfn_mapped for 64-bit
When more than 4 GB of memory is installed, don't map the big hole below 4 GB.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/boot.c   |  2
-rw-r--r--  arch/x86/kernel/cpu/amd_64.c  | 10
-rw-r--r--  arch/x86/kernel/e820.c        | 23
-rw-r--r--  arch/x86/kernel/efi.c         |  2
-rw-r--r--  arch/x86/kernel/setup.c       | 22
5 files changed, 47 insertions, 12 deletions
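In short, the patch splits the single end-of-mapping bound into two: max_low_pfn_mapped now records how far the direct mapping below 4 GB reaches, while max_pfn_mapped keeps tracking the highest mapped page. With more than 4 GB of RAM installed, the non-RAM region just below 4 GB (the PCI/MMIO hole) is no longer direct-mapped. The toy program below only illustrates that intended split for a hypothetical box with 3 GB of low RAM, a 1 GB hole, and 3 GB of RAM above 4 GB; the layout and numbers are invented for illustration, not taken from the patch.

#include <stdio.h>

#define GB (1ULL << 30)

int main(void)
{
        /* Hypothetical layout: RAM 0..3 GB, MMIO/PCI hole 3..4 GB, RAM 4..7 GB. */
        unsigned long long low_ram_end = 3 * GB;        /* end of RAM below 4 GB */
        unsigned long long ram_end = 7 * GB;            /* end of all RAM */

        /* Before the patch: one direct mapping, covering the hole as well. */
        printf("old: map [%#llx - %#llx)\n", 0ULL, ram_end);

        /* After the patch: two mappings, the 3..4 GB hole stays unmapped. */
        printf("new: map [%#llx - %#llx)  -> max_low_pfn_mapped\n", 0ULL, low_ram_end);
        printf("new: map [%#llx - %#llx)  -> max_pfn_mapped\n", 4 * GB, ram_end);
        return 0;
}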
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a31a579a47ca..9c981c4a3644 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -130,7 +130,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
         if (!phys || !size)
                 return NULL;
 
-        if (phys+size <= (max_pfn_mapped << PAGE_SHIFT))
+        if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
                 return __va(phys);
 
         offset = phys & (PAGE_SIZE - 1);
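The __acpi_map_table() change follows directly from the split: ACPI tables live in RAM below 4 GB, and with the new scheme max_pfn_mapped may point above 4 GB even though the hole underneath is not mapped, so the safe bound for returning __va(phys) is max_low_pfn_mapped. A minimal user-space restatement of that bound check; PAGE_SHIFT, the 3 GB low-mapping limit and the sample addresses are assumed for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Mirrors the check in __acpi_map_table(): the table may be read through
 * the direct mapping only if it ends below the low mapping limit. */
static int directly_mappable(unsigned long phys, unsigned long size,
                             unsigned long max_low_pfn_mapped)
{
        return phys + size <= (max_low_pfn_mapped << PAGE_SHIFT);
}

int main(void)
{
        unsigned long max_low_pfn_mapped = 0xc0000000UL >> PAGE_SHIFT; /* 3 GB of low RAM mapped */

        printf("table at 0x7fe00000: %s\n",
               directly_mappable(0x7fe00000UL, 0x1000, max_low_pfn_mapped) ?
               "use __va()" : "needs a temporary early mapping");
        printf("table at 0xfed00000: %s\n",
               directly_mappable(0xfed00000UL, 0x1000, max_low_pfn_mapped) ?
               "use __va()" : "needs a temporary early mapping");
        return 0;
}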
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
index 958526d6a74a..bd182b7616ee 100644
--- a/arch/x86/kernel/cpu/amd_64.c
+++ b/arch/x86/kernel/cpu/amd_64.c
@@ -199,10 +199,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                  * Don't do it for gbpages because there seems very little
                  * benefit in doing so.
                  */
-                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
-                    (tseg >> PMD_SHIFT) <
-                    (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
+                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+                        if ((tseg>>PMD_SHIFT) <
+                            (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
+                            ((tseg>>PMD_SHIFT) <
+                             (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
+                             (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
                         set_memory_4k((unsigned long)__va(tseg), 1);
+                }
         }
 }
 
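The amd_64.c hunk keeps the old intent (only call set_memory_4k() when TSEG actually lies inside the direct mapping) but now has to accept two windows: below max_low_pfn_mapped, or at/above 4 GB and below max_pfn_mapped. A stand-alone sketch of that predicate; the PMD_SHIFT/PAGE_SHIFT values and the sample layout are assumed:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21   /* 2 MB pages on x86-64 */

/* Same condition as the new init_amd() code: TSEG is covered by the
 * direct mapping if its 2 MB slot lies below the low mapping, or lies
 * at/above 4 GB but below the high mapping. */
static int tseg_is_mapped(unsigned long long tseg,
                          unsigned long max_low_pfn_mapped,
                          unsigned long max_pfn_mapped)
{
        unsigned long long slot = tseg >> PMD_SHIFT;

        if (slot < (max_low_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)))
                return 1;
        if (slot < (max_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)) &&
            slot >= (1ULL << (32 - PMD_SHIFT)))
                return 1;
        return 0;
}

int main(void)
{
        /* Hypothetical box: 3 GB of low RAM mapped, RAM mapped up to 7 GB. */
        unsigned long max_low_pfn_mapped = 3UL << (30 - PAGE_SHIFT);
        unsigned long max_pfn_mapped     = 7UL << (30 - PAGE_SHIFT);

        printf("TSEG at 0xbfe00000 mapped: %d\n",
               tseg_is_mapped(0xbfe00000ULL, max_low_pfn_mapped, max_pfn_mapped));
        printf("TSEG at 0xffe00000 mapped: %d\n",   /* inside the unmapped hole */
               tseg_is_mapped(0xffe00000ULL, max_low_pfn_mapped, max_pfn_mapped));
        return 0;
}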
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 3451e0b3f324..9f5002e0b35c 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1056,7 +1056,7 @@ unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
 /*
  * Find the highest page frame number we have available
  */
-unsigned long __init e820_end(void)
+static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
 {
         int i;
         unsigned long last_pfn = 0;
@@ -1064,12 +1064,21 @@ unsigned long __init e820_end(void)
 
         for (i = 0; i < e820.nr_map; i++) {
                 struct e820entry *ei = &e820.map[i];
+                unsigned long start_pfn;
                 unsigned long end_pfn;
 
-                if (ei->type != E820_RAM)
+                if (ei->type != type)
                         continue;
 
+                start_pfn = ei->addr >> PAGE_SHIFT;
                 end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
+
+                if (start_pfn >= limit_pfn)
+                        continue;
+                if (end_pfn > limit_pfn) {
+                        last_pfn = limit_pfn;
+                        break;
+                }
                 if (end_pfn > last_pfn)
                         last_pfn = end_pfn;
         }
@@ -1083,7 +1092,15 @@ unsigned long __init e820_end(void)
                  last_pfn, max_arch_pfn);
         return last_pfn;
 }
+unsigned long __init e820_end_of_ram_pfn(void)
+{
+        return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
+}
 
+unsigned long __init e820_end_of_low_ram_pfn(void)
+{
+        return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
+}
 /*
  * Finds an active region in the address range from start_pfn to last_pfn and
  * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
@@ -1206,7 +1223,7 @@ static int __init parse_memmap_opt(char *p)
                  * the real mem size before original memory map is
                  * reset.
                  */
-                saved_max_pfn = e820_end();
+                saved_max_pfn = e820_end_of_ram_pfn();
 #endif
                 e820.nr_map = 0;
                 userdef = 1;
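In e820.c the old e820_end() becomes a parameterised scan, e820_end_pfn(limit_pfn, type), so the same loop can report either the top of all RAM (e820_end_of_ram_pfn()) or the top of RAM below 4 GB (e820_end_of_low_ram_pfn(), limit 1UL<<(32 - PAGE_SHIFT)). A user-space re-implementation of the clamped scan over a made-up memory map; the entry addresses, sizes and the TOY_RAM type value are invented for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define TOY_RAM    1    /* stand-in for E820_RAM */

struct toy_e820entry {
        unsigned long long addr;
        unsigned long long size;
        unsigned type;
};

/* Same idea as the new e820_end_pfn(): highest pfn of the given type,
 * clamped to limit_pfn (entries are assumed sorted, as after sanitizing). */
static unsigned long end_pfn(const struct toy_e820entry *map, int n,
                             unsigned long limit_pfn, unsigned type)
{
        unsigned long last_pfn = 0;
        int i;

        for (i = 0; i < n; i++) {
                unsigned long start = map[i].addr >> PAGE_SHIFT;
                unsigned long end = (map[i].addr + map[i].size) >> PAGE_SHIFT;

                if (map[i].type != type)
                        continue;
                if (start >= limit_pfn)
                        continue;
                if (end > limit_pfn) {
                        last_pfn = limit_pfn;
                        break;
                }
                if (end > last_pfn)
                        last_pfn = end;
        }
        return last_pfn;
}

int main(void)
{
        /* RAM 0..3 GB, hole 3..4 GB, RAM 4..7 GB. */
        struct toy_e820entry map[] = {
                { 0x0,            0xc0000000ULL, TOY_RAM },
                { 0x100000000ULL, 0xc0000000ULL, TOY_RAM },
        };
        unsigned long low_limit = 1UL << (32 - PAGE_SHIFT);

        printf("end_of_ram_pfn:     %#lx\n", end_pfn(map, 2, ~0UL, TOY_RAM));
        printf("end_of_low_ram_pfn: %#lx\n", end_pfn(map, 2, low_limit, TOY_RAM));
        return 0;
}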
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 94382faeadb6..06cc8d4254b1 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -473,7 +473,7 @@ void __init efi_enter_virtual_mode(void)
                 size = md->num_pages << EFI_PAGE_SHIFT;
                 end = md->phys_addr + size;
 
-                if (PFN_UP(end) <= max_pfn_mapped)
+                if (PFN_UP(end) <= max_low_pfn_mapped)
                         va = __va(md->phys_addr);
                 else
                         va = efi_ioremap(md->phys_addr, size);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a7c3471ea17c..86fc2d624270 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -713,14 +713,14 @@ void __init setup_arch(char **cmdline_p)
          * partially used pages are not usable - thus
          * we are rounding upwards:
          */
-        max_pfn = e820_end();
+        max_pfn = e820_end_of_ram_pfn();
 
         /* preallocate 4k for mptable mpc */
         early_reserve_e820_mpc_new();
         /* update e820 for memory not covered by WB MTRRs */
         mtrr_bp_init();
         if (mtrr_trim_uncached_memory(max_pfn))
-                max_pfn = e820_end();
+                max_pfn = e820_end_of_ram_pfn();
 
 #ifdef CONFIG_X86_32
         /* max_low_pfn get updated here */
@@ -732,12 +732,26 @@ void __init setup_arch(char **cmdline_p)
 
         /* How many end-of-memory variables you have, grandma! */
         /* need this before calling reserve_initrd */
-        max_low_pfn = max_pfn;
+        if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
+                max_low_pfn = e820_end_of_low_ram_pfn();
+        else
+                max_low_pfn = max_pfn;
+
         high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
         /* max_pfn_mapped is updated here */
-        max_pfn_mapped = init_memory_mapping(0, (max_low_pfn << PAGE_SHIFT));
+        max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+        max_pfn_mapped = max_low_pfn_mapped;
+
+#ifdef CONFIG_X86_64
+        if (max_pfn > max_low_pfn) {
+                max_pfn_mapped = init_memory_mapping(1UL<<32,
+                                                max_pfn<<PAGE_SHIFT);
+                /* can we preseve max_low_pfn ?*/
+                max_low_pfn = max_pfn;
+        }
+#endif
 
         /*
          * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
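In setup_arch() the single init_memory_mapping() call becomes two on 64-bit when RAM extends past 4 GB: first the low range up to max_low_pfn, then the range from 4 GB to max_pfn, after which max_low_pfn is reset to max_pfn so existing 64-bit users of it still see the top of RAM (the "can we preseve max_low_pfn ?" comment hints this may be revisited later). A stubbed sketch of that sequence; init_memory_mapping here is a dummy that just prints the range and returns the pfn past it, and all values are assumed, not taken from real hardware:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Dummy stand-in for init_memory_mapping(): report the range asked for and
 * return the pfn just past it, which is how the result is used above. */
static unsigned long init_memory_mapping(unsigned long long start,
                                         unsigned long long end)
{
        printf("map [%#llx - %#llx)\n", start, end);
        return (unsigned long)(end >> PAGE_SHIFT);
}

int main(void)
{
        /* Hypothetical 64-bit box: low RAM ends at 3 GB, all RAM ends at 7 GB. */
        unsigned long max_pfn = 7UL << (30 - PAGE_SHIFT);
        unsigned long max_low_pfn = 3UL << (30 - PAGE_SHIFT);
        unsigned long max_low_pfn_mapped, max_pfn_mapped;

        /* Low mapping: 0 .. end of RAM below 4 GB. */
        max_low_pfn_mapped = init_memory_mapping(0,
                        (unsigned long long)max_low_pfn << PAGE_SHIFT);
        max_pfn_mapped = max_low_pfn_mapped;

        /* High mapping: 4 GB .. end of RAM, skipping the hole entirely. */
        if (max_pfn > max_low_pfn) {
                max_pfn_mapped = init_memory_mapping(1ULL << 32,
                                (unsigned long long)max_pfn << PAGE_SHIFT);
                max_low_pfn = max_pfn;  /* keep the historical 64-bit meaning */
        }

        printf("max_low_pfn_mapped=%#lx max_pfn_mapped=%#lx max_low_pfn=%#lx\n",
               max_low_pfn_mapped, max_pfn_mapped, max_low_pfn);
        return 0;
}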