aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorYinghai Lu <yhlu.kernel.send@gmail.com>2008-04-29 04:59:49 -0400
committerThomas Gleixner <tglx@linutronix.de>2008-05-25 04:55:09 -0400
commit42651f15824d003e8357693ab72c4dbb3e280836 (patch)
tree1114071a407bfeb38edc12b7c822039641cd5c13 /arch
parent95ffa2438d0e9c48779f0106b1c0eb36165e759c (diff)
x86: fix trimming e820 with MTRR holes.
converting MTRR layout from continuous to discrete, we could sometimes run out of MTRRs. So add gran_sizek to prevent that by dumping small RAM pieces smaller than gran_sizek. The previous trimming could only handle the range from highest_pfn (from MTRR) to end_pfn (from e820). When more than 4g of RAM is installed, there will be holes below 4g, so we also need to check that RAM below 4g is covered well. Needs to be applied after [PATCH] x86: mtrr cleanup for converting continuous to discrete layout v7 Signed-off-by: Yinghai Lu <yinghai.lu@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c101
-rw-r--r--arch/x86/kernel/e820_32.c7
-rw-r--r--arch/x86/kernel/e820_64.c6
3 files changed, 95 insertions, 19 deletions
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 8a6f68b45e3e..9ab5c16b0d52 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1095,6 +1095,17 @@ int __init amd_special_default_mtrr(void)
1095 return 0; 1095 return 0;
1096} 1096}
1097 1097
1098static u64 __init real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
1099{
1100 u64 trim_start, trim_size;
1101 trim_start = start_pfn;
1102 trim_start <<= PAGE_SHIFT;
1103 trim_size = limit_pfn;
1104 trim_size <<= PAGE_SHIFT;
1105 trim_size -= trim_start;
1106 return update_memory_range(trim_start, trim_size, E820_RAM,
1107 E820_RESERVED);
1108}
1098/** 1109/**
1099 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs 1110 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
1100 * @end_pfn: ending page frame number 1111 * @end_pfn: ending page frame number
@@ -1110,8 +1121,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1110{ 1121{
1111 unsigned long i, base, size, highest_pfn = 0, def, dummy; 1122 unsigned long i, base, size, highest_pfn = 0, def, dummy;
1112 mtrr_type type; 1123 mtrr_type type;
1113 u64 trim_start, trim_size; 1124 struct res_range range[RANGE_NUM];
1125 int nr_range;
1126 u64 total_real_trim_size;
1127 int changed;
1114 1128
1129 /* extra one for all 0 */
1130 int num[MTRR_NUM_TYPES + 1];
1115 /* 1131 /*
1116 * Make sure we only trim uncachable memory on machines that 1132 * Make sure we only trim uncachable memory on machines that
1117 * support the Intel MTRR architecture: 1133 * support the Intel MTRR architecture:
@@ -1123,9 +1139,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1123 if (def != MTRR_TYPE_UNCACHABLE) 1139 if (def != MTRR_TYPE_UNCACHABLE)
1124 return 0; 1140 return 0;
1125 1141
1126 if (amd_special_default_mtrr())
1127 return 0;
1128
1129 /* Find highest cached pfn */ 1142 /* Find highest cached pfn */
1130 for (i = 0; i < num_var_ranges; i++) { 1143 for (i = 0; i < num_var_ranges; i++) {
1131 mtrr_if->get(i, &base, &size, &type); 1144 mtrr_if->get(i, &base, &size, &type);
@@ -1145,26 +1158,80 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1145 return 0; 1158 return 0;
1146 } 1159 }
1147 1160
1148 if (highest_pfn < end_pfn) { 1161 /* check entries number */
1162 memset(num, 0, sizeof(num));
1163 for (i = 0; i < num_var_ranges; i++) {
1164 mtrr_if->get(i, &base, &size, &type);
1165 if (type >= MTRR_NUM_TYPES)
1166 continue;
1167 if (!size)
1168 type = MTRR_NUM_TYPES;
1169 num[type]++;
1170 }
1171
1172 /* no entry for WB? */
1173 if (!num[MTRR_TYPE_WRBACK])
1174 return 0;
1175
1176 /* check if we only had WB and UC */
1177 if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
1178 num_var_ranges - num[MTRR_NUM_TYPES])
1179 return 0;
1180
1181 memset(range, 0, sizeof(range));
1182 nr_range = 0;
1183 if (mtrr_tom2) {
1184 range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
1185 range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
1186 if (highest_pfn < range[nr_range].end + 1)
1187 highest_pfn = range[nr_range].end + 1;
1188 nr_range++;
1189 }
1190 nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
1191
1192 changed = 0;
1193 total_real_trim_size = 0;
1194
1195 /* check the top at first */
1196 i = nr_range - 1;
1197 if (range[i].end + 1 < end_pfn) {
1198 total_real_trim_size += real_trim_memory(range[i].end + 1, end_pfn);
1199 }
1200
1201 if (total_real_trim_size) {
1149 printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" 1202 printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
1150 " all of memory, losing %luMB of RAM.\n", 1203 " all of memory, losing %lluMB of RAM.\n",
1151 (end_pfn - highest_pfn) >> (20 - PAGE_SHIFT)); 1204 total_real_trim_size >> 20);
1152 1205
1153 WARN_ON(1); 1206 WARN_ON(1);
1154 1207
1155 printk(KERN_INFO "update e820 for mtrr\n"); 1208 printk(KERN_INFO "update e820 for mtrr -- end_pfn\n");
1156 trim_start = highest_pfn;
1157 trim_start <<= PAGE_SHIFT;
1158 trim_size = end_pfn;
1159 trim_size <<= PAGE_SHIFT;
1160 trim_size -= trim_start;
1161 update_memory_range(trim_start, trim_size, E820_RAM,
1162 E820_RESERVED);
1163 update_e820(); 1209 update_e820();
1164 return 1; 1210 changed = 1;
1165 } 1211 }
1166 1212
1167 return 0; 1213 total_real_trim_size = 0;
1214 if (range[0].start)
1215 total_real_trim_size += real_trim_memory(0, range[0].start);
1216
1217 for (i = 0; i < nr_range - 1; i++) {
1218 if (range[i].end + 1 < range[i+1].start)
1219 total_real_trim_size += real_trim_memory(range[i].end + 1, range[i+1].start);
1220 }
1221
1222 if (total_real_trim_size) {
1223 printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
1224 " all of memory, losing %lluMB of RAM.\n",
1225 total_real_trim_size >> 20);
1226
1227 WARN_ON(1);
1228
1229 printk(KERN_INFO "update e820 for mtrr -- holes\n");
1230 update_e820();
1231 changed = 1;
1232 }
1233
1234 return changed;
1168} 1235}
1169 1236
1170/** 1237/**
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 31ea2bb8c91a..857f706273a8 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -783,10 +783,11 @@ static int __init parse_memmap(char *arg)
783 return 0; 783 return 0;
784} 784}
785early_param("memmap", parse_memmap); 785early_param("memmap", parse_memmap);
786void __init update_memory_range(u64 start, u64 size, unsigned old_type, 786u64 __init update_memory_range(u64 start, u64 size, unsigned old_type,
787 unsigned new_type) 787 unsigned new_type)
788{ 788{
789 int i; 789 int i;
790 u64 real_updated_size = 0;
790 791
791 BUG_ON(old_type == new_type); 792 BUG_ON(old_type == new_type);
792 793
@@ -798,6 +799,7 @@ void __init update_memory_range(u64 start, u64 size, unsigned old_type,
798 /* totally covered? */ 799 /* totally covered? */
799 if (ei->addr >= start && ei->size <= size) { 800 if (ei->addr >= start && ei->size <= size) {
800 ei->type = new_type; 801 ei->type = new_type;
802 real_updated_size += ei->size;
801 continue; 803 continue;
802 } 804 }
803 /* partially covered */ 805 /* partially covered */
@@ -807,7 +809,10 @@ void __init update_memory_range(u64 start, u64 size, unsigned old_type,
807 continue; 809 continue;
808 add_memory_region(final_start, final_end - final_start, 810 add_memory_region(final_start, final_end - final_start,
809 new_type); 811 new_type);
812 real_updated_size += final_end - final_start;
810 } 813 }
814
815 return real_updated_size;
811} 816}
812 817
813void __init finish_e820_parsing(void) 818void __init finish_e820_parsing(void)
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 124480c0008d..848b2cd2d1dd 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -829,10 +829,11 @@ void __init finish_e820_parsing(void)
829 } 829 }
830} 830}
831 831
832void __init update_memory_range(u64 start, u64 size, unsigned old_type, 832u64 __init update_memory_range(u64 start, u64 size, unsigned old_type,
833 unsigned new_type) 833 unsigned new_type)
834{ 834{
835 int i; 835 int i;
836 u64 real_updated_size = 0;
836 837
837 BUG_ON(old_type == new_type); 838 BUG_ON(old_type == new_type);
838 839
@@ -844,6 +845,7 @@ void __init update_memory_range(u64 start, u64 size, unsigned old_type,
844 /* totally covered? */ 845 /* totally covered? */
845 if (ei->addr >= start && ei->size <= size) { 846 if (ei->addr >= start && ei->size <= size) {
846 ei->type = new_type; 847 ei->type = new_type;
848 real_updated_size += ei->size;
847 continue; 849 continue;
848 } 850 }
849 /* partially covered */ 851 /* partially covered */
@@ -853,7 +855,9 @@ void __init update_memory_range(u64 start, u64 size, unsigned old_type,
853 continue; 855 continue;
854 add_memory_region(final_start, final_end - final_start, 856 add_memory_region(final_start, final_end - final_start,
855 new_type); 857 new_type);
858 real_updated_size += final_end - final_start;
856 } 859 }
860 return real_updated_size;
857} 861}
858 862
859void __init update_e820(void) 863void __init update_e820(void)