author    Ingo Molnar <mingo@elte.hu>  2008-06-16 05:16:46 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-06-16 05:16:46 -0400
commit    cb9aa97c21c59ad01c9514d7faf45dc166fba226 (patch)
tree      66a530f154db78b85f5b1406ebc51401df8d3913 /arch/x86/mm
parent    668a6c3654560aef8741642478973e205a4f02bf (diff)
parent    066519068ad2fbe98c7f45552b1f592903a9c8c8 (diff)

Merge branch 'linus' into tracing/mmiotrace-mergefixups
Diffstat (limited to 'arch/x86/mm')

 -rw-r--r--  arch/x86/mm/fault.c   |  5
 -rw-r--r--  arch/x86/mm/init_64.c |  6
 -rw-r--r--  arch/x86/mm/ioremap.c |  5
 -rw-r--r--  arch/x86/mm/pat.c     | 51
 -rw-r--r--  arch/x86/mm/srat_64.c | 27

 5 files changed, 36 insertions(+), 58 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8c828a68d3b6..0a778e3c43ee 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -508,6 +508,11 @@ static int vmalloc_fault(unsigned long address)
 	unsigned long pgd_paddr;
 	pmd_t *pmd_k;
 	pte_t *pte_k;
+
+	/* Make sure we are in vmalloc area */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
 	/*
 	 * Synchronize this task's top level page-table
 	 * with the 'reference' page table.
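Note: the fault.c hunk adds a range guard so that vmalloc_fault() refuses addresses outside the vmalloc region up front, returning -1 to let the normal fault path take over instead of attempting a page-table sync for an address it does not own. A minimal sketch of how a fault handler consumes that return value; the surrounding logic here is illustrative, not the file's actual code:

	/* Illustrative caller: try the vmalloc fixup first and fall
	 * through to regular fault handling when it declines. */
	if (address >= TASK_SIZE) {		/* kernel-space fault (sketch) */
		if (vmalloc_fault(address) >= 0)
			return;			/* lazily synced, fault resolved */
		/* ... fall through to no_context/oops handling ... */
	}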
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 295be1d07b82..a5fd2e06f5c9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -206,7 +206,7 @@ void __init cleanup_highmap(void)
 	pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
 	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
-		if (!pmd_present(*pmd))
+		if (pmd_none(*pmd))
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
 			set_pmd(pmd, __pmd(0));
@@ -506,7 +506,7 @@ early_param("memtest", parse_memtest);
 
 static void __init early_memtest(unsigned long start, unsigned long end)
 {
-	unsigned long t_start, t_size;
+	u64 t_start, t_size;
 	unsigned pattern;
 
 	if (!memtest_pattern)
@@ -525,7 +525,7 @@ static void __init early_memtest(unsigned long start, unsigned long end)
 	if (t_start + t_size > end)
 		t_size = end - t_start;
 
-	printk(KERN_CONT "\n  %016lx - %016lx pattern %d",
+	printk(KERN_CONT "\n  %016llx - %016llx pattern %d",
 		t_start, t_start + t_size, pattern);
 
 	memtest(t_start, t_size, pattern);
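Note: the init_64.c changes are two independent fixes. In cleanup_highmap(), pmd_none() skips only completely empty entries, whereas the old !pmd_present() test also skipped entries that carry state with the present bit clear, leaving them uncleaned. In early_memtest(), the bookkeeping widens to u64, and since u64 prints as unsigned long long in the kernel, the printk format must move from %016lx to %016llx in step. A hedged sketch of the specifier pairing:

	/* u64 pairs with %llx in printk; %lx would be a mismatched
	 * format once t_start/t_size are no longer unsigned long. */
	u64 t_start = start, t_size = end - start;
	printk(KERN_CONT "\n  %016llx - %016llx", t_start, t_start + t_size);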
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a7c80a6e8622..e92aa461f4d6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -602,10 +602,11 @@ void __init early_iounmap(void *addr, unsigned long size)
 	unsigned long offset;
 	unsigned int nrpages;
 	enum fixed_addresses idx;
-	unsigned int nesting;
+	int nesting;
 
 	nesting = --early_ioremap_nested;
-	WARN_ON(nesting < 0);
+	if (WARN_ON(nesting < 0))
+		return;
 
 	if (early_ioremap_debug) {
 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
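Note: two details make the ioremap.c fix work together. WARN_ON() evaluates to the truth value of its condition, so it can emit the stack trace and gate an early return in one expression; and nesting must become a signed int for the underflow test to mean anything, because with the old unsigned declaration 'nesting < 0' could never be true. A minimal sketch of the idiom:

	/* WARN_ON(cond) returns cond, so this both logs a warning and
	 * bails out on an unbalanced early_iounmap() call. */
	int nesting = --early_ioremap_nested;
	if (WARN_ON(nesting < 0))
		return;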
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index de3a99812450..06b7a1c90fb8 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -34,7 +34,7 @@ void __cpuinit pat_disable(char *reason)
 	printk(KERN_INFO "%s\n", reason);
 }
 
-static int nopat(char *str)
+static int __init nopat(char *str)
 {
 	pat_disable("PAT support disabled.");
 	return 0;
@@ -151,32 +151,33 @@ static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
 	unsigned long pat_type;
 	u8 mtrr_type;
 
-	mtrr_type = mtrr_type_lookup(start, end);
-	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
-		*ret_prot = prot;
-		return 0;
-	}
-	if (mtrr_type == 0xFE) {		/* MTRR match error */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
-	    mtrr_type != MTRR_TYPE_WRBACK &&
-	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-
 	pat_type = prot & _PAGE_CACHE_MASK;
 	prot &= (~_PAGE_CACHE_MASK);
 
-	/* Currently doing intersection by hand. Optimize it later. */
+	/*
+	 * We return the PAT request directly for types where PAT takes
+	 * precedence with respect to MTRR and for UC_MINUS.
+	 * Consistency checks with other PAT requests is done later
+	 * while going through memtype list.
+	 */
 	if (pat_type == _PAGE_CACHE_WC) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
+		return 0;
 	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
 		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-	} else if (pat_type == _PAGE_CACHE_UC ||
-		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
+		return 0;
+	} else if (pat_type == _PAGE_CACHE_UC) {
+		*ret_prot = prot | _PAGE_CACHE_UC;
+		return 0;
+	}
+
+	/*
+	 * Look for MTRR hint to get the effective type in case where PAT
+	 * request is for WB.
+	 */
+	mtrr_type = mtrr_type_lookup(start, end);
+
+	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
 		*ret_prot = prot | _PAGE_CACHE_UC;
 	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
@@ -233,14 +234,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	if (req_type == -1) {
 		/*
-		 * Special case where caller wants to inherit from mtrr or
-		 * existing pat mapping, defaulting to UC_MINUS in case of
-		 * no match.
+		 * Call mtrr_lookup to get the type hint. This is an
+		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+		 * tools and ACPI tools). Use WB request for WB memory and use
+		 * UC_MINUS otherwise.
 		 */
 		u8 mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == 0xFE) {	/* MTRR match error */
-			err = -1;
-		}
 
 		if (mtrr_type == MTRR_TYPE_WRBACK) {
 			req_type = _PAGE_CACHE_WB;
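Note: the pat.c rework inverts the old lookup order in pat_x_mtrr_type(): PAT requests for WC, UC_MINUS and UC are now honored directly, and the MTRR hint is consulted only when the request is WB; the 0xFF ("MTRR not enabled") and 0xFE ("match error") special cases disappear, and reserve_memtype() likewise stops turning 0xFE into an error. A condensed sketch of the new decision order; the helper name is hypothetical and the trailing WB default is assumed, since the hunk ends at the WRCOMB branch:

	/* Hypothetical condensation of the reworked precedence: PAT
	 * types WC/UC_MINUS/UC win outright, only WB asks the MTRRs. */
	static unsigned long effective_cache_type(u64 start, u64 end,
						  unsigned long pat_type)
	{
		if (pat_type == _PAGE_CACHE_WC ||
		    pat_type == _PAGE_CACHE_UC_MINUS ||
		    pat_type == _PAGE_CACHE_UC)
			return pat_type;	/* PAT request taken as-is */

		switch (mtrr_type_lookup(start, end)) {	/* WB request */
		case MTRR_TYPE_UNCACHABLE:	return _PAGE_CACHE_UC;
		case MTRR_TYPE_WRCOMB:		return _PAGE_CACHE_WC;
		default:			return _PAGE_CACHE_WB;	/* assumed */
		}
	}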
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 3890234e5b26..99649dccad28 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -97,36 +97,9 @@ static __init inline int srat_disabled(void)
 	return numa_off || acpi_numa < 0;
 }
 
-/*
- * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
- * up the NUMA heuristics which wants the local node to have a smaller
- * distance than the others.
- * Do some quick checks here and only use the SLIT if it passes.
- */
-static __init int slit_valid(struct acpi_table_slit *slit)
-{
-	int i, j;
-	int d = slit->locality_count;
-	for (i = 0; i < d; i++) {
-		for (j = 0; j < d; j++) {
-			u8 val = slit->entry[d*i + j];
-			if (i == j) {
-				if (val != LOCAL_DISTANCE)
-					return 0;
-			} else if (val <= LOCAL_DISTANCE)
-				return 0;
-		}
-	}
-	return 1;
-}
-
 /* Callback for SLIT parsing */
 void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
-	if (!slit_valid(slit)) {
-		printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
-		return;
-	}
 	acpi_slit = slit;
 }
 
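Note: the srat_64.c hunk is a pure removal: both slit_valid() and its call site go away, so acpi_numa_slit_init() now stores whatever SLIT the BIOS hands over. The dropped check had required LOCAL_DISTANCE on the locality-matrix diagonal (each node's distance to itself) and strictly larger values everywhere else; a small example matrix the removed check would have accepted, for illustration only:

	/* Two-node SLIT, row-major in slit->entry[]:
	 *		node 0	node 1
	 *	node 0	  10	  20
	 *	node 1	  20	  10
	 * (10 == LOCAL_DISTANCE on the diagonal, larger off-diagonal) */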