aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-16 18:15:17 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-16 18:15:17 -0400
commit168f1a7163b37294a0ef33829e1ed54d41e33c42 (patch)
tree16fa34f24156c28f0a3060d984e98bf4df878f91 /arch/x86/mm
parent825a3b2605c3aa193e0075d0f9c72e33c17ab16a (diff)
parent4afd0565552c87f23834db9121dd9cf6955d0b43 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar: "The main changes in this cycle were: - MSR access API fixes and enhancements (Andy Lutomirski) - early exception handling improvements (Andy Lutomirski) - user-space FS/GS prctl usage fixes and improvements (Andy Lutomirski) - Remove the cpu_has_*() APIs and replace them with equivalents (Borislav Petkov) - task switch micro-optimization (Brian Gerst) - 32-bit entry code simplification (Denys Vlasenko) - enhance PAT handling in emulated CPUs (Toshi Kani) ... and lots of other cleanups/fixlets" * 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits) x86/arch_prctl/64: Restore accidentally removed put_cpu() in ARCH_SET_GS x86/entry/32: Remove asmlinkage_protect() x86/entry/32: Remove GET_THREAD_INFO() from entry code x86/entry, sched/x86: Don't save/restore EFLAGS on task switch x86/asm/entry/32: Simplify pushes of zeroed pt_regs->REGs selftests/x86/ldt_gdt: Test set_thread_area() deletion of an active segment x86/tls: Synchronize segment registers in set_thread_area() x86/asm/64: Rename thread_struct's fs and gs to fsbase and gsbase x86/arch_prctl/64: Remove FSBASE/GSBASE < 4G optimization x86/segments/64: When load_gs_index fails, clear the base x86/segments/64: When loadsegment(fs, ...) fails, clear the base x86/asm: Make asm/alternative.h safe from assembly x86/asm: Stop depending on ptrace.h in alternative.h x86/entry: Rename is_{ia32,x32}_task() to in_{ia32,x32}_syscall() x86/asm: Make sure verify_cpu() has a good stack x86/extable: Add a comment about early exception handlers x86/msr: Set the return value to zero when native_rdmsr_safe() fails x86/paravirt: Make "unsafe" MSR accesses unsafe even if PARAVIRT=y x86/paravirt: Add paravirt_{read,write}_msr() x86/msr: Carry on after a non-"safe" MSR access fails ...
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--arch/x86/mm/extable.c96
-rw-r--r--arch/x86/mm/hugetlbpage.c4
-rw-r--r--arch/x86/mm/init.c8
-rw-r--r--arch/x86/mm/init_32.c2
-rw-r--r--arch/x86/mm/init_64.c4
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/pageattr.c4
-rw-r--r--arch/x86/mm/pat.c109
8 files changed, 158 insertions, 73 deletions
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 82447b3fba38..4bb53b89f3c5 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -1,5 +1,6 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <asm/uaccess.h> 2#include <asm/uaccess.h>
3#include <asm/traps.h>
3 4
4typedef bool (*ex_handler_t)(const struct exception_table_entry *, 5typedef bool (*ex_handler_t)(const struct exception_table_entry *,
5 struct pt_regs *, int); 6 struct pt_regs *, int);
@@ -42,6 +43,43 @@ bool ex_handler_ext(const struct exception_table_entry *fixup,
42} 43}
43EXPORT_SYMBOL(ex_handler_ext); 44EXPORT_SYMBOL(ex_handler_ext);
44 45
46bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
47 struct pt_regs *regs, int trapnr)
48{
49 WARN_ONCE(1, "unchecked MSR access error: RDMSR from 0x%x\n",
50 (unsigned int)regs->cx);
51
52 /* Pretend that the read succeeded and returned 0. */
53 regs->ip = ex_fixup_addr(fixup);
54 regs->ax = 0;
55 regs->dx = 0;
56 return true;
57}
58EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
59
60bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
61 struct pt_regs *regs, int trapnr)
62{
63 WARN_ONCE(1, "unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x)\n",
64 (unsigned int)regs->cx,
65 (unsigned int)regs->dx, (unsigned int)regs->ax);
66
67 /* Pretend that the write succeeded. */
68 regs->ip = ex_fixup_addr(fixup);
69 return true;
70}
71EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
72
73bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
74 struct pt_regs *regs, int trapnr)
75{
76 if (static_cpu_has(X86_BUG_NULL_SEG))
77 asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
78 asm volatile ("mov %0, %%fs" : : "rm" (0));
79 return ex_handler_default(fixup, regs, trapnr);
80}
81EXPORT_SYMBOL(ex_handler_clear_fs);
82
45bool ex_has_fault_handler(unsigned long ip) 83bool ex_has_fault_handler(unsigned long ip)
46{ 84{
47 const struct exception_table_entry *e; 85 const struct exception_table_entry *e;
@@ -82,24 +120,46 @@ int fixup_exception(struct pt_regs *regs, int trapnr)
82 return handler(e, regs, trapnr); 120 return handler(e, regs, trapnr);
83} 121}
84 122
123extern unsigned int early_recursion_flag;
124
85/* Restricted version used during very early boot */ 125/* Restricted version used during very early boot */
86int __init early_fixup_exception(unsigned long *ip) 126void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
87{ 127{
88 const struct exception_table_entry *e; 128 /* Ignore early NMIs. */
89 unsigned long new_ip; 129 if (trapnr == X86_TRAP_NMI)
90 ex_handler_t handler; 130 return;
91 131
92 e = search_exception_tables(*ip); 132 if (early_recursion_flag > 2)
93 if (!e) 133 goto halt_loop;
94 return 0; 134
95 135 if (regs->cs != __KERNEL_CS)
96 new_ip = ex_fixup_addr(e); 136 goto fail;
97 handler = ex_fixup_handler(e); 137
98 138 /*
99 /* special handling not supported during early boot */ 139 * The full exception fixup machinery is available as soon as
100 if (handler != ex_handler_default) 140 * the early IDT is loaded. This means that it is the
101 return 0; 141 * responsibility of extable users to either function correctly
102 142 * when handlers are invoked early or to simply avoid causing
103 *ip = new_ip; 143 * exceptions before they're ready to handle them.
104 return 1; 144 *
145 * This is better than filtering which handlers can be used,
146 * because refusing to call a handler here is guaranteed to
147 * result in a hard-to-debug panic.
148 *
149 * Keep in mind that not all vectors actually get here. Early
150 * page faults, for example, are special.
151 */
152 if (fixup_exception(regs, trapnr))
153 return;
154
155fail:
156 early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
157 (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
158 regs->orig_ax, read_cr2());
159
160 show_regs(regs);
161
162halt_loop:
163 while (true)
164 halt();
105} 165}
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 740d7ac03a55..14a95054d4e0 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -162,7 +162,7 @@ static __init int setup_hugepagesz(char *opt)
162 unsigned long ps = memparse(opt, &opt); 162 unsigned long ps = memparse(opt, &opt);
163 if (ps == PMD_SIZE) { 163 if (ps == PMD_SIZE) {
164 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); 164 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
165 } else if (ps == PUD_SIZE && cpu_has_gbpages) { 165 } else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
166 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); 166 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
167 } else { 167 } else {
168 printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n", 168 printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
@@ -177,7 +177,7 @@ __setup("hugepagesz=", setup_hugepagesz);
177static __init int gigantic_pages_init(void) 177static __init int gigantic_pages_init(void)
178{ 178{
179 /* With compaction or CMA we can allocate gigantic pages at runtime */ 179 /* With compaction or CMA we can allocate gigantic pages at runtime */
180 if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT)) 180 if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
181 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); 181 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
182 return 0; 182 return 0;
183} 183}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 9d56f271d519..372aad2b3291 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -157,23 +157,23 @@ static void __init probe_page_size_mask(void)
157 * This will simplify cpa(), which otherwise needs to support splitting 157 * This will simplify cpa(), which otherwise needs to support splitting
158 * large pages into small in interrupt context, etc. 158 * large pages into small in interrupt context, etc.
159 */ 159 */
160 if (cpu_has_pse && !debug_pagealloc_enabled()) 160 if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
161 page_size_mask |= 1 << PG_LEVEL_2M; 161 page_size_mask |= 1 << PG_LEVEL_2M;
162#endif 162#endif
163 163
164 /* Enable PSE if available */ 164 /* Enable PSE if available */
165 if (cpu_has_pse) 165 if (boot_cpu_has(X86_FEATURE_PSE))
166 cr4_set_bits_and_update_boot(X86_CR4_PSE); 166 cr4_set_bits_and_update_boot(X86_CR4_PSE);
167 167
168 /* Enable PGE if available */ 168 /* Enable PGE if available */
169 if (cpu_has_pge) { 169 if (boot_cpu_has(X86_FEATURE_PGE)) {
170 cr4_set_bits_and_update_boot(X86_CR4_PGE); 170 cr4_set_bits_and_update_boot(X86_CR4_PGE);
171 __supported_pte_mask |= _PAGE_GLOBAL; 171 __supported_pte_mask |= _PAGE_GLOBAL;
172 } else 172 } else
173 __supported_pte_mask &= ~_PAGE_GLOBAL; 173 __supported_pte_mask &= ~_PAGE_GLOBAL;
174 174
175 /* Enable 1 GB linear kernel mappings if available: */ 175 /* Enable 1 GB linear kernel mappings if available: */
176 if (direct_gbpages && cpu_has_gbpages) { 176 if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
177 printk(KERN_INFO "Using GB pages for direct mapping\n"); 177 printk(KERN_INFO "Using GB pages for direct mapping\n");
178 page_size_mask |= 1 << PG_LEVEL_1G; 178 page_size_mask |= 1 << PG_LEVEL_1G;
179 } else { 179 } else {
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bd7a9b9e2e14..85af914e3d27 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -284,7 +284,7 @@ kernel_physical_mapping_init(unsigned long start,
284 */ 284 */
285 mapping_iter = 1; 285 mapping_iter = 1;
286 286
287 if (!cpu_has_pse) 287 if (!boot_cpu_has(X86_FEATURE_PSE))
288 use_pse = 0; 288 use_pse = 0;
289 289
290repeat: 290repeat:
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 214afda97911..89d97477c1d9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1295,7 +1295,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
1295 struct vmem_altmap *altmap = to_vmem_altmap(start); 1295 struct vmem_altmap *altmap = to_vmem_altmap(start);
1296 int err; 1296 int err;
1297 1297
1298 if (cpu_has_pse) 1298 if (boot_cpu_has(X86_FEATURE_PSE))
1299 err = vmemmap_populate_hugepages(start, end, node, altmap); 1299 err = vmemmap_populate_hugepages(start, end, node, altmap);
1300 else if (altmap) { 1300 else if (altmap) {
1301 pr_err_once("%s: no cpu support for altmap allocations\n", 1301 pr_err_once("%s: no cpu support for altmap allocations\n",
@@ -1338,7 +1338,7 @@ void register_page_bootmem_memmap(unsigned long section_nr,
1338 } 1338 }
1339 get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO); 1339 get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
1340 1340
1341 if (!cpu_has_pse) { 1341 if (!boot_cpu_has(X86_FEATURE_PSE)) {
1342 next = (addr + PAGE_SIZE) & PAGE_MASK; 1342 next = (addr + PAGE_SIZE) & PAGE_MASK;
1343 pmd = pmd_offset(pud, addr); 1343 pmd = pmd_offset(pud, addr);
1344 if (pmd_none(*pmd)) 1344 if (pmd_none(*pmd))
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0d8d53d1f5cc..f0894910bdd7 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -378,7 +378,7 @@ EXPORT_SYMBOL(iounmap);
378int __init arch_ioremap_pud_supported(void) 378int __init arch_ioremap_pud_supported(void)
379{ 379{
380#ifdef CONFIG_X86_64 380#ifdef CONFIG_X86_64
381 return cpu_has_gbpages; 381 return boot_cpu_has(X86_FEATURE_GBPAGES);
382#else 382#else
383 return 0; 383 return 0;
384#endif 384#endif
@@ -386,7 +386,7 @@ int __init arch_ioremap_pud_supported(void)
386 386
387int __init arch_ioremap_pmd_supported(void) 387int __init arch_ioremap_pmd_supported(void)
388{ 388{
389 return cpu_has_pse; 389 return boot_cpu_has(X86_FEATURE_PSE);
390} 390}
391 391
392/* 392/*
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a1f0e1d0ddc2..7a1f7bbf4105 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1055,7 +1055,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
1055 /* 1055 /*
1056 * Map everything starting from the Gb boundary, possibly with 1G pages 1056 * Map everything starting from the Gb boundary, possibly with 1G pages
1057 */ 1057 */
1058 while (cpu_has_gbpages && end - start >= PUD_SIZE) { 1058 while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
1059 set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | 1059 set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
1060 massage_pgprot(pud_pgprot))); 1060 massage_pgprot(pud_pgprot)));
1061 1061
@@ -1466,7 +1466,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1466 * error case we fall back to cpa_flush_all (which uses 1466 * error case we fall back to cpa_flush_all (which uses
1467 * WBINVD): 1467 * WBINVD):
1468 */ 1468 */
1469 if (!ret && cpu_has_clflush) { 1469 if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
1470 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { 1470 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
1471 cpa_flush_array(addr, numpages, cache, 1471 cpa_flush_array(addr, numpages, cache,
1472 cpa.flags, pages); 1472 cpa.flags, pages);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index faec01e7a17d..fb0604f11eec 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -40,11 +40,22 @@
40static bool boot_cpu_done; 40static bool boot_cpu_done;
41 41
42static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT); 42static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
43static void init_cache_modes(void);
43 44
44static inline void pat_disable(const char *reason) 45void pat_disable(const char *reason)
45{ 46{
47 if (!__pat_enabled)
48 return;
49
50 if (boot_cpu_done) {
51 WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
52 return;
53 }
54
46 __pat_enabled = 0; 55 __pat_enabled = 0;
47 pr_info("x86/PAT: %s\n", reason); 56 pr_info("x86/PAT: %s\n", reason);
57
58 init_cache_modes();
48} 59}
49 60
50static int __init nopat(char *str) 61static int __init nopat(char *str)
@@ -181,7 +192,7 @@ static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
181 * configuration. 192 * configuration.
182 * Using lower indices is preferred, so we start with highest index. 193 * Using lower indices is preferred, so we start with highest index.
183 */ 194 */
184void pat_init_cache_modes(u64 pat) 195static void __init_cache_modes(u64 pat)
185{ 196{
186 enum page_cache_mode cache; 197 enum page_cache_mode cache;
187 char pat_msg[33]; 198 char pat_msg[33];
@@ -202,14 +213,11 @@ static void pat_bsp_init(u64 pat)
202{ 213{
203 u64 tmp_pat; 214 u64 tmp_pat;
204 215
205 if (!cpu_has_pat) { 216 if (!boot_cpu_has(X86_FEATURE_PAT)) {
206 pat_disable("PAT not supported by CPU."); 217 pat_disable("PAT not supported by CPU.");
207 return; 218 return;
208 } 219 }
209 220
210 if (!pat_enabled())
211 goto done;
212
213 rdmsrl(MSR_IA32_CR_PAT, tmp_pat); 221 rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
214 if (!tmp_pat) { 222 if (!tmp_pat) {
215 pat_disable("PAT MSR is 0, disabled."); 223 pat_disable("PAT MSR is 0, disabled.");
@@ -218,16 +226,12 @@ static void pat_bsp_init(u64 pat)
218 226
219 wrmsrl(MSR_IA32_CR_PAT, pat); 227 wrmsrl(MSR_IA32_CR_PAT, pat);
220 228
221done: 229 __init_cache_modes(pat);
222 pat_init_cache_modes(pat);
223} 230}
224 231
225static void pat_ap_init(u64 pat) 232static void pat_ap_init(u64 pat)
226{ 233{
227 if (!pat_enabled()) 234 if (!boot_cpu_has(X86_FEATURE_PAT)) {
228 return;
229
230 if (!cpu_has_pat) {
231 /* 235 /*
232 * If this happens we are on a secondary CPU, but switched to 236 * If this happens we are on a secondary CPU, but switched to
233 * PAT on the boot CPU. We have no way to undo PAT. 237 * PAT on the boot CPU. We have no way to undo PAT.
@@ -238,18 +242,32 @@ static void pat_ap_init(u64 pat)
238 wrmsrl(MSR_IA32_CR_PAT, pat); 242 wrmsrl(MSR_IA32_CR_PAT, pat);
239} 243}
240 244
241void pat_init(void) 245static void init_cache_modes(void)
242{ 246{
243 u64 pat; 247 u64 pat = 0;
244 struct cpuinfo_x86 *c = &boot_cpu_data; 248 static int init_cm_done;
245 249
246 if (!pat_enabled()) { 250 if (init_cm_done)
251 return;
252
253 if (boot_cpu_has(X86_FEATURE_PAT)) {
254 /*
255 * CPU supports PAT. Set PAT table to be consistent with
256 * PAT MSR. This case supports "nopat" boot option, and
257 * virtual machine environments which support PAT without
258 * MTRRs. In specific, Xen has unique setup to PAT MSR.
259 *
260 * If PAT MSR returns 0, it is considered invalid and emulates
261 * as No PAT.
262 */
263 rdmsrl(MSR_IA32_CR_PAT, pat);
264 }
265
266 if (!pat) {
247 /* 267 /*
248 * No PAT. Emulate the PAT table that corresponds to the two 268 * No PAT. Emulate the PAT table that corresponds to the two
249 * cache bits, PWT (Write Through) and PCD (Cache Disable). This 269 * cache bits, PWT (Write Through) and PCD (Cache Disable).
250 * setup is the same as the BIOS default setup when the system 270 * This setup is also the same as the BIOS default setup.
251 * has PAT but the "nopat" boot option has been specified. This
252 * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
253 * 271 *
254 * PTE encoding: 272 * PTE encoding:
255 * 273 *
@@ -266,10 +284,36 @@ void pat_init(void)
266 */ 284 */
267 pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) | 285 pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
268 PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC); 286 PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
287 }
288
289 __init_cache_modes(pat);
290
291 init_cm_done = 1;
292}
293
294/**
295 * pat_init - Initialize PAT MSR and PAT table
296 *
297 * This function initializes PAT MSR and PAT table with an OS-defined value
298 * to enable additional cache attributes, WC and WT.
299 *
300 * This function must be called on all CPUs using the specific sequence of
301 * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
302 * procedure for PAT.
303 */
304void pat_init(void)
305{
306 u64 pat;
307 struct cpuinfo_x86 *c = &boot_cpu_data;
308
309 if (!pat_enabled()) {
310 init_cache_modes();
311 return;
312 }
269 313
270 } else if ((c->x86_vendor == X86_VENDOR_INTEL) && 314 if ((c->x86_vendor == X86_VENDOR_INTEL) &&
271 (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || 315 (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
272 ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) { 316 ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
273 /* 317 /*
274 * PAT support with the lower four entries. Intel Pentium 2, 318 * PAT support with the lower four entries. Intel Pentium 2,
275 * 3, M, and 4 are affected by PAT errata, which makes the 319 * 3, M, and 4 are affected by PAT errata, which makes the
@@ -734,25 +778,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
734 if (file->f_flags & O_DSYNC) 778 if (file->f_flags & O_DSYNC)
735 pcm = _PAGE_CACHE_MODE_UC_MINUS; 779 pcm = _PAGE_CACHE_MODE_UC_MINUS;
736 780
737#ifdef CONFIG_X86_32
738 /*
739 * On the PPro and successors, the MTRRs are used to set
740 * memory types for physical addresses outside main memory,
741 * so blindly setting UC or PWT on those pages is wrong.
742 * For Pentiums and earlier, the surround logic should disable
743 * caching for the high addresses through the KEN pin, but
744 * we maintain the tradition of paranoia in this code.
745 */
746 if (!pat_enabled() &&
747 !(boot_cpu_has(X86_FEATURE_MTRR) ||
748 boot_cpu_has(X86_FEATURE_K6_MTRR) ||
749 boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
750 boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
751 (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
752 pcm = _PAGE_CACHE_MODE_UC;
753 }
754#endif
755
756 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | 781 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
757 cachemode2protval(pcm)); 782 cachemode2protval(pcm));
758 return 1; 783 return 1;