Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/mm/ioremap.c   |  4
-rw-r--r--  arch/x86/mm/pageattr.c  |  2
-rw-r--r--  arch/x86/mm/pat.c       | 16
-rw-r--r--  arch/x86/pci/i386.c     |  4
4 files changed, 13 insertions, 13 deletions
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 71bb3159031a..ddeafed1171e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -261,7 +261,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
 	/*
 	 * Ideally, this should be:
-	 *	pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
 	 *
 	 * Till we fix all X drivers to use ioremap_wc(), we will use
 	 * UC MINUS.
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  */
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
-	if (pat_wc_enabled)
+	if (pat_enabled)
 		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
 					__builtin_return_address(0));
 	else
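For context on the behaviour being gated here: with pat_enabled set, ioremap_wc() hands back a write-combining mapping, otherwise it degrades to the UC_MINUS mapping that ioremap_nocache() also uses, so callers need no special casing for the no-PAT path. A minimal, hypothetical caller sketch, not part of this patch; the base address, size and function name are made up for illustration:

/*
 * Hypothetical driver snippet, for illustration only.  EXAMPLE_FB_BASE,
 * EXAMPLE_FB_SIZE and example_fb_probe() are made-up names; the mapping
 * call and its PAT/no-PAT fallback behaviour are the point.
 */
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_FB_BASE		0xd0000000UL
#define EXAMPLE_FB_SIZE		(8UL * 1024 * 1024)

static void __iomem *example_fb;

static int example_fb_probe(void)
{
	/* WC when PAT is enabled, UC_MINUS otherwise - same call either way */
	example_fb = ioremap_wc(EXAMPLE_FB_BASE, EXAMPLE_FB_SIZE);
	if (!example_fb)
		return -ENOMEM;
	return 0;
}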
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 60bcb5b6a37e..6916fe4bf0cb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -805,7 +805,7 @@ int _set_memory_wc(unsigned long addr, int numpages)
 
 int set_memory_wc(unsigned long addr, int numpages)
 {
-	if (!pat_wc_enabled)
+	if (!pat_enabled)
 		return set_memory_uc(addr, numpages);
 
 	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
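set_memory_wc() takes a kernel virtual address in the direct mapping plus a page count and, as the hunk above shows, quietly falls back to set_memory_uc() when PAT is off. A hypothetical usage sketch follows; the helper name is invented, and a real driver would restore WB with set_memory_wb() before freeing:

/*
 * Hypothetical helper, for illustration only: allocate four pages and
 * mark them write-combining.  example_alloc_wc_buffer() is a made-up
 * name; error handling is kept to the minimum.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void *example_alloc_wc_buffer(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 2^2 = 4 pages */
	void *buf;

	if (!page)
		return NULL;

	buf = page_address(page);
	if (set_memory_wc((unsigned long)buf, 4)) {
		__free_pages(page, 2);
		return NULL;
	}
	return buf;
}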
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index a8b69bb26972..4beccea0897f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -26,11 +26,11 @@
 #include <asm/io.h>
 
 #ifdef CONFIG_X86_PAT
-int __read_mostly pat_wc_enabled = 1;
+int __read_mostly pat_enabled = 1;
 
 void __cpuinit pat_disable(char *reason)
 {
-	pat_wc_enabled = 0;
+	pat_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
 }
 
@@ -72,7 +72,7 @@ void pat_init(void)
 {
 	u64 pat;
 
-	if (!pat_wc_enabled)
+	if (!pat_enabled)
 		return;
 
 	/* Paranoia check. */
@@ -225,8 +225,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	unsigned long actual_type;
 	int err = 0;
 
-	/* Only track when pat_wc_enabled */
-	if (!pat_wc_enabled) {
+	/* Only track when pat_enabled */
+	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
 		if (ret_type) {
 			if (req_type == -1) {
@@ -440,8 +440,8 @@ int free_memtype(u64 start, u64 end)
 	struct memtype *ml;
 	int err = -EINVAL;
 
-	/* Only track when pat_wc_enabled */
-	if (!pat_wc_enabled) {
+	/* Only track when pat_enabled */
+	if (!pat_enabled) {
 		return 0;
 	}
 
@@ -535,7 +535,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	 * caching for the high addresses through the KEN pin, but
 	 * we maintain the tradition of paranoia in this code.
 	 */
-	if (!pat_wc_enabled &&
+	if (!pat_enabled &&
 	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
 	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
 	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
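pat_disable(), shown above clearing pat_enabled, is intended to be called early in boot when PAT cannot or should not be used (missing CPU feature, known-bad hardware, or an explicit user override). A hypothetical sketch of such a hook wired to a boot parameter; the handler name and message are assumptions, not necessarily how the tree wires it up:

/*
 * Hypothetical boot-parameter hook, for illustration only: a "nopat"
 * kernel parameter that clears pat_enabled through pat_disable().
 * nopat_setup() is a made-up name.
 */
#include <linux/init.h>

static int __init nopat_setup(char *str)
{
	pat_disable("PAT support disabled via kernel parameter.");
	return 0;
}
early_param("nopat", nopat_setup);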
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 10fb308fded8..6ccd7a108cd4 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -299,9 +299,9 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 		return -EINVAL;
 
 	prot = pgprot_val(vma->vm_page_prot);
-	if (pat_wc_enabled && write_combine)
+	if (pat_enabled && write_combine)
 		prot |= _PAGE_CACHE_WC;
-	else if (pat_wc_enabled || boot_cpu_data.x86 > 3)
+	else if (pat_enabled || boot_cpu_data.x86 > 3)
 		/*
 		 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
 		 * To avoid attribute conflicts, request UC MINUS here