 arch/x86/include/asm/pat.h      |  7 +------
 arch/x86/kernel/cpu/mtrr/main.c |  2 +-
 arch/x86/mm/iomap_32.c          |  2 +-
 arch/x86/mm/ioremap.c           |  4 ++--
 arch/x86/mm/pageattr.c          |  2 +-
 arch/x86/mm/pat.c               | 33 +++++++++++++------------------
 arch/x86/pci/i386.c             |  6 +++---
 7 files changed, 24 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 91bc4ba95f91..cdcff7f7f694 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -4,12 +4,7 @@
 #include <linux/types.h>
 #include <asm/pgtable_types.h>
 
-#ifdef CONFIG_X86_PAT
-extern int pat_enabled;
-#else
-static const int pat_enabled;
-#endif
-
+bool pat_enabled(void);
 extern void pat_init(void);
 void pat_init_cache_modes(void);
 
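The header change above replaces a config-dependent variable with one unconditional function declaration, so every caller compiles against the same interface. A minimal standalone sketch of this accessor pattern, using hypothetical names (feature_enabled, feature_disable) rather than the kernel's, might look like:

        /* accessor-pattern sketch -- illustrative, not kernel code */
        #include <stdbool.h>
        #include <stdio.h>

        /* the flag becomes private to one translation unit */
        static int __feature_enabled = 1;

        /* the only way outside code can observe the flag */
        bool feature_enabled(void)
        {
                return !!__feature_enabled;
        }

        static void feature_disable(const char *reason)
        {
                __feature_enabled = 0;
                printf("feature: %s\n", reason);
        }

        int main(void)
        {
                printf("%d\n", feature_enabled());      /* prints 1 */
                feature_disable("disabled for testing");
                printf("%d\n", feature_enabled());      /* prints 0 */
                return 0;
        }

One benefit of the function form is that the flag can no longer be written from outside its translation unit, and the implementation is free to change later without touching any caller.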
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 383efb26e516..e7ed0d8ebacb 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -558,7 +558,7 @@ int arch_phys_wc_add(unsigned long base, unsigned long size)
 {
 	int ret;
 
-	if (pat_enabled || !mtrr_enabled())
+	if (pat_enabled() || !mtrr_enabled())
 		return 0;  /* Success! (We don't need to do anything.) */
 
 	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
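When pat_enabled() is true, arch_phys_wc_add() reports success without consuming an MTRR slot, because callers are expected to obtain write-combining through PAT mappings instead. A hedged caller-side sketch (fb_base/fb_len are invented names; the arch_phys_wc_add()/arch_phys_wc_del() pair is the real API):

        /* typical framebuffer-style usage, illustrative names */
        int wc_cookie;

        wc_cookie = arch_phys_wc_add(fb_base, fb_len);  /* 0 under PAT */
        if (wc_cookie < 0)
                pr_warn("write-combining unavailable\n");
        /* ... use the mapping ... */
        arch_phys_wc_del(wc_cookie);    /* safe for 0 or negative cookies */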
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 9ca35fc60cfe..3a2ec8790ca7 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -82,7 +82,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 	 * MTRR is UC or WC.  UC_MINUS gets the real intention, of the
 	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
 	 */
-	if (!pat_enabled && pgprot_val(prot) ==
+	if (!pat_enabled() && pgprot_val(prot) ==
 	    (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
 		prot = __pgprot(__PAGE_KERNEL |
 				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
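iomap_atomic_prot_pfn() sits underneath the io_mapping helpers, so the promotion above is what a non-PAT system sees when a driver asks for an atomic WC mapping. A hedged sketch of that caller path (bar_start/bar_len/offset are invented names; the io_mapping_* helpers are the real API):

        struct io_mapping *map;
        void *vaddr;

        map = io_mapping_create_wc(bar_start, bar_len); /* request WC */
        if (!map)
                return -ENOMEM;
        vaddr = io_mapping_map_atomic_wc(map, offset);  /* UC- without PAT */
        /* ... short, non-sleeping access ... */
        io_mapping_unmap_atomic(vaddr);
        io_mapping_free(map);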
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a493bb83aa89..82d63ed70045 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -234,7 +234,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
 	/*
 	 * Ideally, this should be:
-	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
+	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
 	 *
 	 * Till we fix all X drivers to use ioremap_wc(), we will use
 	 * UC MINUS. Drivers that are certain they need or can already
@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(ioremap_uc);
  */
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
-	if (pat_enabled)
+	if (pat_enabled())
 		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
 					__builtin_return_address(0));
 	else
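ioremap_wc() therefore degrades transparently: with PAT the mapping is genuinely write-combined, without it the caller silently gets the UC-minus fallback. A hedged driver-side sketch (bar_start/bar_len are invented names):

        void __iomem *fb;

        fb = ioremap_wc(bar_start, bar_len);    /* WC with PAT, UC- without */
        if (!fb)
                return -ENOMEM;
        /* ... writel()/memcpy_toio() as usual ... */
        iounmap(fb);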
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 397838eb292b..70d221fe2eb4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1571,7 +1571,7 @@ int set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;
 
-	if (!pat_enabled)
+	if (!pat_enabled())
 		return set_memory_uc(addr, numpages);
 
 	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
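set_memory_wc() shows the same fallback idea for RAM: without PAT the request is downgraded to fully uncached. A hedged usage sketch (buf/num_pages are invented names; set_memory_wc()/set_memory_wb() are the real pair):

        unsigned long addr = (unsigned long)buf;

        if (set_memory_wc(addr, num_pages))     /* UC fallback without PAT */
                pr_warn("could not make buffer write-combining\n");
        /* ... device writes through the buffer ... */
        set_memory_wb(addr, num_pages);         /* restore before freeing */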
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 8c50b9bfa996..484dce7f759b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -36,12 +36,11 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
 
-#ifdef CONFIG_X86_PAT
-int __read_mostly pat_enabled = 1;
+static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
 
 static inline void pat_disable(const char *reason)
 {
-	pat_enabled = 0;
+	__pat_enabled = 0;
 	pr_info("x86/PAT: %s\n", reason);
 }
 
@@ -51,13 +50,11 @@ static int __init nopat(char *str)
 	return 0;
 }
 early_param("nopat", nopat);
-#else
-static inline void pat_disable(const char *reason)
-{
-	(void)reason;
-}
-#endif
 
+bool pat_enabled(void)
+{
+	return !!__pat_enabled;
+}
 
 int pat_debug_enable;
 
@@ -201,7 +198,7 @@ void pat_init(void)
 	u64 pat;
 	bool boot_cpu = !boot_pat_state;
 
-	if (!pat_enabled)
+	if (!pat_enabled())
 		return;
 
 	if (!cpu_has_pat) {
@@ -402,7 +399,7 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
 	BUG_ON(start >= end);	/* end is exclusive */
 
-	if (!pat_enabled) {
+	if (!pat_enabled()) {
 		/* This is identical to page table setting without PAT */
 		if (new_type) {
 			if (req_type == _PAGE_CACHE_MODE_WC)
@@ -477,7 +474,7 @@ int free_memtype(u64 start, u64 end)
 	int is_range_ram;
 	struct memtype *entry;
 
-	if (!pat_enabled)
+	if (!pat_enabled())
 		return 0;
 
 	/* Low ISA region is always mapped WB. No need to track */
@@ -625,7 +622,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	u64 to = from + size;
 	u64 cursor = from;
 
-	if (!pat_enabled)
+	if (!pat_enabled())
 		return 1;
 
 	while (cursor < to) {
@@ -661,7 +658,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	 * caching for the high addresses through the KEN pin, but
 	 * we maintain the tradition of paranoia in this code.
 	 */
-	if (!pat_enabled &&
+	if (!pat_enabled() &&
 	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
 	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
 	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
@@ -730,7 +727,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	 * the type requested matches the type of first page in the range.
 	 */
 	if (is_ram) {
-		if (!pat_enabled)
+		if (!pat_enabled())
 			return 0;
 
 		pcm = lookup_memtype(paddr);
@@ -844,7 +841,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 		return ret;
 	}
 
-	if (!pat_enabled)
+	if (!pat_enabled())
 		return 0;
 
 	/*
@@ -872,7 +869,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 {
 	enum page_cache_mode pcm;
 
-	if (!pat_enabled)
+	if (!pat_enabled())
 		return 0;
 
 	/* Set prot based on lookup */
@@ -913,7 +910,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
-	if (pat_enabled)
+	if (pat_enabled())
 		return __pgprot(pgprot_val(prot) |
 				cachemode2protval(_PAGE_CACHE_MODE_WC));
 	else
@@ -996,7 +993,7 @@ static const struct file_operations memtype_fops = {
 
 static int __init pat_memtype_list_init(void)
 {
-	if (pat_enabled) {
+	if (pat_enabled()) {
 		debugfs_create_file("pat_memtype_list", S_IRUSR,
 				    arch_debugfs_dir, NULL, &memtype_fops);
 	}
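Taken together, the pat.c hunks collapse the two CONFIG_X86_PAT variants into one implementation: the config option only seeds the initial flag value via IS_ENABLED(), and "nopat" on the command line clears it before pat_init() runs. A condensed view of the resulting flow (the nopat() body is reconstructed from the surrounding kernel source rather than shown in these hunks, so treat it as illustrative):

        static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);

        static inline void pat_disable(const char *reason)
        {
                __pat_enabled = 0;
                pr_info("x86/PAT: %s\n", reason);
        }

        /* "nopat" flips the flag during early parameter parsing */
        static int __init nopat(char *str)
        {
                pat_disable("PAT support disabled.");
                return 0;
        }
        early_param("nopat", nopat);

        /* the only way the rest of the kernel can observe the flag */
        bool pat_enabled(void)
        {
                return !!__pat_enabled;
        }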
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 349c0d32cc0b..0a9f2caf358f 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -429,12 +429,12 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	 * Caller can followup with UC MINUS request and add a WC mtrr if there
 	 * is a free mtrr slot.
 	 */
-	if (!pat_enabled && write_combine)
+	if (!pat_enabled() && write_combine)
 		return -EINVAL;
 
-	if (pat_enabled && write_combine)
+	if (pat_enabled() && write_combine)
 		prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
-	else if (pat_enabled || boot_cpu_data.x86 > 3)
+	else if (pat_enabled() || boot_cpu_data.x86 > 3)
 		/*
 		 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
 		 * To avoid attribute conflicts, request UC MINUS here
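The three conditionals above encode a small decision table; restated as a comment (derived directly from the hunk):

        /*
         * pat_enabled()  write_combine   resulting mapping
         * -------------------------------------------------------------
         * no             yes             -EINVAL (caller may retry with
         *                                UC- and add a WC MTRR)
         * yes            yes             WC
         * yes            no              UC-
         * no             no              UC- if the CPU is newer than a
         *                                386, otherwise prot unchanged
         */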