diff options
author:    Borislav Petkov <bp@suse.de>   2015-06-04 12:55:11 -0400
committer: Ingo Molnar <mingo@kernel.org> 2015-06-07 09:28:53 -0400
commit:    7202fdb1b3299ec78dc1e7702260947ec20dd9e9 (patch)
tree:      cd66662e0d8e2875d603d994be0960d47de48306
parent:    9cd25aac1f44f269de5ecea11f7d927f37f1d01c (diff)
x86/mm/pat: Remove pat_enabled() checks
Now that we emulate a PAT table when PAT is disabled, there's no
need for those checks anymore as the PAT abstraction will handle
those cases too.
Based on a conglomerate patch from Toshi Kani.
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Toshi Kani <toshi.kani@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Elliott@hp.com
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: arnd@arndb.de
Cc: hch@lst.de
Cc: hmh@hmh.eng.br
Cc: jgross@suse.com
Cc: konrad.wilk@oracle.com
Cc: linux-mm <linux-mm@kvack.org>
Cc: linux-nvdimm@lists.01.org
Cc: stefan.bader@canonical.com
Cc: yigal@plexistor.com
Link: http://lkml.kernel.org/r/1433436928-31903-4-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/mm/iomap_32.c | 12
 arch/x86/mm/ioremap.c  |  5
 arch/x86/mm/pageattr.c |  3
 arch/x86/mm/pat.c      | 13
 4 files changed, 10 insertions(+), 23 deletions(-)
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 3a2ec8790ca7..a9dc7a37e6a2 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c | |||
@@ -77,13 +77,13 @@ void __iomem * | |||
77 | iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) | 77 | iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) |
78 | { | 78 | { |
79 | /* | 79 | /* |
80 | * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS. | 80 | * For non-PAT systems, translate non-WB request to UC- just in |
81 | * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the | 81 | * case the caller set the PWT bit to prot directly without using |
82 | * MTRR is UC or WC. UC_MINUS gets the real intention, of the | 82 | * pgprot_writecombine(). UC- translates to uncached if the MTRR |
83 | * user, which is "WC if the MTRR is WC, UC if you can't do that." | 83 | * is UC or WC. UC- gets the real intention, of the user, which is |
84 | * "WC if the MTRR is WC, UC if you can't do that." | ||
84 | */ | 85 | */ |
85 | if (!pat_enabled() && pgprot_val(prot) == | 86 | if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB) |
86 | (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC))) | ||
87 | prot = __pgprot(__PAGE_KERNEL | | 87 | prot = __pgprot(__PAGE_KERNEL | |
88 | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); | 88 | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); |
89 | 89 | ||
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index b0da3588b452..cc0f17c5ad9f 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -292,11 +292,8 @@ EXPORT_SYMBOL_GPL(ioremap_uc); | |||
292 | */ | 292 | */ |
293 | void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) | 293 | void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) |
294 | { | 294 | { |
295 | if (pat_enabled()) | 295 | return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC, |
296 | return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC, | ||
297 | __builtin_return_address(0)); | 296 | __builtin_return_address(0)); |
298 | else | ||
299 | return ioremap_nocache(phys_addr, size); | ||
300 | } | 297 | } |
301 | EXPORT_SYMBOL(ioremap_wc); | 298 | EXPORT_SYMBOL(ioremap_wc); |
302 | 299 | ||
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index fae3c5366ac0..31b4f3fd1207 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -1572,9 +1572,6 @@ int set_memory_wc(unsigned long addr, int numpages) | |||
1572 | { | 1572 | { |
1573 | int ret; | 1573 | int ret; |
1574 | 1574 | ||
1575 | if (!pat_enabled()) | ||
1576 | return set_memory_uc(addr, numpages); | ||
1577 | |||
1578 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, | 1575 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
1579 | _PAGE_CACHE_MODE_WC, NULL); | 1576 | _PAGE_CACHE_MODE_WC, NULL); |
1580 | if (ret) | 1577 | if (ret) |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 6dc7826e4797..f89e460c55a8 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -438,12 +438,8 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, | |||
438 | 438 | ||
439 | if (!pat_enabled()) { | 439 | if (!pat_enabled()) { |
440 | /* This is identical to page table setting without PAT */ | 440 | /* This is identical to page table setting without PAT */ |
441 | if (new_type) { | 441 | if (new_type) |
442 | if (req_type == _PAGE_CACHE_MODE_WC) | 442 | *new_type = req_type; |
443 | *new_type = _PAGE_CACHE_MODE_UC_MINUS; | ||
444 | else | ||
445 | *new_type = req_type; | ||
446 | } | ||
447 | return 0; | 443 | return 0; |
448 | } | 444 | } |
449 | 445 | ||
@@ -947,11 +943,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, | |||
947 | 943 | ||
948 | pgprot_t pgprot_writecombine(pgprot_t prot) | 944 | pgprot_t pgprot_writecombine(pgprot_t prot) |
949 | { | 945 | { |
950 | if (pat_enabled()) | 946 | return __pgprot(pgprot_val(prot) | |
951 | return __pgprot(pgprot_val(prot) | | ||
952 | cachemode2protval(_PAGE_CACHE_MODE_WC)); | 947 | cachemode2protval(_PAGE_CACHE_MODE_WC)); |
953 | else | ||
954 | return pgprot_noncached(prot); | ||
955 | } | 948 | } |
956 | EXPORT_SYMBOL_GPL(pgprot_writecombine); | 949 | EXPORT_SYMBOL_GPL(pgprot_writecombine); |
957 | 950 | ||