author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 16:59:34 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 16:59:34 -0500
commit     a023748d53c10850650fe86b1c4a7d421d576451 (patch)
tree       761d3f6d2a402ec0835c0ede44b7d55c1b15ec98 /arch/x86/mm
parent     773fed910d41e443e495a6bfa9ab1c2b7b13e012 (diff)
parent     0dbcae884779fdf7e2239a97ac7488877f0693d9 (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm tree changes from Ingo Molnar:
 "The biggest change is full PAT support from Jürgen Gross:

    The x86 architecture offers via the PAT (Page Attribute Table) a way
    to specify different caching modes in page table entries. The PAT MSR
    contains 8 entries each specifying one of 6 possible cache modes. A
    pte references one of those entries via 3 bits: _PAGE_PAT, _PAGE_PWT
    and _PAGE_PCD.

    The Linux kernel currently supports only 4 different cache modes. The
    PAT MSR is set up in a way that the setting of _PAGE_PAT in a pte
    doesn't matter: the top 4 entries in the PAT MSR are the same as the
    4 lower entries.

    This results in the kernel not supporting e.g. write-through mode.
    Especially this cache mode would speed up drivers of video cards
    which now have to use uncached accesses.

    OTOH some old processors (Pentium) don't support PAT correctly and
    the Xen hypervisor has been using a different PAT MSR configuration
    for some time now and can't change that as this setting is part of
    the ABI.

    This patch set abstracts the cache mode from the pte and introduces
    tables to translate between cache mode and pte bits (the default
    cache mode "write back" is hard-wired to PAT entry 0). The tables are
    statically initialized with values being compatible to old processors
    and current usage. As soon as the PAT MSR is changed (or - in case of
    Xen - is read at boot time) the tables are changed accordingly.
    Requests of mappings with special cache modes are always possible
    now, in case they are not supported there will be a fallback to a
    compatible but slower mode.

    Summing it up, this patch set adds the following features:

     - capability to support WT and WP cache modes on processors with
       full PAT support

     - processors with no or incorrect PAT support are still working as
       today, even if WT or WP cache modes are selected by drivers for
       some pages

     - reduction of Xen special handling regarding cache mode

  Another change is a boot speedup on ridiculously large RAM systems,
  plus other smaller fixes"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86: mm: Move PAT only functions to mm/pat.c
  xen: Support Xen pv-domains using PAT
  x86: Enable PAT to use cache mode translation tables
  x86: Respect PAT bit when copying pte values between large and normal pages
  x86: Support PAT bit in pagetable dump for lower levels
  x86: Clean up pgtable_types.h
  x86: Use new cache mode type in memtype related functions
  x86: Use new cache mode type in mm/ioremap.c
  x86: Use new cache mode type in setting page attributes
  x86: Remove looking for setting of _PAGE_PAT_LARGE in pageattr.c
  x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()
  x86: Use new cache mode type in mm/iomap_32.c
  x86: Use new cache mode type in asm/pgtable.h
  x86: Use new cache mode type in arch/x86/mm/init_64.c
  x86: Use new cache mode type in arch/x86/pci
  x86: Use new cache mode type in drivers/video/fbdev/vermilion
  x86: Use new cache mode type in drivers/video/fbdev/gbefb.c
  x86: Use new cache mode type in include/asm/fb.h
  x86: Make page cache mode a real type
  x86: mm: Use 2GB memory block size on large-memory x86-64 systems
  ...
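
To make that mechanism concrete, below is a minimal stand-alone C sketch of the
translation-table idea: one table maps each cache mode to the three pte
attribute bits, and a reverse table maps the 3-bit pte encoding back to a
cache mode. The bit positions follow the x86 layout (_PAGE_PWT = bit 3,
_PAGE_PCD = bit 4, _PAGE_PAT = bit 7), but the names and the demo main() are
illustrative only; the real kernel tables are the ones added in the
arch/x86/mm/init.c hunk below.

#include <stdio.h>
#include <stdint.h>

/* PTE attribute bits involved in cache-mode selection (x86, 4k pages). */
#define PAGE_PWT 0x08u   /* bit 3 */
#define PAGE_PCD 0x10u   /* bit 4 */
#define PAGE_PAT 0x80u   /* bit 7 */

enum cache_mode { CM_WB, CM_WC, CM_UC_MINUS, CM_UC, CM_WT, CM_WP, CM_NUM };

static const char *cm_name[CM_NUM] = { "WB", "WC", "UC-", "UC", "WT", "WP" };

/*
 * Forward table: cache mode -> pte bits. Statically set up for the
 * conservative case: WT and WP fall back to the compatible UC- encoding,
 * mirroring the kernel's default __cachemode2pte_tbl.
 */
static const uint16_t cm2pte[CM_NUM] = {
	[CM_WB]       = 0,
	[CM_WC]       = PAGE_PWT,
	[CM_UC_MINUS] = PAGE_PCD,
	[CM_UC]       = PAGE_PCD | PAGE_PWT,
	[CM_WT]       = PAGE_PCD,
	[CM_WP]       = PAGE_PCD,
};

/*
 * Reverse table, indexed by the 3-bit (PAT, PCD, PWT) encoding. With the
 * default PAT MSR layout the upper four entries mirror the lower four,
 * i.e. the _PAGE_PAT bit does not matter.
 */
static const enum cache_mode pte2cm[8] = {
	CM_WB, CM_WC, CM_UC_MINUS, CM_UC,	/* _PAGE_PAT clear */
	CM_WB, CM_WC, CM_UC_MINUS, CM_UC,	/* _PAGE_PAT set   */
};

/* Pack the scattered pte bits into the reverse table's 3-bit index. */
static unsigned int pte2idx(uint16_t pte)
{
	return ((pte & PAGE_PWT) >> 3) |	/* -> index bit 0 */
	       ((pte & PAGE_PCD) >> 3) |	/* -> index bit 1 */
	       ((pte & PAGE_PAT) >> 5);		/* -> index bit 2 */
}

int main(void)
{
	for (int cm = 0; cm < CM_NUM; cm++)
		printf("%-3s -> pte bits 0x%02x -> %s\n", cm_name[cm],
		       (unsigned int)cm2pte[cm],
		       cm_name[pte2cm[pte2idx(cm2pte[cm])]]);
	return 0;
}

Note how WT and WP collapse to the UC- encoding in the static table; that is
exactly the "compatible but slower" fallback the merge message describes for
processors without usable PAT, and pat_init_cache_modes() (see the pat.c hunk
below) rewrites the tables once the real PAT MSR layout is known.
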
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/dump_pagetables.c  |  24
-rw-r--r--  arch/x86/mm/init.c             |  37
-rw-r--r--  arch/x86/mm/init_64.c          |  16
-rw-r--r--  arch/x86/mm/iomap_32.c         |  12
-rw-r--r--  arch/x86/mm/ioremap.c          |  63
-rw-r--r--  arch/x86/mm/mm_internal.h      |   2
-rw-r--r--  arch/x86/mm/pageattr.c         |  84
-rw-r--r--  arch/x86/mm/pat.c              | 245
-rw-r--r--  arch/x86/mm/pat_internal.h     |  22
-rw-r--r--  arch/x86/mm/pat_rbtree.c       |   8
10 files changed, 347 insertions, 166 deletions
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 1a8053d1012e..f0cedf3395af 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -129,7 +129,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
129 129
130 if (!pgprot_val(prot)) { 130 if (!pgprot_val(prot)) {
131 /* Not present */ 131 /* Not present */
132 pt_dump_cont_printf(m, dmsg, " "); 132 pt_dump_cont_printf(m, dmsg, " ");
133 } else { 133 } else {
134 if (pr & _PAGE_USER) 134 if (pr & _PAGE_USER)
135 pt_dump_cont_printf(m, dmsg, "USR "); 135 pt_dump_cont_printf(m, dmsg, "USR ");
@@ -148,18 +148,16 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
148 else 148 else
149 pt_dump_cont_printf(m, dmsg, " "); 149 pt_dump_cont_printf(m, dmsg, " ");
150 150
151 /* Bit 9 has a different meaning on level 3 vs 4 */ 151 /* Bit 7 has a different meaning on level 3 vs 4 */
152 if (level <= 3) { 152 if (level <= 3 && pr & _PAGE_PSE)
153 if (pr & _PAGE_PSE) 153 pt_dump_cont_printf(m, dmsg, "PSE ");
154 pt_dump_cont_printf(m, dmsg, "PSE "); 154 else
155 else 155 pt_dump_cont_printf(m, dmsg, " ");
156 pt_dump_cont_printf(m, dmsg, " "); 156 if ((level == 4 && pr & _PAGE_PAT) ||
157 } else { 157 ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
158 if (pr & _PAGE_PAT) 158 pt_dump_cont_printf(m, dmsg, "pat ");
159 pt_dump_cont_printf(m, dmsg, "pat "); 159 else
160 else 160 pt_dump_cont_printf(m, dmsg, " ");
161 pt_dump_cont_printf(m, dmsg, " ");
162 }
163 if (pr & _PAGE_GLOBAL) 161 if (pr & _PAGE_GLOBAL)
164 pt_dump_cont_printf(m, dmsg, "GLB "); 162 pt_dump_cont_printf(m, dmsg, "GLB ");
165 else 163 else
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 66dba36f2343..82b41d56bb98 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -27,6 +27,35 @@
27 27
28#include "mm_internal.h" 28#include "mm_internal.h"
29 29
30/*
31 * Tables translating between page_cache_type_t and pte encoding.
32 * Minimal supported modes are defined statically, modified if more supported
33 * cache modes are available.
34 * Index into __cachemode2pte_tbl is the cachemode.
35 * Index into __pte2cachemode_tbl are the caching attribute bits of the pte
36 * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
37 */
38uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
39 [_PAGE_CACHE_MODE_WB] = 0,
40 [_PAGE_CACHE_MODE_WC] = _PAGE_PWT,
41 [_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
42 [_PAGE_CACHE_MODE_UC] = _PAGE_PCD | _PAGE_PWT,
43 [_PAGE_CACHE_MODE_WT] = _PAGE_PCD,
44 [_PAGE_CACHE_MODE_WP] = _PAGE_PCD,
45};
46EXPORT_SYMBOL_GPL(__cachemode2pte_tbl);
47uint8_t __pte2cachemode_tbl[8] = {
48 [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
49 [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
50 [__pte2cm_idx(_PAGE_PCD)] = _PAGE_CACHE_MODE_UC_MINUS,
51 [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD)] = _PAGE_CACHE_MODE_UC,
52 [__pte2cm_idx(_PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
53 [__pte2cm_idx(_PAGE_PWT | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
54 [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
55 [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
56};
57EXPORT_SYMBOL_GPL(__pte2cachemode_tbl);
58
30static unsigned long __initdata pgt_buf_start; 59static unsigned long __initdata pgt_buf_start;
31static unsigned long __initdata pgt_buf_end; 60static unsigned long __initdata pgt_buf_end;
32static unsigned long __initdata pgt_buf_top; 61static unsigned long __initdata pgt_buf_top;
@@ -687,3 +716,11 @@ void __init zone_sizes_init(void)
687 free_area_init_nodes(max_zone_pfns); 716 free_area_init_nodes(max_zone_pfns);
688} 717}
689 718
719void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
720{
721 /* entry 0 MUST be WB (hardwired to speed up translations) */
722 BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);
723
724 __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
725 __pte2cachemode_tbl[entry] = cache;
726}
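
(Side note on the indexing in the hunk above: per the comment, __pte2cm_idx()
packs the pte's caching bits _PAGE_PWT (bit 3), _PAGE_PCD (bit 4) and
_PAGE_PAT (bit 7) into index bits 0, 1 and 2 of __pte2cachemode_tbl. So, as a
worked example, a pte with PCD|PWT set yields index 3, which the table
translates back to _PAGE_CACHE_MODE_UC, matching the static forward table.)
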
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4e5dfec750fc..78e53c80fc12 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -52,7 +52,6 @@
52#include <asm/numa.h> 52#include <asm/numa.h>
53#include <asm/cacheflush.h> 53#include <asm/cacheflush.h>
54#include <asm/init.h> 54#include <asm/init.h>
55#include <asm/uv/uv.h>
56#include <asm/setup.h> 55#include <asm/setup.h>
57 56
58#include "mm_internal.h" 57#include "mm_internal.h"
@@ -338,12 +337,15 @@ pte_t * __init populate_extra_pte(unsigned long vaddr)
338 * Create large page table mappings for a range of physical addresses. 337 * Create large page table mappings for a range of physical addresses.
339 */ 338 */
340static void __init __init_extra_mapping(unsigned long phys, unsigned long size, 339static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
341 pgprot_t prot) 340 enum page_cache_mode cache)
342{ 341{
343 pgd_t *pgd; 342 pgd_t *pgd;
344 pud_t *pud; 343 pud_t *pud;
345 pmd_t *pmd; 344 pmd_t *pmd;
345 pgprot_t prot;
346 346
347 pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
348 pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
347 BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK)); 349 BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
348 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { 350 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
349 pgd = pgd_offset_k((unsigned long)__va(phys)); 351 pgd = pgd_offset_k((unsigned long)__va(phys));
@@ -366,12 +368,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
366 368
367void __init init_extra_mapping_wb(unsigned long phys, unsigned long size) 369void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
368{ 370{
369 __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE); 371 __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
370} 372}
371 373
372void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) 374void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
373{ 375{
374 __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE); 376 __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
375} 377}
376 378
377/* 379/*
@@ -1256,12 +1258,10 @@ static unsigned long probe_memory_block_size(void)
1256 /* start from 2g */ 1258 /* start from 2g */
1257 unsigned long bz = 1UL<<31; 1259 unsigned long bz = 1UL<<31;
1258 1260
1259#ifdef CONFIG_X86_UV 1261 if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) {
1260 if (is_uv_system()) { 1262 pr_info("Using 2GB memory block size for large-memory system\n");
1261 printk(KERN_INFO "UV: memory block size 2GB\n");
1262 return 2UL * 1024 * 1024 * 1024; 1263 return 2UL * 1024 * 1024 * 1024;
1263 } 1264 }
1264#endif
1265 1265
1266 /* less than 64g installed */ 1266 /* less than 64g installed */
1267 if ((max_pfn << PAGE_SHIFT) < (16UL << 32)) 1267 if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
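
(For scale: 64ULL << (30 - PAGE_SHIFT) is 64 GiB expressed in pages; with
4 KiB pages PAGE_SHIFT is 12, so the threshold works out to 64 << 18 =
16,777,216 pages. In other words, the 2 GB memory block size now applies to
any x86-64 machine with at least 64 GiB of RAM, not just UV systems, whose
CONFIG_X86_UV special case is removed above.)
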
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 7b179b499fa3..9ca35fc60cfe 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -33,17 +33,17 @@ static int is_io_mapping_possible(resource_size_t base, unsigned long size)
33 33
34int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot) 34int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
35{ 35{
36 unsigned long flag = _PAGE_CACHE_WC; 36 enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
37 int ret; 37 int ret;
38 38
39 if (!is_io_mapping_possible(base, size)) 39 if (!is_io_mapping_possible(base, size))
40 return -EINVAL; 40 return -EINVAL;
41 41
42 ret = io_reserve_memtype(base, base + size, &flag); 42 ret = io_reserve_memtype(base, base + size, &pcm);
43 if (ret) 43 if (ret)
44 return ret; 44 return ret;
45 45
46 *prot = __pgprot(__PAGE_KERNEL | flag); 46 *prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
47 return 0; 47 return 0;
48} 48}
49EXPORT_SYMBOL_GPL(iomap_create_wc); 49EXPORT_SYMBOL_GPL(iomap_create_wc);
@@ -82,8 +82,10 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
82 * MTRR is UC or WC. UC_MINUS gets the real intention, of the 82 * MTRR is UC or WC. UC_MINUS gets the real intention, of the
83 * user, which is "WC if the MTRR is WC, UC if you can't do that." 83 * user, which is "WC if the MTRR is WC, UC if you can't do that."
84 */ 84 */
85 if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) 85 if (!pat_enabled && pgprot_val(prot) ==
86 prot = PAGE_KERNEL_UC_MINUS; 86 (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
87 prot = __pgprot(__PAGE_KERNEL |
88 cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
87 89
88 return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot); 90 return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
89} 91}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index b12f43c192cf..fdf617c00e2f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -29,20 +29,20 @@
29 * conflicts. 29 * conflicts.
30 */ 30 */
31int ioremap_change_attr(unsigned long vaddr, unsigned long size, 31int ioremap_change_attr(unsigned long vaddr, unsigned long size,
32 unsigned long prot_val) 32 enum page_cache_mode pcm)
33{ 33{
34 unsigned long nrpages = size >> PAGE_SHIFT; 34 unsigned long nrpages = size >> PAGE_SHIFT;
35 int err; 35 int err;
36 36
37 switch (prot_val) { 37 switch (pcm) {
38 case _PAGE_CACHE_UC: 38 case _PAGE_CACHE_MODE_UC:
39 default: 39 default:
40 err = _set_memory_uc(vaddr, nrpages); 40 err = _set_memory_uc(vaddr, nrpages);
41 break; 41 break;
42 case _PAGE_CACHE_WC: 42 case _PAGE_CACHE_MODE_WC:
43 err = _set_memory_wc(vaddr, nrpages); 43 err = _set_memory_wc(vaddr, nrpages);
44 break; 44 break;
45 case _PAGE_CACHE_WB: 45 case _PAGE_CACHE_MODE_WB:
46 err = _set_memory_wb(vaddr, nrpages); 46 err = _set_memory_wb(vaddr, nrpages);
47 break; 47 break;
48 } 48 }
@@ -75,14 +75,14 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
75 * caller shouldn't need to know that small detail. 75 * caller shouldn't need to know that small detail.
76 */ 76 */
77static void __iomem *__ioremap_caller(resource_size_t phys_addr, 77static void __iomem *__ioremap_caller(resource_size_t phys_addr,
78 unsigned long size, unsigned long prot_val, void *caller) 78 unsigned long size, enum page_cache_mode pcm, void *caller)
79{ 79{
80 unsigned long offset, vaddr; 80 unsigned long offset, vaddr;
81 resource_size_t pfn, last_pfn, last_addr; 81 resource_size_t pfn, last_pfn, last_addr;
82 const resource_size_t unaligned_phys_addr = phys_addr; 82 const resource_size_t unaligned_phys_addr = phys_addr;
83 const unsigned long unaligned_size = size; 83 const unsigned long unaligned_size = size;
84 struct vm_struct *area; 84 struct vm_struct *area;
85 unsigned long new_prot_val; 85 enum page_cache_mode new_pcm;
86 pgprot_t prot; 86 pgprot_t prot;
87 int retval; 87 int retval;
88 void __iomem *ret_addr; 88 void __iomem *ret_addr;
@@ -134,38 +134,40 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
134 size = PAGE_ALIGN(last_addr+1) - phys_addr; 134 size = PAGE_ALIGN(last_addr+1) - phys_addr;
135 135
136 retval = reserve_memtype(phys_addr, (u64)phys_addr + size, 136 retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
137 prot_val, &new_prot_val); 137 pcm, &new_pcm);
138 if (retval) { 138 if (retval) {
139 printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval); 139 printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
140 return NULL; 140 return NULL;
141 } 141 }
142 142
143 if (prot_val != new_prot_val) { 143 if (pcm != new_pcm) {
144 if (!is_new_memtype_allowed(phys_addr, size, 144 if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
145 prot_val, new_prot_val)) {
146 printk(KERN_ERR 145 printk(KERN_ERR
147 "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n", 146 "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
148 (unsigned long long)phys_addr, 147 (unsigned long long)phys_addr,
149 (unsigned long long)(phys_addr + size), 148 (unsigned long long)(phys_addr + size),
150 prot_val, new_prot_val); 149 pcm, new_pcm);
151 goto err_free_memtype; 150 goto err_free_memtype;
152 } 151 }
153 prot_val = new_prot_val; 152 pcm = new_pcm;
154 } 153 }
155 154
156 switch (prot_val) { 155 prot = PAGE_KERNEL_IO;
157 case _PAGE_CACHE_UC: 156 switch (pcm) {
157 case _PAGE_CACHE_MODE_UC:
158 default: 158 default:
159 prot = PAGE_KERNEL_IO_NOCACHE; 159 prot = __pgprot(pgprot_val(prot) |
160 cachemode2protval(_PAGE_CACHE_MODE_UC));
160 break; 161 break;
161 case _PAGE_CACHE_UC_MINUS: 162 case _PAGE_CACHE_MODE_UC_MINUS:
162 prot = PAGE_KERNEL_IO_UC_MINUS; 163 prot = __pgprot(pgprot_val(prot) |
164 cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
163 break; 165 break;
164 case _PAGE_CACHE_WC: 166 case _PAGE_CACHE_MODE_WC:
165 prot = PAGE_KERNEL_IO_WC; 167 prot = __pgprot(pgprot_val(prot) |
168 cachemode2protval(_PAGE_CACHE_MODE_WC));
166 break; 169 break;
167 case _PAGE_CACHE_WB: 170 case _PAGE_CACHE_MODE_WB:
168 prot = PAGE_KERNEL_IO;
169 break; 171 break;
170 } 172 }
171 173
@@ -178,7 +180,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
178 area->phys_addr = phys_addr; 180 area->phys_addr = phys_addr;
179 vaddr = (unsigned long) area->addr; 181 vaddr = (unsigned long) area->addr;
180 182
181 if (kernel_map_sync_memtype(phys_addr, size, prot_val)) 183 if (kernel_map_sync_memtype(phys_addr, size, pcm))
182 goto err_free_area; 184 goto err_free_area;
183 185
184 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) 186 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@ -227,14 +229,14 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
227{ 229{
228 /* 230 /*
229 * Ideally, this should be: 231 * Ideally, this should be:
230 * pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS; 232 * pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
231 * 233 *
232 * Till we fix all X drivers to use ioremap_wc(), we will use 234 * Till we fix all X drivers to use ioremap_wc(), we will use
233 * UC MINUS. 235 * UC MINUS.
234 */ 236 */
235 unsigned long val = _PAGE_CACHE_UC_MINUS; 237 enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
236 238
237 return __ioremap_caller(phys_addr, size, val, 239 return __ioremap_caller(phys_addr, size, pcm,
238 __builtin_return_address(0)); 240 __builtin_return_address(0));
239} 241}
240EXPORT_SYMBOL(ioremap_nocache); 242EXPORT_SYMBOL(ioremap_nocache);
@@ -252,7 +254,7 @@ EXPORT_SYMBOL(ioremap_nocache);
252void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) 254void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
253{ 255{
254 if (pat_enabled) 256 if (pat_enabled)
255 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, 257 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
256 __builtin_return_address(0)); 258 __builtin_return_address(0));
257 else 259 else
258 return ioremap_nocache(phys_addr, size); 260 return ioremap_nocache(phys_addr, size);
@@ -261,7 +263,7 @@ EXPORT_SYMBOL(ioremap_wc);
261 263
262void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) 264void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
263{ 265{
264 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB, 266 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
265 __builtin_return_address(0)); 267 __builtin_return_address(0));
266} 268}
267EXPORT_SYMBOL(ioremap_cache); 269EXPORT_SYMBOL(ioremap_cache);
@@ -269,7 +271,8 @@ EXPORT_SYMBOL(ioremap_cache);
269void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, 271void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
270 unsigned long prot_val) 272 unsigned long prot_val)
271{ 273{
272 return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK), 274 return __ioremap_caller(phys_addr, size,
275 pgprot2cachemode(__pgprot(prot_val)),
273 __builtin_return_address(0)); 276 __builtin_return_address(0));
274} 277}
275EXPORT_SYMBOL(ioremap_prot); 278EXPORT_SYMBOL(ioremap_prot);
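
Nothing changes for ioremap() users at the API level; only the internal
plumbing now carries an enum page_cache_mode. A hypothetical driver-side
sketch (the physical addresses and sizes are made up, everything else uses the
existing io.h helpers) still looks like this:

#include <linux/io.h>

/* Hypothetical example; BAR address and sizes are made up. */
static void __iomem *regs, *fb;

static int map_example_device(void)
{
	regs = ioremap_nocache(0xfebf0000UL, 0x1000);	/* UC- mapping */
	/* WC if PAT is usable, otherwise ioremap_wc() falls back to UC- */
	fb = ioremap_wc(0xe0000000UL, 8UL << 20);
	if (!regs || !fb)
		return -ENOMEM;
	return 0;
}

static void unmap_example_device(void)
{
	if (fb)
		iounmap(fb);
	if (regs)
		iounmap(regs);
}
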
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 6b563a118891..62474ba66c8e 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -16,4 +16,6 @@ void zone_sizes_init(void);
16 16
17extern int after_bootmem; 17extern int after_bootmem;
18 18
19void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
20
19#endif /* __X86_MM_INTERNAL_H */ 21#endif /* __X86_MM_INTERNAL_H */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 36de293caf25..a3a5d46605d2 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -485,14 +485,23 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
485 485
486 /* 486 /*
487 * We are safe now. Check whether the new pgprot is the same: 487 * We are safe now. Check whether the new pgprot is the same:
488 * Convert protection attributes to 4k-format, as cpa->mask* are set
489 * up accordingly.
488 */ 490 */
489 old_pte = *kpte; 491 old_pte = *kpte;
490 old_prot = req_prot = pte_pgprot(old_pte); 492 old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
491 493
492 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); 494 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
493 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); 495 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
494 496
495 /* 497 /*
498 * req_prot is in format of 4k pages. It must be converted to large
499 * page format: the caching mode includes the PAT bit located at
500 * different bit positions in the two formats.
501 */
502 req_prot = pgprot_4k_2_large(req_prot);
503
504 /*
496 * Set the PSE and GLOBAL flags only if the PRESENT flag is 505 * Set the PSE and GLOBAL flags only if the PRESENT flag is
497 * set otherwise pmd_present/pmd_huge will return true even on 506 * set otherwise pmd_present/pmd_huge will return true even on
498 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL 507 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
@@ -585,13 +594,10 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
585 594
586 paravirt_alloc_pte(&init_mm, page_to_pfn(base)); 595 paravirt_alloc_pte(&init_mm, page_to_pfn(base));
587 ref_prot = pte_pgprot(pte_clrhuge(*kpte)); 596 ref_prot = pte_pgprot(pte_clrhuge(*kpte));
588 /* 597
589 * If we ever want to utilize the PAT bit, we need to 598 /* promote PAT bit to correct position */
590 * update this function to make sure it's converted from 599 if (level == PG_LEVEL_2M)
591 * bit 12 to bit 7 when we cross from the 2MB level to 600 ref_prot = pgprot_large_2_4k(ref_prot);
592 * the 4K level:
593 */
594 WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
595 601
596#ifdef CONFIG_X86_64 602#ifdef CONFIG_X86_64
597 if (level == PG_LEVEL_1G) { 603 if (level == PG_LEVEL_1G) {
@@ -879,6 +885,7 @@ static int populate_pmd(struct cpa_data *cpa,
879{ 885{
880 unsigned int cur_pages = 0; 886 unsigned int cur_pages = 0;
881 pmd_t *pmd; 887 pmd_t *pmd;
888 pgprot_t pmd_pgprot;
882 889
883 /* 890 /*
884 * Not on a 2M boundary? 891 * Not on a 2M boundary?
@@ -910,6 +917,8 @@ static int populate_pmd(struct cpa_data *cpa,
910 if (num_pages == cur_pages) 917 if (num_pages == cur_pages)
911 return cur_pages; 918 return cur_pages;
912 919
920 pmd_pgprot = pgprot_4k_2_large(pgprot);
921
913 while (end - start >= PMD_SIZE) { 922 while (end - start >= PMD_SIZE) {
914 923
915 /* 924 /*
@@ -921,7 +930,8 @@ static int populate_pmd(struct cpa_data *cpa,
921 930
922 pmd = pmd_offset(pud, start); 931 pmd = pmd_offset(pud, start);
923 932
924 set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); 933 set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
934 massage_pgprot(pmd_pgprot)));
925 935
926 start += PMD_SIZE; 936 start += PMD_SIZE;
927 cpa->pfn += PMD_SIZE; 937 cpa->pfn += PMD_SIZE;
@@ -949,6 +959,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
949 pud_t *pud; 959 pud_t *pud;
950 unsigned long end; 960 unsigned long end;
951 int cur_pages = 0; 961 int cur_pages = 0;
962 pgprot_t pud_pgprot;
952 963
953 end = start + (cpa->numpages << PAGE_SHIFT); 964 end = start + (cpa->numpages << PAGE_SHIFT);
954 965
@@ -986,12 +997,14 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
986 return cur_pages; 997 return cur_pages;
987 998
988 pud = pud_offset(pgd, start); 999 pud = pud_offset(pgd, start);
1000 pud_pgprot = pgprot_4k_2_large(pgprot);
989 1001
990 /* 1002 /*
991 * Map everything starting from the Gb boundary, possibly with 1G pages 1003 * Map everything starting from the Gb boundary, possibly with 1G pages
992 */ 1004 */
993 while (end - start >= PUD_SIZE) { 1005 while (end - start >= PUD_SIZE) {
994 set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); 1006 set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
1007 massage_pgprot(pud_pgprot)));
995 1008
996 start += PUD_SIZE; 1009 start += PUD_SIZE;
997 cpa->pfn += PUD_SIZE; 1010 cpa->pfn += PUD_SIZE;
@@ -1304,12 +1317,6 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1304 return 0; 1317 return 0;
1305} 1318}
1306 1319
1307static inline int cache_attr(pgprot_t attr)
1308{
1309 return pgprot_val(attr) &
1310 (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
1311}
1312
1313static int change_page_attr_set_clr(unsigned long *addr, int numpages, 1320static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1314 pgprot_t mask_set, pgprot_t mask_clr, 1321 pgprot_t mask_set, pgprot_t mask_clr,
1315 int force_split, int in_flag, 1322 int force_split, int in_flag,
@@ -1390,7 +1397,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1390 * No need to flush, when we did not set any of the caching 1397 * No need to flush, when we did not set any of the caching
1391 * attributes: 1398 * attributes:
1392 */ 1399 */
1393 cache = cache_attr(mask_set); 1400 cache = !!pgprot2cachemode(mask_set);
1394 1401
1395 /* 1402 /*
1396 * On success we use CLFLUSH, when the CPU supports it to 1403 * On success we use CLFLUSH, when the CPU supports it to
@@ -1445,7 +1452,8 @@ int _set_memory_uc(unsigned long addr, int numpages)
1445 * for now UC MINUS. see comments in ioremap_nocache() 1452 * for now UC MINUS. see comments in ioremap_nocache()
1446 */ 1453 */
1447 return change_page_attr_set(&addr, numpages, 1454 return change_page_attr_set(&addr, numpages,
1448 __pgprot(_PAGE_CACHE_UC_MINUS), 0); 1455 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1456 0);
1449} 1457}
1450 1458
1451int set_memory_uc(unsigned long addr, int numpages) 1459int set_memory_uc(unsigned long addr, int numpages)
@@ -1456,7 +1464,7 @@ int set_memory_uc(unsigned long addr, int numpages)
1456 * for now UC MINUS. see comments in ioremap_nocache() 1464 * for now UC MINUS. see comments in ioremap_nocache()
1457 */ 1465 */
1458 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1466 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1459 _PAGE_CACHE_UC_MINUS, NULL); 1467 _PAGE_CACHE_MODE_UC_MINUS, NULL);
1460 if (ret) 1468 if (ret)
1461 goto out_err; 1469 goto out_err;
1462 1470
@@ -1474,7 +1482,7 @@ out_err:
1474EXPORT_SYMBOL(set_memory_uc); 1482EXPORT_SYMBOL(set_memory_uc);
1475 1483
1476static int _set_memory_array(unsigned long *addr, int addrinarray, 1484static int _set_memory_array(unsigned long *addr, int addrinarray,
1477 unsigned long new_type) 1485 enum page_cache_mode new_type)
1478{ 1486{
1479 int i, j; 1487 int i, j;
1480 int ret; 1488 int ret;
@@ -1490,11 +1498,13 @@ static int _set_memory_array(unsigned long *addr, int addrinarray,
1490 } 1498 }
1491 1499
1492 ret = change_page_attr_set(addr, addrinarray, 1500 ret = change_page_attr_set(addr, addrinarray,
1493 __pgprot(_PAGE_CACHE_UC_MINUS), 1); 1501 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1502 1);
1494 1503
1495 if (!ret && new_type == _PAGE_CACHE_WC) 1504 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
1496 ret = change_page_attr_set_clr(addr, addrinarray, 1505 ret = change_page_attr_set_clr(addr, addrinarray,
1497 __pgprot(_PAGE_CACHE_WC), 1506 cachemode2pgprot(
1507 _PAGE_CACHE_MODE_WC),
1498 __pgprot(_PAGE_CACHE_MASK), 1508 __pgprot(_PAGE_CACHE_MASK),
1499 0, CPA_ARRAY, NULL); 1509 0, CPA_ARRAY, NULL);
1500 if (ret) 1510 if (ret)
@@ -1511,13 +1521,13 @@ out_free:
1511 1521
1512int set_memory_array_uc(unsigned long *addr, int addrinarray) 1522int set_memory_array_uc(unsigned long *addr, int addrinarray)
1513{ 1523{
1514 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS); 1524 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
1515} 1525}
1516EXPORT_SYMBOL(set_memory_array_uc); 1526EXPORT_SYMBOL(set_memory_array_uc);
1517 1527
1518int set_memory_array_wc(unsigned long *addr, int addrinarray) 1528int set_memory_array_wc(unsigned long *addr, int addrinarray)
1519{ 1529{
1520 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC); 1530 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
1521} 1531}
1522EXPORT_SYMBOL(set_memory_array_wc); 1532EXPORT_SYMBOL(set_memory_array_wc);
1523 1533
@@ -1527,10 +1537,12 @@ int _set_memory_wc(unsigned long addr, int numpages)
1527 unsigned long addr_copy = addr; 1537 unsigned long addr_copy = addr;
1528 1538
1529 ret = change_page_attr_set(&addr, numpages, 1539 ret = change_page_attr_set(&addr, numpages,
1530 __pgprot(_PAGE_CACHE_UC_MINUS), 0); 1540 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1541 0);
1531 if (!ret) { 1542 if (!ret) {
1532 ret = change_page_attr_set_clr(&addr_copy, numpages, 1543 ret = change_page_attr_set_clr(&addr_copy, numpages,
1533 __pgprot(_PAGE_CACHE_WC), 1544 cachemode2pgprot(
1545 _PAGE_CACHE_MODE_WC),
1534 __pgprot(_PAGE_CACHE_MASK), 1546 __pgprot(_PAGE_CACHE_MASK),
1535 0, 0, NULL); 1547 0, 0, NULL);
1536 } 1548 }
@@ -1545,7 +1557,7 @@ int set_memory_wc(unsigned long addr, int numpages)
1545 return set_memory_uc(addr, numpages); 1557 return set_memory_uc(addr, numpages);
1546 1558
1547 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1559 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1548 _PAGE_CACHE_WC, NULL); 1560 _PAGE_CACHE_MODE_WC, NULL);
1549 if (ret) 1561 if (ret)
1550 goto out_err; 1562 goto out_err;
1551 1563
@@ -1564,6 +1576,7 @@ EXPORT_SYMBOL(set_memory_wc);
1564 1576
1565int _set_memory_wb(unsigned long addr, int numpages) 1577int _set_memory_wb(unsigned long addr, int numpages)
1566{ 1578{
1579 /* WB cache mode is hard wired to all cache attribute bits being 0 */
1567 return change_page_attr_clear(&addr, numpages, 1580 return change_page_attr_clear(&addr, numpages,
1568 __pgprot(_PAGE_CACHE_MASK), 0); 1581 __pgprot(_PAGE_CACHE_MASK), 0);
1569} 1582}
@@ -1586,6 +1599,7 @@ int set_memory_array_wb(unsigned long *addr, int addrinarray)
1586 int i; 1599 int i;
1587 int ret; 1600 int ret;
1588 1601
1602 /* WB cache mode is hard wired to all cache attribute bits being 0 */
1589 ret = change_page_attr_clear(addr, addrinarray, 1603 ret = change_page_attr_clear(addr, addrinarray,
1590 __pgprot(_PAGE_CACHE_MASK), 1); 1604 __pgprot(_PAGE_CACHE_MASK), 1);
1591 if (ret) 1605 if (ret)
@@ -1648,7 +1662,7 @@ int set_pages_uc(struct page *page, int numpages)
1648EXPORT_SYMBOL(set_pages_uc); 1662EXPORT_SYMBOL(set_pages_uc);
1649 1663
1650static int _set_pages_array(struct page **pages, int addrinarray, 1664static int _set_pages_array(struct page **pages, int addrinarray,
1651 unsigned long new_type) 1665 enum page_cache_mode new_type)
1652{ 1666{
1653 unsigned long start; 1667 unsigned long start;
1654 unsigned long end; 1668 unsigned long end;
@@ -1666,10 +1680,11 @@ static int _set_pages_array(struct page **pages, int addrinarray,
1666 } 1680 }
1667 1681
1668 ret = cpa_set_pages_array(pages, addrinarray, 1682 ret = cpa_set_pages_array(pages, addrinarray,
1669 __pgprot(_PAGE_CACHE_UC_MINUS)); 1683 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS));
1670 if (!ret && new_type == _PAGE_CACHE_WC) 1684 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
1671 ret = change_page_attr_set_clr(NULL, addrinarray, 1685 ret = change_page_attr_set_clr(NULL, addrinarray,
1672 __pgprot(_PAGE_CACHE_WC), 1686 cachemode2pgprot(
1687 _PAGE_CACHE_MODE_WC),
1673 __pgprot(_PAGE_CACHE_MASK), 1688 __pgprot(_PAGE_CACHE_MASK),
1674 0, CPA_PAGES_ARRAY, pages); 1689 0, CPA_PAGES_ARRAY, pages);
1675 if (ret) 1690 if (ret)
@@ -1689,13 +1704,13 @@ err_out:
1689 1704
1690int set_pages_array_uc(struct page **pages, int addrinarray) 1705int set_pages_array_uc(struct page **pages, int addrinarray)
1691{ 1706{
1692 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS); 1707 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
1693} 1708}
1694EXPORT_SYMBOL(set_pages_array_uc); 1709EXPORT_SYMBOL(set_pages_array_uc);
1695 1710
1696int set_pages_array_wc(struct page **pages, int addrinarray) 1711int set_pages_array_wc(struct page **pages, int addrinarray)
1697{ 1712{
1698 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC); 1713 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
1699} 1714}
1700EXPORT_SYMBOL(set_pages_array_wc); 1715EXPORT_SYMBOL(set_pages_array_wc);
1701 1716
@@ -1714,6 +1729,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1714 unsigned long end; 1729 unsigned long end;
1715 int i; 1730 int i;
1716 1731
1732 /* WB cache mode is hard wired to all cache attribute bits being 0 */
1717 retval = cpa_clear_pages_array(pages, addrinarray, 1733 retval = cpa_clear_pages_array(pages, addrinarray,
1718 __pgprot(_PAGE_CACHE_MASK)); 1734 __pgprot(_PAGE_CACHE_MASK));
1719 if (retval) 1735 if (retval)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index c7eddbe6a612..edf299c8ff6c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,6 +31,7 @@
31#include <asm/io.h> 31#include <asm/io.h>
32 32
33#include "pat_internal.h" 33#include "pat_internal.h"
34#include "mm_internal.h"
34 35
35#ifdef CONFIG_X86_PAT 36#ifdef CONFIG_X86_PAT
36int __read_mostly pat_enabled = 1; 37int __read_mostly pat_enabled = 1;
@@ -66,6 +67,75 @@ __setup("debugpat", pat_debug_setup);
66 67
67static u64 __read_mostly boot_pat_state; 68static u64 __read_mostly boot_pat_state;
68 69
70#ifdef CONFIG_X86_PAT
71/*
72 * X86 PAT uses page flags WC and Uncached together to keep track of
73 * memory type of pages that have backing page struct. X86 PAT supports 3
74 * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
75 * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not
76 * been changed from its default (value of -1 used to denote this).
77 * Note we do not support _PAGE_CACHE_MODE_UC here.
78 */
79
80#define _PGMT_DEFAULT 0
81#define _PGMT_WC (1UL << PG_arch_1)
82#define _PGMT_UC_MINUS (1UL << PG_uncached)
83#define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1)
84#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1)
85#define _PGMT_CLEAR_MASK (~_PGMT_MASK)
86
87static inline enum page_cache_mode get_page_memtype(struct page *pg)
88{
89 unsigned long pg_flags = pg->flags & _PGMT_MASK;
90
91 if (pg_flags == _PGMT_DEFAULT)
92 return -1;
93 else if (pg_flags == _PGMT_WC)
94 return _PAGE_CACHE_MODE_WC;
95 else if (pg_flags == _PGMT_UC_MINUS)
96 return _PAGE_CACHE_MODE_UC_MINUS;
97 else
98 return _PAGE_CACHE_MODE_WB;
99}
100
101static inline void set_page_memtype(struct page *pg,
102 enum page_cache_mode memtype)
103{
104 unsigned long memtype_flags;
105 unsigned long old_flags;
106 unsigned long new_flags;
107
108 switch (memtype) {
109 case _PAGE_CACHE_MODE_WC:
110 memtype_flags = _PGMT_WC;
111 break;
112 case _PAGE_CACHE_MODE_UC_MINUS:
113 memtype_flags = _PGMT_UC_MINUS;
114 break;
115 case _PAGE_CACHE_MODE_WB:
116 memtype_flags = _PGMT_WB;
117 break;
118 default:
119 memtype_flags = _PGMT_DEFAULT;
120 break;
121 }
122
123 do {
124 old_flags = pg->flags;
125 new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
126 } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
127}
128#else
129static inline enum page_cache_mode get_page_memtype(struct page *pg)
130{
131 return -1;
132}
133static inline void set_page_memtype(struct page *pg,
134 enum page_cache_mode memtype)
135{
136}
137#endif
138
69enum { 139enum {
70 PAT_UC = 0, /* uncached */ 140 PAT_UC = 0, /* uncached */
71 PAT_WC = 1, /* Write combining */ 141 PAT_WC = 1, /* Write combining */
@@ -75,6 +145,52 @@ enum {
75 PAT_UC_MINUS = 7, /* UC, but can be overriden by MTRR */ 145 PAT_UC_MINUS = 7, /* UC, but can be overriden by MTRR */
76}; 146};
77 147
148#define CM(c) (_PAGE_CACHE_MODE_ ## c)
149
150static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
151{
152 enum page_cache_mode cache;
153 char *cache_mode;
154
155 switch (pat_val) {
156 case PAT_UC: cache = CM(UC); cache_mode = "UC "; break;
157 case PAT_WC: cache = CM(WC); cache_mode = "WC "; break;
158 case PAT_WT: cache = CM(WT); cache_mode = "WT "; break;
159 case PAT_WP: cache = CM(WP); cache_mode = "WP "; break;
160 case PAT_WB: cache = CM(WB); cache_mode = "WB "; break;
161 case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
162 default: cache = CM(WB); cache_mode = "WB "; break;
163 }
164
165 memcpy(msg, cache_mode, 4);
166
167 return cache;
168}
169
170#undef CM
171
172/*
173 * Update the cache mode to pgprot translation tables according to PAT
174 * configuration.
175 * Using lower indices is preferred, so we start with highest index.
176 */
177void pat_init_cache_modes(void)
178{
179 int i;
180 enum page_cache_mode cache;
181 char pat_msg[33];
182 u64 pat;
183
184 rdmsrl(MSR_IA32_CR_PAT, pat);
185 pat_msg[32] = 0;
186 for (i = 7; i >= 0; i--) {
187 cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
188 pat_msg + 4 * i);
189 update_cache_mode_entry(i, cache);
190 }
191 pr_info("PAT configuration [0-7]: %s\n", pat_msg);
192}
193
78#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) 194#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
79 195
80void pat_init(void) 196void pat_init(void)
@@ -124,8 +240,7 @@ void pat_init(void)
124 wrmsrl(MSR_IA32_CR_PAT, pat); 240 wrmsrl(MSR_IA32_CR_PAT, pat);
125 241
126 if (boot_cpu) 242 if (boot_cpu)
127 printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", 243 pat_init_cache_modes();
128 smp_processor_id(), boot_pat_state, pat);
129} 244}
130 245
131#undef PAT 246#undef PAT
@@ -139,20 +254,21 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
139 * The intersection is based on "Effective Memory Type" tables in IA-32 254 * The intersection is based on "Effective Memory Type" tables in IA-32
140 * SDM vol 3a 255 * SDM vol 3a
141 */ 256 */
142static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type) 257static unsigned long pat_x_mtrr_type(u64 start, u64 end,
258 enum page_cache_mode req_type)
143{ 259{
144 /* 260 /*
145 * Look for MTRR hint to get the effective type in case where PAT 261 * Look for MTRR hint to get the effective type in case where PAT
146 * request is for WB. 262 * request is for WB.
147 */ 263 */
148 if (req_type == _PAGE_CACHE_WB) { 264 if (req_type == _PAGE_CACHE_MODE_WB) {
149 u8 mtrr_type; 265 u8 mtrr_type;
150 266
151 mtrr_type = mtrr_type_lookup(start, end); 267 mtrr_type = mtrr_type_lookup(start, end);
152 if (mtrr_type != MTRR_TYPE_WRBACK) 268 if (mtrr_type != MTRR_TYPE_WRBACK)
153 return _PAGE_CACHE_UC_MINUS; 269 return _PAGE_CACHE_MODE_UC_MINUS;
154 270
155 return _PAGE_CACHE_WB; 271 return _PAGE_CACHE_MODE_WB;
156 } 272 }
157 273
158 return req_type; 274 return req_type;
@@ -207,25 +323,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
207 * - Find the memtype of all the pages in the range, look for any conflicts 323 * - Find the memtype of all the pages in the range, look for any conflicts
208 * - In case of no conflicts, set the new memtype for pages in the range 324 * - In case of no conflicts, set the new memtype for pages in the range
209 */ 325 */
210static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, 326static int reserve_ram_pages_type(u64 start, u64 end,
211 unsigned long *new_type) 327 enum page_cache_mode req_type,
328 enum page_cache_mode *new_type)
212{ 329{
213 struct page *page; 330 struct page *page;
214 u64 pfn; 331 u64 pfn;
215 332
216 if (req_type == _PAGE_CACHE_UC) { 333 if (req_type == _PAGE_CACHE_MODE_UC) {
217 /* We do not support strong UC */ 334 /* We do not support strong UC */
218 WARN_ON_ONCE(1); 335 WARN_ON_ONCE(1);
219 req_type = _PAGE_CACHE_UC_MINUS; 336 req_type = _PAGE_CACHE_MODE_UC_MINUS;
220 } 337 }
221 338
222 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { 339 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
223 unsigned long type; 340 enum page_cache_mode type;
224 341
225 page = pfn_to_page(pfn); 342 page = pfn_to_page(pfn);
226 type = get_page_memtype(page); 343 type = get_page_memtype(page);
227 if (type != -1) { 344 if (type != -1) {
228 printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n", 345 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
229 start, end - 1, type, req_type); 346 start, end - 1, type, req_type);
230 if (new_type) 347 if (new_type)
231 *new_type = type; 348 *new_type = type;
@@ -258,21 +375,21 @@ static int free_ram_pages_type(u64 start, u64 end)
258 375
259/* 376/*
260 * req_type typically has one of the: 377 * req_type typically has one of the:
261 * - _PAGE_CACHE_WB 378 * - _PAGE_CACHE_MODE_WB
262 * - _PAGE_CACHE_WC 379 * - _PAGE_CACHE_MODE_WC
263 * - _PAGE_CACHE_UC_MINUS 380 * - _PAGE_CACHE_MODE_UC_MINUS
264 * - _PAGE_CACHE_UC 381 * - _PAGE_CACHE_MODE_UC
265 * 382 *
266 * If new_type is NULL, function will return an error if it cannot reserve the 383 * If new_type is NULL, function will return an error if it cannot reserve the
267 * region with req_type. If new_type is non-NULL, function will return 384 * region with req_type. If new_type is non-NULL, function will return
268 * available type in new_type in case of no error. In case of any error 385 * available type in new_type in case of no error. In case of any error
269 * it will return a negative return value. 386 * it will return a negative return value.
270 */ 387 */
271int reserve_memtype(u64 start, u64 end, unsigned long req_type, 388int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
272 unsigned long *new_type) 389 enum page_cache_mode *new_type)
273{ 390{
274 struct memtype *new; 391 struct memtype *new;
275 unsigned long actual_type; 392 enum page_cache_mode actual_type;
276 int is_range_ram; 393 int is_range_ram;
277 int err = 0; 394 int err = 0;
278 395
@@ -281,10 +398,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
281 if (!pat_enabled) { 398 if (!pat_enabled) {
282 /* This is identical to page table setting without PAT */ 399 /* This is identical to page table setting without PAT */
283 if (new_type) { 400 if (new_type) {
284 if (req_type == _PAGE_CACHE_WC) 401 if (req_type == _PAGE_CACHE_MODE_WC)
285 *new_type = _PAGE_CACHE_UC_MINUS; 402 *new_type = _PAGE_CACHE_MODE_UC_MINUS;
286 else 403 else
287 *new_type = req_type & _PAGE_CACHE_MASK; 404 *new_type = req_type;
288 } 405 }
289 return 0; 406 return 0;
290 } 407 }
@@ -292,7 +409,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
292 /* Low ISA region is always mapped WB in page table. No need to track */ 409 /* Low ISA region is always mapped WB in page table. No need to track */
293 if (x86_platform.is_untracked_pat_range(start, end)) { 410 if (x86_platform.is_untracked_pat_range(start, end)) {
294 if (new_type) 411 if (new_type)
295 *new_type = _PAGE_CACHE_WB; 412 *new_type = _PAGE_CACHE_MODE_WB;
296 return 0; 413 return 0;
297 } 414 }
298 415
@@ -302,7 +419,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
302 * tools and ACPI tools). Use WB request for WB memory and use 419 * tools and ACPI tools). Use WB request for WB memory and use
303 * UC_MINUS otherwise. 420 * UC_MINUS otherwise.
304 */ 421 */
305 actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK); 422 actual_type = pat_x_mtrr_type(start, end, req_type);
306 423
307 if (new_type) 424 if (new_type)
308 *new_type = actual_type; 425 *new_type = actual_type;
@@ -394,12 +511,12 @@ int free_memtype(u64 start, u64 end)
394 * 511 *
395 * Only to be called when PAT is enabled 512 * Only to be called when PAT is enabled
396 * 513 *
397 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or 514 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
398 * _PAGE_CACHE_UC 515 * or _PAGE_CACHE_MODE_UC
399 */ 516 */
400static unsigned long lookup_memtype(u64 paddr) 517static enum page_cache_mode lookup_memtype(u64 paddr)
401{ 518{
402 int rettype = _PAGE_CACHE_WB; 519 enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
403 struct memtype *entry; 520 struct memtype *entry;
404 521
405 if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) 522 if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
@@ -414,7 +531,7 @@ static unsigned long lookup_memtype(u64 paddr)
414 * default state and not reserved, and hence of type WB 531 * default state and not reserved, and hence of type WB
415 */ 532 */
416 if (rettype == -1) 533 if (rettype == -1)
417 rettype = _PAGE_CACHE_WB; 534 rettype = _PAGE_CACHE_MODE_WB;
418 535
419 return rettype; 536 return rettype;
420 } 537 }
@@ -425,7 +542,7 @@ static unsigned long lookup_memtype(u64 paddr)
425 if (entry != NULL) 542 if (entry != NULL)
426 rettype = entry->type; 543 rettype = entry->type;
427 else 544 else
428 rettype = _PAGE_CACHE_UC_MINUS; 545 rettype = _PAGE_CACHE_MODE_UC_MINUS;
429 546
430 spin_unlock(&memtype_lock); 547 spin_unlock(&memtype_lock);
431 return rettype; 548 return rettype;
@@ -442,11 +559,11 @@ static unsigned long lookup_memtype(u64 paddr)
442 * On failure, returns non-zero 559 * On failure, returns non-zero
443 */ 560 */
444int io_reserve_memtype(resource_size_t start, resource_size_t end, 561int io_reserve_memtype(resource_size_t start, resource_size_t end,
445 unsigned long *type) 562 enum page_cache_mode *type)
446{ 563{
447 resource_size_t size = end - start; 564 resource_size_t size = end - start;
448 unsigned long req_type = *type; 565 enum page_cache_mode req_type = *type;
449 unsigned long new_type; 566 enum page_cache_mode new_type;
450 int ret; 567 int ret;
451 568
452 WARN_ON_ONCE(iomem_map_sanity_check(start, size)); 569 WARN_ON_ONCE(iomem_map_sanity_check(start, size));
@@ -520,13 +637,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
520int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, 637int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
521 unsigned long size, pgprot_t *vma_prot) 638 unsigned long size, pgprot_t *vma_prot)
522{ 639{
523 unsigned long flags = _PAGE_CACHE_WB; 640 enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
524 641
525 if (!range_is_allowed(pfn, size)) 642 if (!range_is_allowed(pfn, size))
526 return 0; 643 return 0;
527 644
528 if (file->f_flags & O_DSYNC) 645 if (file->f_flags & O_DSYNC)
529 flags = _PAGE_CACHE_UC_MINUS; 646 pcm = _PAGE_CACHE_MODE_UC_MINUS;
530 647
531#ifdef CONFIG_X86_32 648#ifdef CONFIG_X86_32
532 /* 649 /*
@@ -543,12 +660,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
543 boot_cpu_has(X86_FEATURE_CYRIX_ARR) || 660 boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
544 boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && 661 boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
545 (pfn << PAGE_SHIFT) >= __pa(high_memory)) { 662 (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
546 flags = _PAGE_CACHE_UC; 663 pcm = _PAGE_CACHE_MODE_UC;
547 } 664 }
548#endif 665#endif
549 666
550 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | 667 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
551 flags); 668 cachemode2protval(pcm));
552 return 1; 669 return 1;
553} 670}
554 671
@@ -556,7 +673,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
556 * Change the memory type for the physial address range in kernel identity 673 * Change the memory type for the physial address range in kernel identity
557 * mapping space if that range is a part of identity map. 674 * mapping space if that range is a part of identity map.
558 */ 675 */
559int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) 676int kernel_map_sync_memtype(u64 base, unsigned long size,
677 enum page_cache_mode pcm)
560{ 678{
561 unsigned long id_sz; 679 unsigned long id_sz;
562 680
@@ -574,11 +692,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
574 __pa(high_memory) - base : 692 __pa(high_memory) - base :
575 size; 693 size;
576 694
577 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { 695 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
578 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " 696 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
579 "for [mem %#010Lx-%#010Lx]\n", 697 "for [mem %#010Lx-%#010Lx]\n",
580 current->comm, current->pid, 698 current->comm, current->pid,
581 cattr_name(flags), 699 cattr_name(pcm),
582 base, (unsigned long long)(base + size-1)); 700 base, (unsigned long long)(base + size-1));
583 return -EINVAL; 701 return -EINVAL;
584 } 702 }
@@ -595,8 +713,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
595{ 713{
596 int is_ram = 0; 714 int is_ram = 0;
597 int ret; 715 int ret;
598 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); 716 enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
599 unsigned long flags = want_flags; 717 enum page_cache_mode pcm = want_pcm;
600 718
601 is_ram = pat_pagerange_is_ram(paddr, paddr + size); 719 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
602 720
@@ -609,36 +727,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
609 if (!pat_enabled) 727 if (!pat_enabled)
610 return 0; 728 return 0;
611 729
612 flags = lookup_memtype(paddr); 730 pcm = lookup_memtype(paddr);
613 if (want_flags != flags) { 731 if (want_pcm != pcm) {
614 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", 732 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
615 current->comm, current->pid, 733 current->comm, current->pid,
616 cattr_name(want_flags), 734 cattr_name(want_pcm),
617 (unsigned long long)paddr, 735 (unsigned long long)paddr,
618 (unsigned long long)(paddr + size - 1), 736 (unsigned long long)(paddr + size - 1),
619 cattr_name(flags)); 737 cattr_name(pcm));
620 *vma_prot = __pgprot((pgprot_val(*vma_prot) & 738 *vma_prot = __pgprot((pgprot_val(*vma_prot) &
621 (~_PAGE_CACHE_MASK)) | 739 (~_PAGE_CACHE_MASK)) |
622 flags); 740 cachemode2protval(pcm));
623 } 741 }
624 return 0; 742 return 0;
625 } 743 }
626 744
627 ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); 745 ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
628 if (ret) 746 if (ret)
629 return ret; 747 return ret;
630 748
631 if (flags != want_flags) { 749 if (pcm != want_pcm) {
632 if (strict_prot || 750 if (strict_prot ||
633 !is_new_memtype_allowed(paddr, size, want_flags, flags)) { 751 !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
634 free_memtype(paddr, paddr + size); 752 free_memtype(paddr, paddr + size);
635 printk(KERN_ERR "%s:%d map pfn expected mapping type %s" 753 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
636 " for [mem %#010Lx-%#010Lx], got %s\n", 754 " for [mem %#010Lx-%#010Lx], got %s\n",
637 current->comm, current->pid, 755 current->comm, current->pid,
638 cattr_name(want_flags), 756 cattr_name(want_pcm),
639 (unsigned long long)paddr, 757 (unsigned long long)paddr,
640 (unsigned long long)(paddr + size - 1), 758 (unsigned long long)(paddr + size - 1),
641 cattr_name(flags)); 759 cattr_name(pcm));
642 return -EINVAL; 760 return -EINVAL;
643 } 761 }
644 /* 762 /*
@@ -647,10 +765,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
647 */ 765 */
648 *vma_prot = __pgprot((pgprot_val(*vma_prot) & 766 *vma_prot = __pgprot((pgprot_val(*vma_prot) &
649 (~_PAGE_CACHE_MASK)) | 767 (~_PAGE_CACHE_MASK)) |
650 flags); 768 cachemode2protval(pcm));
651 } 769 }
652 770
653 if (kernel_map_sync_memtype(paddr, size, flags) < 0) { 771 if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
654 free_memtype(paddr, paddr + size); 772 free_memtype(paddr, paddr + size);
655 return -EINVAL; 773 return -EINVAL;
656 } 774 }
@@ -709,7 +827,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
709 unsigned long pfn, unsigned long addr, unsigned long size) 827 unsigned long pfn, unsigned long addr, unsigned long size)
710{ 828{
711 resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; 829 resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
712 unsigned long flags; 830 enum page_cache_mode pcm;
713 831
714 /* reserve the whole chunk starting from paddr */ 832 /* reserve the whole chunk starting from paddr */
715 if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) { 833 if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
@@ -728,18 +846,18 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
728 * For anything smaller than the vma size we set prot based on the 846 * For anything smaller than the vma size we set prot based on the
729 * lookup. 847 * lookup.
730 */ 848 */
731 flags = lookup_memtype(paddr); 849 pcm = lookup_memtype(paddr);
732 850
733 /* Check memtype for the remaining pages */ 851 /* Check memtype for the remaining pages */
734 while (size > PAGE_SIZE) { 852 while (size > PAGE_SIZE) {
735 size -= PAGE_SIZE; 853 size -= PAGE_SIZE;
736 paddr += PAGE_SIZE; 854 paddr += PAGE_SIZE;
737 if (flags != lookup_memtype(paddr)) 855 if (pcm != lookup_memtype(paddr))
738 return -EINVAL; 856 return -EINVAL;
739 } 857 }
740 858
741 *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | 859 *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
742 flags); 860 cachemode2protval(pcm));
743 861
744 return 0; 862 return 0;
745} 863}
@@ -747,15 +865,15 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
747int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, 865int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
748 unsigned long pfn) 866 unsigned long pfn)
749{ 867{
750 unsigned long flags; 868 enum page_cache_mode pcm;
751 869
752 if (!pat_enabled) 870 if (!pat_enabled)
753 return 0; 871 return 0;
754 872
755 /* Set prot based on lookup */ 873 /* Set prot based on lookup */
756 flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT); 874 pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
757 *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | 875 *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
758 flags); 876 cachemode2protval(pcm));
759 877
760 return 0; 878 return 0;
761} 879}
@@ -791,7 +909,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
791pgprot_t pgprot_writecombine(pgprot_t prot) 909pgprot_t pgprot_writecombine(pgprot_t prot)
792{ 910{
793 if (pat_enabled) 911 if (pat_enabled)
794 return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC); 912 return __pgprot(pgprot_val(prot) |
913 cachemode2protval(_PAGE_CACHE_MODE_WC));
795 else 914 else
796 return pgprot_noncached(prot); 915 return pgprot_noncached(prot);
797} 916}
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
index 77e5ba153fac..f6411620305d 100644
--- a/arch/x86/mm/pat_internal.h
+++ b/arch/x86/mm/pat_internal.h
@@ -10,30 +10,32 @@ struct memtype {
10 u64 start; 10 u64 start;
11 u64 end; 11 u64 end;
12 u64 subtree_max_end; 12 u64 subtree_max_end;
13 unsigned long type; 13 enum page_cache_mode type;
14 struct rb_node rb; 14 struct rb_node rb;
15}; 15};
16 16
17static inline char *cattr_name(unsigned long flags) 17static inline char *cattr_name(enum page_cache_mode pcm)
18{ 18{
19 switch (flags & _PAGE_CACHE_MASK) { 19 switch (pcm) {
20 case _PAGE_CACHE_UC: return "uncached"; 20 case _PAGE_CACHE_MODE_UC: return "uncached";
21 case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; 21 case _PAGE_CACHE_MODE_UC_MINUS: return "uncached-minus";
22 case _PAGE_CACHE_WB: return "write-back"; 22 case _PAGE_CACHE_MODE_WB: return "write-back";
23 case _PAGE_CACHE_WC: return "write-combining"; 23 case _PAGE_CACHE_MODE_WC: return "write-combining";
24 default: return "broken"; 24 case _PAGE_CACHE_MODE_WT: return "write-through";
25 case _PAGE_CACHE_MODE_WP: return "write-protected";
26 default: return "broken";
25 } 27 }
26} 28}
27 29
28#ifdef CONFIG_X86_PAT 30#ifdef CONFIG_X86_PAT
29extern int rbt_memtype_check_insert(struct memtype *new, 31extern int rbt_memtype_check_insert(struct memtype *new,
30 unsigned long *new_type); 32 enum page_cache_mode *new_type);
31extern struct memtype *rbt_memtype_erase(u64 start, u64 end); 33extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
32extern struct memtype *rbt_memtype_lookup(u64 addr); 34extern struct memtype *rbt_memtype_lookup(u64 addr);
33extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos); 35extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
34#else 36#else
35static inline int rbt_memtype_check_insert(struct memtype *new, 37static inline int rbt_memtype_check_insert(struct memtype *new,
36 unsigned long *new_type) 38 enum page_cache_mode *new_type)
37{ return 0; } 39{ return 0; }
38static inline struct memtype *rbt_memtype_erase(u64 start, u64 end) 40static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
39{ return NULL; } 41{ return NULL; }
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 415f6c4ced36..6582adcc8bd9 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -122,11 +122,12 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root,
122 122
123static int memtype_rb_check_conflict(struct rb_root *root, 123static int memtype_rb_check_conflict(struct rb_root *root,
124 u64 start, u64 end, 124 u64 start, u64 end,
125 unsigned long reqtype, unsigned long *newtype) 125 enum page_cache_mode reqtype,
126 enum page_cache_mode *newtype)
126{ 127{
127 struct rb_node *node; 128 struct rb_node *node;
128 struct memtype *match; 129 struct memtype *match;
129 int found_type = reqtype; 130 enum page_cache_mode found_type = reqtype;
130 131
131 match = memtype_rb_lowest_match(&memtype_rbroot, start, end); 132 match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
132 if (match == NULL) 133 if (match == NULL)
@@ -187,7 +188,8 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
187 rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb); 188 rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
188} 189}
189 190
190int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) 191int rbt_memtype_check_insert(struct memtype *new,
192 enum page_cache_mode *ret_type)
191{ 193{
192 int err = 0; 194 int err = 0;
193 195