author     Juergen Gross <jgross@suse.com>          2014-11-03 08:01:59 -0500
committer  Thomas Gleixner <tglx@linutronix.de>     2014-11-16 05:04:26 -0500
commit     e00c8cc93c1ac01ecd5049929a50fb47b62bb041 (patch)
tree       a3833abca982fc685332340bb604dc925eb91c24
parent     b14097bd911c2554b0b5271b3a6b2d84044d1843 (diff)
x86: Use new cache mode type in memtype related functions
Instead of directly using the cache mode bits in the pte, switch to using the cache mode type.

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-14-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
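For context, the "cache mode type" this patch converts the memtype APIs to is the enum page_cache_mode introduced by the earlier patches in this series, together with the cachemode2protval()/pgprot2cachemode() helpers that translate between the enum and the PWT/PCD/PAT pte bits. A minimal sketch of its shape follows; the exact enumerator values are recalled from that series (arch/x86/include/asm/pgtable_types.h) and should be treated as an approximation, not part of this patch:

/* Sketch of the cache mode type the memtype functions now take and return.
 * Values approximate the definition added earlier in this series; they are
 * not copied from this patch.
 */
enum page_cache_mode {
        _PAGE_CACHE_MODE_WB       = 0,  /* write-back */
        _PAGE_CACHE_MODE_WC       = 1,  /* write-combining */
        _PAGE_CACHE_MODE_UC_MINUS = 2,  /* uncached, may be combined to WC */
        _PAGE_CACHE_MODE_UC       = 3,  /* strong uncached */
        _PAGE_CACHE_MODE_WT       = 4,  /* write-through */
        _PAGE_CACHE_MODE_WP       = 5,  /* write-protected */
        _PAGE_CACHE_MODE_NUM      = 8
};

/* Callers that still need pte protection bits go through the translation
 * helpers instead of passing _PAGE_CACHE_* bit masks around, e.g.:
 *
 *      prot = __pgprot(pgprot_val(prot) |
 *                      cachemode2protval(_PAGE_CACHE_MODE_WC));
 *      enum page_cache_mode pcm = pgprot2cachemode(prot);
 */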
-rw-r--r--  arch/x86/include/asm/cacheflush.h  |  38
-rw-r--r--  arch/x86/include/asm/pat.h         |   2
-rw-r--r--  arch/x86/mm/ioremap.c              |   5
-rw-r--r--  arch/x86/mm/pageattr.c             |   9
-rw-r--r--  arch/x86/mm/pat.c                  | 102
-rw-r--r--  arch/x86/mm/pat_internal.h         |  22
-rw-r--r--  arch/x86/mm/pat_rbtree.c           |   8
7 files changed, 96 insertions, 90 deletions
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 9863ee3747da..157644bdf70e 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -9,10 +9,10 @@
 /*
  * X86 PAT uses page flags WC and Uncached together to keep track of
  * memory type of pages that have backing page struct. X86 PAT supports 3
- * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
- * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
+ * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
+ * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not
  * been changed from its default (value of -1 used to denote this).
- * Note we do not support _PAGE_CACHE_UC here.
+ * Note we do not support _PAGE_CACHE_MODE_UC here.
  */
 
 #define _PGMT_DEFAULT 0
@@ -22,36 +22,40 @@
 #define _PGMT_MASK       (1UL << PG_uncached | 1UL << PG_arch_1)
 #define _PGMT_CLEAR_MASK (~_PGMT_MASK)
 
-static inline unsigned long get_page_memtype(struct page *pg)
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
 {
         unsigned long pg_flags = pg->flags & _PGMT_MASK;
 
         if (pg_flags == _PGMT_DEFAULT)
                 return -1;
         else if (pg_flags == _PGMT_WC)
-                return _PAGE_CACHE_WC;
+                return _PAGE_CACHE_MODE_WC;
         else if (pg_flags == _PGMT_UC_MINUS)
-                return _PAGE_CACHE_UC_MINUS;
+                return _PAGE_CACHE_MODE_UC_MINUS;
         else
-                return _PAGE_CACHE_WB;
+                return _PAGE_CACHE_MODE_WB;
 }
 
-static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+static inline void set_page_memtype(struct page *pg,
+                                    enum page_cache_mode memtype)
 {
-        unsigned long memtype_flags = _PGMT_DEFAULT;
+        unsigned long memtype_flags;
         unsigned long old_flags;
         unsigned long new_flags;
 
         switch (memtype) {
-        case _PAGE_CACHE_WC:
+        case _PAGE_CACHE_MODE_WC:
                 memtype_flags = _PGMT_WC;
                 break;
-        case _PAGE_CACHE_UC_MINUS:
+        case _PAGE_CACHE_MODE_UC_MINUS:
                 memtype_flags = _PGMT_UC_MINUS;
                 break;
-        case _PAGE_CACHE_WB:
+        case _PAGE_CACHE_MODE_WB:
                 memtype_flags = _PGMT_WB;
                 break;
+        default:
+                memtype_flags = _PGMT_DEFAULT;
+                break;
         }
 
         do {
@@ -60,8 +64,14 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
         } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
 }
 #else
-static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
-static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
+{
+        return -1;
+}
+static inline void set_page_memtype(struct page *pg,
+                                    enum page_cache_mode memtype)
+{
+}
 #endif
 
 /*
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index d35ee2d976ca..150407a7234d 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -13,7 +13,7 @@ static const int pat_enabled;
 extern void pat_init(void);
 
 extern int reserve_memtype(u64 start, u64 end,
-                unsigned long req_type, unsigned long *ret_type);
+                enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
 extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index f31507f6f60b..8832e510941e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -83,7 +83,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
         const unsigned long unaligned_size = size;
         struct vm_struct *area;
         enum page_cache_mode new_pcm;
-        unsigned long new_prot_val;
         pgprot_t prot;
         int retval;
         void __iomem *ret_addr;
@@ -135,14 +134,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
         size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
         retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
-                                                cachemode2protval(pcm), &new_prot_val);
+                                                pcm, &new_pcm);
         if (retval) {
                 printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                 return NULL;
         }
 
-        new_pcm = pgprot2cachemode(__pgprot(new_prot_val));
-
         if (pcm != new_pcm) {
                 if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
                         printk(KERN_ERR
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 9f7e1b445e66..de807c9daad1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1451,7 +1451,7 @@ int set_memory_uc(unsigned long addr, int numpages)
          * for now UC MINUS. see comments in ioremap_nocache()
          */
         ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-                            _PAGE_CACHE_UC_MINUS, NULL);
+                            _PAGE_CACHE_MODE_UC_MINUS, NULL);
         if (ret)
                 goto out_err;
 
@@ -1479,7 +1479,7 @@ static int _set_memory_array(unsigned long *addr, int addrinarray,
          */
         for (i = 0; i < addrinarray; i++) {
                 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
-                                        cachemode2protval(new_type), NULL);
+                                        new_type, NULL);
                 if (ret)
                         goto out_free;
         }
@@ -1544,7 +1544,7 @@ int set_memory_wc(unsigned long addr, int numpages)
                 return set_memory_uc(addr, numpages);
 
         ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-                _PAGE_CACHE_WC, NULL);
+                _PAGE_CACHE_MODE_WC, NULL);
         if (ret)
                 goto out_err;
 
@@ -1662,8 +1662,7 @@ static int _set_pages_array(struct page **pages, int addrinarray,
                         continue;
                 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                 end = start + PAGE_SIZE;
-                if (reserve_memtype(start, end, cachemode2protval(new_type),
-                                    NULL))
+                if (reserve_memtype(start, end, new_type, NULL))
                         goto err_out;
         }
 
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 8f68a83491ba..ef75f3f89810 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -139,20 +139,21 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+                                     enum page_cache_mode req_type)
 {
         /*
          * Look for MTRR hint to get the effective type in case where PAT
          * request is for WB.
          */
-        if (req_type == _PAGE_CACHE_WB) {
+        if (req_type == _PAGE_CACHE_MODE_WB) {
                 u8 mtrr_type;
 
                 mtrr_type = mtrr_type_lookup(start, end);
                 if (mtrr_type != MTRR_TYPE_WRBACK)
-                        return _PAGE_CACHE_UC_MINUS;
+                        return _PAGE_CACHE_MODE_UC_MINUS;
 
-                return _PAGE_CACHE_WB;
+                return _PAGE_CACHE_MODE_WB;
         }
 
         return req_type;
@@ -207,25 +208,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
  */
-static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-                                  unsigned long *new_type)
+static int reserve_ram_pages_type(u64 start, u64 end,
+                                  enum page_cache_mode req_type,
+                                  enum page_cache_mode *new_type)
 {
         struct page *page;
         u64 pfn;
 
-        if (req_type == _PAGE_CACHE_UC) {
+        if (req_type == _PAGE_CACHE_MODE_UC) {
                 /* We do not support strong UC */
                 WARN_ON_ONCE(1);
-                req_type = _PAGE_CACHE_UC_MINUS;
+                req_type = _PAGE_CACHE_MODE_UC_MINUS;
         }
 
         for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-                unsigned long type;
+                enum page_cache_mode type;
 
                 page = pfn_to_page(pfn);
                 type = get_page_memtype(page);
                 if (type != -1) {
-                        printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+                        pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                 start, end - 1, type, req_type);
                         if (new_type)
                                 *new_type = type;
@@ -258,21 +260,21 @@ static int free_ram_pages_type(u64 start, u64 end)
 
 /*
  * req_type typically has one of the:
- * - _PAGE_CACHE_WB
- * - _PAGE_CACHE_WC
- * - _PAGE_CACHE_UC_MINUS
- * - _PAGE_CACHE_UC
+ * - _PAGE_CACHE_MODE_WB
+ * - _PAGE_CACHE_MODE_WC
+ * - _PAGE_CACHE_MODE_UC_MINUS
+ * - _PAGE_CACHE_MODE_UC
  *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-                    unsigned long *new_type)
+int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+                    enum page_cache_mode *new_type)
 {
         struct memtype *new;
-        unsigned long actual_type;
+        enum page_cache_mode actual_type;
         int is_range_ram;
         int err = 0;
 
@@ -281,10 +283,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
         if (!pat_enabled) {
                 /* This is identical to page table setting without PAT */
                 if (new_type) {
-                        if (req_type == _PAGE_CACHE_WC)
-                                *new_type = _PAGE_CACHE_UC_MINUS;
+                        if (req_type == _PAGE_CACHE_MODE_WC)
+                                *new_type = _PAGE_CACHE_MODE_UC_MINUS;
                         else
-                                *new_type = req_type & _PAGE_CACHE_MASK;
+                                *new_type = req_type;
                 }
                 return 0;
         }
@@ -292,7 +294,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
         /* Low ISA region is always mapped WB in page table. No need to track */
         if (x86_platform.is_untracked_pat_range(start, end)) {
                 if (new_type)
-                        *new_type = _PAGE_CACHE_WB;
+                        *new_type = _PAGE_CACHE_MODE_WB;
                 return 0;
         }
 
@@ -302,7 +304,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
          * tools and ACPI tools). Use WB request for WB memory and use
          * UC_MINUS otherwise.
          */
-        actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
+        actual_type = pat_x_mtrr_type(start, end, req_type);
 
         if (new_type)
                 *new_type = actual_type;
@@ -408,7 +410,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
         if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                 struct page *page;
                 page = pfn_to_page(paddr >> PAGE_SHIFT);
-                rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
+                rettype = get_page_memtype(page);
                 /*
                  * -1 from get_page_memtype() implies RAM page is in its
                  * default state and not reserved, and hence of type WB
@@ -423,7 +425,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 
         entry = rbt_memtype_lookup(paddr);
         if (entry != NULL)
-                rettype = pgprot2cachemode(__pgprot(entry->type));
+                rettype = entry->type;
         else
                 rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
@@ -447,18 +449,14 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
         resource_size_t size = end - start;
         enum page_cache_mode req_type = *type;
         enum page_cache_mode new_type;
-        unsigned long new_prot;
         int ret;
 
         WARN_ON_ONCE(iomem_map_sanity_check(start, size));
 
-        ret = reserve_memtype(start, end, cachemode2protval(req_type),
-                                &new_prot);
+        ret = reserve_memtype(start, end, req_type, &new_type);
         if (ret)
                 goto out_err;
 
-        new_type = pgprot2cachemode(__pgprot(new_prot));
-
         if (!is_new_memtype_allowed(start, size, req_type, new_type))
                 goto out_free;
 
@@ -524,13 +522,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
 {
-        unsigned long flags = _PAGE_CACHE_WB;
+        enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
 
         if (!range_is_allowed(pfn, size))
                 return 0;
 
         if (file->f_flags & O_DSYNC)
-                flags = _PAGE_CACHE_UC_MINUS;
+                pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
 #ifdef CONFIG_X86_32
         /*
@@ -547,12 +545,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
             boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
             boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
             (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-                flags = _PAGE_CACHE_UC;
+                pcm = _PAGE_CACHE_MODE_UC;
         }
 #endif
 
         *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-                             flags);
+                             cachemode2protval(pcm));
         return 1;
 }
 
@@ -583,7 +581,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
                 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
                         "for [mem %#010Lx-%#010Lx]\n",
                         current->comm, current->pid,
-                        cattr_name(cachemode2protval(pcm)),
+                        cattr_name(pcm),
                         base, (unsigned long long)(base + size-1));
                 return -EINVAL;
         }
@@ -600,8 +598,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
         int is_ram = 0;
         int ret;
-        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
-        unsigned long flags = want_flags;
+        enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
+        enum page_cache_mode pcm = want_pcm;
 
         is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
@@ -614,38 +612,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                 if (!pat_enabled)
                         return 0;
 
-                flags = cachemode2protval(lookup_memtype(paddr));
-                if (want_flags != flags) {
+                pcm = lookup_memtype(paddr);
+                if (want_pcm != pcm) {
                         printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                 current->comm, current->pid,
-                                cattr_name(want_flags),
+                                cattr_name(want_pcm),
                                 (unsigned long long)paddr,
                                 (unsigned long long)(paddr + size - 1),
-                                cattr_name(flags));
+                                cattr_name(pcm));
                         *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                              (~_PAGE_CACHE_MASK)) |
-                                             flags);
+                                             cachemode2protval(pcm));
                 }
                 return 0;
         }
 
-        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+        ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
         if (ret)
                 return ret;
 
-        if (flags != want_flags) {
+        if (pcm != want_pcm) {
                 if (strict_prot ||
-                    !is_new_memtype_allowed(paddr, size,
-                                pgprot2cachemode(__pgprot(want_flags)),
-                                pgprot2cachemode(__pgprot(flags)))) {
+                    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
                         free_memtype(paddr, paddr + size);
                         printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                 " for [mem %#010Lx-%#010Lx], got %s\n",
                                 current->comm, current->pid,
-                                cattr_name(want_flags),
+                                cattr_name(want_pcm),
                                 (unsigned long long)paddr,
                                 (unsigned long long)(paddr + size - 1),
-                                cattr_name(flags));
+                                cattr_name(pcm));
                         return -EINVAL;
                 }
                 /*
@@ -654,11 +650,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
          */
         *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                              (~_PAGE_CACHE_MASK)) |
-                             flags);
+                             cachemode2protval(pcm));
         }
 
-        if (kernel_map_sync_memtype(paddr, size,
-                                pgprot2cachemode(__pgprot(flags))) < 0) {
+        if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
                 free_memtype(paddr, paddr + size);
                 return -EINVAL;
         }
@@ -799,7 +794,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
         if (pat_enabled)
-                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+                return __pgprot(pgprot_val(prot) |
+                                cachemode2protval(_PAGE_CACHE_MODE_WC));
         else
                 return pgprot_noncached(prot);
 }
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
index 77e5ba153fac..f6411620305d 100644
--- a/arch/x86/mm/pat_internal.h
+++ b/arch/x86/mm/pat_internal.h
@@ -10,30 +10,32 @@ struct memtype {
         u64                     start;
         u64                     end;
         u64                     subtree_max_end;
-        unsigned long           type;
+        enum page_cache_mode    type;
         struct rb_node          rb;
 };
 
-static inline char *cattr_name(unsigned long flags)
+static inline char *cattr_name(enum page_cache_mode pcm)
 {
-        switch (flags & _PAGE_CACHE_MASK) {
-        case _PAGE_CACHE_UC:            return "uncached";
-        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
-        case _PAGE_CACHE_WB:            return "write-back";
-        case _PAGE_CACHE_WC:            return "write-combining";
-        default:                        return "broken";
+        switch (pcm) {
+        case _PAGE_CACHE_MODE_UC:       return "uncached";
+        case _PAGE_CACHE_MODE_UC_MINUS: return "uncached-minus";
+        case _PAGE_CACHE_MODE_WB:       return "write-back";
+        case _PAGE_CACHE_MODE_WC:       return "write-combining";
+        case _PAGE_CACHE_MODE_WT:       return "write-through";
+        case _PAGE_CACHE_MODE_WP:       return "write-protected";
+        default:                        return "broken";
         }
 }
 
 #ifdef CONFIG_X86_PAT
 extern int rbt_memtype_check_insert(struct memtype *new,
-                                    unsigned long *new_type);
+                                    enum page_cache_mode *new_type);
 extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
 extern struct memtype *rbt_memtype_lookup(u64 addr);
 extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
 #else
 static inline int rbt_memtype_check_insert(struct memtype *new,
-                                           unsigned long *new_type)
+                                           enum page_cache_mode *new_type)
 { return 0; }
 static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
 { return NULL; }
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 415f6c4ced36..6582adcc8bd9 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -122,11 +122,12 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root,
 
 static int memtype_rb_check_conflict(struct rb_root *root,
                                      u64 start, u64 end,
-                                     unsigned long reqtype, unsigned long *newtype)
+                                     enum page_cache_mode reqtype,
+                                     enum page_cache_mode *newtype)
 {
         struct rb_node *node;
         struct memtype *match;
-        int found_type = reqtype;
+        enum page_cache_mode found_type = reqtype;
 
         match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
         if (match == NULL)
@@ -187,7 +188,8 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
         rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
 }
 
-int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
+int rbt_memtype_check_insert(struct memtype *new,
+                             enum page_cache_mode *ret_type)
 {
         int err = 0;
 