author     Linus Torvalds <torvalds@linux-foundation.org>   2014-12-10 16:59:34 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-12-10 16:59:34 -0500
commit     a023748d53c10850650fe86b1c4a7d421d576451 (patch)
tree       761d3f6d2a402ec0835c0ede44b7d55c1b15ec98 /arch/x86/mm/pat.c
parent     773fed910d41e443e495a6bfa9ab1c2b7b13e012 (diff)
parent     0dbcae884779fdf7e2239a97ac7488877f0693d9 (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm tree changes from Ingo Molnar:
 "The biggest change is full PAT support from Jürgen Gross:

     The x86 architecture offers via the PAT (Page Attribute Table) a
     way to specify different caching modes in page table entries. The
     PAT MSR contains 8 entries, each specifying one of 6 possible cache
     modes. A pte references one of those entries via 3 bits:
     _PAGE_PAT, _PAGE_PWT and _PAGE_PCD.

     The Linux kernel currently supports only 4 different cache modes.
     The PAT MSR is set up in a way that the setting of _PAGE_PAT in a
     pte doesn't matter: the top 4 entries in the PAT MSR are the same
     as the 4 lower entries.

     This results in the kernel not supporting e.g. write-through mode.
     This cache mode in particular would speed up drivers of video
     cards, which now have to use uncached accesses.

     OTOH some old processors (Pentium) don't support PAT correctly,
     and the Xen hypervisor has been using a different PAT MSR
     configuration for some time now and can't change that, as this
     setting is part of the ABI.

     This patch set abstracts the cache mode from the pte and
     introduces tables to translate between cache mode and pte bits
     (the default cache mode "write back" is hard-wired to PAT entry
     0). The tables are statically initialized with values compatible
     with old processors and current usage. As soon as the PAT MSR is
     changed (or - in the case of Xen - is read at boot time), the
     tables are changed accordingly. Requests for mappings with special
     cache modes are always possible now; if a mode is not supported,
     there is a fallback to a compatible but slower mode.

     Summing it up, this patch set adds the following features:

      - capability to support WT and WP cache modes on processors with
        full PAT support

      - processors with no or incorrect PAT support still work as
        today, even if WT or WP cache modes are selected by drivers for
        some pages

      - reduction of Xen special handling regarding cache mode

   Another change is a boot speedup on ridiculously large RAM systems,
   plus other smaller fixes"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86: mm: Move PAT only functions to mm/pat.c
  xen: Support Xen pv-domains using PAT
  x86: Enable PAT to use cache mode translation tables
  x86: Respect PAT bit when copying pte values between large and normal pages
  x86: Support PAT bit in pagetable dump for lower levels
  x86: Clean up pgtable_types.h
  x86: Use new cache mode type in memtype related functions
  x86: Use new cache mode type in mm/ioremap.c
  x86: Use new cache mode type in setting page attributes
  x86: Remove looking for setting of _PAGE_PAT_LARGE in pageattr.c
  x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()
  x86: Use new cache mode type in mm/iomap_32.c
  x86: Use new cache mode type in asm/pgtable.h
  x86: Use new cache mode type in arch/x86/mm/init_64.c
  x86: Use new cache mode type in arch/x86/pci
  x86: Use new cache mode type in drivers/video/fbdev/vermilion
  x86: Use new cache mode type in drivers/video/fbdev/gbefb.c
  x86: Use new cache mode type in include/asm/fb.h
  x86: Make page cache mode a real type
  x86: mm: Use 2GB memory block size on large-memory x86-64 systems
  ...
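To make the translation-table idea above concrete, here is a minimal, standalone C sketch of mapping a cache mode to the three pte cache bits. It is only an illustration: the enum ordering, the _PAGE_PWT/_PAGE_PCD/_PAGE_PAT bit positions and the table contents below are simplified assumptions, not the values introduced by this series (in the kernel the real table is rewritten from the PAT MSR via pat_init_cache_modes() and update_cache_mode_entry(), as seen in the diff below).

/*
 * Illustrative sketch only: a cache-mode -> pte-cache-bits translation
 * table. Bit positions and table contents are assumptions made for the
 * example, not the kernel's actual values.
 */
#include <stdio.h>

enum page_cache_mode {
        _PAGE_CACHE_MODE_WB,            /* write back, hard-wired to PAT entry 0 */
        _PAGE_CACHE_MODE_WC,            /* write combining */
        _PAGE_CACHE_MODE_UC_MINUS,      /* uncached, may be overridden by MTRR */
        _PAGE_CACHE_MODE_UC,            /* strong uncached */
        _PAGE_CACHE_MODE_WT,            /* write through */
        _PAGE_CACHE_MODE_WP,            /* write protected */
        _PAGE_CACHE_MODE_NUM
};

/* Assumed positions of the three pte cache bits. */
#define _PAGE_PWT       (1u << 3)
#define _PAGE_PCD       (1u << 4)
#define _PAGE_PAT       (1u << 7)

/*
 * Statically initialized so that only PWT/PCD are used, i.e. compatible
 * with processors that have no (working) PAT; WT and WP fall back to a
 * compatible but slower mode. Once the PAT MSR contents are known,
 * something like pat_init_cache_modes() would rewrite these entries so
 * that WT/WP select real PAT entries (using _PAGE_PAT as well).
 */
static unsigned int cachemode2pte_bits[_PAGE_CACHE_MODE_NUM] = {
        [_PAGE_CACHE_MODE_WB]           = 0,
        [_PAGE_CACHE_MODE_WC]           = _PAGE_PWT,
        [_PAGE_CACHE_MODE_UC_MINUS]     = _PAGE_PCD,
        [_PAGE_CACHE_MODE_UC]           = _PAGE_PCD | _PAGE_PWT,
        [_PAGE_CACHE_MODE_WT]           = _PAGE_PCD,    /* fallback */
        [_PAGE_CACHE_MODE_WP]           = _PAGE_PCD,    /* fallback */
};

int main(void)
{
        /* A driver asking for WC gets whatever pte bits the table selects. */
        printf("WC -> pte cache bits 0x%x\n",
               cachemode2pte_bits[_PAGE_CACHE_MODE_WC]);
        return 0;
}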
Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--  arch/x86/mm/pat.c  245
1 file changed, 182 insertions(+), 63 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index c7eddbe6a612..edf299c8ff6c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -31,6 +31,7 @@
 #include <asm/io.h>
 
 #include "pat_internal.h"
+#include "mm_internal.h"
 
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
@@ -66,6 +67,75 @@ __setup("debugpat", pat_debug_setup);
 
 static u64 __read_mostly boot_pat_state;
 
+#ifdef CONFIG_X86_PAT
+/*
+ * X86 PAT uses page flags WC and Uncached together to keep track of
+ * memory type of pages that have backing page struct. X86 PAT supports 3
+ * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
+ * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not
+ * been changed from its default (value of -1 used to denote this).
+ * Note we do not support _PAGE_CACHE_MODE_UC here.
+ */
+
+#define _PGMT_DEFAULT           0
+#define _PGMT_WC                (1UL << PG_arch_1)
+#define _PGMT_UC_MINUS          (1UL << PG_uncached)
+#define _PGMT_WB                (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_MASK              (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_CLEAR_MASK        (~_PGMT_MASK)
+
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
+{
+        unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+        if (pg_flags == _PGMT_DEFAULT)
+                return -1;
+        else if (pg_flags == _PGMT_WC)
+                return _PAGE_CACHE_MODE_WC;
+        else if (pg_flags == _PGMT_UC_MINUS)
+                return _PAGE_CACHE_MODE_UC_MINUS;
+        else
+                return _PAGE_CACHE_MODE_WB;
+}
+
+static inline void set_page_memtype(struct page *pg,
+                                    enum page_cache_mode memtype)
+{
+        unsigned long memtype_flags;
+        unsigned long old_flags;
+        unsigned long new_flags;
+
+        switch (memtype) {
+        case _PAGE_CACHE_MODE_WC:
+                memtype_flags = _PGMT_WC;
+                break;
+        case _PAGE_CACHE_MODE_UC_MINUS:
+                memtype_flags = _PGMT_UC_MINUS;
+                break;
+        case _PAGE_CACHE_MODE_WB:
+                memtype_flags = _PGMT_WB;
+                break;
+        default:
+                memtype_flags = _PGMT_DEFAULT;
+                break;
+        }
+
+        do {
+                old_flags = pg->flags;
+                new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
+        } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
+}
+#else
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
+{
+        return -1;
+}
+static inline void set_page_memtype(struct page *pg,
+                                    enum page_cache_mode memtype)
+{
+}
+#endif
+
 enum {
         PAT_UC = 0,             /* uncached */
         PAT_WC = 1,             /* Write combining */
@@ -75,6 +145,52 @@ enum {
         PAT_UC_MINUS = 7,      /* UC, but can be overriden by MTRR */
 };
 
+#define CM(c) (_PAGE_CACHE_MODE_ ## c)
+
+static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
+{
+        enum page_cache_mode cache;
+        char *cache_mode;
+
+        switch (pat_val) {
+        case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
+        case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
+        case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
+        case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
+        case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
+        case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
+        default:           cache = CM(WB);       cache_mode = "WB  "; break;
+        }
+
+        memcpy(msg, cache_mode, 4);
+
+        return cache;
+}
+
+#undef CM
+
+/*
+ * Update the cache mode to pgprot translation tables according to PAT
+ * configuration.
+ * Using lower indices is preferred, so we start with highest index.
+ */
+void pat_init_cache_modes(void)
+{
+        int i;
+        enum page_cache_mode cache;
+        char pat_msg[33];
+        u64 pat;
+
+        rdmsrl(MSR_IA32_CR_PAT, pat);
+        pat_msg[32] = 0;
+        for (i = 7; i >= 0; i--) {
+                cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
+                                           pat_msg + 4 * i);
+                update_cache_mode_entry(i, cache);
+        }
+        pr_info("PAT configuration [0-7]: %s\n", pat_msg);
+}
+
 #define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))
 
 void pat_init(void)
@@ -124,8 +240,7 @@ void pat_init(void)
         wrmsrl(MSR_IA32_CR_PAT, pat);
 
         if (boot_cpu)
-                printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
-                        smp_processor_id(), boot_pat_state, pat);
+                pat_init_cache_modes();
 }
 
 #undef PAT
@@ -139,20 +254,21 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+                                     enum page_cache_mode req_type)
 {
         /*
          * Look for MTRR hint to get the effective type in case where PAT
          * request is for WB.
          */
-        if (req_type == _PAGE_CACHE_WB) {
+        if (req_type == _PAGE_CACHE_MODE_WB) {
                 u8 mtrr_type;
 
                 mtrr_type = mtrr_type_lookup(start, end);
                 if (mtrr_type != MTRR_TYPE_WRBACK)
-                        return _PAGE_CACHE_UC_MINUS;
+                        return _PAGE_CACHE_MODE_UC_MINUS;
 
-                return _PAGE_CACHE_WB;
+                return _PAGE_CACHE_MODE_WB;
         }
 
         return req_type;
@@ -207,25 +323,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
  */
-static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-                                  unsigned long *new_type)
+static int reserve_ram_pages_type(u64 start, u64 end,
+                                  enum page_cache_mode req_type,
+                                  enum page_cache_mode *new_type)
 {
         struct page *page;
         u64 pfn;
 
-        if (req_type == _PAGE_CACHE_UC) {
+        if (req_type == _PAGE_CACHE_MODE_UC) {
                 /* We do not support strong UC */
                 WARN_ON_ONCE(1);
-                req_type = _PAGE_CACHE_UC_MINUS;
+                req_type = _PAGE_CACHE_MODE_UC_MINUS;
         }
 
         for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-                unsigned long type;
+                enum page_cache_mode type;
 
                 page = pfn_to_page(pfn);
                 type = get_page_memtype(page);
                 if (type != -1) {
-                        printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+                        pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                 start, end - 1, type, req_type);
                         if (new_type)
                                 *new_type = type;
@@ -258,21 +375,21 @@ static int free_ram_pages_type(u64 start, u64 end)
 
 /*
  * req_type typically has one of the:
- * - _PAGE_CACHE_WB
- * - _PAGE_CACHE_WC
- * - _PAGE_CACHE_UC_MINUS
- * - _PAGE_CACHE_UC
+ * - _PAGE_CACHE_MODE_WB
+ * - _PAGE_CACHE_MODE_WC
+ * - _PAGE_CACHE_MODE_UC_MINUS
+ * - _PAGE_CACHE_MODE_UC
  *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-                    unsigned long *new_type)
+int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+                    enum page_cache_mode *new_type)
 {
         struct memtype *new;
-        unsigned long actual_type;
+        enum page_cache_mode actual_type;
         int is_range_ram;
         int err = 0;
 
@@ -281,10 +398,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
         if (!pat_enabled) {
                 /* This is identical to page table setting without PAT */
                 if (new_type) {
-                        if (req_type == _PAGE_CACHE_WC)
-                                *new_type = _PAGE_CACHE_UC_MINUS;
+                        if (req_type == _PAGE_CACHE_MODE_WC)
+                                *new_type = _PAGE_CACHE_MODE_UC_MINUS;
                         else
-                                *new_type = req_type & _PAGE_CACHE_MASK;
+                                *new_type = req_type;
                 }
                 return 0;
         }
@@ -292,7 +409,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
         /* Low ISA region is always mapped WB in page table. No need to track */
         if (x86_platform.is_untracked_pat_range(start, end)) {
                 if (new_type)
-                        *new_type = _PAGE_CACHE_WB;
+                        *new_type = _PAGE_CACHE_MODE_WB;
                 return 0;
         }
 
@@ -302,7 +419,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
          * tools and ACPI tools). Use WB request for WB memory and use
          * UC_MINUS otherwise.
          */
-        actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
+        actual_type = pat_x_mtrr_type(start, end, req_type);
 
         if (new_type)
                 *new_type = actual_type;
@@ -394,12 +511,12 @@ int free_memtype(u64 start, u64 end)
  *
  * Only to be called when PAT is enabled
  *
- * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
- * _PAGE_CACHE_UC
+ * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
+ * or _PAGE_CACHE_MODE_UC
  */
-static unsigned long lookup_memtype(u64 paddr)
+static enum page_cache_mode lookup_memtype(u64 paddr)
 {
-        int rettype = _PAGE_CACHE_WB;
+        enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
         struct memtype *entry;
 
         if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
@@ -414,7 +531,7 @@ static unsigned long lookup_memtype(u64 paddr)
                  * default state and not reserved, and hence of type WB
                  */
                 if (rettype == -1)
-                        rettype = _PAGE_CACHE_WB;
+                        rettype = _PAGE_CACHE_MODE_WB;
 
                 return rettype;
         }
@@ -425,7 +542,7 @@ static unsigned long lookup_memtype(u64 paddr)
         if (entry != NULL)
                 rettype = entry->type;
         else
-                rettype = _PAGE_CACHE_UC_MINUS;
+                rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
         spin_unlock(&memtype_lock);
         return rettype;
@@ -442,11 +559,11 @@ static unsigned long lookup_memtype(u64 paddr)
  * On failure, returns non-zero
  */
 int io_reserve_memtype(resource_size_t start, resource_size_t end,
-                        unsigned long *type)
+                        enum page_cache_mode *type)
 {
         resource_size_t size = end - start;
-        unsigned long req_type = *type;
-        unsigned long new_type;
+        enum page_cache_mode req_type = *type;
+        enum page_cache_mode new_type;
         int ret;
 
         WARN_ON_ONCE(iomem_map_sanity_check(start, size));
@@ -520,13 +637,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
 {
-        unsigned long flags = _PAGE_CACHE_WB;
+        enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
 
         if (!range_is_allowed(pfn, size))
                 return 0;
 
         if (file->f_flags & O_DSYNC)
-                flags = _PAGE_CACHE_UC_MINUS;
+                pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
 #ifdef CONFIG_X86_32
         /*
@@ -543,12 +660,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
             boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
             boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
             (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-                flags = _PAGE_CACHE_UC;
+                pcm = _PAGE_CACHE_MODE_UC;
         }
 #endif
 
         *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-                             flags);
+                             cachemode2protval(pcm));
         return 1;
 }
 
@@ -556,7 +673,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
  * Change the memory type for the physial address range in kernel identity
  * mapping space if that range is a part of identity map.
  */
-int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+int kernel_map_sync_memtype(u64 base, unsigned long size,
+                            enum page_cache_mode pcm)
 {
         unsigned long id_sz;
 
@@ -574,11 +692,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
                                 __pa(high_memory) - base :
                                 size;
 
-        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+        if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
                 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
                         "for [mem %#010Lx-%#010Lx]\n",
                         current->comm, current->pid,
-                        cattr_name(flags),
+                        cattr_name(pcm),
                         base, (unsigned long long)(base + size-1));
                 return -EINVAL;
         }
@@ -595,8 +713,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
         int is_ram = 0;
         int ret;
-        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
-        unsigned long flags = want_flags;
+        enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
+        enum page_cache_mode pcm = want_pcm;
 
         is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
@@ -609,36 +727,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                 if (!pat_enabled)
                         return 0;
 
-                flags = lookup_memtype(paddr);
-                if (want_flags != flags) {
+                pcm = lookup_memtype(paddr);
+                if (want_pcm != pcm) {
                         printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                 current->comm, current->pid,
-                                cattr_name(want_flags),
+                                cattr_name(want_pcm),
                                 (unsigned long long)paddr,
                                 (unsigned long long)(paddr + size - 1),
-                                cattr_name(flags));
+                                cattr_name(pcm));
                         *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                                 (~_PAGE_CACHE_MASK)) |
-                                                flags);
+                                                cachemode2protval(pcm));
                 }
                 return 0;
         }
 
-        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+        ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
         if (ret)
                 return ret;
 
-        if (flags != want_flags) {
+        if (pcm != want_pcm) {
                 if (strict_prot ||
-                    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
+                    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
                         free_memtype(paddr, paddr + size);
                         printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                 " for [mem %#010Lx-%#010Lx], got %s\n",
                                 current->comm, current->pid,
-                                cattr_name(want_flags),
+                                cattr_name(want_pcm),
                                 (unsigned long long)paddr,
                                 (unsigned long long)(paddr + size - 1),
-                                cattr_name(flags));
+                                cattr_name(pcm));
                         return -EINVAL;
                 }
                 /*
@@ -647,10 +765,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                  */
                 *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                       (~_PAGE_CACHE_MASK)) |
-                                      flags);
+                                      cachemode2protval(pcm));
         }
 
-        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
+        if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
                 free_memtype(paddr, paddr + size);
                 return -EINVAL;
         }
@@ -709,7 +827,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                     unsigned long pfn, unsigned long addr, unsigned long size)
 {
         resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
-        unsigned long flags;
+        enum page_cache_mode pcm;
 
         /* reserve the whole chunk starting from paddr */
         if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
@@ -728,18 +846,18 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
          * For anything smaller than the vma size we set prot based on the
          * lookup.
          */
-        flags = lookup_memtype(paddr);
+        pcm = lookup_memtype(paddr);
 
         /* Check memtype for the remaining pages */
         while (size > PAGE_SIZE) {
                 size -= PAGE_SIZE;
                 paddr += PAGE_SIZE;
-                if (flags != lookup_memtype(paddr))
+                if (pcm != lookup_memtype(paddr))
                         return -EINVAL;
         }
 
         *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-                         flags);
+                         cachemode2protval(pcm));
 
         return 0;
 }
@@ -747,15 +865,15 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                      unsigned long pfn)
 {
-        unsigned long flags;
+        enum page_cache_mode pcm;
 
         if (!pat_enabled)
                 return 0;
 
         /* Set prot based on lookup */
-        flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
+        pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
         *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-                         flags);
+                         cachemode2protval(pcm));
 
         return 0;
 }
@@ -791,7 +909,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
         if (pat_enabled)
-                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+                return __pgprot(pgprot_val(prot) |
+                                cachemode2protval(_PAGE_CACHE_MODE_WC));
         else
                 return pgprot_noncached(prot);
 }