author		Juergen Gross <jgross@suse.com>		2014-11-03 08:01:54 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2014-11-16 05:04:25 -0500
commit		49a3b3cbdf1621678a39bd95a3e67c0f858539c7 (patch)
tree		fb59b9624d615faf4a3e32dec9bde28d53863a4c /arch/x86/mm
parent		d85f33342a0f57acfbe078cdd0c4f590d5608bb7 (diff)
x86: Use new cache mode type in mm/iomap_32.c
Instead of directly using the cache mode bits in the pte, switch to using the
cache mode type. This requires changing io_reserve_memtype() as well.

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-9-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
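For context: the series introduces enum page_cache_mode plus two conversion
helpers, and this patch is one of the per-file conversions. A simplified sketch
of the new type and helper signatures, not their real definitions (those live
in arch/x86/include/asm/pgtable_types.h and are table-driven so they can track
the PAT MSR programming; enumerator values are elided here):

enum page_cache_mode {
	_PAGE_CACHE_MODE_WB,
	_PAGE_CACHE_MODE_WC,
	_PAGE_CACHE_MODE_UC_MINUS,
	_PAGE_CACHE_MODE_UC,
	_PAGE_CACHE_MODE_WT,
	_PAGE_CACHE_MODE_WP,
};

/* cache mode -> PTE cache bits (_PAGE_PWT/_PAGE_PCD/_PAGE_PAT), for building a pgprot */
unsigned long cachemode2protval(enum page_cache_mode pcm);

/* pgprot -> cache mode, for interpreting the cache bits of an existing mapping */
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot);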
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/iomap_32.c	12
-rw-r--r--	arch/x86/mm/pat.c	18
2 files changed, 17 insertions, 13 deletions
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 7b179b499fa3..9ca35fc60cfe 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -33,17 +33,17 @@ static int is_io_mapping_possible(resource_size_t base, unsigned long size)
 
 int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 {
-	unsigned long flag = _PAGE_CACHE_WC;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
 	int ret;
 
 	if (!is_io_mapping_possible(base, size))
 		return -EINVAL;
 
-	ret = io_reserve_memtype(base, base + size, &flag);
+	ret = io_reserve_memtype(base, base + size, &pcm);
 	if (ret)
 		return ret;
 
-	*prot = __pgprot(__PAGE_KERNEL | flag);
+	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
@@ -82,8 +82,10 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 	 * MTRR is UC or WC. UC_MINUS gets the real intention, of the
 	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
 	 */
-	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
-		prot = PAGE_KERNEL_UC_MINUS;
+	if (!pat_enabled && pgprot_val(prot) ==
+	    (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
+		prot = __pgprot(__PAGE_KERNEL |
+				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
 
 	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
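For orientation, iomap_create_wc() and iomap_atomic_prot_pfn() are not called
by drivers directly; on 32-bit HIGHMEM configurations they back the io_mapping
helpers in include/linux/io-mapping.h. A minimal usage sketch under that
assumption (the copy_page_to_wc_window() wrapper and its BAR/offset parameters
are illustrative, not part of this patch):

#include <linux/io.h>
#include <linux/io-mapping.h>

/* Illustrative only: map one page of a device BAR write-combined and fill it. */
static int copy_page_to_wc_window(resource_size_t bar_base, unsigned long bar_len,
				  unsigned long page_offset, const void *src)
{
	struct io_mapping *map;
	void __iomem *vaddr;

	map = io_mapping_create_wc(bar_base, bar_len);	/* ends up in iomap_create_wc() */
	if (!map)
		return -ENOMEM;

	/* Temporary, per-CPU mapping; WC if PAT allows it, UC- otherwise. */
	vaddr = io_mapping_map_atomic_wc(map, page_offset);
	memcpy_toio(vaddr, src, PAGE_SIZE);
	io_mapping_unmap_atomic(vaddr);

	io_mapping_free(map);
	return 0;
}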
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 47282c273647..6d5a8e3ef63d 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -442,25 +442,27 @@ static unsigned long lookup_memtype(u64 paddr)
  * On failure, returns non-zero
  */
 int io_reserve_memtype(resource_size_t start, resource_size_t end,
-			unsigned long *type)
+			enum page_cache_mode *type)
 {
 	resource_size_t size = end - start;
-	unsigned long req_type = *type;
-	unsigned long new_type;
+	enum page_cache_mode req_type = *type;
+	enum page_cache_mode new_type;
+	unsigned long new_prot;
 	int ret;
 
 	WARN_ON_ONCE(iomem_map_sanity_check(start, size));
 
-	ret = reserve_memtype(start, end, req_type, &new_type);
+	ret = reserve_memtype(start, end, cachemode2protval(req_type),
+				&new_prot);
 	if (ret)
 		goto out_err;
 
-	if (!is_new_memtype_allowed(start, size,
-				    pgprot2cachemode(__pgprot(req_type)),
-				    pgprot2cachemode(__pgprot(new_type))))
+	new_type = pgprot2cachemode(__pgprot(new_prot));
+
+	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;
 
-	if (kernel_map_sync_memtype(start, size, new_type) < 0)
+	if (kernel_map_sync_memtype(start, size, new_prot) < 0)
 		goto out_free;
 
 	*type = new_type;