author     Juergen Gross <jgross@suse.com>        2014-11-03 08:01:55 -0500
committer  Thomas Gleixner <tglx@linutronix.de>   2014-11-16 05:04:25 -0500
commit     2a3746984c98b17b565e6a2c2bbaaaef757db1b4 (patch)
tree       935af5f26372b40cf5d0f026b3880ad3cd53f827 /arch/x86/mm
parent     49a3b3cbdf1621678a39bd95a3e67c0f858539c7 (diff)
x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()
Instead of directly using the cache mode bits in the pte, switch to using
the cache mode type. As track_pfn_remap() and track_pfn_insert() are the
main callers of lookup_memtype(), change its return type as well (a
conceptual sketch of bits vs. type follows the tags below).
Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-10-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
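
For context, a hedged sketch of the distinction the patch relies on (not
copied from the kernel headers): the old _PAGE_CACHE_* constants were raw
x86 PTE cache-attribute bits (combinations of _PAGE_PWT and _PAGE_PCD), so
callers passed hardware encodings around directly. The new
enum page_cache_mode is an abstract index, and pgprot2cachemode() /
cachemode2protval() translate between the two only at the boundaries. The
standalone C mock below assumes the default PAT layout and invents a simple
linear-search pgprot2cachemode() purely for illustration:

	/*
	 * Illustrative userspace sketch of the type split; the names mirror
	 * the kernel's, but the table contents assume the default PAT layout
	 * and are not taken from arch/x86.
	 */
	#include <stdio.h>

	#define _PAGE_PWT (1UL << 3)	/* page write-through bit */
	#define _PAGE_PCD (1UL << 4)	/* page cache-disable bit */

	/* Old style: "memtype" values were literally PTE cache bits. */
	#define _PAGE_CACHE_WB       0UL
	#define _PAGE_CACHE_WC       (_PAGE_PWT)
	#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
	#define _PAGE_CACHE_UC       (_PAGE_PCD | _PAGE_PWT)

	/* New style: an abstract mode, decoupled from the bit encoding. */
	enum page_cache_mode {
		_PAGE_CACHE_MODE_WB,
		_PAGE_CACHE_MODE_WC,
		_PAGE_CACHE_MODE_UC_MINUS,
		_PAGE_CACHE_MODE_UC,
		_PAGE_CACHE_MODE_NUM,
	};

	/* Translation table: mode -> PTE cache bits (default layout). */
	static const unsigned long cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
		[_PAGE_CACHE_MODE_WB]       = 0,
		[_PAGE_CACHE_MODE_WC]       = _PAGE_PWT,
		[_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
		[_PAGE_CACHE_MODE_UC]       = _PAGE_PCD | _PAGE_PWT,
	};

	static unsigned long cachemode2protval(enum page_cache_mode pcm)
	{
		return cachemode2pte_tbl[pcm];
	}

	static enum page_cache_mode pgprot2cachemode(unsigned long cache_bits)
	{
		int i;

		/* Reverse lookup; the kernel uses a second table instead. */
		for (i = 0; i < _PAGE_CACHE_MODE_NUM; i++)
			if (cachemode2pte_tbl[i] == cache_bits)
				return (enum page_cache_mode)i;
		return _PAGE_CACHE_MODE_WB;	/* sketch-only fallback */
	}

	int main(void)
	{
		enum page_cache_mode pcm = pgprot2cachemode(_PAGE_CACHE_UC_MINUS);

		/* Round trip: abstract mode inside, bits only at the edges. */
		printf("mode %d -> pte bits %#lx\n", pcm, cachemode2protval(pcm));
		return 0;
	}

The payoff of the indirection is that an abstract mode can be re-encoded
for kernels that program the PAT MSR with a non-default layout (the stated
goal of this series), whereas raw bit values bake one layout in.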
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pat.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6d5a8e3ef63d..2f3744fdc741 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -394,12 +394,12 @@ int free_memtype(u64 start, u64 end)
  *
  * Only to be called when PAT is enabled
  *
- * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
- * _PAGE_CACHE_UC
+ * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
+ * or _PAGE_CACHE_MODE_UC
  */
-static unsigned long lookup_memtype(u64 paddr)
+static enum page_cache_mode lookup_memtype(u64 paddr)
 {
-	int rettype = _PAGE_CACHE_WB;
+	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
 	struct memtype *entry;
 
 	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
@@ -408,13 +408,13 @@ static unsigned long lookup_memtype(u64 paddr)
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
 		struct page *page;
 		page = pfn_to_page(paddr >> PAGE_SHIFT);
-		rettype = get_page_memtype(page);
+		rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
 		/*
 		 * -1 from get_page_memtype() implies RAM page is in its
 		 * default state and not reserved, and hence of type WB
 		 */
 		if (rettype == -1)
-			rettype = _PAGE_CACHE_WB;
+			rettype = _PAGE_CACHE_MODE_WB;
 
 		return rettype;
 	}
@@ -423,9 +423,9 @@ static unsigned long lookup_memtype(u64 paddr)
 
 	entry = rbt_memtype_lookup(paddr);
 	if (entry != NULL)
-		rettype = entry->type;
+		rettype = pgprot2cachemode(__pgprot(entry->type));
 	else
-		rettype = _PAGE_CACHE_UC_MINUS;
+		rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
 	spin_unlock(&memtype_lock);
 	return rettype;
@@ -613,7 +613,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	if (!pat_enabled)
 		return 0;
 
-	flags = lookup_memtype(paddr);
+	flags = cachemode2protval(lookup_memtype(paddr));
 	if (want_flags != flags) {
 		printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 			current->comm, current->pid,
@@ -715,7 +715,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 		    unsigned long pfn, unsigned long addr, unsigned long size)
 {
 	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
-	unsigned long flags;
+	enum page_cache_mode pcm;
 
 	/* reserve the whole chunk starting from paddr */
 	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
@@ -734,18 +734,18 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 	 * For anything smaller than the vma size we set prot based on the
 	 * lookup.
 	 */
-	flags = lookup_memtype(paddr);
+	pcm = lookup_memtype(paddr);
 
 	/* Check memtype for the remaining pages */
 	while (size > PAGE_SIZE) {
 		size -= PAGE_SIZE;
 		paddr += PAGE_SIZE;
-		if (flags != lookup_memtype(paddr))
+		if (pcm != lookup_memtype(paddr))
 			return -EINVAL;
 	}
 
 	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-			 flags);
+			 cachemode2protval(pcm));
 
 	return 0;
 }
@@ -753,15 +753,15 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 		     unsigned long pfn)
 {
-	unsigned long flags;
+	enum page_cache_mode pcm;
 
 	if (!pat_enabled)
 		return 0;
 
 	/* Set prot based on lookup */
-	flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
+	pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
 	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-			 flags);
+			 cachemode2protval(pcm));
 
 	return 0;
 }
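
The pattern the last two hunks converge on is worth spelling out: the
abstract mode travels through the function in an enum page_cache_mode
local (pcm), and it is lowered to PTE bits only at the single point where
the final protection value is composed. A hedged continuation of the mock
above; _PAGE_PAT and a three-bit _PAGE_CACHE_MASK are assumptions mirroring
the 4K-PTE layout, not taken verbatim from this patch:

	#define _PAGE_PAT        (1UL << 7)	/* PAT bit in a 4K PTE (assumed) */
	#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)

	/*
	 * Compose a protection value the way track_pfn_remap() and
	 * track_pfn_insert() now do: keep every non-cache bit of the vma's
	 * page protection, replace the cache field with the encoding of
	 * the looked-up mode.
	 */
	static unsigned long compose_prot(unsigned long vm_page_prot,
					  enum page_cache_mode pcm)
	{
		return (vm_page_prot & ~_PAGE_CACHE_MASK) | cachemode2protval(pcm);
	}

Keeping comparisons such as pcm != lookup_memtype(paddr) in mode space and
converting once at the end means no intermediate value ever holds a
half-translated mix of modes and hardware bits.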