Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--	arch/x86/mm/pat.c | 45
1 file changed, 29 insertions(+), 16 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index ffc88cc00fda..7b61036427df 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -618,12 +618,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+				int strict_prot)
 {
 	int is_ram = 0;
 	int id_sz, ret;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
 	is_ram = pagerange_is_ram(paddr, paddr + size);
 
@@ -642,15 +643,24 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
 		return ret;
 
 	if (flags != want_flags) {
-		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size),
-			cattr_name(flags));
-		return -EINVAL;
+		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+			free_memtype(paddr, paddr + size);
+			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+				" for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			return -EINVAL;
+		}
+		/*
+		 * We allow returning different type than the one requested in
+		 * non strict case.
+		 */
+		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+				      (~_PAGE_CACHE_MASK)) |
+				     flags);
 	}
 
 	/* Need to keep identity mapping in sync */
@@ -706,6 +716,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
+	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
@@ -719,7 +730,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
-		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
@@ -727,7 +739,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -758,7 +771,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -775,14 +788,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
 		if (retval)
 			goto cleanup_ret;
 	}