author		venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>	2009-01-09 19:13:12 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-13 13:13:02 -0500
commit		cdecff6864a1cd352a41d44a65e7451b8ef5cee2 (patch)
tree		1e0e2ab289e757cbd34e6b961cd1b2088e62f3e7 /arch/x86/mm
parent		e4b866ed197cef9989348e0479fed8d864ea465b (diff)
x86 PAT: return compatible mapping to remap_pfn_range callers
Impact: avoid warning message, potentially solve 3D performance regression

Change x86 PAT code to return a compatible memtype if the exact memtype
that was requested in remap_pfn_range and friends is not available due to
some conflict. This is done by returning the compatible type in the pgprot
parameter of track_pfn_vma_new(), and the caller uses that memtype for the
page table.

Note that track_pfn_vma_copy(), which is basically called during fork, gets
the prot from the existing page table and should not have any conflict.
Hence we use a strict memtype check there and do not allow compatible
memtypes.

This patch fixes the bug reported here:

http://marc.info/?l=linux-kernel&m=123108883716357&w=2

Specifically, the error message:

X:5010 map pfn expected mapping type write-back for d0000000-d0101000,
got write-combining

should go away.

Reported-and-bisected-by: Kevin Winchester <kjwinchester@gmail.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
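For context on how the returned type is consumed: remap_pfn_range() passes
its pgprot down by reference and fills the page tables with whatever
track_pfn_vma_new() leaves there. A minimal sketch of that caller-side flow,
simplified from the mm/memory.c of this era (example_remap and fill_ptes are
illustrative names, not the verbatim kernel code; error unwinding omitted):

static int example_remap(struct vm_area_struct *vma, unsigned long addr,
			 unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int err;

	/*
	 * track_pfn_vma_new() may rewrite 'prot' in place, e.g.
	 * downgrading a write-back request to write-combining.
	 */
	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err)
		return -EINVAL;

	/* PTEs are then written with the (possibly adjusted) prot. */
	return fill_ptes(vma, addr, pfn, size, prot);
}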
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pat.c	| 43
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index f88ac80530c0..8b08fb955274 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -601,12 +601,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+				int strict_prot)
 {
 	int is_ram = 0;
 	int id_sz, ret;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
 	is_ram = pagerange_is_ram(paddr, paddr + size);
 
@@ -625,15 +626,24 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
 		return ret;
 
 	if (flags != want_flags) {
-		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size),
-			cattr_name(flags));
-		return -EINVAL;
+		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+			free_memtype(paddr, paddr + size);
+			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+				" for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			return -EINVAL;
+		}
+		/*
+		 * We allow returning different type than the one requested in
+		 * non strict case.
+		 */
+		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+				      (~_PAGE_CACHE_MASK)) |
+				     flags);
 	}
 
 	/* Need to keep identity mapping in sync */
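The non-strict path above relies on is_new_memtype_allowed(), added by the
parent commit e4b866ed. The rule, roughly, as it stood in this series: a
request may be downgraded to a stricter type, but a mapping must never
silently become cacheable write-back when the caller asked for UC- or WC.
An approximate sketch of the helper (paraphrased from
arch/x86/include/asm/pgtable.h of this era, not quoted from this patch):

static inline int is_new_memtype_allowed(unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * Handing back a cacheable write-back mapping to a caller
	 * that asked for uncached or write-combining would break
	 * its ordering/caching assumptions, so forbid only that.
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS && new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC && new_flags == _PAGE_CACHE_WB))
		return 0;

	return 1;
}

This is why the reported conflict (write-back requested, write-combining
already reserved) is now tolerated: WC is the stricter of the two types,
so the caller only loses performance, never correctness.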
@@ -689,6 +699,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
+	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
@@ -702,7 +713,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
-		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
@@ -710,7 +722,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -758,14 +771,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, *prot);
+		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
 		if (retval)
 			goto cleanup_ret;
 	}
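The user-visible effect can be sketched as a hypothetical driver mmap
handler (mydrv_mmap and mydrv_base_pfn are illustrative names, not part of
this patch or any real driver):

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * If this range was already reserved write-combining but
	 * vm_page_prot still asks for write-back, reserve_pfn_range()
	 * used to fail with -EINVAL and log "map pfn expected mapping
	 * type write-back ... got write-combining".  With this patch
	 * the request is adjusted to write-combining in the non-strict
	 * (track_pfn_vma_new) path and the remap succeeds.
	 */
	return remap_pfn_range(vma, vma->vm_start, mydrv_base_pfn,
			       size, vma->vm_page_prot);
}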