author	Konstantin Khlebnikov <khlebnikov@openvz.org>	2012-10-08 19:28:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:16 -0400
commit	b3b9c2932c32e0692018ed5f12f3fd8c70eea8ce (patch)
tree	bca2431f0b0bd2d364f041f0344836cd39b1822c /arch/x86
parent	5180da410db6369d1f95c9014da1c9bc33fb043e (diff)
mm, x86, pat: rework linear pfn-mmap tracking
Replace the generic vma-flag VM_PFN_AT_MMAP with x86-only VM_PAT.

We can toss the mapping address from remap_pfn_range() into
track_pfn_vma_new(), and collect all PAT-related logic together in
arch/x86/.

This patch also restores the original, frustration-free is_cow_mapping()
check in remap_pfn_range(), as it was before commit v2.6.28-rc8-88-g3c8bb73
("x86: PAT: store vm_pgoff for all linear_over_vma_region mappings - v3").

The is_linear_pfn_mapping() checks can be removed from mm/huge_memory.c,
because that case is already handled by VM_PFNMAP in the VM_NO_THP
bit-mask.

[suresh.b.siddha@intel.com: Reset the VM_PAT flag as part of untrack_pfn_vma()]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Venkatesh Pallipadi <venki@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Kentaro Takeda <takedakn@nttdata.co.jp>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
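For context, the generic caller side is not shown below (the diffstat is limited to arch/x86). The sketch that follows is a condensed, illustrative view of how remap_pfn_range() is expected to hand the mapping address down to track_pfn_remap() after this rework; the function names come from the commit message, but the body is simplified and is not the verbatim mm/memory.c change.

/*
 * Condensed sketch (illustrative, not the actual mm/memory.c code):
 * remap_pfn_range() passes the mapping address to track_pfn_remap(),
 * so the "does this remap cover the whole VMA?" test and the VM_PAT
 * bookkeeping live entirely in arch/x86/mm/pat.c.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int err;

	/* restored pre-3c8bb73 check: COW mappings may only be remapped whole */
	if (is_cow_mapping(vma->vm_flags)) {
		if (addr != vma->vm_start ||
		    addr + PAGE_ALIGN(size) != vma->vm_end)
			return -EINVAL;
		vma->vm_pgoff = pfn;
	}

	/* x86 PAT decides whether to reserve the range and set VM_PAT */
	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
	if (err)
		return -EINVAL;

	/* ... set VM_IO | VM_PFNMAP and build the page tables ... */
	return 0;
}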
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/mm/pat.c	17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 74a702674e86..0eb572eda406 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -677,7 +677,7 @@ int track_pfn_copy(struct vm_area_struct *vma)
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
-	if (is_linear_pfn_mapping(vma)) {
+	if (vma->vm_flags & VM_PAT) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
@@ -699,14 +699,20 @@ int track_pfn_copy(struct vm_area_struct *vma)
  * single reserve_pfn_range call.
  */
 int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
-		    unsigned long pfn, unsigned long size)
+		    unsigned long pfn, unsigned long addr, unsigned long size)
 {
 	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	unsigned long flags;
 
 	/* reserve the whole chunk starting from paddr */
-	if (is_linear_pfn_mapping(vma))
-		return reserve_pfn_range(paddr, size, prot, 0);
+	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
+		int ret;
+
+		ret = reserve_pfn_range(paddr, size, prot, 0);
+		if (!ret)
+			vma->vm_flags |= VM_PAT;
+		return ret;
+	}
 
 	if (!pat_enabled)
 		return 0;
@@ -758,7 +764,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 	resource_size_t paddr;
 	unsigned long prot;
 
-	if (!is_linear_pfn_mapping(vma))
+	if (!(vma->vm_flags & VM_PAT))
 		return;
 
 	/* free the chunk starting from pfn or the whole chunk */
@@ -772,6 +778,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 		size = vma->vm_end - vma->vm_start;
 	}
 	free_pfn_range(paddr, size);
+	vma->vm_flags &= ~VM_PAT;
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)