author		Konstantin Khlebnikov <khlebnikov@openvz.org>	2012-10-08 19:28:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:16 -0400
commit		b3b9c2932c32e0692018ed5f12f3fd8c70eea8ce (patch)
tree		bca2431f0b0bd2d364f041f0344836cd39b1822c /mm/memory.c
parent		5180da410db6369d1f95c9014da1c9bc33fb043e (diff)
mm, x86, pat: rework linear pfn-mmap tracking
Replace the generic vma-flag VM_PFN_AT_MMAP with x86-only VM_PAT.
We can toss the mapping address from remap_pfn_range() into
track_pfn_vma_new(), and collect all PAT-related logic together in
arch/x86/.
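As an illustration only (this patch's diff is limited to mm/memory.c): a minimal sketch of how the x86 helper, called track_pfn_remap() in the diff below after the rename in the parent commit, can use the extra address argument. The body and helpers such as reserve_pfn_range() follow arch/x86/mm/pat.c only approximately and are not the literal patch.

int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;

	/*
	 * Linear pfn-map covering the whole VMA: reserve the range and
	 * remember it with the x86-only VM_PAT flag, so untrack_pfn() can
	 * release the reservation (and clear VM_PAT) at unmap time.
	 */
	if (addr == vma->vm_start && size == vma->vm_end - vma->vm_start) {
		int ret = reserve_pfn_range(paddr, size, prot, 0);

		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	/*
	 * Partial remap: the real code only folds the looked-up memory type
	 * into *prot; no VM_PAT bookkeeping is needed in that case.
	 */
	return 0;
}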
This patch also restores the original frustration-free is_cow_mapping() check
in remap_pfn_range(), as it was before commit v2.6.28-rc8-88-g3c8bb73
("x86: PAT: store vm_pgoff for all linear_over_vma_region mappings - v3").
The is_linear_pfn_mapping() checks can be removed from mm/huge_memory.c,
because that case is already handled by VM_PFNMAP in the VM_NO_THP bit-mask.
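For context, VM_PFNMAP reaches the THP checks through the VM_SPECIAL mask, which the VM_NO_THP bit-mask in mm/huge_memory.c already incorporates; the v3.6-era definition in include/linux/mm.h reads roughly:

/* "Special" VMAs that most of mm, including THP, must leave untouched. */
#define VM_SPECIAL	(VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)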
[suresh.b.siddha@intel.com: Reset the VM_PAT flag as part of untrack_pfn_vma()]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Venkatesh Pallipadi <venki@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Kentaro Takeda <takedakn@nttdata.co.jp>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 26 ++++++++++----------------
1 file changed, 10 insertions(+), 16 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6bef278ad303..655e1429388a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1055,7 +1055,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
-	if (unlikely(is_pfn_mapping(vma))) {
+	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
 		/*
 		 * We do not free on error cases below as remove_vma
 		 * gets called on error from higher level routine
@@ -1327,7 +1327,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (vma->vm_file)
 		uprobe_munmap(vma, start, end);
 
-	if (unlikely(is_pfn_mapping(vma)))
+	if (unlikely(vma->vm_flags & VM_PFNMAP))
 		untrack_pfn(vma, 0, 0);
 
 	if (start != end) {
@@ -2299,26 +2299,20 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * There's a horrible special case to handle copy-on-write
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
+	 * See vm_normal_page() for details.
 	 */
-	if (addr == vma->vm_start && end == vma->vm_end) {
+	if (is_cow_mapping(vma->vm_flags)) {
+		if (addr != vma->vm_start || end != vma->vm_end)
+			return -EINVAL;
 		vma->vm_pgoff = pfn;
-		vma->vm_flags |= VM_PFN_AT_MMAP;
-	} else if (is_cow_mapping(vma->vm_flags))
+	}
+
+	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+	if (err)
 		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_remap(vma, &prot, pfn, PAGE_ALIGN(size));
-	if (err) {
-		/*
-		 * To indicate that track_pfn related cleanup is not
-		 * needed from higher level routine calling unmap_vmas
-		 */
-		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
-		vma->vm_flags &= ~VM_PFN_AT_MMAP;
-		return -EINVAL;
-	}
-
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);