author	Suresh Siddha <suresh.b.siddha@intel.com>	2012-10-08 19:28:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:16 -0400
commit	5180da410db6369d1f95c9014da1c9bc33fb043e (patch)
tree	1d92975710b33a90f77f39d0ad33669329dea949 /mm/memory.c
parent	b1a86e15dc0304366f50ba1720834bc419c801b1 (diff)
x86, pat: separate the pfn attribute tracking for remap_pfn_range and vm_insert_pfn
With PAT enabled, vm_insert_pfn() looks up the existing pfn memory attribute and uses it. The expectation is that the driver reserves the memory attributes for the pfn before calling vm_insert_pfn().

remap_pfn_range() (when called for the whole vma) will set up a new attribute (based on the prot argument) for the specified pfn range. This addresses the legacy usage which typically calls remap_pfn_range() with a desired memory attribute. For ranges smaller than the vma size (which is typically not the case), remap_pfn_range() will use the existing memory attribute for the pfn range.

Expose two different APIs for these different behaviors: track_pfn_insert() for tracking the pfn attribute set by vm_insert_pfn(), and track_pfn_remap() for remap_pfn_range(). This cleanup also prepares the ground for the track/untrack pfn vma routines to take over the ownership of setting the PAT-specific vm_flag in the 'vma'.

[khlebnikov@openvz.org: Clear checks in track_pfn_remap()]
[akpm@linux-foundation.org: tweak a few comments]

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Venkatesh Pallipadi <venki@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Kentaro Takeda <takedakn@nttdata.co.jp>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
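For context, the two usage models whose tracking this patch separates look roughly like this from the driver side. This is a minimal sketch, not code from the patch: my_dev_mmap, my_dev_fault and my_dev_base_pfn are hypothetical names, and the fault-handler signature matches the kernel of this era (~v3.7):

#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long my_dev_base_pfn;	/* hypothetical: first pfn of the device memory */

/*
 * Legacy model: map the whole range at mmap time.  remap_pfn_range()
 * sets up a new memory attribute from the prot argument, which this
 * patch now tracks via track_pfn_remap().
 */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, my_dev_base_pfn,
			       size, vma->vm_page_prot);
}

/*
 * Fault-time model: the driver is expected to have reserved the pfn's
 * memory attribute beforehand; vm_insert_pfn() only looks it up, which
 * this patch now tracks via track_pfn_insert().
 */
static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = my_dev_base_pfn + vmf->pgoff;

	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}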
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 57361708d1a5..6bef278ad303 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1060,7 +1060,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * We do not free on error cases below as remove_vma
 		 * gets called on error from higher level routine
 		 */
-		ret = track_pfn_vma_copy(vma);
+		ret = track_pfn_copy(vma);
 		if (ret)
 			return ret;
 	}
@@ -1328,7 +1328,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	uprobe_munmap(vma, start, end);
 
 	if (unlikely(is_pfn_mapping(vma)))
-		untrack_pfn_vma(vma, 0, 0);
+		untrack_pfn(vma, 0, 0);
 
 	if (start != end) {
 		if (unlikely(is_vm_hugetlb_page(vma))) {
@@ -2162,14 +2162,11 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
+	if (track_pfn_insert(vma, &pgprot, pfn))
 		return -EINVAL;
 
 	ret = insert_pfn(vma, addr, pfn, pgprot);
 
-	if (ret)
-		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
-
 	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
@@ -2311,7 +2308,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+	err = track_pfn_remap(vma, &prot, pfn, PAGE_ALIGN(size));
 	if (err) {
 		/*
 		 * To indicate that track_pfn related cleanup is not
@@ -2335,7 +2332,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	} while (pgd++, addr = next, addr != end);
 
 	if (err)
-		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
 
 	return err;
 }
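The call sites above imply the shape of the new hooks. On architectures without pfn-attribute tracking, the generic fallbacks are presumably no-op stubs along the following lines; this is a sketch inferred from the call sites in this hunk, since the actual stubs live outside mm/memory.c and are not part of this diff:

/*
 * Sketch of the generic no-op fallbacks, inferred from the call sites
 * above; architectures with PAT-style tracking (x86) provide the real
 * implementations.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long size)
{
	return 0;	/* remap_pfn_range(): track a new attribute for the range */
}

static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				   unsigned long pfn)
{
	return 0;	/* vm_insert_pfn(): look up the attribute the driver reserved */
}

static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}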