about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid Vrabel <david.vrabel@citrix.com>2014-12-18 09:48:15 -0500
committerDavid Vrabel <david.vrabel@citrix.com>2015-01-28 09:03:03 -0500
commit667a0a06c99d5291433b869ed35dabdd95ba1453 (patch)
tree828e7483093d94529b107be22468da1e2b7f42e7
parent270b79338eb1bd1eb28e62994ffa7b9ecd9975d8 (diff)
mm: provide a find_special_page vma operation
The optional find_special_page VMA operation is used to look up the pages backing a VMA. This is useful in cases where the normal mechanisms for finding the page don't work. This is only called if the PTE is special. One use case is a Xen PV guest mapping foreign pages into userspace. In a Xen PV guest, the PTEs contain MFNs, so get_user_pages() (for example) must do an MFN to PFN (M2P) lookup before it can get the page. For foreign pages (those owned by another guest) the M2P lookup returns the PFN as seen by the foreign guest (which would be completely the wrong page for the local guest). This cannot be fixed by improving the M2P lookup, since one MFN may be mapped onto two or more pages, so getting the right page is impossible given just the MFN. Signed-off-by: David Vrabel <david.vrabel@citrix.com> Acked-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--include/linux/mm.h8
-rw-r--r--mm/memory.c2
2 files changed, 10 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80fc92a49649..9269af7349fe 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -290,6 +290,14 @@ struct vm_operations_struct {
290 /* called by sys_remap_file_pages() to populate non-linear mapping */ 290 /* called by sys_remap_file_pages() to populate non-linear mapping */
291 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, 291 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
292 unsigned long size, pgoff_t pgoff); 292 unsigned long size, pgoff_t pgoff);
293
294 /*
295 * Called by vm_normal_page() for special PTEs to find the
296 * page for @addr. This is useful if the default behavior
297 * (using pte_page()) would not find the correct page.
298 */
299 struct page *(*find_special_page)(struct vm_area_struct *vma,
300 unsigned long addr);
293}; 301};
294 302
295struct mmu_gather; 303struct mmu_gather;
diff --git a/mm/memory.c b/mm/memory.c
index 54f3a9b00956..dc2e01a315e2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -754,6 +754,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
754 if (HAVE_PTE_SPECIAL) { 754 if (HAVE_PTE_SPECIAL) {
755 if (likely(!pte_special(pte))) 755 if (likely(!pte_special(pte)))
756 goto check_pfn; 756 goto check_pfn;
757 if (vma->vm_ops && vma->vm_ops->find_special_page)
758 return vma->vm_ops->find_special_page(vma, addr);
757 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) 759 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
758 return NULL; 760 return NULL;
759 if (!is_zero_pfn(pfn)) 761 if (!is_zero_pfn(pfn))