author		Linus Torvalds <torvalds@linux-foundation.org>	2008-06-23 14:21:37 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-06-23 14:21:37 -0400
commit		672ca28e300c17bf8d792a2a7a8631193e580c74 (patch)
tree		73b414f017d15cd404528e2fdd555a93bdb69b61 /mm
parent		96a331b1d6426726c37242ddbe939ee14b255790 (diff)
Fix ZERO_PAGE breakage with vmware
Commit 89f5b7da2a6bad2e84670422ab8192382a5aeb9f ("Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP") broke vmware, as reported by Jeff Chua:

  "This broke vmware 6.0.4.
   Jun 22 14:53:03.845: vmx| NOT_IMPLEMENTED
   /build/mts/release/bora-93057/bora/vmx/main/vmmonPosix.c:774"

and the reason seems to be that there's an old bug in how we handle FOLL_ANON on VM_SHARED areas in get_user_pages(), but since it only triggered if the whole page table was missing, nobody had apparently hit it before.

The recent changes to 'follow_page()' made the FOLL_ANON logic trigger not just for whole missing page tables, but for individual pages as well, and exposed this problem.

This fixes it by making the test for when FOLL_ANON is used more careful, and also makes the code easier to read and understand by moving the logic to a separate inline function.

Reported-and-tested-by: Jeff Chua <jeff.chua.linux@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	| 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 9aefaae46858..423e0e7c2f73 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1045,6 +1045,26 @@ no_page_table:
 	return page;
 }
 
+/* Can we do the FOLL_ANON optimization? */
+static inline int use_zero_page(struct vm_area_struct *vma)
+{
+	/*
+	 * We don't want to optimize FOLL_ANON for make_pages_present()
+	 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
+	 * we want to get the page from the page tables to make sure
+	 * that we serialize and update with any other user of that
+	 * mapping.
+	 */
+	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
+		return 0;
+	/*
+	 * And if we have a fault or a nopfn routine, it's not an
+	 * anonymous region.
+	 */
+	return !vma->vm_ops ||
+		(!vma->vm_ops->fault && !vma->vm_ops->nopfn);
+}
+
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, int len, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
@@ -1119,8 +1139,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		foll_flags = FOLL_TOUCH;
 		if (pages)
 			foll_flags |= FOLL_GET;
-		if (!write && !(vma->vm_flags & VM_LOCKED) &&
-		    (!vma->vm_ops || !vma->vm_ops->fault))
+		if (!write && use_zero_page(vma))
 			foll_flags |= FOLL_ANON;
 
 		do {
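
For reference, the behavioral difference between the old open-coded test and the new use_zero_page() helper can be exercised outside the kernel. The sketch below is a userspace restatement, not kernel code: the two structs are pared-down stand-ins for the real ones, the VM_SHARED/VM_LOCKED values are copied from the 2.6.26-era <linux/mm.h>, and the sample vma (a VM_SHARED mapping with a ->nopfn routine but no ->fault routine) is an assumed configuration, picked to show the kind of case the old test got wrong.

#include <stdio.h>

/* Pared-down stand-ins for the kernel types; only the fields the two
 * checks actually read are present. Flag values match the 2.6.26-era
 * <linux/mm.h>. */
#define VM_SHARED	0x00000008UL
#define VM_LOCKED	0x00002000UL

struct vm_operations_struct {
	void *fault;		/* non-NULL if the vma has a ->fault routine */
	void *nopfn;		/* non-NULL if the vma has a ->nopfn routine */
};

struct vm_area_struct {
	unsigned long vm_flags;
	const struct vm_operations_struct *vm_ops;
};

/* The vma-dependent part of the old condition removed by this patch:
 * !(vma->vm_flags & VM_LOCKED) && (!vma->vm_ops || !vma->vm_ops->fault) */
static int old_check(const struct vm_area_struct *vma)
{
	return !(vma->vm_flags & VM_LOCKED) &&
		(!vma->vm_ops || !vma->vm_ops->fault);
}

/* The helper introduced by this patch, restated. */
static int use_zero_page(const struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
		return 0;
	return !vma->vm_ops ||
		(!vma->vm_ops->fault && !vma->vm_ops->nopfn);
}

int main(void)
{
	/* Assumed-for-illustration vma: a shared mapping whose vm_ops
	 * has only a ->nopfn routine, no ->fault routine. */
	struct vm_operations_struct ops = { .fault = NULL, .nopfn = (void *)1 };
	struct vm_area_struct vma = { .vm_flags = VM_SHARED, .vm_ops = &ops };

	printf("old test would set FOLL_ANON: %d\n", old_check(&vma));	/* 1 */
	printf("use_zero_page():              %d\n", use_zero_page(&vma));	/* 0 */
	return 0;
}

With the old test, a read-only get_user_pages() on such a vma would have FOLL_ANON set and be handed ZERO_PAGE instead of going through the mapping's own page tables; the new helper refuses the optimization for any VM_LOCKED or VM_SHARED vma and for any vma that has a ->fault or ->nopfn routine.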