author		Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-09-21 20:03:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 10:17:40 -0400
commit		8e4b9a60718970bbc02dfd3abd0b956ab65af231
tree		4c19152cea19882071a74f92c0cf6a16d5711f41 /mm/memory.c
parent		f3e8fccd06d27773186a0094371daf2d84c79469
mm: FOLL_DUMP replace FOLL_ANON
The "FOLL_ANON optimization" and its use_zero_page() test have caused confusion and bugs: why does it test VM_SHARED? for the very good but unsatisfying reason that VMware crashed without. As we look to maybe reinstating anonymous use of the ZERO_PAGE, we need to sort this out. Easily done: it's silly for __get_user_pages() and follow_page() to be guessing whether it's safe to assume that they're being used for a coredump (which can take a shortcut snapshot where other uses must handle a fault) - just tell them with GUP_FLAGS_DUMP and FOLL_DUMP. get_dump_page() doesn't even want a ZERO_PAGE: an error suits fine. Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk> Acked-by: Rik van Riel <riel@redhat.com> Acked-by: Mel Gorman <mel@csn.ul.ie> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Nick Piggin <npiggin@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	43
1 file changed, 12 insertions(+), 31 deletions(-)
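[Editor's note] This view is limited to mm/memory.c, so the flag definitions
the patch introduces elsewhere are not shown. For orientation, a hedged
sketch of the plumbing; the numeric values below are assumptions for
illustration, not quoted from the patch:

	/* include/linux/mm.h: FOLL_DUMP takes over FOLL_ANON's role among
	 * the follow_page() flags (values illustrative). */
	#define FOLL_WRITE	0x01	/* check pte is writable */
	#define FOLL_TOUCH	0x02	/* mark page accessed */
	#define FOLL_GET	0x04	/* do get_page on page */
	#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */

	/* mm/internal.h: the corresponding __get_user_pages() caller flag
	 * (again, value illustrative). */
	#define GUP_FLAGS_DUMP	0x10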
diff --git a/mm/memory.c b/mm/memory.c
index a8430ff13837..532a55bce6a4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1174,41 +1174,22 @@ no_page:
 	pte_unmap_unlock(ptep, ptl);
 	if (!pte_none(pte))
 		return page;
-	/* Fall through to ZERO_PAGE handling */
+
 no_page_table:
 	/*
 	 * When core dumping an enormous anonymous area that nobody
-	 * has touched so far, we don't want to allocate page tables.
+	 * has touched so far, we don't want to allocate unnecessary pages or
+	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
+	 * then get_dump_page() will return NULL to leave a hole in the dump.
+	 * But we can only make this optimization where a hole would surely
+	 * be zero-filled if handle_mm_fault() actually did handle it.
 	 */
-	if (flags & FOLL_ANON) {
-		page = ZERO_PAGE(0);
-		if (flags & FOLL_GET)
-			get_page(page);
-		BUG_ON(flags & FOLL_WRITE);
-	}
+	if ((flags & FOLL_DUMP) &&
+	    (!vma->vm_ops || !vma->vm_ops->fault))
+		return ERR_PTR(-EFAULT);
 	return page;
 }
 
-/* Can we do the FOLL_ANON optimization? */
-static inline int use_zero_page(struct vm_area_struct *vma)
-{
-	/*
-	 * We don't want to optimize FOLL_ANON for make_pages_present()
-	 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
-	 * we want to get the page from the page tables to make sure
-	 * that we serialize and update with any other user of that
-	 * mapping.
-	 */
-	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
-		return 0;
-	/*
-	 * And if we have a fault routine, it's not an anonymous region.
-	 */
-	return !vma->vm_ops || !vma->vm_ops->fault;
-}
-
-
-
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int nr_pages, int flags,
 		     struct page **pages, struct vm_area_struct **vmas)
@@ -1288,8 +1269,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		foll_flags = FOLL_TOUCH;
 		if (pages)
 			foll_flags |= FOLL_GET;
-		if (!write && use_zero_page(vma))
-			foll_flags |= FOLL_ANON;
+		if (flags & GUP_FLAGS_DUMP)
+			foll_flags |= FOLL_DUMP;
 
 		do {
 			struct page *page;
@@ -1446,7 +1427,7 @@ struct page *get_dump_page(unsigned long addr)
 	struct page *page;
 
 	if (__get_user_pages(current, current->mm, addr, 1,
-			GUP_FLAGS_FORCE, &page, &vma) < 1)
+			GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1)
 		return NULL;
 	if (page == ZERO_PAGE(0)) {
 		page_cache_release(page);
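
[Editor's illustration] A note on the new return convention: follow_page()
can now hand back ERR_PTR(-EFAULT) as well as NULL, so __get_user_pages()
has to distinguish "no page yet, fault it in" from "dump a hole here". The
following is a condensed, illustrative view of that caller-side contract as
I read the surrounding code of this series, not a quote of the patch:

	while (!(page = follow_page(vma, start, foll_flags))) {
		/* NULL still means no page yet: really fault it in */
		ret = handle_mm_fault(mm, vma, start,
			(foll_flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
		/* ... fault-result bookkeeping elided ... */
	}
	if (IS_ERR(page))
		/* FOLL_DUMP found an untouched anon area: stop with
		 * -EFAULT, which get_dump_page() maps to NULL - a hole */
		return i ? i : PTR_ERR(page);

The design choice follows from NULL already being taken: NULL is the
"call handle_mm_fault()" signal, so the dump path needs a distinct error
pointer to skip faulting entirely, as the comment added in the first hunk
explains.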