author    Michel Lespinasse <walken@google.com>    2011-01-13 18:46:11 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 20:32:36 -0500
commit    110d74a921f4d272b47ef6104fcf937df808f4c8
tree      a2f1705e049f06e1cf8cbaf7d6b3261f0b46b6ab /mm/memory.c
parent    fed067da46ad3b9acedaf794a5f05d0bc153280b
mm: add FOLL_MLOCK follow_page flag.
Move the code to mlock pages from __mlock_vma_pages_range() to follow_page(). This allows __mlock_vma_pages_range() to not have to break down work into 16-page batches.

An additional motivation for doing this within the present patch series is that it'll make it easier for a later change to drop mmap_sem when blocking on disk (we'd like to be able to resume at the page that was read from disk instead of at the start of a 16-page batch).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
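[Editor's note] For context, the caller-side effect of the change: with FOLL_MLOCK, __mlock_vma_pages_range() no longer needs its own 16-page batching loop and can hand the whole range to __get_user_pages() in one call, letting follow_page() mlock each page as it is resolved. A minimal sketch of that caller shape, assuming the __get_user_pages() signature of this era; the VM_BUG_ON sanity checks and the exact COW/write-fault policy of the real series are simplified here, so treat this as illustrative rather than the verbatim patch:

	/* Sketch: mlock a page range with a single gup call (illustrative). */
	static long __mlock_vma_pages_range(struct vm_area_struct *vma,
					    unsigned long start, unsigned long end)
	{
		struct mm_struct *mm = vma->vm_mm;
		int nr_pages = (end - start) / PAGE_SIZE;
		int gup_flags = FOLL_TOUCH | FOLL_MLOCK;

		/*
		 * Touch writable private mappings with a write fault to
		 * break COW; shared mappings don't COW, so don't dirty them.
		 */
		if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
			gup_flags |= FOLL_WRITE;

		/*
		 * follow_page() now mlocks each page itself when FOLL_MLOCK
		 * is set, so no 16-page batching loop is needed here.
		 */
		return __get_user_pages(current, mm, start, nr_pages,
					gup_flags, NULL, NULL);
	}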
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b8f97b8575b7..15e1f19a3b10 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1310,6 +1310,28 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		 */
 		mark_page_accessed(page);
 	}
+	if (flags & FOLL_MLOCK) {
+		/*
+		 * The preliminary mapping check is mainly to avoid the
+		 * pointless overhead of lock_page on the ZERO_PAGE
+		 * which might bounce very badly if there is contention.
+		 *
+		 * If the page is already locked, we don't need to
+		 * handle it now - vmscan will handle it later if and
+		 * when it attempts to reclaim the page.
+		 */
+		if (page->mapping && trylock_page(page)) {
+			lru_add_drain();  /* push cached pages to LRU */
+			/*
+			 * Because we lock page here and migration is
+			 * blocked by the pte's page reference, we need
+			 * only check for file-cache page truncation.
+			 */
+			if (page->mapping)
+				mlock_vma_page(page);
+			unlock_page(page);
+		}
+	}
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
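[Editor's note] The FOLL_MLOCK flag itself is defined outside this file, so its hunk does not appear in this view, which is limited to mm/memory.c. For reference, a sketch of the corresponding definition among the FOLL_* bits in include/linux/mm.h; the bit value shown matches mainline of this era but is assumed here, since that hunk is not part of this diff:

	#define FOLL_MLOCK	0x40	/* mark page as mlocked */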