author     Vlastimil Babka <vbabka@suse.cz>               2013-09-11 17:22:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2013-09-11 18:58:00 -0400
commit     5b40998ae35cf64561868370e6c9f3d3e94b6bf7 (patch)
tree       24f7142d850df3512392004501fc9db7573fc031 /mm/mlock.c
parent     56afe477df3cbbcd656682d0355ef7d9eb8bdd81 (diff)
mm: munlock: remove redundant get_page/put_page pair on the fast path
The performance of the fast path in munlock_vma_range() can be further
improved by avoiding atomic ops of a redundant get_page()/put_page() pair.

When calling get_page() during page isolation, we already have the pin from
follow_page_mask().  This pin will then be returned by __pagevec_lru_add(),
after which we do not reference the pages anymore.

After this patch, an 8% speedup was measured for munlocking a 56GB large
memory area with THP disabled.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Jörn Engel <joern@logfs.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
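The refcount handover described above can be modelled outside the kernel.
Below is a minimal C sketch of the idea; toy_page, toy_get(), toy_put() and
the two munlock_fast_path_*() functions are hypothetical stand-ins for
struct page, get_page(), put_page() and the munlock fast path, not kernel
API.

#include <assert.h>
#include <stdio.h>

/* Toy refcounted page; stands in for struct page (hypothetical). */
struct toy_page { int refcount; };

static void toy_get(struct toy_page *p) { p->refcount++; } /* ~get_page() */
static void toy_put(struct toy_page *p) { p->refcount--; } /* ~put_page() */

/*
 * Old fast path: isolation takes its own pin on top of the one already
 * held from the lookup, and a later phase drops it again -- a pair of
 * atomic ops that cancel out.
 */
static void munlock_fast_path_old(struct toy_page *p)
{
	toy_get(p);	/* redundant pin taken at isolation */
	/* ... isolate from LRU, batch into a pagevec ... */
	toy_put(p);	/* pin returned by the batched putback */
	toy_put(p);	/* redundant drop in the old phase-4 loop */
}

/*
 * New fast path: the pin from the lookup (follow_page_mask()) is handed
 * through isolation and returned once by the batched putback.
 */
static void munlock_fast_path_new(struct toy_page *p)
{
	/* ... isolate from LRU, batch into a pagevec ... */
	toy_put(p);	/* the lookup pin, returned by putback */
}

int main(void)
{
	struct toy_page a = { .refcount = 1 };	/* pin from lookup */
	struct toy_page b = { .refcount = 1 };

	munlock_fast_path_old(&a);
	munlock_fast_path_new(&b);
	assert(a.refcount == 0 && b.refcount == 0);
	puts("same final state; the new path skips one get/put pair per page");
	return 0;
}

Both paths end with the lookup pin returned exactly once; the new one simply
avoids two atomic operations per page, which is where the 8% figure quoted
above for a 56GB area comes from.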
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--  mm/mlock.c  26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index abdc612b042d..19a934dce5d6 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -303,8 +303,10 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 			if (PageLRU(page)) {
 				lruvec = mem_cgroup_page_lruvec(page, zone);
 				lru = page_lru(page);
-
-				get_page(page);
+				/*
+				 * We already have pin from follow_page_mask()
+				 * so we can spare the get_page() here.
+				 */
 				ClearPageLRU(page);
 				del_page_from_lru_list(page, lruvec, lru);
 			} else {
@@ -336,25 +338,25 @@ skip_munlock:
 			lock_page(page);
 			if (!__putback_lru_fast_prepare(page, &pvec_putback,
 					&pgrescued)) {
-				/* Slow path */
+				/*
+				 * Slow path. We don't want to lose the last
+				 * pin before unlock_page()
+				 */
+				get_page(page); /* for putback_lru_page() */
 				__munlock_isolated_page(page);
 				unlock_page(page);
+				put_page(page); /* from follow_page_mask() */
 			}
 		}
 	}
 
-	/* Phase 3: page putback for pages that qualified for the fast path */
+	/*
+	 * Phase 3: page putback for pages that qualified for the fast path
+	 * This will also call put_page() to return pin from follow_page_mask()
+	 */
 	if (pagevec_count(&pvec_putback))
 		__putback_lru_fast(&pvec_putback, pgrescued);
 
-	/* Phase 4: put_page to return pin from follow_page_mask() */
-	for (i = 0; i < nr; i++) {
-		struct page *page = pvec->pages[i];
-
-		if (page)
-			put_page(page);
-	}
-
 	pagevec_reinit(pvec);
 }
 
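The slow path in the second hunk encodes a general ordering rule: the last
reference to a page must not be dropped before unlock_page() runs, so an
extra pin is taken for putback_lru_page() and the original
follow_page_mask() pin is released only after the unlock. A minimal sketch
of that ordering under the same toy model (the toy_* names are hypothetical
stand-ins, not kernel API):

#include <assert.h>
#include <stdio.h>

struct toy_page { int refcount; int locked; };

static void toy_get(struct toy_page *p) { p->refcount++; }
static void toy_put(struct toy_page *p) { p->refcount--; }

/* Unlocking must happen while at least one reference is still held. */
static void toy_unlock(struct toy_page *p)
{
	assert(p->refcount > 0);
	p->locked = 0;
}

/* Stand-in for putback_lru_page(): consumes one reference. */
static void toy_putback(struct toy_page *p) { toy_put(p); }

/* Entered with the lookup pin held and the page locked. */
static void slow_path(struct toy_page *p)
{
	toy_get(p);	/* extra pin, consumed by the putback below */
	toy_putback(p);
	toy_unlock(p);	/* safe: the lookup pin is still held */
	toy_put(p);	/* finally return the follow_page_mask() pin */
}

int main(void)
{
	struct toy_page page = { .refcount = 1, .locked = 1 };

	slow_path(&page);
	assert(page.refcount == 0 && !page.locked);
	puts("unlock ran while a pin was still held");
	return 0;
}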