aboutsummaryrefslogtreecommitdiffstats
path: root/mm/mlock.c
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2016-01-15 19:52:35 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-01-15 20:56:32 -0500
commit7479df6da9506a624ae214588269ed42221494f9 (patch)
treee8bfc07724037bf238aa0d9d0caf6344fdfc1de5 /mm/mlock.c
parent7aef4172c7957d7e65fc172be4c99becaef855d4 (diff)
thp, mlock: do not allow huge pages in mlocked area
With new refcounting THP can belong to several VMAs. This makes it tricky to track THP pages when they are partially mlocked. It can lead to leaking mlocked pages to non-VM_LOCKED vmas and other problems. With this patch we will split all pages on mlock and avoid fault-in/collapse of new THP in VM_LOCKED vmas. I've tried an alternative approach: do not mark THP pages mlocked and keep them on normal LRUs. This way vmscan could try to split huge pages on memory pressure and free up subpages which don't belong to VM_LOCKED vmas. But this is a user-visible change: we screw up Mlocked accounting reported in meminfo, so I had to leave this approach aside. We can bring something better later, but this should be good enough for now. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Tested-by: Sasha Levin <sasha.levin@oracle.com> Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Acked-by: Jerome Marchand <jmarchan@redhat.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Rik van Riel <riel@redhat.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Steve Capper <steve.capper@linaro.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Christoph Lameter <cl@linux.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--mm/mlock.c51
1 files changed, 19 insertions, 32 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 9cb87cbc4071..c6b139ad356a 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -443,39 +443,26 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
443 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, 443 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
444 &page_mask); 444 &page_mask);
445 445
446 if (page && !IS_ERR(page)) { 446 if (page && !IS_ERR(page) && !PageTransCompound(page)) {
447 if (PageTransHuge(page)) { 447 /*
448 lock_page(page); 448 * Non-huge pages are handled in batches via
449 /* 449 * pagevec. The pin from follow_page_mask()
450 * Any THP page found by follow_page_mask() may 450 * prevents them from collapsing by THP.
451 * have gotten split before reaching 451 */
452 * munlock_vma_page(), so we need to recompute 452 pagevec_add(&pvec, page);
453 * the page_mask here. 453 zone = page_zone(page);
454 */ 454 zoneid = page_zone_id(page);
455 page_mask = munlock_vma_page(page);
456 unlock_page(page);
457 put_page(page); /* follow_page_mask() */
458 } else {
459 /*
460 * Non-huge pages are handled in batches via
461 * pagevec. The pin from follow_page_mask()
462 * prevents them from collapsing by THP.
463 */
464 pagevec_add(&pvec, page);
465 zone = page_zone(page);
466 zoneid = page_zone_id(page);
467 455
468 /* 456 /*
469 * Try to fill the rest of pagevec using fast 457 * Try to fill the rest of pagevec using fast
470 * pte walk. This will also update start to 458 * pte walk. This will also update start to
471 * the next page to process. Then munlock the 459 * the next page to process. Then munlock the
472 * pagevec. 460 * pagevec.
473 */ 461 */
474 start = __munlock_pagevec_fill(&pvec, vma, 462 start = __munlock_pagevec_fill(&pvec, vma,
475 zoneid, start, end); 463 zoneid, start, end);
476 __munlock_pagevec(&pvec, zone); 464 __munlock_pagevec(&pvec, zone);
477 goto next; 465 goto next;
478 }
479 } 466 }
480 /* It's a bug to munlock in the middle of a THP page */ 467 /* It's a bug to munlock in the middle of a THP page */
481 VM_BUG_ON((start >> PAGE_SHIFT) & page_mask); 468 VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);