author     Jérôme Glisse <jglisse@redhat.com>              2017-08-31 17:17:27 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-08-31 19:12:59 -0400
commit     369ea8242c0fb5239b4ddf0dc568f694bd244de4
tree       a3d86efccd044739fd13acaea9c98c9bf14f1b94  /mm/rmap.c
parent     a4d1a885251382250ec315482bdd8ca52dd61e6a
mm/rmap: update to new mmu_notifier semantic v2
Replace all mmu_notifier_invalidate_page() calls with *_invalidate_range(),
and make sure each is bracketed by calls to *_invalidate_range_start()/end().

Note that because we cannot presume the pmd or pte value, we have to assume
the worst and unconditionally report an invalidation as happening.
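
In practice the change converts one per-page callback into a bracketed
sequence. A minimal sketch of the new calling pattern, with locking and the
actual page-table updates elided:

        mmu_notifier_invalidate_range_start(mm, start, end);
        /* ... clear or write-protect ptes/pmds under the page-table lock,
         * reporting each changed entry inside the bracket ... */
        mmu_notifier_invalidate_range(mm, addr, addr + PAGE_SIZE);
        /* ... */
        mmu_notifier_invalidate_range_end(mm, start, end);
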
Changed since v2:
- try_to_unmap_one(): only a single call to mmu_notifier_invalidate_range()
- compute end with PAGE_SIZE << compound_order(page)
- fix PageHuge() case in try_to_unmap_one()
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Bernhard Held <berny156@gmx.de>
Cc: Adam Borowski <kilobyte@angband.pl>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: axie <axie@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 35 ++++++++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)
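
Both functions compute the worst-case invalidation range the same way,
clamped to the end of the VMA. As an illustration of the arithmetic (the
concrete sizes assume a 4KB-page x86-64 configuration):

        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
        /* order 0 (normal page):     4KB << 0 = 4KB range */
        /* order 9 (pmd-mapped THP):  4KB << 9 = 2MB range */
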
@@ -887,11 +887,21 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                 .address = address,
                 .flags = PVMW_SYNC,
         };
+        unsigned long start = address, end;
         int *cleaned = arg;
 
+        /*
+         * We have to assume the worst case, i.e. a pmd, for invalidation.
+         * Note that the page cannot be freed from this function.
+         */
+        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
         while (page_vma_mapped_walk(&pvmw)) {
+                unsigned long cstart, cend;
                 int ret = 0;
-                address = pvmw.address;
+
+                cstart = address = pvmw.address;
                 if (pvmw.pte) {
                         pte_t entry;
                         pte_t *pte = pvmw.pte;
@@ -904,6 +914,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                         entry = pte_wrprotect(entry);
                         entry = pte_mkclean(entry);
                         set_pte_at(vma->vm_mm, address, pte, entry);
+                        cend = cstart + PAGE_SIZE;
                         ret = 1;
                 } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -918,6 +929,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                         entry = pmd_wrprotect(entry);
                         entry = pmd_mkclean(entry);
                         set_pmd_at(vma->vm_mm, address, pmd, entry);
+                        cstart &= PMD_MASK;
+                        cend = cstart + PMD_SIZE;
                         ret = 1;
 #else
                         /* unexpected pmd-mapped page? */
@@ -926,11 +939,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                 }
 
                 if (ret) {
-                        mmu_notifier_invalidate_page(vma->vm_mm, address);
+                        mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
                         (*cleaned)++;
                 }
         }
 
+        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
         return true;
 }
 
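In the pmd branch above, the cleaned entry maps a whole huge page, so the
notified range is widened to pmd granularity. Restating that arithmetic with
the assumptions spelled out (PMD_SIZE is 2MB with 4KB pages on x86-64):

        cstart &= PMD_MASK;          /* round down to the pmd boundary */
        cend = cstart + PMD_SIZE;    /* cover the entire pmd mapping */
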
@@ -1324,6 +1339,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         pte_t pteval;
         struct page *subpage;
         bool ret = true;
+        unsigned long start = address, end;
         enum ttu_flags flags = (enum ttu_flags)arg;
 
         /* munlock has nothing to gain from examining un-locked vmas */
@@ -1335,6 +1351,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 flags & TTU_MIGRATION, page);
         }
 
+        /*
+         * We have to assume the worst case, i.e. a pmd, for invalidation.
+         * Note that the page cannot be freed in this function, as the
+         * caller of try_to_unmap() must hold a reference on the page.
+         */
+        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
         while (page_vma_mapped_walk(&pvmw)) {
                 /*
                  * If the page is mlock()d, we cannot swap it out.
@@ -1445,6 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
                                 WARN_ON_ONCE(1);
                                 ret = false;
+                                /* We have to invalidate as we cleared the pte */
                                 page_vma_mapped_walk_done(&pvmw);
                                 break;
                         }
@@ -1490,8 +1515,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 discard:
                 page_remove_rmap(subpage, PageHuge(page));
                 put_page(page);
-                mmu_notifier_invalidate_page(mm, address);
+                mmu_notifier_invalidate_range(mm, address,
+                                              address + PAGE_SIZE);
         }
+
+        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
         return ret;
 }
 
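For mmu_notifier users, the visible effect is that the per-page
invalidate_page() callback goes away in favor of the range calls above. A
hypothetical listener (the demo_* names are invented for illustration; the
ops signatures match the mmu_notifier interface of this kernel generation)
would hook the range pair roughly like this:

        static void demo_invalidate_range_start(struct mmu_notifier *mn,
                                                struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long end)
        {
                /* stop secondary-MMU access to [start, end) before the
                 * primary page-table entries change */
        }

        static void demo_invalidate_range_end(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long start,
                                              unsigned long end)
        {
                /* [start, end) is stable again; mappings may be
                 * re-established by faulting them back in */
        }

        static const struct mmu_notifier_ops demo_mmu_notifier_ops = {
                .invalidate_range_start = demo_invalidate_range_start,
                .invalidate_range_end   = demo_invalidate_range_end,
        };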