summaryrefslogtreecommitdiffstats
path: root/mm/rmap.c
diff options
context:
space:
mode:
authorMinchan Kim <minchan@kernel.org>2017-05-03 17:54:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-05-03 18:52:10 -0400
commite4b82222712ed15813d35204c91429883d27d1d9 (patch)
tree99454032438d5ee42907cc08f930d59ab7cedebd /mm/rmap.c
parent1df631ae19819cff343d316eda42eca32d3de7fc (diff)
mm: make rmap_one boolean function
rmap_one's return value controls whether rmap_work should contine to scan other ptes or not so it's target for changing to boolean. Return true if the scan should be continued. Otherwise, return false to stop the scanning. This patch makes rmap_one's return value to boolean. Link: http://lkml.kernel.org/r/1489555493-14659-10-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim <minchan@kernel.org> Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com> Cc: Hillf Danton <hillf.zj@alibaba-inc.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--mm/rmap.c30
1 file changed, 15 insertions, 15 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 3b40d47e3300..47e8dafc83c8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -724,7 +724,7 @@ struct page_referenced_arg {
724/* 724/*
725 * arg: page_referenced_arg will be passed 725 * arg: page_referenced_arg will be passed
726 */ 726 */
727static int page_referenced_one(struct page *page, struct vm_area_struct *vma, 727static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
728 unsigned long address, void *arg) 728 unsigned long address, void *arg)
729{ 729{
730 struct page_referenced_arg *pra = arg; 730 struct page_referenced_arg *pra = arg;
@@ -741,7 +741,7 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
741 if (vma->vm_flags & VM_LOCKED) { 741 if (vma->vm_flags & VM_LOCKED) {
742 page_vma_mapped_walk_done(&pvmw); 742 page_vma_mapped_walk_done(&pvmw);
743 pra->vm_flags |= VM_LOCKED; 743 pra->vm_flags |= VM_LOCKED;
744 return SWAP_FAIL; /* To break the loop */ 744 return false; /* To break the loop */
745 } 745 }
746 746
747 if (pvmw.pte) { 747 if (pvmw.pte) {
@@ -781,9 +781,9 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
781 } 781 }
782 782
783 if (!pra->mapcount) 783 if (!pra->mapcount)
784 return SWAP_SUCCESS; /* To break the loop */ 784 return false; /* To break the loop */
785 785
786 return SWAP_AGAIN; 786 return true;
787} 787}
788 788
789static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) 789static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
@@ -854,7 +854,7 @@ int page_referenced(struct page *page,
854 return pra.referenced; 854 return pra.referenced;
855} 855}
856 856
857static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, 857static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
858 unsigned long address, void *arg) 858 unsigned long address, void *arg)
859{ 859{
860 struct page_vma_mapped_walk pvmw = { 860 struct page_vma_mapped_walk pvmw = {
@@ -907,7 +907,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
907 } 907 }
908 } 908 }
909 909
910 return SWAP_AGAIN; 910 return true;
911} 911}
912 912
913static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) 913static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
@@ -1290,7 +1290,7 @@ void page_remove_rmap(struct page *page, bool compound)
1290/* 1290/*
1291 * @arg: enum ttu_flags will be passed to this argument 1291 * @arg: enum ttu_flags will be passed to this argument
1292 */ 1292 */
1293static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 1293static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1294 unsigned long address, void *arg) 1294 unsigned long address, void *arg)
1295{ 1295{
1296 struct mm_struct *mm = vma->vm_mm; 1296 struct mm_struct *mm = vma->vm_mm;
@@ -1301,12 +1301,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1301 }; 1301 };
1302 pte_t pteval; 1302 pte_t pteval;
1303 struct page *subpage; 1303 struct page *subpage;
1304 int ret = SWAP_AGAIN; 1304 bool ret = true;
1305 enum ttu_flags flags = (enum ttu_flags)arg; 1305 enum ttu_flags flags = (enum ttu_flags)arg;
1306 1306
1307 /* munlock has nothing to gain from examining un-locked vmas */ 1307 /* munlock has nothing to gain from examining un-locked vmas */
1308 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) 1308 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
1309 return SWAP_AGAIN; 1309 return true;
1310 1310
1311 if (flags & TTU_SPLIT_HUGE_PMD) { 1311 if (flags & TTU_SPLIT_HUGE_PMD) {
1312 split_huge_pmd_address(vma, address, 1312 split_huge_pmd_address(vma, address,
@@ -1329,7 +1329,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1329 */ 1329 */
1330 mlock_vma_page(page); 1330 mlock_vma_page(page);
1331 } 1331 }
1332 ret = SWAP_FAIL; 1332 ret = false;
1333 page_vma_mapped_walk_done(&pvmw); 1333 page_vma_mapped_walk_done(&pvmw);
1334 break; 1334 break;
1335 } 1335 }
@@ -1347,7 +1347,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1347 if (!(flags & TTU_IGNORE_ACCESS)) { 1347 if (!(flags & TTU_IGNORE_ACCESS)) {
1348 if (ptep_clear_flush_young_notify(vma, address, 1348 if (ptep_clear_flush_young_notify(vma, address,
1349 pvmw.pte)) { 1349 pvmw.pte)) {
1350 ret = SWAP_FAIL; 1350 ret = false;
1351 page_vma_mapped_walk_done(&pvmw); 1351 page_vma_mapped_walk_done(&pvmw);
1352 break; 1352 break;
1353 } 1353 }
@@ -1437,14 +1437,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1437 */ 1437 */
1438 set_pte_at(mm, address, pvmw.pte, pteval); 1438 set_pte_at(mm, address, pvmw.pte, pteval);
1439 SetPageSwapBacked(page); 1439 SetPageSwapBacked(page);
1440 ret = SWAP_FAIL; 1440 ret = false;
1441 page_vma_mapped_walk_done(&pvmw); 1441 page_vma_mapped_walk_done(&pvmw);
1442 break; 1442 break;
1443 } 1443 }
1444 1444
1445 if (swap_duplicate(entry) < 0) { 1445 if (swap_duplicate(entry) < 0) {
1446 set_pte_at(mm, address, pvmw.pte, pteval); 1446 set_pte_at(mm, address, pvmw.pte, pteval);
1447 ret = SWAP_FAIL; 1447 ret = false;
1448 page_vma_mapped_walk_done(&pvmw); 1448 page_vma_mapped_walk_done(&pvmw);
1449 break; 1449 break;
1450 } 1450 }
@@ -1636,7 +1636,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
1636 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 1636 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1637 continue; 1637 continue;
1638 1638
1639 if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg)) 1639 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1640 break; 1640 break;
1641 if (rwc->done && rwc->done(page)) 1641 if (rwc->done && rwc->done(page))
1642 break; 1642 break;
@@ -1690,7 +1690,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
1690 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 1690 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1691 continue; 1691 continue;
1692 1692
1693 if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg)) 1693 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1694 goto done; 1694 goto done;
1695 if (rwc->done && rwc->done(page)) 1695 if (rwc->done && rwc->done(page))
1696 goto done; 1696 goto done;