author     Jérôme Glisse <jglisse@redhat.com>              2018-12-28 03:38:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 15:11:50 -0500
commit     ac46d4f3c43241ffa23d5bf36153a0830c0e02cc
tree       8fb9505fd38170afe5c9ba8ff0ed54bbe6caaf4d /mm/rmap.c
parent     5d6527a784f7a6d247961e046e830de8d71b47d1
mm/mmu_notifier: use structure for invalidate_range_start/end calls v2
To avoid having to change many call sites every time we want to add a
parameter, use a structure to group all parameters for the mmu_notifier
invalidate_range_start/end calls. No functional changes with this patch.
[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
From: Jérôme Glisse <jglisse@redhat.com>
Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3
fix build warning in migrate.c when CONFIG_MMU_NOTIFIER=n
Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
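
[Editor's note] The core idea of the patch is easiest to see outside the diff. Below is a minimal userspace sketch of the grouping pattern: only the type and function names mirror the kernel API as it appears in the diff further down; the struct layout details and function bodies are illustrative stand-ins, not kernel code.

/*
 * Minimal sketch (compiles as ordinary userspace C): bundle the
 * (mm, start, end) arguments into one range structure so a future
 * parameter can be added without editing every call site.
 */
#include <stdio.h>
#include <stddef.h>

struct mm_struct;       /* opaque stand-in for the kernel's mm_struct */

struct mmu_notifier_range {
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        /* A future parameter can be added here without editing every
         * caller that already passes a struct mmu_notifier_range *. */
};

static void mmu_notifier_range_init(struct mmu_notifier_range *range,
                                    struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
{
        range->mm = mm;
        range->start = start;
        range->end = end;
}

/* Callers now hand over one pointer instead of (mm, start, end). */
static void mmu_notifier_invalidate_range_start(const struct mmu_notifier_range *range)
{
        printf("invalidate_range_start: [%#lx, %#lx)\n", range->start, range->end);
}

static void mmu_notifier_invalidate_range_end(const struct mmu_notifier_range *range)
{
        printf("invalidate_range_end:   [%#lx, %#lx)\n", range->start, range->end);
}

int main(void)
{
        struct mmu_notifier_range range;

        /* Same shape as the converted call sites in the diff below. */
        mmu_notifier_range_init(&range, NULL, 0x1000, 0x2000);
        mmu_notifier_invalidate_range_start(&range);
        /* ... page-table updates happen between start and end ... */
        mmu_notifier_invalidate_range_end(&range);
        return 0;
}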
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c | 30
1 file changed, 18 insertions(+), 12 deletions(-)
@@ -889,15 +889,17 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                 .address = address,
                 .flags = PVMW_SYNC,
         };
-        unsigned long start = address, end;
+        struct mmu_notifier_range range;
         int *cleaned = arg;
 
         /*
          * We have to assume the worse case ie pmd for invalidation. Note that
          * the page can not be free from this function.
          */
-        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
-        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+        mmu_notifier_range_init(&range, vma->vm_mm, address,
+                                min(vma->vm_end, address +
+                                    (PAGE_SIZE << compound_order(page))));
+        mmu_notifier_invalidate_range_start(&range);
 
         while (page_vma_mapped_walk(&pvmw)) {
                 unsigned long cstart;
@@ -949,7 +951,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                 (*cleaned)++;
         }
 
-        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+        mmu_notifier_invalidate_range_end(&range);
 
         return true;
 }
@@ -1345,7 +1347,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         pte_t pteval;
         struct page *subpage;
         bool ret = true;
-        unsigned long start = address, end;
+        struct mmu_notifier_range range;
         enum ttu_flags flags = (enum ttu_flags)arg;
 
         /* munlock has nothing to gain from examining un-locked vmas */
@@ -1369,15 +1371,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
          * Note that the page can not be free in this function as call of
          * try_to_unmap() must hold a reference on the page.
          */
-        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+        mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start,
+                                min(vma->vm_end, vma->vm_start +
+                                    (PAGE_SIZE << compound_order(page))));
         if (PageHuge(page)) {
                 /*
                  * If sharing is possible, start and end will be adjusted
                  * accordingly.
                  */
-                adjust_range_if_pmd_sharing_possible(vma, &start, &end);
+                adjust_range_if_pmd_sharing_possible(vma, &range.start,
+                                                     &range.end);
         }
-        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+        mmu_notifier_invalidate_range_start(&range);
 
         while (page_vma_mapped_walk(&pvmw)) {
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
@@ -1428,9 +1433,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                  * we must flush them all. start/end were
                                  * already adjusted above to cover this range.
                                  */
-                                flush_cache_range(vma, start, end);
-                                flush_tlb_range(vma, start, end);
-                                mmu_notifier_invalidate_range(mm, start, end);
+                                flush_cache_range(vma, range.start, range.end);
+                                flush_tlb_range(vma, range.start, range.end);
+                                mmu_notifier_invalidate_range(mm, range.start,
+                                                              range.end);
 
                                 /*
                                  * The ref count of the PMD page was dropped
@@ -1650,7 +1656,7 @@ discard:
                 put_page(page);
         }
 
-        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+        mmu_notifier_invalidate_range_end(&range);
 
         return ret;
 }
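
[Editor's note] The payoff of this grouping shows up the next time a parameter is needed: a new field lands in struct mmu_notifier_range, and converted call sites such as page_mkclean_one() and try_to_unmap_one() above keep compiling unchanged, because they already pass the whole structure. A hypothetical continuation of the sketch above illustrates this; the event names are invented for illustration (later kernel releases did grow the structure in a broadly similar way, but the details are beyond this patch).

/* Hypothetical continuation of the earlier sketch: one new field. */
enum inval_event { INVAL_CLEAN, INVAL_UNMAP };  /* invented names */

struct mmu_notifier_range {
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        enum inval_event event; /* new parameter: no churn at the
                                 * invalidate_range_start/end call sites;
                                 * only range_init and the readers that
                                 * care about it need to change. */
};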