| author | Jérôme Glisse <jglisse@redhat.com> | 2018-12-28 03:38:09 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-28 15:11:50 -0500 |
| commit | ac46d4f3c43241ffa23d5bf36153a0830c0e02cc | |
| tree | 8fb9505fd38170afe5c9ba8ff0ed54bbe6caaf4d /mm/huge_memory.c | |
| parent | 5d6527a784f7a6d247961e046e830de8d71b47d1 | |
mm/mmu_notifier: use structure for invalidate_range_start/end calls v2
To avoid having to change many call sites every time we want to add a
parameter, use a structure to group all parameters for the mmu_notifier
invalidate_range_start/end calls. No functional changes with this patch.
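For context, the grouped-parameter pattern looks roughly like this. This is a minimal sketch reconstructed from the call sites in the diff below, not the actual header; the real definitions live in include/linux/mmu_notifier.h and may carry additional fields:

```c
/* Sketch only: reconstructed from the call sites in this patch. */
struct mm_struct;	/* forward declaration, to keep the sketch self-contained */

struct mmu_notifier_range {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
};

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->mm = mm;
	range->start = start;
	range->end = end;
}

/*
 * The invalidate calls take one pointer instead of (mm, start, end), so
 * adding a new field touches this struct, not every caller.
 */
void mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range);
void mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range);
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range);
```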
[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
From: Jérôme Glisse <jglisse@redhat.com>
Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3
fix build warning in migrate.c when CONFIG_MMU_NOTIFIER=n (see the sketch after the sign-offs below)
Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
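On the v3 build-warning note: with CONFIG_MMU_NOTIFIER=n, callers still read range.start and range.end directly (migrate.c does, and so does __split_huge_pmd() in the diff below), so the disabled-notifier stub must still fill those fields while discarding the unused mm argument. Continuing the sketch above, one plausible shape of the stub; this is an assumption based on the fix description, not text quoted from the patch:

```c
#ifdef CONFIG_MMU_NOTIFIER

/* Full version: records mm as well (see the sketch above). */
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->mm = mm;
	range->start = start;
	range->end = end;
}

#else /* !CONFIG_MMU_NOTIFIER */

/*
 * Assumed stub: start/end are still initialized, because callers read
 * range.start/range.end whether or not notifiers are compiled in; the
 * mm argument is dropped at the macro level so it cannot trigger an
 * unused-variable warning.
 */
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range, mm, start, end) \
	_mmu_notifier_range_init(range, start, end)

#endif /* CONFIG_MMU_NOTIFIER */
```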
Diffstat (limited to 'mm/huge_memory.c')
| -rw-r--r-- | mm/huge_memory.c | 54 |
1 file changed, 25 insertions(+), 29 deletions(-)
```diff
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0c0e18409fde..05136ad0f325 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1134,8 +1134,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
 	int i;
 	vm_fault_t ret = 0;
 	struct page **pages;
-	unsigned long mmun_start;	/* For mmu_notifiers */
-	unsigned long mmun_end;		/* For mmu_notifiers */
+	struct mmu_notifier_range range;
 
 	pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
 			      GFP_KERNEL);
@@ -1173,9 +1172,9 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
 		cond_resched();
 	}
 
-	mmun_start = haddr;
-	mmun_end   = haddr + HPAGE_PMD_SIZE;
-	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+	mmu_notifier_range_init(&range, vma->vm_mm, haddr,
+				haddr + HPAGE_PMD_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
@@ -1220,8 +1219,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
 	 * No need to double call mmu_notifier->invalidate_range() callback as
 	 * the above pmdp_huge_clear_flush_notify() did already call it.
 	 */
-	mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
-					       mmun_end);
+	mmu_notifier_invalidate_range_only_end(&range);
 
 	ret |= VM_FAULT_WRITE;
 	put_page(page);
@@ -1231,7 +1229,7 @@ out:
 
 out_free_pages:
 	spin_unlock(vmf->ptl);
-	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(&range);
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		memcg = (void *)page_private(pages[i]);
 		set_page_private(pages[i], 0);
@@ -1248,8 +1246,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 	struct page *page = NULL, *new_page;
 	struct mem_cgroup *memcg;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-	unsigned long mmun_start;	/* For mmu_notifiers */
-	unsigned long mmun_end;		/* For mmu_notifiers */
+	struct mmu_notifier_range range;
 	gfp_t huge_gfp;			/* for allocation and charge */
 	vm_fault_t ret = 0;
 
@@ -1338,9 +1335,9 @@ alloc:
 					vma, HPAGE_PMD_NR);
 	__SetPageUptodate(new_page);
 
-	mmun_start = haddr;
-	mmun_end   = haddr + HPAGE_PMD_SIZE;
-	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+	mmu_notifier_range_init(&range, vma->vm_mm, haddr,
+				haddr + HPAGE_PMD_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
 
 	spin_lock(vmf->ptl);
 	if (page)
@@ -1375,8 +1372,7 @@ out_mn:
 	 * No need to double call mmu_notifier->invalidate_range() callback as
 	 * the above pmdp_huge_clear_flush_notify() did already call it.
 	 */
-	mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
-					       mmun_end);
+	mmu_notifier_invalidate_range_only_end(&range);
 out:
 	return ret;
 out_unlock:
@@ -2015,14 +2011,15 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long address)
 {
 	spinlock_t *ptl;
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long haddr = address & HPAGE_PUD_MASK;
+	struct mmu_notifier_range range;
 
-	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
-	ptl = pud_lock(mm, pud);
+	mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PUD_MASK,
+				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
+	ptl = pud_lock(vma->vm_mm, pud);
 	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
 		goto out;
-	__split_huge_pud_locked(vma, pud, haddr);
+	__split_huge_pud_locked(vma, pud, range.start);
 
 out:
 	spin_unlock(ptl);
@@ -2030,8 +2027,7 @@ out:
 	 * No need to double call mmu_notifier->invalidate_range() callback as
 	 * the above pudp_huge_clear_flush_notify() did already call it.
 	 */
-	mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
-					       HPAGE_PUD_SIZE);
+	mmu_notifier_invalidate_range_only_end(&range);
 }
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
@@ -2233,11 +2229,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long address, bool freeze, struct page *page)
 {
 	spinlock_t *ptl;
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long haddr = address & HPAGE_PMD_MASK;
+	struct mmu_notifier_range range;
 
-	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
-	ptl = pmd_lock(mm, pmd);
+	mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PMD_MASK,
+				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
+	ptl = pmd_lock(vma->vm_mm, pmd);
 
 	/*
 	 * If caller asks to setup a migration entries, we need a page to check
@@ -2253,7 +2250,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		clear_page_mlock(page);
 	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
 		goto out;
-	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
+	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
 out:
 	spin_unlock(ptl);
 	/*
@@ -2269,8 +2266,7 @@ out:
 	 * any further changes to individual pte will notify. So no need
 	 * to call mmu_notifier->invalidate_range()
 	 */
-	mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
-						HPAGE_PMD_SIZE);
+	mmu_notifier_invalidate_range_only_end(&range);
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
```
