author    Hillf Danton <dhillf@gmail.com>    2012-03-23 18:01:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-03-23 19:58:31 -0400
commit    6629326b89b6e69cc44276e1649a31158bb2c819
tree      ca9463de7f327189cece1111112d2641114f3acc    /mm/hugetlb.c
parent    934e18b5cb4531cc6e81865bf54115cfd21d1ac6
mm: hugetlb: cleanup duplicated code in unmapping vm range
Fix code duplication in __unmap_hugepage_range(): the huge_pte_none() check
and the pte_page() lookup were performed in two places in the per-pte loop;
do them once, up front.

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--    mm/hugetlb.c    25
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index afa057a1d3fe..b8ce6f450956 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2331,16 +2331,23 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (huge_pmd_unshare(mm, &address, ptep))
 			continue;
 
+		pte = huge_ptep_get(ptep);
+		if (huge_pte_none(pte))
+			continue;
+
+		/*
+		 * HWPoisoned hugepage is already unmapped and dropped reference
+		 */
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+			continue;
+
+		page = pte_page(pte);
 		/*
 		 * If a reference page is supplied, it is because a specific
 		 * page is being unmapped, not a range. Ensure the page we
 		 * are about to unmap is the actual page of interest.
 		 */
 		if (ref_page) {
-			pte = huge_ptep_get(ptep);
-			if (huge_pte_none(pte))
-				continue;
-			page = pte_page(pte);
 			if (page != ref_page)
 				continue;
 
@@ -2353,16 +2360,6 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		}
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
-		if (huge_pte_none(pte))
-			continue;
-
-		/*
-		 * HWPoisoned hugepage is already unmapped and dropped reference
-		 */
-		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
-			continue;
-
-		page = pte_page(pte);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
 		list_add(&page->lru, &page_list);
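
For reference, the per-pte loop body of __unmap_hugepage_range() reads roughly as follows once both hunks are applied. This is a sketch assembled only from the context and added lines shown above; code that the hunks skip over (the remainder of the ref_page block and the surrounding loop) is marked with an elision comment rather than reconstructed, so it is not a compilable excerpt of mm/hugetlb.c.

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		/* the none/hwpoison checks and pte_page() now run once, up front */
		pte = huge_ptep_get(ptep);
		if (huge_pte_none(pte))
			continue;

		/*
		 * HWPoisoned hugepage is already unmapped and dropped reference
		 */
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
			continue;

		page = pte_page(pte);
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			if (page != ref_page)
				continue;

			/* ... lines elided by the diff ... */
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);

The net effect is that huge_pte_none(), the hwpoison check and pte_page() each run once per iteration, before the ref_page comparison, instead of being split between the ref_page path and the post-clear path.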