Diffstat (limited to 'mm/mlock.c')
-rw-r--r--  mm/mlock.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 1050511f8b2b..0dd9ca18e19e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -380,6 +380,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 	pte = get_locked_pte(vma->vm_mm, start, &ptl);
 	/* Make sure we do not cross the page table boundary */
 	end = pgd_addr_end(start, end);
+	end = p4d_addr_end(start, end);
 	end = pud_addr_end(start, end);
 	end = pmd_addr_end(start, end);
 
@@ -442,7 +443,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
 	while (start < end) {
 		struct page *page;
-		unsigned int page_mask;
+		unsigned int page_mask = 0;
 		unsigned long page_increm;
 		struct pagevec pvec;
 		struct zone *zone;
@@ -456,8 +457,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		 * suits munlock very well (and if somehow an abnormal page
 		 * has sneaked into the range, we won't oops here: great).
 		 */
-		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
-				&page_mask);
+		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
 
 		if (page && !IS_ERR(page)) {
 			if (PageTransTail(page)) {
@@ -468,8 +468,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 				/*
 				 * Any THP page found by follow_page_mask() may
 				 * have gotten split before reaching
-				 * munlock_vma_page(), so we need to recompute
-				 * the page_mask here.
+				 * munlock_vma_page(), so we need to compute
+				 * the page_mask here instead.
 				 */
 				page_mask = munlock_vma_page(page);
 				unlock_page(page);
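
The first hunk inserts a p4d_addr_end() clamp between the existing pgd and pud clamps, so that __munlock_pagevec_fill() clamps its range at every level of the page-table hierarchy (pgd, p4d, pud, pmd) and never walks past the PTE page returned by get_locked_pte(), as the in-code comment requires. Below is a minimal stand-alone sketch of how one such clamp behaves, assuming x86-64-style geometry (4 KiB base pages, 2 MiB PMD coverage); pmd_addr_end_sketch() and its constants are illustrative stand-ins, not the kernel's own definitions.

/* Illustrative only: mimics the kernel's *_addr_end() clamping pattern
 * with assumed x86-64-style constants; not taken from the kernel headers. */
#include <stdio.h>

#define PMD_SHIFT 21UL                      /* assumed: one PMD entry maps 2 MiB */
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

/* Return the next PMD boundary after addr, unless end comes first. */
static unsigned long pmd_addr_end_sketch(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long start = 0x7f0000001000UL;
	unsigned long end   = 0x7f0000400000UL;   /* crosses a PMD boundary */

	/* Clamped so a caller filling a pagevec of PTEs stays within the
	 * page table that covers start. */
	printf("end clamped to %#lx\n", pmd_addr_end_sketch(start, end));
	return 0;
}

Stacking the clamps in pgd, p4d, pud, pmd order can only shrink end; on configurations where the p4d level is folded away, the added p4d_addr_end() call is effectively a no-op.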
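The remaining hunks switch the lookup from follow_page_mask() to follow_page(), so page_mask is no longer handed back by the lookup; it now starts at 0 and is only set when munlock_vma_page() reports a THP head, which is why the hunk-4 comment says the mask is computed "here instead". The sketch below shows how that default drives the loop stride, assuming the increment has the usual 1 + (~(start >> PAGE_SHIFT) & page_mask) form (that code lies outside the hunks shown) and x86-64-style numbers (4 KiB pages, 2 MiB THP, so a head page's mask would be 511); stride_pages() and the sample values are hypothetical.

/* Hypothetical illustration of how page_mask selects the loop stride in
 * munlock_vma_pages_range(); the formula is assumed, not part of this diff. */
#include <stdio.h>

#define PAGE_SHIFT 12UL                     /* assumed: 4 KiB base pages */

static unsigned long stride_pages(unsigned long start, unsigned int page_mask)
{
	/* page_mask == 0: advance one base page; a THP head's mask covers
	 * the rest of the huge page from the current position. */
	return 1 + (~(start >> PAGE_SHIFT) & page_mask);
}

int main(void)
{
	unsigned long start = 0x7f0000200000UL;   /* 2 MiB aligned */

	printf("ordinary page: %lu page(s)\n", stride_pages(start, 0));
	printf("THP head:      %lu page(s)\n", stride_pages(start, 511));
	return 0;
}

With page_mask defaulting to 0, a lookup that returns an ordinary (or already-split) page advances one base page at a time, and only a still-intact THP head, via munlock_vma_page(), widens the stride.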