Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-	if (unlikely(is_vm_hugetlb_page(vma)))
-		pgoff = page->index << huge_page_order(page_hstate(page));
-
+	pgoff_t pgoff = page_to_pgoff(page);
 	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 
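Both removed computations (the PAGE_CACHE_SHIFT shift here, and the compound_order() shift in the rmap_walk_file() hunk below) are replaced by calls to page_to_pgoff(). That helper is defined outside mm/rmap.c, so it does not appear in this diffstat-limited view; the sketch below is only a reconstruction from the removed lines, assuming a page-based hugetlb check (PageHuge()) in place of the vma-based is_vm_hugetlb_page() test, and is not the actual header change:

static inline pgoff_t page_to_pgoff(struct page *page)
{
	/*
	 * Sketch only, reconstructed from the code removed above:
	 * hugetlbfs pages index in huge-page-sized units, everything
	 * else in PAGE_CACHE_SIZE units.  Assumes the <linux/hugetlb.h>
	 * and <linux/pagemap.h> helpers are in scope.
	 */
	if (unlikely(PageHuge(page)))
		return page->index << huge_page_order(page_hstate(page));
	return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

The same substitution appears again in the rmap_walk_anon() and rmap_walk_file() hunks further down, so __vma_address() and both rmap walkers now agree on how a hugepage's pgoff is derived.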
@@ -569,6 +565,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd = NULL;
+	pmd_t pmde;
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
@@ -579,7 +576,13 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 		goto out;
 
 	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd))
+	/*
+	 * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
+	 * without holding anon_vma lock for write. So when looking for a
+	 * genuine pmde (in which to find pte), test present and !THP together.
+	 */
+	pmde = ACCESS_ONCE(*pmd);
+	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
 		pmd = NULL;
 out:
 	return pmd;
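The new comment points at THP paths that clear and reinstall a pmd without holding the anon_vma lock for write. The fragment below is illustrative only, not code from this patch: vma, haddr, mm and pmd stand for the usual fault-path/walker locals, and the writer side is heavily simplified. It shows why testing *pmd twice (once for present, once for trans-huge) can observe two different values, while a single ACCESS_ONCE() snapshot keeps both checks consistent.

/* Writer side (simplified, illustrative): the pmd is transiently
 * cleared and later reinstalled, with no anon_vma write lock held.
 */
pmd_t entry = pmdp_clear_flush(vma, haddr, pmd);	/* *pmd momentarily none */
/* ... rework the mapping ... */
set_pmd_at(mm, haddr, pmd, entry);			/* *pmd valid again */

/* Reader side, as mm_find_pmd() now does it: one snapshot, then both
 * checks are made against that same value.
 */
pmd_t pmde = ACCESS_ONCE(*pmd);
if (!pmd_present(pmde) || pmd_trans_huge(pmde))
	pmd = NULL;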
@@ -615,9 +618,6 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 	if (!pmd)
 		return NULL;
 
-	if (pmd_trans_huge(*pmd))
-		return NULL;
-
 	pte = pte_offset_map(pmd, address);
 	/* Make a quick check before getting the lock */
 	if (!sync && !pte_present(*pte)) {
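Dropping the caller-side pmd_trans_huge(*pmd) test here follows from the mm_find_pmd() hunk above: a trans-huge (or transiently cleared) pmd now comes back as NULL and is caught by the !pmd check a few lines earlier, so pte_offset_map() only ever runs on a genuine page-table pmd.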
@@ -1635,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
@@ -1676,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page->index << compound_order(page);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
 