diff options
| author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2015-05-11 02:28:29 -0400 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2015-05-11 21:04:29 -0400 |
| commit | 7b868e81be38d5ad4f4aa4be819a5fa543cc5ee8 (patch) | |
| tree | 42fddf3b104664e76634117569eeaafc733731a7 /arch/powerpc | |
| parent | 13bd817bb88499ce1dc1dfdaffcde17fa492aca5 (diff) | |
powerpc/mm: Return NULL for not present hugetlb page
We need to check whether the pte is present in follow_huge_addr() and
properly return NULL if the mapping is not present. Also use READ_ONCE
when dereferencing the pte_t address.
Without this patch, we may wrongly return a zero pfn page in
follow_huge_addr().
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc')
| -rw-r--r-- | arch/powerpc/mm/hugetlbpage.c | 25 |
1 file changed, 16 insertions, 9 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 0ce968b00b7c..3385e3d0506e 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
| @@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, | |||
| 689 | struct page * | 689 | struct page * |
| 690 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) | 690 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) |
| 691 | { | 691 | { |
| 692 | pte_t *ptep; | 692 | pte_t *ptep, pte; |
| 693 | struct page *page; | ||
| 694 | unsigned shift; | 693 | unsigned shift; |
| 695 | unsigned long mask, flags; | 694 | unsigned long mask, flags; |
| 695 | struct page *page = ERR_PTR(-EINVAL); | ||
| 696 | |||
| 697 | local_irq_save(flags); | ||
| 698 | ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); | ||
| 699 | if (!ptep) | ||
| 700 | goto no_page; | ||
| 701 | pte = READ_ONCE(*ptep); | ||
| 696 | /* | 702 | /* |
| 703 | * Verify it is a huge page else bail. | ||
| 697 | * Transparent hugepages are handled by generic code. We can skip them | 704 | * Transparent hugepages are handled by generic code. We can skip them |
| 698 | * here. | 705 | * here. |
| 699 | */ | 706 | */ |
| 700 | local_irq_save(flags); | 707 | if (!shift || pmd_trans_huge(__pmd(pte_val(pte)))) |
| 701 | ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); | 708 | goto no_page; |
| 702 | 709 | ||
| 703 | /* Verify it is a huge page else bail. */ | 710 | if (!pte_present(pte)) { |
| 704 | if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) { | 711 | page = NULL; |
| 705 | local_irq_restore(flags); | 712 | goto no_page; |
| 706 | return ERR_PTR(-EINVAL); | ||
| 707 | } | 713 | } |
| 708 | mask = (1UL << shift) - 1; | 714 | mask = (1UL << shift) - 1; |
| 709 | page = pte_page(*ptep); | 715 | page = pte_page(pte); |
| 710 | if (page) | 716 | if (page) |
| 711 | page += (address & mask) / PAGE_SIZE; | 717 | page += (address & mask) / PAGE_SIZE; |
| 712 | 718 | ||
| 719 | no_page: | ||
| 713 | local_irq_restore(flags); | 720 | local_irq_restore(flags); |
| 714 | return page; | 721 | return page; |
| 715 | } | 722 | } |
