Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
 arch/powerpc/mm/hugetlbpage.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 2cb278a2f658..a9dbb27ca887 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -109,7 +109,7 @@ int pgd_huge(pgd_t pgd)
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	/* Only called for hugetlbfs pages, hence can ignore THP */
-	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
+	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }
 
 static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -682,28 +682,35 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	} while (addr = next, addr != end);
 }
 
+/*
+ * We are holding mmap_sem, so a parallel huge page collapse cannot run.
+ * To prevent hugepage split, disable irq.
+ */
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
 	pte_t *ptep;
 	struct page *page;
 	unsigned shift;
-	unsigned long mask;
+	unsigned long mask, flags;
 	/*
 	 * Transparent hugepages are handled by generic code. We can skip them
 	 * here.
 	 */
+	local_irq_save(flags);
 	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
 
 	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
+	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
+		local_irq_restore(flags);
 		return ERR_PTR(-EINVAL);
-
+	}
 	mask = (1UL << shift) - 1;
 	page = pte_page(*ptep);
 	if (page)
 		page += (address & mask) / PAGE_SIZE;
 
+	local_irq_restore(flags);
 	return page;
 }
 
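Why disabling interrupts is enough here: the new comment notes that mmap_sem already excludes a parallel collapse, and the irq-off window prevents a split. Presumably (the companion changes lie outside this file-limited view) a THP split or collapse on powerpc waits for an IPI to reach every CPU before it proceeds, so a walker running with interrupts hard-disabled holds it off. A minimal sketch of the caller pattern this imposes; walk_example() is hypothetical, and only find_linux_pte_or_hugepte() comes from the patch:

	/* Hypothetical caller: bracket the lockless walk with irq-off. */
	static struct page *walk_example(struct mm_struct *mm, unsigned long addr)
	{
		unsigned long flags;
		unsigned shift;
		pte_t *ptep;
		struct page *page = NULL;

		local_irq_save(flags);	/* holds off a split/collapse under us */
		ptep = find_linux_pte_or_hugepte(mm->pgd, addr, &shift);
		if (ptep && shift)	/* huge mapping found */
			page = pte_page(*ptep);	/* deref only inside the window */
		local_irq_restore(flags);

		return page;
	}
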
@@ -950,9 +957,12 @@ void flush_dcache_icache_hugepage(struct page *page)
  *
  * So long as we atomically load page table pointers we are safe against teardown,
  * we can follow the address down to the the page and take a ref on it.
+ * This function need to be called with interrupts disabled. We use this variant
+ * when we have MSR[EE] = 0 but the paca->soft_enabled = 1
  */
 
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+				   unsigned *shift)
 {
 	pgd_t pgd, *pgdp;
 	pud_t pud, *pudp;
@@ -1031,7 +1041,7 @@ out:
 	*shift = pdshift;
 	return ret_pte;
 }
-EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
+EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
 
 int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
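This diff is limited to hugetlbpage.c, so the other half of the rename is not shown. A header presumably regains the old name as a checking wrapper, which would also explain the new comment about MSR[EE] = 0 with paca->soft_enabled = 1: on powerpc, arch_irqs_disabled() reads the soft-disable state, so a hard-disabled but soft-enabled caller would trip the check and must call the __ variant directly. A minimal sketch of such a wrapper, assuming arch_irqs_disabled() as the check (inferred, not part of this diff):

	static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir,
						       unsigned long ea,
						       unsigned *shift)
	{
		/* The lockless walk is only safe with interrupts off. */
		WARN_ON_ONCE(!arch_irqs_disabled());
		return __find_linux_pte_or_hugepte(pgdir, ea, shift);
	}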