Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	| 103
1 file changed, 0 insertions, 103 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5f67e7a4d1cc..17915fc389ff 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -756,109 +756,6 @@ void flush_dcache_icache_hugepage(struct page *page)
 
 #endif /* CONFIG_HUGETLB_PAGE */
 
-/*
- * We have 4 cases for pgds and pmds:
- * (1) invalid (all zeroes)
- * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page, _PAGE_PTE set
- * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
- *
- * So long as we atomically load page table pointers we are safe against
- * teardown, and we can follow the address down to the page and take a ref
- * on it. This function needs to be called with interrupts disabled. We use
- * this variant when we have MSR[EE] = 0 but paca->irq_soft_mask = IRQS_ENABLED.
- */
-pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
-			bool *is_thp, unsigned *hpage_shift)
-{
-	pgd_t pgd, *pgdp;
-	pud_t pud, *pudp;
-	pmd_t pmd, *pmdp;
-	pte_t *ret_pte;
-	hugepd_t *hpdp = NULL;
-	unsigned pdshift = PGDIR_SHIFT;
-
-	if (hpage_shift)
-		*hpage_shift = 0;
-
-	if (is_thp)
-		*is_thp = false;
-
-	pgdp = pgdir + pgd_index(ea);
-	pgd = READ_ONCE(*pgdp);
-	/*
-	 * Always operate on the local stack value. This makes sure the
-	 * value doesn't get updated by a parallel THP split/collapse,
-	 * page fault or page unmap. The returned pte_t * is still not
-	 * stable, so the caller must recheck it for the above conditions.
-	 */
-	if (pgd_none(pgd))
-		return NULL;
-	else if (pgd_huge(pgd)) {
-		ret_pte = (pte_t *) pgdp;
-		goto out;
-	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
-		hpdp = (hugepd_t *)&pgd;
-	else {
-		/*
-		 * Even if we end up with an unmap, the pgtable will not
-		 * be freed, because we do an RCU free and we are here
-		 * with interrupts disabled.
-		 */
-		pdshift = PUD_SHIFT;
-		pudp = pud_offset(&pgd, ea);
-		pud = READ_ONCE(*pudp);
-
-		if (pud_none(pud))
-			return NULL;
-		else if (pud_huge(pud)) {
-			ret_pte = (pte_t *) pudp;
-			goto out;
-		} else if (is_hugepd(__hugepd(pud_val(pud))))
-			hpdp = (hugepd_t *)&pud;
-		else {
-			pdshift = PMD_SHIFT;
-			pmdp = pmd_offset(&pud, ea);
-			pmd = READ_ONCE(*pmdp);
-			/*
-			 * A hugepage collapse is captured by pmd_none, because
-			 * it marks the pmd none and does an hpte invalidate.
-			 */
-			if (pmd_none(pmd))
-				return NULL;
-
-			if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
-				if (is_thp)
-					*is_thp = true;
-				ret_pte = (pte_t *) pmdp;
-				goto out;
-			}
-			/*
-			 * The pmd_large() check below handles the swap pmd pte;
-			 * we need both checks because they are config dependent.
-			 */
-			if (pmd_huge(pmd) || pmd_large(pmd)) {
-				ret_pte = (pte_t *) pmdp;
-				goto out;
-			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
-				hpdp = (hugepd_t *)&pmd;
-			else
-				return pte_offset_kernel(&pmd, ea);
-		}
-	}
-	if (!hpdp)
-		return NULL;
-
-	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
-	pdshift = hugepd_shift(*hpdp);
-out:
-	if (hpage_shift)
-		*hpage_shift = pdshift;
-	return ret_pte;
-}
-EXPORT_SYMBOL_GPL(__find_linux_pte);
-
 int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
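
The removed header comment enumerates four possible encodings for a pgd/pmd entry. As a minimal sketch, not part of this patch, the classification reads like this, reusing the same predicates the removed code calls (classify_pgd_entry is a hypothetical name):

/* Sketch: map a loaded pgd entry onto the four cases from the comment. */
static int classify_pgd_entry(pgd_t pgd)
{
	if (pgd_none(pgd))
		return 1;	/* (1) invalid: all zeroes */
	if (pgd_huge(pgd))
		return 3;	/* (3) leaf pte for a huge page, _PAGE_PTE set */
	if (is_hugepd(__hugepd(pgd_val(pgd))))
		return 4;	/* (4) hugepd pointer, size encoded in low bits */
	return 2;		/* (2) pointer to the next table level */
}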
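
The comment also insists that callers run with interrupts disabled, because the walked page tables are RCU-freed and disabled interrupts are what defer that free. A hedged caller sketch, assuming ordinary kernel context (lookup_pte_value is a hypothetical helper; local_irq_save() and READ_ONCE() are the stock primitives):

/*
 * Sketch only: snapshot the pte mapping 'ea' in 'mm'. Interrupts are
 * disabled across the walk so the RCU-freed page tables cannot go away
 * underneath us, and the pte value is copied out before interrupts are
 * re-enabled, since the returned pointer is only stable while they
 * stay off.
 */
static pte_t lookup_pte_value(struct mm_struct *mm, unsigned long ea)
{
	unsigned long flags;
	pte_t *ptep, pte = __pte(0);

	local_irq_save(flags);
	ptep = __find_linux_pte(mm->pgd, ea, NULL, NULL);
	if (ptep)
		pte = READ_ONCE(*ptep);
	local_irq_restore(flags);

	return pte;
}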
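
Finally, the is_thp and hpage_shift out-parameters let a caller tell a THP-mapped pmd apart from a hugetlb leaf and recover the mapping size; the function leaves *hpage_shift at 0 on the normal pte path. A sketch under the same interrupts-disabled assumption (huge_mapping_size is a hypothetical helper):

/*
 * Sketch: report the size of the mapping backing 'ea'. Must be called
 * with interrupts disabled, as above. Returns 0 if nothing is mapped.
 */
static unsigned long huge_mapping_size(pgd_t *pgdir, unsigned long ea)
{
	unsigned int shift = 0;
	bool is_thp = false;
	pte_t *ptep;

	ptep = __find_linux_pte(pgdir, ea, &is_thp, &shift);
	if (!ptep)
		return 0;
	if (!shift)
		shift = PAGE_SHIFT;	/* normal base page */
	/* is_thp distinguishes a THP pmd from a hugetlb leaf here */
	return 1UL << shift;
}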