about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/hugetlbpage.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--arch/powerpc/mm/hugetlbpage.c251
1 file changed, 128 insertions, 123 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 237c8e5f2640..2865077e0159 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -21,6 +21,9 @@
21#include <asm/pgalloc.h> 21#include <asm/pgalloc.h>
22#include <asm/tlb.h> 22#include <asm/tlb.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/hugetlb.h>
25
26#ifdef CONFIG_HUGETLB_PAGE
24 27
25#define PAGE_SHIFT_64K 16 28#define PAGE_SHIFT_64K 16
26#define PAGE_SHIFT_16M 24 29#define PAGE_SHIFT_16M 24
@@ -100,66 +103,6 @@ int pgd_huge(pgd_t pgd)
100} 103}
101#endif 104#endif
102 105
103/*
104 * We have 4 cases for pgds and pmds:
105 * (1) invalid (all zeroes)
106 * (2) pointer to next table, as normal; bottom 6 bits == 0
107 * (3) leaf pte for huge page, bottom two bits != 00
108 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
109 */
110pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
111{
112 pgd_t *pg;
113 pud_t *pu;
114 pmd_t *pm;
115 pte_t *ret_pte;
116 hugepd_t *hpdp = NULL;
117 unsigned pdshift = PGDIR_SHIFT;
118
119 if (shift)
120 *shift = 0;
121
122 pg = pgdir + pgd_index(ea);
123
124 if (pgd_huge(*pg)) {
125 ret_pte = (pte_t *) pg;
126 goto out;
127 } else if (is_hugepd(pg))
128 hpdp = (hugepd_t *)pg;
129 else if (!pgd_none(*pg)) {
130 pdshift = PUD_SHIFT;
131 pu = pud_offset(pg, ea);
132
133 if (pud_huge(*pu)) {
134 ret_pte = (pte_t *) pu;
135 goto out;
136 } else if (is_hugepd(pu))
137 hpdp = (hugepd_t *)pu;
138 else if (!pud_none(*pu)) {
139 pdshift = PMD_SHIFT;
140 pm = pmd_offset(pu, ea);
141
142 if (pmd_huge(*pm)) {
143 ret_pte = (pte_t *) pm;
144 goto out;
145 } else if (is_hugepd(pm))
146 hpdp = (hugepd_t *)pm;
147 else if (!pmd_none(*pm))
148 return pte_offset_kernel(pm, ea);
149 }
150 }
151 if (!hpdp)
152 return NULL;
153
154 ret_pte = hugepte_offset(hpdp, ea, pdshift);
155 pdshift = hugepd_shift(*hpdp);
156out:
157 if (shift)
158 *shift = pdshift;
159 return ret_pte;
160}
161EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
162
163pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 106pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
164{ 107{
165 return find_linux_pte_or_hugepte(mm->pgd, addr, NULL); 108 return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
@@ -753,69 +696,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
753 return NULL; 696 return NULL;
754} 697}
755 698
756int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
757 unsigned long end, int write, struct page **pages, int *nr)
758{
759 unsigned long mask;
760 unsigned long pte_end;
761 struct page *head, *page, *tail;
762 pte_t pte;
763 int refs;
764
765 pte_end = (addr + sz) & ~(sz-1);
766 if (pte_end < end)
767 end = pte_end;
768
769 pte = *ptep;
770 mask = _PAGE_PRESENT | _PAGE_USER;
771 if (write)
772 mask |= _PAGE_RW;
773
774 if ((pte_val(pte) & mask) != mask)
775 return 0;
776
777 /* hugepages are never "special" */
778 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
779
780 refs = 0;
781 head = pte_page(pte);
782
783 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
784 tail = page;
785 do {
786 VM_BUG_ON(compound_head(page) != head);
787 pages[*nr] = page;
788 (*nr)++;
789 page++;
790 refs++;
791 } while (addr += PAGE_SIZE, addr != end);
792
793 if (!page_cache_add_speculative(head, refs)) {
794 *nr -= refs;
795 return 0;
796 }
797
798 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
799 /* Could be optimized better */
800 *nr -= refs;
801 while (refs--)
802 put_page(head);
803 return 0;
804 }
805
806 /*
807 * Any tail page need their mapcount reference taken before we
808 * return.
809 */
810 while (refs--) {
811 if (PageTail(tail))
812 get_huge_page_tail(tail);
813 tail++;
814 }
815
816 return 1;
817}
818
819static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, 699static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
820 unsigned long sz) 700 unsigned long sz)
821{ 701{
@@ -1032,3 +912,128 @@ void flush_dcache_icache_hugepage(struct page *page)
1032 } 912 }
1033 } 913 }
1034} 914}
915
916#endif /* CONFIG_HUGETLB_PAGE */
917
918/*
919 * We have 4 cases for pgds and pmds:
920 * (1) invalid (all zeroes)
921 * (2) pointer to next table, as normal; bottom 6 bits == 0
922 * (3) leaf pte for huge page, bottom two bits != 00
923 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
924 */
925pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
926{
927 pgd_t *pg;
928 pud_t *pu;
929 pmd_t *pm;
930 pte_t *ret_pte;
931 hugepd_t *hpdp = NULL;
932 unsigned pdshift = PGDIR_SHIFT;
933
934 if (shift)
935 *shift = 0;
936
937 pg = pgdir + pgd_index(ea);
938
939 if (pgd_huge(*pg)) {
940 ret_pte = (pte_t *) pg;
941 goto out;
942 } else if (is_hugepd(pg))
943 hpdp = (hugepd_t *)pg;
944 else if (!pgd_none(*pg)) {
945 pdshift = PUD_SHIFT;
946 pu = pud_offset(pg, ea);
947
948 if (pud_huge(*pu)) {
949 ret_pte = (pte_t *) pu;
950 goto out;
951 } else if (is_hugepd(pu))
952 hpdp = (hugepd_t *)pu;
953 else if (!pud_none(*pu)) {
954 pdshift = PMD_SHIFT;
955 pm = pmd_offset(pu, ea);
956
957 if (pmd_huge(*pm)) {
958 ret_pte = (pte_t *) pm;
959 goto out;
960 } else if (is_hugepd(pm))
961 hpdp = (hugepd_t *)pm;
962 else if (!pmd_none(*pm))
963 return pte_offset_kernel(pm, ea);
964 }
965 }
966 if (!hpdp)
967 return NULL;
968
969 ret_pte = hugepte_offset(hpdp, ea, pdshift);
970 pdshift = hugepd_shift(*hpdp);
971out:
972 if (shift)
973 *shift = pdshift;
974 return ret_pte;
975}
976EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
977
978int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
979 unsigned long end, int write, struct page **pages, int *nr)
980{
981 unsigned long mask;
982 unsigned long pte_end;
983 struct page *head, *page, *tail;
984 pte_t pte;
985 int refs;
986
987 pte_end = (addr + sz) & ~(sz-1);
988 if (pte_end < end)
989 end = pte_end;
990
991 pte = *ptep;
992 mask = _PAGE_PRESENT | _PAGE_USER;
993 if (write)
994 mask |= _PAGE_RW;
995
996 if ((pte_val(pte) & mask) != mask)
997 return 0;
998
999 /* hugepages are never "special" */
1000 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1001
1002 refs = 0;
1003 head = pte_page(pte);
1004
1005 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
1006 tail = page;
1007 do {
1008 VM_BUG_ON(compound_head(page) != head);
1009 pages[*nr] = page;
1010 (*nr)++;
1011 page++;
1012 refs++;
1013 } while (addr += PAGE_SIZE, addr != end);
1014
1015 if (!page_cache_add_speculative(head, refs)) {
1016 *nr -= refs;
1017 return 0;
1018 }
1019
1020 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1021 /* Could be optimized better */
1022 *nr -= refs;
1023 while (refs--)
1024 put_page(head);
1025 return 0;
1026 }
1027
1028 /*
1029 * Any tail page need their mapcount reference taken before we
1030 * return.
1031 */
1032 while (refs--) {
1033 if (PageTail(tail))
1034 get_huge_page_tail(tail);
1035 tail++;
1036 }
1037
1038 return 1;
1039}