author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2013-06-20 05:00:16 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-06-21 02:01:54 -0400
commit		29409997f8d06d693d82127d200eeaf48989fdd2 (patch)
tree		e659ae3df956d220b212e4f804ab31709efdd371
parent		074c2eae3e9b66c03a17a12df8f2cd19382b68ab (diff)
powerpc: move find_linux_pte_or_hugepte and gup_hugepte to common code

We will use this in a later patch for handling THP pages.

Reviewed-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--	arch/powerpc/include/asm/hugetlb.h		  8
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc64.h	 13
-rw-r--r--	arch/powerpc/include/asm/pgtable.h		  2
-rw-r--r--	arch/powerpc/mm/Makefile			  2
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c			251
5 files changed, 138 insertions(+), 138 deletions(-)
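For context, a minimal caller sketch (not part of this commit; the function name here is hypothetical) of how the now-common walker is typically used. It mirrors the huge_pte_offset() call visible in the diff below: *shift reports the huge-page shift and is left at 0 for a regular PTE.

/* Hypothetical usage sketch, not part of this patch. */
static pte_t *example_lookup(struct mm_struct *mm, unsigned long ea)
{
	unsigned shift;
	pte_t *ptep;

	/*
	 * Walk pgd/pud/pmd, returning a normal PTE, a huge-page leaf
	 * PTE, or a hugepd-backed PTE; NULL if nothing is mapped at 'ea'.
	 */
	ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &shift);
	if (!ptep)
		return NULL;

	/*
	 * shift == 0 means a regular page-sized PTE; otherwise it is
	 * the log2 of the huge-page size covering 'ea'.
	 */
	return ptep;
}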
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index f2498c8e595d..d750336b171d 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -191,8 +191,14 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 					    unsigned long vmaddr)
 {
 }
-#endif /* CONFIG_HUGETLB_PAGE */
 
+#define hugepd_shift(x) 0
+static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
+				    unsigned pdshift)
+{
+	return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
 
 /*
  * FSL Book3E platforms require special gpage handling - the gpages
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8f9da5e32fea..6c9323f3ab54 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -368,19 +368,6 @@ static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
 	return pt;
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-				 unsigned *shift);
-#else
-static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
-					       unsigned *shift)
-{
-	if (shift)
-		*shift = 0;
-	return find_linux_pte(pgdir, ea);
-}
-#endif /* !CONFIG_HUGETLB_PAGE */
-
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d53db937ec75..959d575c37dd 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -224,6 +224,8 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 #define pmd_large(pmd)		0
 #define has_transparent_hugepage() 0
 #endif
+pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+				 unsigned *shift);
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 26f29a772414..ff0379cdeeca 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -27,8 +27,8 @@ obj-$(CONFIG_44x) += 44x_mmu.o
 obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
 obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
-ifeq ($(CONFIG_HUGETLB_PAGE),y)
 obj-y				+= hugetlbpage.o
+ifeq ($(CONFIG_HUGETLB_PAGE),y)
 obj-$(CONFIG_PPC_STD_MMU_64)	+= hugetlbpage-hash64.o
 obj-$(CONFIG_PPC_BOOK3E_MMU)	+= hugetlbpage-book3e.o
 endif
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 237c8e5f2640..2865077e0159 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -21,6 +21,9 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/setup.h>
+#include <asm/hugetlb.h>
+
+#ifdef CONFIG_HUGETLB_PAGE
 
 #define PAGE_SHIFT_64K	16
 #define PAGE_SHIFT_16M	24
@@ -100,66 +103,6 @@ int pgd_huge(pgd_t pgd)
 }
 #endif
 
-/*
- * We have 4 cases for pgds and pmds:
- * (1) invalid (all zeroes)
- * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page, bottom two bits != 00
- * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
- */
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
-{
-	pgd_t *pg;
-	pud_t *pu;
-	pmd_t *pm;
-	pte_t *ret_pte;
-	hugepd_t *hpdp = NULL;
-	unsigned pdshift = PGDIR_SHIFT;
-
-	if (shift)
-		*shift = 0;
-
-	pg = pgdir + pgd_index(ea);
-
-	if (pgd_huge(*pg)) {
-		ret_pte = (pte_t *) pg;
-		goto out;
-	} else if (is_hugepd(pg))
-		hpdp = (hugepd_t *)pg;
-	else if (!pgd_none(*pg)) {
-		pdshift = PUD_SHIFT;
-		pu = pud_offset(pg, ea);
-
-		if (pud_huge(*pu)) {
-			ret_pte = (pte_t *) pu;
-			goto out;
-		} else if (is_hugepd(pu))
-			hpdp = (hugepd_t *)pu;
-		else if (!pud_none(*pu)) {
-			pdshift = PMD_SHIFT;
-			pm = pmd_offset(pu, ea);
-
-			if (pmd_huge(*pm)) {
-				ret_pte = (pte_t *) pm;
-				goto out;
-			} else if (is_hugepd(pm))
-				hpdp = (hugepd_t *)pm;
-			else if (!pmd_none(*pm))
-				return pte_offset_kernel(pm, ea);
-		}
-	}
-	if (!hpdp)
-		return NULL;
-
-	ret_pte = hugepte_offset(hpdp, ea, pdshift);
-	pdshift = hugepd_shift(*hpdp);
-out:
-	if (shift)
-		*shift = pdshift;
-	return ret_pte;
-}
-EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
-
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
@@ -753,69 +696,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
-int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
-	       unsigned long end, int write, struct page **pages, int *nr)
-{
-	unsigned long mask;
-	unsigned long pte_end;
-	struct page *head, *page, *tail;
-	pte_t pte;
-	int refs;
-
-	pte_end = (addr + sz) & ~(sz-1);
-	if (pte_end < end)
-		end = pte_end;
-
-	pte = *ptep;
-	mask = _PAGE_PRESENT | _PAGE_USER;
-	if (write)
-		mask |= _PAGE_RW;
-
-	if ((pte_val(pte) & mask) != mask)
-		return 0;
-
-	/* hugepages are never "special" */
-	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
-	refs = 0;
-	head = pte_page(pte);
-
-	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
-	tail = page;
-	do {
-		VM_BUG_ON(compound_head(page) != head);
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
-
-	if (!page_cache_add_speculative(head, refs)) {
-		*nr -= refs;
-		return 0;
-	}
-
-	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-		/* Could be optimized better */
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
-		return 0;
-	}
-
-	/*
-	 * Any tail page need their mapcount reference taken before we
-	 * return.
-	 */
-	while (refs--) {
-		if (PageTail(tail))
-			get_huge_page_tail(tail);
-		tail++;
-	}
-
-	return 1;
-}
-
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 				      unsigned long sz)
 {
@@ -1032,3 +912,128 @@ void flush_dcache_icache_hugepage(struct page *page)
 		}
 	}
 }
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * We have 4 cases for pgds and pmds:
+ * (1) invalid (all zeroes)
+ * (2) pointer to next table, as normal; bottom 6 bits == 0
+ * (3) leaf pte for huge page, bottom two bits != 00
+ * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ */
+pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+{
+	pgd_t *pg;
+	pud_t *pu;
+	pmd_t *pm;
+	pte_t *ret_pte;
+	hugepd_t *hpdp = NULL;
+	unsigned pdshift = PGDIR_SHIFT;
+
+	if (shift)
+		*shift = 0;
+
+	pg = pgdir + pgd_index(ea);
+
+	if (pgd_huge(*pg)) {
+		ret_pte = (pte_t *) pg;
+		goto out;
+	} else if (is_hugepd(pg))
+		hpdp = (hugepd_t *)pg;
+	else if (!pgd_none(*pg)) {
+		pdshift = PUD_SHIFT;
+		pu = pud_offset(pg, ea);
+
+		if (pud_huge(*pu)) {
+			ret_pte = (pte_t *) pu;
+			goto out;
+		} else if (is_hugepd(pu))
+			hpdp = (hugepd_t *)pu;
+		else if (!pud_none(*pu)) {
+			pdshift = PMD_SHIFT;
+			pm = pmd_offset(pu, ea);
+
+			if (pmd_huge(*pm)) {
+				ret_pte = (pte_t *) pm;
+				goto out;
+			} else if (is_hugepd(pm))
+				hpdp = (hugepd_t *)pm;
+			else if (!pmd_none(*pm))
+				return pte_offset_kernel(pm, ea);
+		}
+	}
+	if (!hpdp)
+		return NULL;
+
+	ret_pte = hugepte_offset(hpdp, ea, pdshift);
+	pdshift = hugepd_shift(*hpdp);
+out:
+	if (shift)
+		*shift = pdshift;
+	return ret_pte;
+}
+EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
+
+int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+	       unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask;
+	unsigned long pte_end;
+	struct page *head, *page, *tail;
+	pte_t pte;
+	int refs;
+
+	pte_end = (addr + sz) & ~(sz-1);
+	if (pte_end < end)
+		end = pte_end;
+
+	pte = *ptep;
+	mask = _PAGE_PRESENT | _PAGE_USER;
+	if (write)
+		mask |= _PAGE_RW;
+
+	if ((pte_val(pte) & mask) != mask)
+		return 0;
+
+	/* hugepages are never "special" */
+	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+	refs = 0;
+	head = pte_page(pte);
+
+	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+	tail = page;
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+		/* Could be optimized better */
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	/*
+	 * Any tail page need their mapcount reference taken before we
+	 * return.
+	 */
+	while (refs--) {
+		if (PageTail(tail))
+			get_huge_page_tail(tail);
+		tail++;
+	}
+
+	return 1;
+}
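As a usage note, here is a sketch (under assumptions, not code from this commit) of how a fast-GUP walker that hits a hugepd entry would typically drive gup_hugepte(): size the huge PTEs with hugepd_shift(), locate them with hugepte_offset(), then take speculative references one huge PTE at a time. The function name gup_hugepd_sketch and the exact loop shape are illustrative; hugepte_addr_end() is the clamping helper visible in the diff above.

/* Illustrative sketch only; not part of this patch. */
static int gup_hugepd_sketch(hugepd_t *hugepd, unsigned pdshift,
			     unsigned long addr, unsigned long end,
			     int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		/*
		 * gup_hugepte() returns 0 when the permission check or the
		 * speculative page reference fails; the caller then falls
		 * back to the slow GUP path.
		 */
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}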