author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2014-11-05 11:27:39 -0500
committer  Michael Ellerman <mpe@ellerman.id.au>                2014-11-14 01:24:21 -0500
commit     06743521d0eae1263a09bccb1a92a9fbb94660b3 (patch)
tree       6cfd2a29bb7abe82501c64676f35ead205079ffd
parent     9e819963b45f79e87f5a8c44960a66c0727c80e6 (diff)
powerpc/mm: Add missing pmd accessors
This patch adds documentation and the missing page table accessors.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
 arch/powerpc/include/asm/pgtable-ppc64-4k.h  | 16
 arch/powerpc/include/asm/pgtable-ppc64-64k.h |  3
 arch/powerpc/include/asm/pgtable-ppc64.h     | 51
 arch/powerpc/mm/hugetlbpage.c                |  3
 arch/powerpc/mm/pgtable_64.c                 | 22
 5 files changed, 78 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 7b935683f268..132ee1d482c2 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -57,7 +57,21 @@
 #define pgd_present(pgd)       (pgd_val(pgd) != 0)
 #define pgd_clear(pgdp)        (pgd_val(*(pgdp)) = 0)
 #define pgd_page_vaddr(pgd)    (pgd_val(pgd) & ~PGD_MASKED_BITS)
-#define pgd_page(pgd)          virt_to_page(pgd_page_vaddr(pgd))
+
+#ifndef __ASSEMBLY__
+
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+        return __pte(pgd_val(pgd));
+}
+
+static inline pgd_t pte_pgd(pte_t pte)
+{
+        return __pgd(pte_val(pte));
+}
+extern struct page *pgd_page(pgd_t pgd);
+
+#endif /* !__ASSEMBLY__ */
 
 #define pud_offset(pgdp, addr) \
   (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index a56b82fb0609..1de35bbd02a6 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -38,4 +38,7 @@
 /* Bits to mask out from a PGD/PUD to get to the PMD page */
 #define PUD_MASKED_BITS        0x1ff
 
+#define pgd_pte(pgd)   (pud_pte(((pud_t){ pgd })))
+#define pte_pgd(pte)   ((pgd_t)pte_pud(pte))
+
 #endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */
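
The two header hunks above give the pgd level the same pte conversion helpers the pmd level already has, so generic pte_*() predicates can be reused on huge leaf entries higher up the tree; pud_write() and pgd_write() further down in pgtable-ppc64.h are built exactly that way on top of pte_write(). Below is a rough, self-contained sketch of the pattern in plain user-space C, not kernel code: the wrapper structs and the MY_PAGE_RW bit are invented stand-ins for the real ppc64 types and PTE flags.

/*
 * Illustrative sketch only -- not kernel code. pte_t/pgd_t and MY_PAGE_RW
 * below are invented stand-ins.
 */
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;    /* leaf entry */
typedef struct { unsigned long pgd; } pgd_t;    /* top-level entry */

#define MY_PAGE_RW 0x4UL                        /* hypothetical "writable" bit */

static inline pte_t __pte(unsigned long v) { return (pte_t){ v }; }
static inline pgd_t __pgd(unsigned long v) { return (pgd_t){ v }; }
static inline unsigned long pte_val(pte_t p) { return p.pte; }
static inline unsigned long pgd_val(pgd_t p) { return p.pgd; }

/* Same shape as the conversion helpers added above */
static inline pte_t pgd_pte(pgd_t pgd) { return __pte(pgd_val(pgd)); }
static inline pgd_t pte_pgd(pte_t pte) { return __pgd(pte_val(pte)); }

/* A pte-level predicate that pgd-level code can now reuse via pgd_pte() */
static inline int pte_write(pte_t pte) { return (pte_val(pte) & MY_PAGE_RW) != 0; }
#define pgd_write(pgd)  pte_write(pgd_pte(pgd))

int main(void)
{
        /* pretend this pgd entry is a huge-page leaf rather than a table pointer */
        pgd_t g = __pgd(0x1000UL | MY_PAGE_RW);

        printf("pgd_write  = %d\n", pgd_write(g));                               /* 1 */
        printf("round trip = %d\n", pgd_val(pte_pgd(pgd_pte(g))) == pgd_val(g)); /* 1 */
        return 0;
}

Keeping pte_t and pgd_t as distinct wrapper types is what makes the explicit conversions necessary in the first place; the payoff is that the compiler rejects accidental mixing of page table levels.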
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index ae153c40ab7c..d12092420560 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -152,7 +152,7 @@
 #define pmd_none(pmd)          (!pmd_val(pmd))
 #define pmd_bad(pmd)           (!is_kernel_addr(pmd_val(pmd)) \
                                 || (pmd_val(pmd) & PMD_BAD_BITS))
-#define pmd_present(pmd)       (pmd_val(pmd) != 0)
+#define pmd_present(pmd)       (!pmd_none(pmd))
 #define pmd_clear(pmdp)        (pmd_val(*(pmdp)) = 0)
 #define pmd_page_vaddr(pmd)    (pmd_val(pmd) & ~PMD_MASKED_BITS)
 extern struct page *pmd_page(pmd_t pmd);
@@ -164,9 +164,21 @@ extern struct page *pmd_page(pmd_t pmd);
 #define pud_present(pud)       (pud_val(pud) != 0)
 #define pud_clear(pudp)        (pud_val(*(pudp)) = 0)
 #define pud_page_vaddr(pud)    (pud_val(pud) & ~PUD_MASKED_BITS)
-#define pud_page(pud)          virt_to_page(pud_page_vaddr(pud))
 
+extern struct page *pud_page(pud_t pud);
+
+static inline pte_t pud_pte(pud_t pud)
+{
+        return __pte(pud_val(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+        return __pud(pte_val(pte));
+}
+#define pud_write(pud)         pte_write(pud_pte(pud))
 #define pgd_set(pgdp, pudp)    ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+#define pgd_write(pgd)         pte_write(pgd_pte(pgd))
 
 /*
  * Find an entry in a page-table-directory. We combine the address region
@@ -422,7 +434,22 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                        pmd_t *pmdp, pmd_t pmd);
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                  pmd_t *pmd);
-
+/*
+ *
+ * For core kernel code, by design pmd_trans_huge() is never run on any
+ * hugetlbfs page. The hugetlbfs page table walking and mangling paths are
+ * totally separated from the core VM paths, and they are differentiated by
+ * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge() could run.
+ *
+ * pmd_trans_huge() is defined as false at build time if
+ * CONFIG_TRANSPARENT_HUGEPAGE=n, so that such code blocks are optimized
+ * away at build time.
+ *
+ * For ppc64 we need to differentiate explicit hugepages from THP, because
+ * for THP we also track the subpage details at the pmd level. We don't do
+ * that for explicit huge pages.
+ *
+ */
 static inline int pmd_trans_huge(pmd_t pmd)
 {
        /*
@@ -431,16 +458,6 @@ static inline int pmd_trans_huge(pmd_t pmd)
        return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
 }
 
-static inline int pmd_large(pmd_t pmd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       if (pmd_trans_huge(pmd))
-               return pmd_val(pmd) & _PAGE_PRESENT;
-       return 0;
-}
-
 static inline int pmd_trans_splitting(pmd_t pmd)
 {
        if (pmd_trans_huge(pmd))
@@ -451,6 +468,14 @@ static inline int pmd_trans_splitting(pmd_t pmd)
 extern int has_transparent_hugepage(void);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static inline int pmd_large(pmd_t pmd)
+{
+       /*
+        * leaf pte for huge page, bottom two bits != 00
+        */
+       return ((pmd_val(pmd) & 0x3) != 0x0);
+}
+
 static inline pte_t pmd_pte(pmd_t pmd)
 {
        return __pte(pmd_val(pmd));
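
After this change pmd_large() is true for any leaf pmd (bottom two bits != 00), while pmd_trans_huge() additionally requires _PAGE_THP_HUGE; that is how the comment's distinction between THP and explicit hugepages shows up in code, and why pmd_large() moves out of the CONFIG_TRANSPARENT_HUGEPAGE block. A minimal user-space sketch of the distinction follows; the _PAGE_THP_HUGE value is a stand-in, not the real ppc64 bit.

/*
 * Illustrative sketch only -- not kernel code. The bit values are stand-ins.
 */
#include <stdio.h>

typedef struct { unsigned long pmd; } pmd_t;

#define _PAGE_THP_HUGE 0x40UL   /* hypothetical: marks a THP leaf, not hugetlb */

static inline unsigned long pmd_val(pmd_t p) { return p.pmd; }

/* Any leaf entry: bottom two bits != 00 (same shape as the relocated pmd_large()) */
static inline int pmd_large(pmd_t pmd)
{
        return (pmd_val(pmd) & 0x3) != 0x0;
}

/* THP leaf only: a leaf that is also tagged with _PAGE_THP_HUGE */
static inline int pmd_trans_huge(pmd_t pmd)
{
        return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
}

int main(void)
{
        pmd_t thp     = { 0x1 | _PAGE_THP_HUGE };  /* transparent huge page */
        pmd_t hugetlb = { 0x1 };                   /* explicit hugetlbfs page */

        printf("THP:     large=%d trans_huge=%d\n", pmd_large(thp), pmd_trans_huge(thp));
        printf("hugetlb: large=%d trans_huge=%d\n", pmd_large(hugetlb), pmd_trans_huge(hugetlb));
        return 0;   /* prints "1 1" for THP and "1 0" for hugetlb */
}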
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b460e723f0ec..2b8e5ed28831 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -62,6 +62,9 @@ static unsigned nr_gpages;
 /*
  * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
  * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
+ *
+ * Defined in such a way that the code block can be optimized away at build
+ * time if CONFIG_HUGETLB_PAGE=n.
  */
 int pmd_huge(pmd_t pmd)
 {
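
The added comment is about build-time dead code elimination: when CONFIG_HUGETLB_PAGE=n, pmd_huge() collapses to a constant 0, so every if (pmd_huge(...)) block in callers becomes dead code and needs no #ifdef of its own. A toy user-space sketch of that effect follows, treating CONFIG_HUGETLB_PAGE as an ordinary macro passed on the compiler command line; the bit test used for the "huge" case is invented.

/*
 * Illustrative sketch only -- not kernel code.
 */
#include <stdio.h>

typedef struct { unsigned long pmd; } pmd_t;

#ifdef CONFIG_HUGETLB_PAGE
static inline int pmd_huge(pmd_t pmd) { return (pmd.pmd & 0x3) != 0x0; }
#else
static inline int pmd_huge(pmd_t pmd) { (void)pmd; return 0; }  /* constant false */
#endif

static const char *classify(pmd_t pmd)
{
        if (pmd_huge(pmd))          /* dead code when CONFIG_HUGETLB_PAGE is unset */
                return "huge";
        return "normal";
}

int main(void)
{
        pmd_t pmd = { 0x1 };
        printf("%s\n", classify(pmd));  /* "huge" only if built with -DCONFIG_HUGETLB_PAGE */
        return 0;
}

Built with -DCONFIG_HUGETLB_PAGE the branch is live; built without it, the call folds to 0 and the optimizer drops the branch entirely, which is the property the new pgd_page()/pud_page()/pmd_page() implementations rely on.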
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e0c718543174..87ff0c1908a9 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -35,6 +35,7 @@
 #include <linux/vmalloc.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -344,16 +345,31 @@ EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
 EXPORT_SYMBOL(__iounmap_at);
 
+#ifndef __PAGETABLE_PUD_FOLDED
+/* 4 level page table */
+struct page *pgd_page(pgd_t pgd)
+{
+        if (pgd_huge(pgd))
+                return pte_page(pgd_pte(pgd));
+        return virt_to_page(pgd_page_vaddr(pgd));
+}
+#endif
+
+struct page *pud_page(pud_t pud)
+{
+        if (pud_huge(pud))
+                return pte_page(pud_pte(pud));
+        return virt_to_page(pud_page_vaddr(pud));
+}
+
 /*
  * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
  * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
  */
 struct page *pmd_page(pmd_t pmd)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        if (pmd_trans_huge(pmd))
+        if (pmd_trans_huge(pmd) || pmd_huge(pmd))
                 return pfn_to_page(pmd_pfn(pmd));
-#endif
         return virt_to_page(pmd_page_vaddr(pmd));
 }
 
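
With pgd_page() and pud_page() added and pmd_page() widened, a page table walker can call the same accessor whether the entry points to a lower-level table, is a THP leaf, or is an explicit hugetlb leaf. A rough user-space model of the pmd_page() dispatch follows (not kernel code); struct page, pfn_to_page(), virt_to_page() and the mask/shift constants below are crude stand-ins so the control flow can actually run.

/*
 * Illustrative sketch only -- not kernel code. All constants and the
 * memory-map helpers are simplified stand-ins.
 */
#include <stdio.h>

struct page { unsigned long pfn; };

typedef struct { unsigned long pmd; } pmd_t;

#define _PAGE_THP_HUGE   0x40UL   /* hypothetical THP marker, as before */
#define PMD_MASKED_BITS  0xffUL   /* stand-in for the real mask */
#define PTE_RPN_SHIFT    12       /* stand-in: flag bits live below this */

static struct page fake_mem[16];  /* pretend mem_map */

static struct page *pfn_to_page(unsigned long pfn) { return &fake_mem[pfn % 16]; }
static struct page *virt_to_page(unsigned long va) { return &fake_mem[(va >> 16) % 16]; }

static unsigned long pmd_val(pmd_t p) { return p.pmd; }
static int pmd_trans_huge(pmd_t p) { return (pmd_val(p) & 0x3) && (pmd_val(p) & _PAGE_THP_HUGE); }
static int pmd_huge(pmd_t p) { return (pmd_val(p) & 0x3) != 0x0; }
static unsigned long pmd_pfn(pmd_t p) { return pmd_val(p) >> PTE_RPN_SHIFT; }
static unsigned long pmd_page_vaddr(pmd_t p) { return pmd_val(p) & ~PMD_MASKED_BITS; }

/* Same shape as the new pmd_page() in pgtable_64.c */
static struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd) || pmd_huge(pmd))
                return pfn_to_page(pmd_pfn(pmd));     /* leaf: the pfn is in the pmd */
        return virt_to_page(pmd_page_vaddr(pmd));     /* table: the pmd holds a vaddr */
}

int main(void)
{
        pmd_t huge_leaf = { (3UL << PTE_RPN_SHIFT) | _PAGE_THP_HUGE | 0x1 };
        pmd_t pte_table = { 0x10000UL };              /* "virtual address" of a PTE page */

        printf("leaf  -> page %td\n", pmd_page(huge_leaf) - fake_mem);
        printf("table -> page %td\n", pmd_page(pte_table) - fake_mem);
        return 0;
}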