author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2005-11-06 19:06:55 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-06 19:56:47 -0500
commit	3c726f8dee6f55e96475574e9f645327e461884c
tree	f67c381e8f57959aa4a94bda4c68e24253cd8171 /include/asm-ppc64/pgalloc.h
parent	f912696ab330bf539231d1f8032320f2a08b850f
[PATCH] ppc64: support 64k pages
Adds a new CONFIG_PPC_64K_PAGES which, when enabled, changes the kernel base page size to 64K. The resulting kernel still boots on any hardware. On current machines with 4K pages support only, the kernel will maintain 16 "subpages" for each 64K page transparently.

Note that while real 64K capable HW has been tested, the current patch will not enable it yet as such hardware is not released yet, and I'm still verifying with the firmware architects the proper way to get the information from the newer hypervisors.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
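The 16-subpages figure follows directly from the page-size ratio: a 64K base page covers sixteen 4K hardware pages, so to run transparently on 4K-only hardware the kernel has to track which 4K sub-page of a 64K page is being touched. The fragment below is only an illustrative sketch of that arithmetic, not part of the patch; the names SUBPAGE_SHIFT and subpage_index are made up for the example.

#include <stdio.h>

#define PAGE_SHIFT_64K	16	/* 64K kernel base page */
#define PAGE_SHIFT_4K	12	/* 4K hardware page on current machines */
/* Hypothetical name, not from the kernel: bits separating the two sizes. */
#define SUBPAGE_SHIFT	(PAGE_SHIFT_64K - PAGE_SHIFT_4K)

/* Which of the sixteen 4K sub-pages does this address fall in? */
static unsigned int subpage_index(unsigned long addr)
{
	return (addr >> PAGE_SHIFT_4K) & ((1 << SUBPAGE_SHIFT) - 1);
}

int main(void)
{
	printf("sub-pages per 64K page: %d\n", 1 << SUBPAGE_SHIFT);	/* 16 */
	printf("0x12345678 -> sub-page %u\n", subpage_index(0x12345678UL));
	return 0;
}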
Diffstat (limited to 'include/asm-ppc64/pgalloc.h')
-rw-r--r--	include/asm-ppc64/pgalloc.h	47
1 files changed, 39 insertions, 8 deletions
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 26bc49c1108d..98da0e4262bd 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -8,10 +8,16 @@
 
 extern kmem_cache_t *pgtable_cache[];
 
+#ifdef CONFIG_PPC_64K_PAGES
+#define PTE_CACHE_NUM 0
+#define PMD_CACHE_NUM 0
+#define PGD_CACHE_NUM 1
+#else
 #define PTE_CACHE_NUM 0
 #define PMD_CACHE_NUM 1
 #define PUD_CACHE_NUM 1
 #define PGD_CACHE_NUM 0
+#endif
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -30,6 +36,8 @@ static inline void pgd_free(pgd_t *pgd)
 	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
 }
 
+#ifndef CONFIG_PPC_64K_PAGES
+
 #define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -43,7 +51,30 @@ static inline void pud_free(pud_t *pud)
 	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
 }
 
-#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	pud_set(pud, (unsigned long)pmd);
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+	pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+
+
+#else /* CONFIG_PPC_64K_PAGES */
+
+#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+				       pte_t *pte)
+{
+	pmd_set(pmd, (unsigned long)pte);
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+	pmd_populate_kernel(mm, pmd, page_address(pte_page))
+
+#endif /* CONFIG_PPC_64K_PAGES */
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -56,17 +87,15 @@ static inline void pmd_free(pmd_t *pmd)
 	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
 }
 
-#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
-#define pmd_populate(mm, pmd, pte_page) \
-	pmd_populate_kernel(mm, pmd, page_address(pte_page))
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
 {
 	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
 				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
 {
 	return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
@@ -103,7 +132,7 @@ static inline void pgtable_free(pgtable_free_t pgf)
 	kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #define __pte_free_tlb(tlb, ptepage)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
@@ -111,9 +140,11 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 #define __pmd_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#ifndef CONFIG_PPC_64K_PAGES
 #define __pud_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
 		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+#endif /* CONFIG_PPC_64K_PAGES */
 
 #define check_pgt_cache()	do { } while (0)
 
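The __pte/__pmd/__pud_free_tlb macros above hand pgtable_free_tlb() a pgtable_free_t built by pgtable_free_cache(pointer, CACHE_NUM, TABLE_SIZE-1). That helper is defined elsewhere in this header and is not shown in the hunk; the sketch below only illustrates the general trick of stashing a small cache index in the low bits of a naturally aligned table pointer. It is an assumption about how the packing works, written as a standalone userspace program with made-up names (pack_table, unpack_table, CACHENUM_MASK).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHENUM_MASK	0xfUL	/* assumed: low bits carry the cache index */

/* Pack an aligned table pointer and its cache index into one word.
 * 'mask' is the table size minus one, so those low bits of the pointer
 * are known to be zero and can hold the index. */
static uintptr_t pack_table(void *table, unsigned int cachenum, uintptr_t mask)
{
	assert(cachenum <= CACHENUM_MASK);
	assert(((uintptr_t)table & mask) == 0);	/* table is naturally aligned */
	return ((uintptr_t)table & ~mask) | cachenum;
}

/* Recover the pointer and cache index from the packed word. */
static void unpack_table(uintptr_t packed, void **table, unsigned int *cachenum)
{
	*cachenum = packed & CACHENUM_MASK;
	*table = (void *)(packed & ~CACHENUM_MASK);
}

int main(void)
{
	void *table = aligned_alloc(4096, 4096);	/* stand-in for a page table */
	uintptr_t packed = pack_table(table, 1, 4096 - 1);
	void *t;
	unsigned int c;

	unpack_table(packed, &t, &c);
	printf("cache %u, pointer match: %d\n", c, t == table);
	free(table);
	return 0;
}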