author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2005-11-06 19:06:55 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-11-06 19:56:47 -0500
commit    3c726f8dee6f55e96475574e9f645327e461884c (patch)
tree      f67c381e8f57959aa4a94bda4c68e24253cd8171 /include/asm-ppc64/mmu.h
parent    f912696ab330bf539231d1f8032320f2a08b850f (diff)
[PATCH] ppc64: support 64k pages
Adds a new CONFIG_PPC_64K_PAGES which, when enabled, changes the kernel
base page size to 64K. The resulting kernel still boots on any hardware.
On current machines that support only 4K pages, the kernel transparently
maintains 16 "subpages" for each 64K page.

Note that while real 64K-capable hardware has been tested, the current
patch does not enable it yet, as such hardware has not been released and
I'm still verifying with the firmware architects the proper way to get
the information from the newer hypervisors.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-ppc64/mmu.h')
-rw-r--r--  include/asm-ppc64/mmu.h | 208
1 file changed, 121 insertions(+), 87 deletions(-)
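
[Editor's illustration, not part of the patch: the "16 subpages" bookkeeping
mentioned in the message boils down to simple bit arithmetic. With the base
page shift raised from 12 to 16, each 64K Linux page spans sixteen 4K hardware
pages, and the 4K subpage touched by an effective address is selected by bits
12-15. The constants and helper name below are hypothetical.]

    #define SHIFT_64K   16      /* 64K kernel base page */
    #define SHIFT_4K    12      /* 4K hardware page */

    /* index (0..15) of the 4K hardware subpage within a 64K page */
    static inline unsigned int subpage_index(unsigned long ea)
    {
            return (ea >> SHIFT_4K) & ((1u << (SHIFT_64K - SHIFT_4K)) - 1);
    }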
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index e0505acb77d9..4c18a5cb69f5 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -48,13 +48,21 @@ extern char initial_stab[];
 
 /* Bits in the SLB VSID word */
 #define SLB_VSID_SHIFT		12
+#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
+#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
+#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
 #define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
 #define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
 #define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
-#define SLB_VSID_L		ASM_CONST(0x0000000000000100) /* largepage */
+#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
 #define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
-#define SLB_VSID_LS		ASM_CONST(0x0000000000000070) /* size of largepage */
+#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
+#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
+#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
+#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)
 
 #define SLB_VSID_KERNEL		(SLB_VSID_KP)
 #define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
 
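[Editor's sketch of assumed usage, not from this patch: the new LP encodings
are meant to be OR'ed, together with SLB_VSID_L, into the VSID doubleword
handed to slbmte. mk_vsid_data is a hypothetical helper; the sllp value would
come from the mmu_psize_defs[] table introduced further down.]

    /* compose the VSID half of an slbmte operand for a kernel segment */
    static inline unsigned long mk_vsid_data(unsigned long vsid,
                                             unsigned long sllp)
    {
            /* e.g. sllp == SLB_VSID_L | SLB_VSID_LP_00 for 16M pages */
            return (vsid << SLB_VSID_SHIFT) | SLB_VSID_KERNEL | sllp;
    }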
@@ -69,6 +77,7 @@ extern char initial_stab[];
 #define HPTE_V_AVPN_SHIFT	7
 #define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
 #define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
+#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
 #define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
 #define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
 #define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
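[Editor's sketch of assumed usage: HPTE_V_COMPARE matches two HPTE first
doublewords on their AVPN bits while ignoring the low flag bits, which is
exactly what a scan over a hash group needs. find_hpte_slot is a hypothetical
helper; hpte_t, HPTES_PER_GROUP and HPTE_V_VALID are defined in this header.]

    /* return the slot of a valid entry matching want_v, or -1 */
    static inline long find_hpte_slot(hpte_t *group, unsigned long want_v)
    {
            long i;

            for (i = 0; i < HPTES_PER_GROUP; i++) {
                    unsigned long hpte_v = group[i].v;

                    if (HPTE_V_COMPARE(hpte_v, want_v) &&
                        (hpte_v & HPTE_V_VALID))
                            return i;
            }
            return -1;
    }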
@@ -81,6 +90,7 @@ extern char initial_stab[];
 #define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
 #define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+#define HPTE_R_N		ASM_CONST(0x0000000000000004)
 
 /* Values for PP (assumes Ks=0, Kp=1) */
 /* pp0 will always be 0 for linux */
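[Editor's note, an assumption not spelled out by the patch: HPTE_R_N is the
per-page no-execute bit in the second HPTE doubleword, the page-level
counterpart of the per-segment SLB_VSID_N above. A data-only mapping would
OR it into its rflags; hpte_r_noexec is a hypothetical helper.]

    /* forbid instruction fetch from a page */
    static inline unsigned long hpte_r_noexec(unsigned long rflags)
    {
            return rflags | HPTE_R_N;
    }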
@@ -99,100 +109,120 @@ typedef struct {
 extern hpte_t *htab_address;
 extern unsigned long htab_hash_mask;
 
-static inline unsigned long hpt_hash(unsigned long vpn, int large)
-{
-	unsigned long vsid;
-	unsigned long page;
-
-	if (large) {
-		vsid = vpn >> 4;
-		page = vpn & 0xf;
-	} else {
-		vsid = vpn >> 16;
-		page = vpn & 0xffff;
-	}
-
-	return (vsid & 0x7fffffffffUL) ^ page;
-}
-
-static inline void __tlbie(unsigned long va, int large)
-{
-	/* clear top 16 bits, non SLS segment */
-	va &= ~(0xffffULL << 48);
-
-	if (large) {
-		va &= HPAGE_MASK;
-		asm volatile("tlbie %0,1" : : "r"(va) : "memory");
-	} else {
-		va &= PAGE_MASK;
-		asm volatile("tlbie %0,0" : : "r"(va) : "memory");
-	}
-}
-
-static inline void tlbie(unsigned long va, int large)
-{
-	asm volatile("ptesync": : :"memory");
-	__tlbie(va, large);
-	asm volatile("eieio; tlbsync; ptesync": : :"memory");
-}
-
-static inline void __tlbiel(unsigned long va)
-{
-	/* clear top 16 bits, non SLS segment */
-	va &= ~(0xffffULL << 48);
-	va &= PAGE_MASK;
-
-	/*
-	 * Thanks to Alan Modra we are now able to use machine specific
-	 * assembly instructions (like tlbiel) by using the gas -many flag.
-	 * However we have to support older toolchains so for the moment
-	 * we hardwire it.
-	 */
-#if 0
-	asm volatile("tlbiel %0" : : "r"(va) : "memory");
-#else
-	asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
-#endif
-}
-
-static inline void tlbiel(unsigned long va)
-{
-	asm volatile("ptesync": : :"memory");
-	__tlbiel(va);
-	asm volatile("ptesync": : :"memory");
-}
-
-static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
-{
-	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
-	unsigned long va;
-
-	va = avpn << 23;
-
-	if (!(hpte_v & HPTE_V_LARGE)) {
-		unsigned long vpi, pteg;
-
-		pteg = slot / HPTES_PER_GROUP;
-		if (hpte_v & HPTE_V_SECONDARY)
-			pteg = ~pteg;
-
-		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
-
-		va |= vpi << PAGE_SHIFT;
-	}
-
-	return va;
-}
-
-/*
- * Handle a fault by adding an HPTE. If the address can't be determined
- * to be valid via Linux page tables, return 1. If handled return 0
- */
-extern int __hash_page(unsigned long ea, unsigned long access,
-		       unsigned long vsid, pte_t *ptep, unsigned long trap,
-		       int local);
+/*
+ * Page size definition
+ *
+ *    shift : is the "PAGE_SHIFT" value for that page size
+ *    sllp  : is a bit mask with the value of SLB L || LP to be OR'ed
+ *            directly into a slbmte "vsid" value
+ *    penc  : is the HPTE encoding mask for the "LP" field
+ *
+ */
+struct mmu_psize_def
+{
+	unsigned int	shift;	/* number of bits */
+	unsigned int	penc;	/* HPTE encoding */
+	unsigned int	tlbiel;	/* tlbiel supported for that page size */
+	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
+	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The kernel uses the constants below to index into the page size array.
+ * Fixed constants are used for this purpose because they perform better
+ * in the low level hash refill handlers.
+ *
+ * An unsupported page size has its "shift" field set to 0.
+ *
+ * Any new page size being implemented can get a new entry in here. Whether
+ * the kernel will use it or not is a different matter though. The actual page
+ * size used by hugetlbfs is not defined here and may be made variable.
+ */
+
+#define MMU_PAGE_4K		0	/* 4K */
+#define MMU_PAGE_64K		1	/* 64K */
+#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
+#define MMU_PAGE_1M		3	/* 1M */
+#define MMU_PAGE_16M		4	/* 16M */
+#define MMU_PAGE_16G		5	/* 16G */
+#define MMU_PAGE_COUNT		6
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The current system page sizes
+ */
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+extern int mmu_linear_psize;
+extern int mmu_virtual_psize;
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * The page size index of the huge pages for use by hugetlbfs
+ */
+extern int mmu_huge_psize;
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * This function sets the AVPN and L fields of the HPTE appropriately
+ * for the page size
+ */
+static inline unsigned long hpte_encode_v(unsigned long va, int psize)
+{
+	unsigned long v;
+	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
+	v <<= HPTE_V_AVPN_SHIFT;
+	if (psize != MMU_PAGE_4K)
+		v |= HPTE_V_LARGE;
+	return v;
+}
+
+/*
+ * This function sets the ARPN and LP fields of the HPTE appropriately
+ * for the page size. We assume the pa is already "clean", that is,
+ * properly aligned for the requested page size.
+ */
+static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
+{
+	unsigned long r;
+
+	/* A 4K page needs no special encoding */
+	if (psize == MMU_PAGE_4K)
+		r = pa & HPTE_R_RPN;
+	else {
+		unsigned int penc = mmu_psize_defs[psize].penc;
+		unsigned int shift = mmu_psize_defs[psize].shift;
+		r = (pa & ~((1ul << shift) - 1)) | (penc << 12);
+	}
+	return r;
+}
+
+/*
+ * This hashes a virtual address for a 256MB segment (the only segment
+ * size supported for now)
+ */
+
+static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
+{
+	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
+}
+
+extern int __hash_page_4K(unsigned long ea, unsigned long access,
+			  unsigned long vsid, pte_t *ptep, unsigned long trap,
+			  unsigned int local);
+extern int __hash_page_64K(unsigned long ea, unsigned long access,
+			   unsigned long vsid, pte_t *ptep, unsigned long trap,
+			   unsigned int local);
+struct mm_struct;
+extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
+			  unsigned long ea, unsigned long vsid, int local);
 
 extern void htab_finish_init(void);
+extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+			     unsigned long pstart, unsigned long mode,
+			     int psize);
 
 extern void hpte_init_native(void);
 extern void hpte_init_lpar(void);
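[Editor's sketch of how the new pieces fit together, not part of the patch:
a bolted kernel HPTE for an arbitrary page size could be assembled from
hpte_encode_v(), hpte_encode_r() and hpt_hash(). The 16M table entry shown
is illustrative of what a POWER4-class CPU might use, and example_make_hpte
is a hypothetical helper; HPTE_V_VALID, PP_RWXX and HPTES_PER_GROUP come
from elsewhere in this header.]

    /* illustrative mmu_psize_defs[] entry for 16M pages */
    static const struct mmu_psize_def psize_16M_example = {
            .shift  = 24,           /* 16M = 1 << 24 */
            .sllp   = SLB_VSID_L,   /* L=1, LP=00 in the SLB */
            .penc   = 0,            /* "LP" encoding in the HPTE */
            .avpnm  = 0x1UL,        /* AVPN bit overlapped by the page index */
            .tlbiel = 0,
    };

    /* build both HPTE doublewords; return the primary PTE-group index */
    static inline unsigned long example_make_hpte(unsigned long va,
                                                  unsigned long pa, int psize,
                                                  unsigned long *vp,
                                                  unsigned long *rp)
    {
            *vp = hpte_encode_v(va, psize) | HPTE_V_BOLTED | HPTE_V_VALID;
            *rp = hpte_encode_r(pa, psize) | PP_RWXX;

            return (hpt_hash(va, mmu_psize_defs[psize].shift) & htab_hash_mask)
                    * HPTES_PER_GROUP;
    }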
@@ -200,17 +230,21 @@ extern void hpte_init_iSeries(void);
 
 extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 				     unsigned long va, unsigned long prpn,
-				     unsigned long vflags,
-				     unsigned long rflags);
-extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
-			       unsigned long prpn,
-			       unsigned long vflags, unsigned long rflags);
+				     unsigned long rflags,
+				     unsigned long vflags, int psize);
+
+extern long native_hpte_insert(unsigned long hpte_group,
+			       unsigned long va, unsigned long prpn,
+			       unsigned long rflags,
+			       unsigned long vflags, int psize);
 
-extern long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
-		unsigned long va, unsigned long prpn,
-		unsigned long vflags, unsigned long rflags);
+extern long iSeries_hpte_insert(unsigned long hpte_group,
+				unsigned long va, unsigned long prpn,
+				unsigned long rflags,
+				unsigned long vflags, int psize);
 
 extern void stabs_alloc(void);
+extern void slb_initialize(void);
 
 #endif /* __ASSEMBLY__ */
 
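[Editor's sketch of assumed usage: the insert routines now take rflags before
vflags plus a page size index, and htab_bolt_mapping() gains a psize argument.
A caller bolting the kernel linear mapping might look like the sketch below,
modeled loosely on the hash table initialization path; example_bolt_linear and
mode_rw are illustrative names, while __pa() and panic() are standard kernel
facilities.]

    /* bolt [base, base+size) onto __pa(base) with the linear page size */
    static void example_bolt_linear(unsigned long base, unsigned long size,
                                    unsigned long mode_rw)
    {
            if (htab_bolt_mapping(base, base + size, __pa(base),
                                  mode_rw, mmu_linear_psize))
                    panic("could not bolt linear mapping");
    }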