Diffstat (limited to 'arch/sparc/include/asm/tsb.h')
-rw-r--r--  arch/sparc/include/asm/tsb.h  106
1 files changed, 93 insertions, 13 deletions
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 1a8afd1ad04f..b4c258de4443 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -147,20 +147,96 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	brz,pn REG1, FAIL_LABEL; \
 	sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx REG1, 11, REG1; \
+	sllx REG1, PGD_PADDR_SHIFT, REG1; \
 	andn REG2, 0x3, REG2; \
 	lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn REG1, FAIL_LABEL; \
 	sllx VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx REG1, 11, REG1; \
+	srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+	sllx REG1, PMD_PADDR_SHIFT, REG1; \
 	andn REG2, 0x7, REG2; \
 	add REG1, REG2, REG1;
 
-/* Do a user page table walk in MMU globals. Leaves physical PTE
- * pointer in REG1. Jumps to FAIL_LABEL on early page table walk
- * termination. Physical base of page tables is in PHYS_PGD which
- * will not be modified.
+/* This macro exists only to make the PMD translator below easier
+ * to read. It hides the ELF section switch for the sun4v code
+ * patching.
+ */
+#define OR_PTE_BIT(REG, NAME) \
+661:	or REG, _PAGE_##NAME##_4U, REG; \
+	.section .sun4v_1insn_patch, "ax"; \
+	.word 661b; \
+	or REG, _PAGE_##NAME##_4V, REG; \
+	.previous;
+
+/* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */
+#define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \
+661:	sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \
+	.section .sun4v_1insn_patch, "ax"; \
+	.word 661b; \
+	sethi %uhi(_PAGE_VALID), REG; \
+	.previous; \
+	sllx REG, 32, REG; \
+661:	or REG, _PAGE_CP_4U|_PAGE_CV_4U, REG; \
+	.section .sun4v_1insn_patch, "ax"; \
+	.word 661b; \
+	or REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \
+	.previous;
+
+/* PMD has been loaded into REG1, interpret the value, seeing
+ * if it is a HUGE PMD or a normal one. If it is not valid
+ * then jump to FAIL_LABEL. If it is a HUGE PMD, and it
+ * translates to a valid PTE, branch to PTE_LABEL.
+ *
+ * We translate the PMD by hand, one bit at a time,
+ * constructing the huge PTE.
+ *
+ * So we construct the PTE in REG2 as follows:
+ *
+ * 1) Extract the PMD PFN from REG1 and place it into REG2.
+ *
+ * 2) Translate PMD protection bits in REG1 into REG2, one bit
+ *    at a time using andcc tests on REG1 and OR's into REG2.
+ *
+ *    Only two bits to be concerned with here, EXEC and WRITE.
+ *    Now REG1 is freed up and we can use it as a temporary.
+ *
+ * 3) Construct the VALID, CACHE, and page size PTE bits in
+ *    REG1, OR with REG2 to form final PTE.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+	brz,pn REG1, FAIL_LABEL; \
+	andcc REG1, PMD_ISHUGE, %g0; \
+	be,pt %xcc, 700f; \
+	and REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2; \
+	cmp REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED; \
+	bne,pn %xcc, FAIL_LABEL; \
+	andn REG1, PMD_HUGE_PROTBITS, REG2; \
+	sllx REG2, PMD_PADDR_SHIFT, REG2; \
+	/* REG2 now holds PFN << PAGE_SHIFT */ \
+	andcc REG1, PMD_HUGE_EXEC, %g0; \
+	bne,a,pt %xcc, 1f; \
+	OR_PTE_BIT(REG2, EXEC); \
+1:	andcc REG1, PMD_HUGE_WRITE, %g0; \
+	bne,a,pt %xcc, 1f; \
+	OR_PTE_BIT(REG2, W); \
+	/* REG1 can now be clobbered, build final PTE */ \
+1:	BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \
+	ba,pt %xcc, PTE_LABEL; \
+	or REG1, REG2, REG1; \
+700:
+#else
+#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+	brz,pn REG1, FAIL_LABEL; \
+	nop;
+#endif
+
+/* Do a user page table walk in MMU globals. Leaves final,
+ * valid, PTE value in REG1. Jumps to FAIL_LABEL on early
+ * page table walk termination or if the PTE is not valid.
+ *
+ * Physical base of page tables is in PHYS_PGD which will not
+ * be modified.
  *
  * VADDR will not be clobbered, but REG1 and REG2 will.
  */
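
A note on the OR_PTE_BIT() / BUILD_PTE_VALID_SZHUGE_CACHE() idiom introduced above (reviewer commentary, not part of the patch): the instruction at the local label 661: is the sun4u form, and the ".word 661b" emitted into the .sun4v_1insn_patch section records that instruction's address next to the assembled sun4v replacement, so early boot code can overwrite the sun4u instruction in place when the kernel finds itself running under a sun4v hypervisor. As a rough illustration, an invocation such as OR_PTE_BIT(REG2, EXEC) would expand to roughly this (with REG2 itself later replaced by whatever register the outer macro was given):

661:	or REG2, _PAGE_EXEC_4U, REG2;
	.section .sun4v_1insn_patch, "ax";
	.word 661b;
	or REG2, _PAGE_EXEC_4V, REG2;
	.previous;

Only the single "or" at 661: lands in the instruction stream; the patch entry lives in its own section, which is why the macro can safely occupy the annulled delay slot of the bne,a,pt that precedes it in USER_PGTABLE_CHECK_PMD_HUGE().
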
@@ -172,15 +248,19 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	brz,pn REG1, FAIL_LABEL; \
 	sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx REG1, 11, REG1; \
+	sllx REG1, PGD_PADDR_SHIFT, REG1; \
 	andn REG2, 0x3, REG2; \
 	lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
-	brz,pn REG1, FAIL_LABEL; \
+	USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
 	sllx VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx REG1, 11, REG1; \
+	srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+	sllx REG1, PMD_PADDR_SHIFT, REG1; \
 	andn REG2, 0x7, REG2; \
-	add REG1, REG2, REG1;
+	add REG1, REG2, REG1; \
+	ldxa [REG1] ASI_PHYS_USE_EC, REG1; \
+	brgez,pn REG1, FAIL_LABEL; \
+	nop; \
+800:
 
 /* Lookup a OBP mapping on VADDR in the prom_trans[] table at TL>0.
  * If no entry is found, FAIL_LABEL will be branched to. On success
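
For readers who find the branch-and-delay-slot style hard to follow, here is an illustration-only C sketch (not from the patch) of the huge-PMD-to-PTE construction that USER_PGTABLE_CHECK_PMD_HUGE() performs, written with the sun4u bit names. Every numeric value and the helper name huge_pmd_to_pte() are invented placeholders so the sketch compiles on its own; the real definitions live in arch/sparc/include/asm/pgtable_64.h, and on sun4v the _4V constants are substituted via the .sun4v_1insn_patch patching.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout, NOT the real sparc64 definitions. */
#define PMD_ISHUGE		(1u << 31)
#define PMD_HUGE_PRESENT	(1u << 30)
#define PMD_HUGE_ACCESSED	(1u << 29)
#define PMD_HUGE_EXEC		(1u << 28)
#define PMD_HUGE_WRITE		(1u << 27)
#define PMD_HUGE_PROTBITS	(PMD_ISHUGE | PMD_HUGE_PRESENT | \
				 PMD_HUGE_ACCESSED | PMD_HUGE_EXEC | \
				 PMD_HUGE_WRITE)
#define PMD_PADDR_SHIFT		11

#define _PAGE_VALID		(1ull << 63)
#define _PAGE_SZHUGE_4U		(1ull << 62)
#define _PAGE_EXEC_4U		(1ull << 12)
#define _PAGE_CP_4U		(1ull << 9)
#define _PAGE_CV_4U		(1ull << 8)
#define _PAGE_W_4U		(1ull << 1)

/* Returns the constructed huge PTE, or 0 to signal "take FAIL_LABEL". */
static uint64_t huge_pmd_to_pte(uint32_t pmd)
{
	uint64_t pte;

	/* The huge PMD must be both present and accessed, else fault. */
	if ((pmd & (PMD_HUGE_PRESENT | PMD_HUGE_ACCESSED)) !=
	    (PMD_HUGE_PRESENT | PMD_HUGE_ACCESSED))
		return 0;

	/* 1) PFN: strip the software bits, shift into the paddr field. */
	pte = (uint64_t)(pmd & ~PMD_HUGE_PROTBITS) << PMD_PADDR_SHIFT;

	/* 2) Protection: only EXEC and WRITE need translating. */
	if (pmd & PMD_HUGE_EXEC)
		pte |= _PAGE_EXEC_4U;
	if (pmd & PMD_HUGE_WRITE)
		pte |= _PAGE_W_4U;

	/* 3) VALID, cacheability and the huge page size bits. */
	pte |= _PAGE_VALID | _PAGE_SZHUGE_4U | _PAGE_CP_4U | _PAGE_CV_4U;

	return pte;
}

int main(void)
{
	/* Example: a huge, present, accessed, writable PMD. */
	uint32_t pmd = PMD_ISHUGE | PMD_HUGE_PRESENT | PMD_HUGE_ACCESSED |
		       PMD_HUGE_WRITE | 0x1234;

	printf("huge pte = 0x%016llx\n",
	       (unsigned long long)huge_pmd_to_pte(pmd));
	return 0;
}

In the real macro the PTE is built in REG2 and REG1, combined into REG1, and control branches straight to the 800: label, skipping the bottom level of the walk. On the non-huge path the new tail loads the normal PTE with ldxa and rejects it with brgez,pn, relying on _PAGE_VALID being the sign bit, so any non-negative PTE is treated as not valid and sent to FAIL_LABEL.
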