author     David S. Miller <davem@sunset.davemloft.net>  2006-02-09 05:52:44 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:12:03 -0500
commit     d82ace7dc4073b090a55b9740700e32b9a9ae302 (patch)
tree       d5aa8e10664b05bbfe31eacf95e2066c03cab102 /arch/sparc64/kernel/trampoline.S
parent     1d2f1f90a1e004b0c1b8a73ed4394a93f09104b3 (diff)
[SPARC64]: Detect sun4v early in boot process.
We look for "SUNW,sun4v" in the 'compatible' property
of the root OBP device tree node.
Protect every %ver register access, to make sure it is
not touched on sun4v, as %ver is hyperprivileged there.
Lock kernel TLB entries using hypervisor calls instead of
calls into OBP.
Signed-off-by: David S. Miller <davem@davemloft.net>
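For readers following the detection logic, here is a rough C sketch of the 'compatible' check described above. It is illustrative only: the real test in this series runs in early assembly, before the kernel's C environment is up, and the helper below (is_sun4v) is hypothetical. It assumes the sparc64 PROM helpers prom_root_node and prom_getproperty() from arch/sparc64/prom.

/* Illustrative sketch only -- not the code added by this patch. */
#include <linux/string.h>
#include <asm/oplib.h>

static int __init is_sun4v(void)
{
	char compat[64];
	const char *p;
	int len;

	/* Read the 'compatible' property of the root OBP device node. */
	len = prom_getproperty(prom_root_node, "compatible",
			       compat, sizeof(compat));
	if (len <= 0)
		return 0;

	/* The property holds a list of NUL-separated strings. */
	for (p = compat; p < compat + len; p += strlen(p) + 1) {
		if (!strcmp(p, "SUNW,sun4v"))
			return 1;
	}
	return 0;
}

When the check matches, the trampoline below takes the niagara_* paths: instead of calling OBP's call_method to pin KERNBASE, it issues HV_FAST_MMU_MAP_PERM_ADDR hypervisor calls (function number in %o0, arguments in %o1-%o4, trapped via ta HV_FAST_TRAP) for both the I- and D-TLB, and repeats the pair for the second 4MB mapping at KERNBASE + 0x400000 when 'bigkernel' is set.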
Diffstat (limited to 'arch/sparc64/kernel/trampoline.S')
-rw-r--r--  arch/sparc64/kernel/trampoline.S  62
1 file changed, 56 insertions(+), 6 deletions(-)
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index fbf844f84a49..ffa8b79632cf 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -16,6 +16,7 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/mmu.h>
+#include <asm/hypervisor.h>

 	.data
 	.align	8
@@ -34,8 +35,9 @@ dtlb_load:
 sparc64_cpu_startup:
 	flushw

-	BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup)
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup)
+	BRANCH_IF_SUN4V(g1, niagara_startup)
+	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)

 	ba,pt	%xcc, spitfire_startup
 	 nop
@@ -70,7 +72,9 @@ cheetah_generic_startup:
 	stxa	%g0, [%g3] ASI_DMMU
 	stxa	%g0, [%g3] ASI_IMMU
 	membar	#Sync
+	/* fallthru */

+niagara_startup:
 	/* Disable STICK_INT interrupts. */
 	sethi	%hi(0x80000000), %g5
 	sllx	%g5, 32, %g5
@@ -91,6 +95,8 @@ startup_continue:
 	sllx	%g2, 32, %g2
 	wr	%g2, 0, %tick_cmpr

+	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)
+
 	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
 	 * We lock 2 consequetive entries if we are 'bigkernel'.
 	 */
@@ -142,8 +148,7 @@ startup_continue:

 	sethi	%hi(bigkernel), %g2
 	lduw	[%g2 + %lo(bigkernel)], %g2
-	cmp	%g2, 0
-	be,pt	%icc, do_dtlb
+	brz,pt	%g2, do_dtlb
 	 nop

 	sethi	%hi(call_method), %g2
@@ -214,8 +219,7 @@ do_dtlb:

 	sethi	%hi(bigkernel), %g2
 	lduw	[%g2 + %lo(bigkernel)], %g2
-	cmp	%g2, 0
-	be,pt	%icc, do_unlock
+	brz,pt	%g2, do_unlock
 	 nop

 	sethi	%hi(call_method), %g2
@@ -257,6 +261,52 @@ do_unlock:
 	stb	%g0, [%g2 + %lo(prom_entry_lock)]
 	membar	#StoreStore | #StoreLoad

+	ba,pt	%xcc, after_lock_tlb
+	 nop
+
+niagara_lock_tlb:
+	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o0
+	sethi	%hi(KERNBASE), %o1
+	clr	%o2
+	sethi	%hi(kern_locked_tte_data), %o3
+	ldx	[%o3 + %lo(kern_locked_tte_data)], %o3
+	mov	HV_MMU_IMMU, %o4
+	ta	HV_FAST_TRAP
+
+	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o0
+	sethi	%hi(KERNBASE), %o1
+	clr	%o2
+	sethi	%hi(kern_locked_tte_data), %o3
+	ldx	[%o3 + %lo(kern_locked_tte_data)], %o3
+	mov	HV_MMU_DMMU, %o4
+	ta	HV_FAST_TRAP
+
+	sethi	%hi(bigkernel), %g2
+	lduw	[%g2 + %lo(bigkernel)], %g2
+	brz,pt	%g2, after_lock_tlb
+	 nop
+
+	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o0
+	sethi	%hi(KERNBASE + 0x400000), %o1
+	clr	%o2
+	sethi	%hi(kern_locked_tte_data), %o3
+	ldx	[%o3 + %lo(kern_locked_tte_data)], %o3
+	sethi	%hi(0x400000), %o4
+	add	%o3, %o4, %o3
+	mov	HV_MMU_IMMU, %o4
+	ta	HV_FAST_TRAP
+
+	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o0
+	sethi	%hi(KERNBASE + 0x400000), %o1
+	clr	%o2
+	sethi	%hi(kern_locked_tte_data), %o3
+	ldx	[%o3 + %lo(kern_locked_tte_data)], %o3
+	sethi	%hi(0x400000), %o4
+	add	%o3, %o4, %o3
+	mov	HV_MMU_DMMU, %o4
+	ta	HV_FAST_TRAP
+
+after_lock_tlb:
 	mov	%l1, %sp
 	flushw
