author		David S. Miller <davem@davemloft.net>	2005-10-12 15:22:46 -0400
committer	David S. Miller <davem@davemloft.net>	2005-10-12 15:22:46 -0400
commit		c9c10830740df1b5e7848d6fbb68c93a73e8f7cd (patch)
tree		b614058c3291ebccb996b27cee9b709956df7791 /arch/sparc64/kernel
parent		d8e998c58a870770905495a1d45ebf7285b5b1c5 (diff)
[SPARC64]: Fix boot failures on SunBlade-150
The sequence to move over from the firmware trap tables to the Linux ones needs to be more airtight. It turns out that, to be 100% safe, we do need to be able to translate OBP mappings in our TLB miss handlers early.

In order not to eat up a lot of kernel image memory with static page tables, just use the translations array in the OBP TLB miss handlers. That solves the bulk of the problem.

Furthermore, to make sure the OBP TLB miss path works even before the fixed MMU globals are loaded, explicitly load TLB_SFSR into %g1 at the beginning of the i-TLB and d-TLB miss handlers.

To ease the OBP TLB miss walk of the prom_trans[] array, we sort it and then delete all of the non-OBP entries in it (for example, the entries for the kernel image itself, which we are not interested in at all).

We also save about 32K of kernel image size with this change. Not a bad side effect :-)

There are still some reasons why trampoline.S can't use setup_trap_table() yet. The most noteworthy are:

1) OBP boots secondary processors with a non-bias'd stack for some reason. This is easily fixed by using a small bootup stack in the kernel image explicitly for this purpose.

2) Doing a firmware call via the normal C call prom_set_trap_table() goes through the whole OBP enter/exit sequence that saves and restores OBP and Linux kernel state in the MMUs. That path unfortunately does a "flush %g6" while loading up the OBP locked TLB entries for the firmware call.

   If we set up %g6 properly in the trampoline.S code, it points into the PAGE_OFFSET linear mapping, but we're not on the kernel trap table yet, so those addresses won't translate properly.

   One idea is to do a by-hand firmware call like we do in the early bootup code and elsewhere in trampoline.S. But this fails as well, as apparently the secondary processors are not booted with OBP's special locked TLB entries loaded. Those are necessary for the firmware to process TLB misses correctly up until the point where we take over the trap table.

This does need to be resolved at some point.

Signed-off-by: David S. Miller <davem@davemloft.net>
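As a reading aid for the new OBP miss path in ktlb.S below: the handlers now do a linear walk of the prom_trans[] translations array, whose entries are three 64-bit words (virtual base at +0x00, length at +0x08, TTE data at +0x10, hence the 3 * 8 byte stride in the assembly). A rough C sketch of that lookup, under those assumptions, follows; the struct and function names here are illustrative only, not the kernel's actual declarations.

	/* Sketch only: mirrors the linear walk the assembly performs over
	 * prom_trans[].  Entry layout is inferred from the 0x00/0x08/0x10
	 * offsets used in ktlb.S; the names are hypothetical.
	 */
	struct obp_trans_sketch {
		unsigned long virt;	/* virtual base of the OBP mapping */
		unsigned long size;	/* length of the mapping in bytes */
		unsigned long data;	/* TTE data for the base page */
	};

	/* Return the TTE to load for vaddr, or 0 if no entry covers it
	 * (the assembly branches to longpath in that case).  A zero base
	 * terminates the array, matching the brz,a,pn test in the loop.
	 */
	static unsigned long obp_trans_lookup(const struct obp_trans_sketch *t,
					      unsigned long vaddr)
	{
		unsigned long va = vaddr & ~0x1fffUL;	/* 8K page mask, as the d-TLB path does */

		for (; t->virt; t++) {
			unsigned long end = t->virt + t->size;

			if (va >= t->virt && va < end)
				return t->data + (va - t->virt);
		}
		return 0;
	}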
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--	arch/sparc64/kernel/dtlb_base.S	|  14
-rw-r--r--	arch/sparc64/kernel/dtlb_prot.S	|  12
-rw-r--r--	arch/sparc64/kernel/head.S	|  61
-rw-r--r--	arch/sparc64/kernel/itlb_base.S	|  26
-rw-r--r--	arch/sparc64/kernel/ktlb.S	|  92
5 files changed, 92 insertions(+), 113 deletions(-)
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
index 702d349c1e88..6528786840c0 100644
--- a/arch/sparc64/kernel/dtlb_base.S
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -53,19 +53,18 @@
  * be guaranteed to be 0 ... mmu_context.h does guarantee this
  * by only using 10 bits in the hwcontext value.
  */
-#define CREATE_VPTE_OFFSET1(r1, r2)
+#define CREATE_VPTE_OFFSET1(r1, r2) nop
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 	srax	r1, 10, r2
-#define CREATE_VPTE_NOP nop
 #else
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 	srax	r1, PAGE_SHIFT, r2
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 	sllx	r2, 3, r2
-#define CREATE_VPTE_NOP
 #endif
 
 /* DTLB ** ICACHE line 1: Quick user TLB misses */
+	mov	TLB_SFSR, %g1
 	ldxa	[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
 	andcc	%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
 from_tl1_trap:
@@ -74,18 +73,16 @@ from_tl1_trap:
 	be,pn	%xcc, kvmap			! Yep, special processing
 	CREATE_VPTE_OFFSET2(%g4, %g6)		! Create VPTE offset
 	cmp	%g5, 4				! Last trap level?
-	be,pn	%xcc, longpath			! Yep, cannot risk VPTE miss
-	 nop					! delay slot
 
 /* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */
+	be,pn	%xcc, longpath			! Yep, cannot risk VPTE miss
+	 nop					! delay slot
 	ldxa	[%g3 + %g6] ASI_S, %g5		! Load VPTE
 1:	brgez,pn %g5, longpath			! Invalid, branch out
 	 nop					! Delay-slot
 9:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
 	retry					! Trap return
 	nop
-	nop
-	nop
 
 /* DTLB ** ICACHE line 3: winfixups+real_faults */
 longpath:
@@ -106,8 +103,7 @@ longpath:
 	nop
 	nop
 	nop
-	CREATE_VPTE_NOP
+	nop
 
 #undef CREATE_VPTE_OFFSET1
 #undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
index d848bb7374bb..e0a920162604 100644
--- a/arch/sparc64/kernel/dtlb_prot.S
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -14,14 +14,14 @@
  */
 
 /* PROT ** ICACHE line 1: User DTLB protection trap */
-	stxa	%g0, [%g1] ASI_DMMU		! Clear SFSR FaultValid bit
-	membar	#Sync				! Synchronize ASI stores
-	rdpr	%pstate, %g5			! Move into alternate globals
+	mov	TLB_SFSR, %g1
+	stxa	%g0, [%g1] ASI_DMMU		! Clear FaultValid bit
+	membar	#Sync				! Synchronize stores
+	rdpr	%pstate, %g5			! Move into alt-globals
 	wrpr	%g5, PSTATE_AG|PSTATE_MG, %pstate
-	rdpr	%tl, %g1			! Need to do a winfixup?
+	rdpr	%tl, %g1			! Need a winfixup?
 	cmp	%g1, 1				! Trap level >1?
-	mov	TLB_TAG_ACCESS, %g4		! Prepare reload of vaddr
-	nop
+	mov	TLB_TAG_ACCESS, %g4		! For reload of vaddr
 
 /* PROT ** ICACHE line 2: More real fault processing */
 	bgu,pn	%xcc, winfix_trampoline		! Yes, perform winfixup
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 4c942f71184d..b49dcd4504b0 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -28,19 +28,14 @@
 #include <asm/mmu.h>
 
 /* This section from from _start to sparc64_boot_end should fit into
- * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
- * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
- * 0x0000.0000.0040.6000 and empty_bad_page, which is from
- * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
+ * 0x0000000000404000 to 0x0000000000408000.
  */
-
 	.text
 	.globl	start, _start, stext, _stext
 _start:
 start:
 _stext:
 stext:
-bootup_user_stack:
 ! 0x0000000000404000
 	b	sparc64_boot
 	 flushw				/* Flush register file. */
@@ -392,31 +387,30 @@ tlb_fixup_done:
  * former does use this code, the latter does not yet due
  * to some complexities.  That should be fixed up at some
  * point.
+ *
+ * There used to be enormous complexity wrt. transferring
+ * over from the firwmare's trap table to the Linux kernel's.
+ * For example, there was a chicken & egg problem wrt. building
+ * the OBP page tables, yet needing to be on the Linux kernel
+ * trap table (to translate PAGE_OFFSET addresses) in order to
+ * do that.
+ *
+ * We now handle OBP tlb misses differently, via linear lookups
+ * into the prom_trans[] array.  So that specific problem no
+ * longer exists.  Yet, unfortunately there are still some issues
+ * preventing trampoline.S from using this code... ho hum.
  */
 	.globl	setup_trap_table
 setup_trap_table:
 	save	%sp, -192, %sp
 
-	/* Force interrupts to be disabled.  Transferring over to
-	 * the Linux trap table is a very delicate operation.
-	 * Until we are actually on the Linux trap table, we cannot
-	 * get the PAGE_OFFSET linear mappings translated.  We need
-	 * that mapping to be setup in order to initialize the firmware
-	 * page tables.
-	 *
-	 * So there is this window of time, from the return from
-	 * prom_set_trap_table() until inherit_prom_mappings_post()
-	 * (in arch/sparc64/mm/init.c) completes, during which no
-	 * firmware address space accesses can be made.
-	 */
+	/* Force interrupts to be disabled. */
 	rdpr	%pstate, %o1
 	andn	%o1, PSTATE_IE, %o1
 	wrpr	%o1, 0x0, %pstate
 	wrpr	%g0, 15, %pil
 
-	/* Ok, now make the final valid firmware call to jump over
-	 * to the Linux trap table.
-	 */
+	/* Make the firmware call to jump over to the Linux trap table. */
 	call	prom_set_trap_table
 	 sethi	%hi(sparc64_ttable_tl0), %o0
 
@@ -540,15 +534,21 @@ setup_tba: /* i0 = is_starfire */
 
 	ret
 	restore
+sparc64_boot_end:
+
+#include "systbls.S"
+#include "ktlb.S"
+#include "etrap.S"
+#include "rtrap.S"
+#include "winfixup.S"
+#include "entry.S"
 
 /*
- * The following skips make sure the trap table in ttable.S is aligned
+ * The following skip makes sure the trap table in ttable.S is aligned
  * on a 32K boundary as required by the v9 specs for TBA register.
  */
-sparc64_boot_end:
-	.skip	0x2000 + _start - sparc64_boot_end
-bootup_user_stack_end:
-	.skip	0x2000
+1:
+	.skip	0x4000 + _start - 1b
 
 #ifdef CONFIG_SBUS
 /* This is just a hack to fool make depend config.h discovering
@@ -560,15 +560,6 @@ bootup_user_stack_end:
 ! 0x0000000000408000
 
 #include "ttable.S"
-#include "systbls.S"
-#include "ktlb.S"
-#include "etrap.S"
-#include "rtrap.S"
-#include "winfixup.S"
-#include "entry.S"
-
-	/* This is just anal retentiveness on my part... */
-	.align	16384
 
 	.data
 	.align	8
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
index b5e32dfa4fbc..4951ff8f6877 100644
--- a/arch/sparc64/kernel/itlb_base.S
+++ b/arch/sparc64/kernel/itlb_base.S
@@ -15,14 +15,12 @@
  */
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 	srax	r1, 10, r2
-#define CREATE_VPTE_OFFSET2(r1, r2)
-#define CREATE_VPTE_NOP nop
+#define CREATE_VPTE_OFFSET2(r1, r2) nop
 #else /* PAGE_SHIFT */
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 	srax	r1, PAGE_SHIFT, r2
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 	sllx	r2, 3, r2
-#define CREATE_VPTE_NOP
 #endif /* PAGE_SHIFT */
 
 
@@ -36,6 +34,7 @@
  */
 
 /* ITLB ** ICACHE line 1: Quick user TLB misses */
+	mov	TLB_SFSR, %g1
 	ldxa	[%g1 + %g1] ASI_IMMU, %g4	! Get TAG_ACCESS
 	CREATE_VPTE_OFFSET1(%g4, %g6)		! Create VPTE offset
 	CREATE_VPTE_OFFSET2(%g4, %g6)		! Create VPTE offset
@@ -43,41 +42,38 @@
 1:	brgez,pn %g5, 3f			! Not valid, branch out
 	 sethi	%hi(_PAGE_EXEC), %g4		! Delay-slot
 	andcc	%g5, %g4, %g0			! Executable?
+
+/* ITLB ** ICACHE line 2: Real faults */
 	be,pn	%xcc, 3f			! Nope, branch.
 	 nop					! Delay-slot
 2:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load PTE into TLB
 	retry					! Trap return
-3:	rdpr	%pstate, %g4			! Move into alternate globals
-
-/* ITLB ** ICACHE line 2: Real faults */
+3:	rdpr	%pstate, %g4			! Move into alt-globals
 	wrpr	%g4, PSTATE_AG|PSTATE_MG, %pstate
 	rdpr	%tpc, %g5			! And load faulting VA
 	mov	FAULT_CODE_ITLB, %g4		! It was read from ITLB
-sparc64_realfault_common:			! Called by TL0 dtlb_miss too
+
+/* ITLB ** ICACHE line 3: Finish faults */
+sparc64_realfault_common:			! Called by dtlb_miss
 	stb	%g4, [%g6 + TI_FAULT_CODE]
 	stx	%g5, [%g6 + TI_FAULT_ADDR]
 	ba,pt	%xcc, etrap			! Save state
 1:	 rd	%pc, %g7			! ...
-	nop
-
-/* ITLB ** ICACHE line 3: Finish faults + window fixups */
 	call	do_sparc64_fault		! Call fault handler
 	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
 	ba,pt	%xcc, rtrap_clr_l6		! Restore cpu state
 	 nop
+
+/* ITLB ** ICACHE line 4: Window fixups */
 winfix_trampoline:
 	rdpr	%tpc, %g3			! Prepare winfixup TNPC
-	or	%g3, 0x7c, %g3			! Compute offset to branch
+	or	%g3, 0x7c, %g3			! Compute branch offset
 	wrpr	%g3, %tnpc			! Write it into TNPC
 	done					! Do it to it
-
-/* ITLB ** ICACHE line 4: Unused... */
 	nop
 	nop
 	nop
 	nop
-	CREATE_VPTE_NOP
 
 #undef CREATE_VPTE_OFFSET1
 #undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 7796b37f478c..d9244d3c9f73 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -58,9 +58,6 @@ vpte_noent:
 	done
 
 vpte_insn_obp:
-	sethi	%hi(prom_pmd_phys), %g5
-	ldx	[%g5 + %lo(prom_pmd_phys)], %g5
-
 	/* Behave as if we are at TL0. */
 	wrpr	%g0, 1, %tl
 	rdpr	%tpc, %g4		/* Find original faulting iaddr */
@@ -71,58 +68,57 @@ vpte_insn_obp:
 	mov	TLB_SFSR, %g1
 	stxa	%g4, [%g1 + %g1] ASI_IMMU
 
-	/* Get PMD offset. */
-	srlx	%g4, 23, %g6
-	and	%g6, 0x7ff, %g6
-	sllx	%g6, 2, %g6
-
-	/* Load PMD, is it valid? */
-	lduwa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn	%g5, longpath
-	 sllx	%g5, 11, %g5
-
-	/* Get PTE offset. */
-	srlx	%g4, 13, %g6
-	and	%g6, 0x3ff, %g6
-	sllx	%g6, 3, %g6
-
-	/* Load PTE. */
-	ldxa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn %g5, longpath
-	 nop
-
-	/* TLB load and return from trap. */
+	sethi	%hi(prom_trans), %g5
+	or	%g5, %lo(prom_trans), %g5
+
+1:	ldx	[%g5 + 0x00], %g6	! base
+	brz,a,pn %g6, longpath		! no more entries, fail
+	 mov	TLB_SFSR, %g1		! and restore %g1
+	ldx	[%g5 + 0x08], %g1	! len
+	add	%g6, %g1, %g1		! end
+	cmp	%g6, %g4
+	bgu,pt	%xcc, 2f
+	 cmp	%g4, %g1
+	bgeu,pt	%xcc, 2f
+	 ldx	[%g5 + 0x10], %g1	! PTE
+
+	/* TLB load, restore %g1, and return from trap. */
+	sub	%g4, %g6, %g6
+	add	%g1, %g6, %g5
+	mov	TLB_SFSR, %g1
 	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
 	retry
 
+2:	ba,pt	%xcc, 1b
+	 add	%g5, (3 * 8), %g5	! next entry
+
 kvmap_do_obp:
-	sethi	%hi(prom_pmd_phys), %g5
-	ldx	[%g5 + %lo(prom_pmd_phys)], %g5
-
-	/* Get PMD offset. */
-	srlx	%g4, 23, %g6
-	and	%g6, 0x7ff, %g6
-	sllx	%g6, 2, %g6
-
-	/* Load PMD, is it valid? */
-	lduwa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn	%g5, longpath
-	 sllx	%g5, 11, %g5
-
-	/* Get PTE offset. */
-	srlx	%g4, 13, %g6
-	and	%g6, 0x3ff, %g6
-	sllx	%g6, 3, %g6
-
-	/* Load PTE. */
-	ldxa	[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn %g5, longpath
-	 nop
-
-	/* TLB load and return from trap. */
+	sethi	%hi(prom_trans), %g5
+	or	%g5, %lo(prom_trans), %g5
+	srlx	%g4, 13, %g4
+	sllx	%g4, 13, %g4
+
+1:	ldx	[%g5 + 0x00], %g6	! base
+	brz,a,pn %g6, longpath		! no more entries, fail
+	 mov	TLB_SFSR, %g1		! and restore %g1
+	ldx	[%g5 + 0x08], %g1	! len
+	add	%g6, %g1, %g1		! end
+	cmp	%g6, %g4
+	bgu,pt	%xcc, 2f
+	 cmp	%g4, %g1
+	bgeu,pt	%xcc, 2f
+	 ldx	[%g5 + 0x10], %g1	! PTE
+
+	/* TLB load, restore %g1, and return from trap. */
+	sub	%g4, %g6, %g6
+	add	%g1, %g6, %g5
+	mov	TLB_SFSR, %g1
 	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
 	retry
 
+2:	ba,pt	%xcc, 1b
+	 add	%g5, (3 * 8), %g5	! next entry
+
 /*
  * On a first level data miss, check whether this is to the OBP range (note
  * that such accesses can be made by prom, as well as by kernel using