about summary refs log tree commit diff stats
path: root/arch/sparc64/kernel/tsb.S
diff options
context:
space:
mode:
author David S. Miller <davem@sunset.davemloft.net> 2006-02-01 18:55:21 -0500
committer David S. Miller <davem@sunset.davemloft.net> 2006-03-20 04:11:32 -0500
commit 517af33237ecfc3c8a93b335365fa61e741ceca4 (patch)
tree 58eff40eb4c517c4fd49fd347d38273ee1e1ee4b /arch/sparc64/kernel/tsb.S
parent b0fd4e49aea8a460afab7bc67cd618e2d19291d4 (diff)
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical address loads/stores. We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/tsb.S')
-rw-r--r-- arch/sparc64/kernel/tsb.S | 35
1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index e1dd37f5e535..ff6a79beb98d 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -53,7 +53,7 @@ tsb_reload:
53 /* Load and check PTE. */ 53 /* Load and check PTE. */
54 ldxa [%g5] ASI_PHYS_USE_EC, %g5 54 ldxa [%g5] ASI_PHYS_USE_EC, %g5
55 brgez,a,pn %g5, tsb_do_fault 55 brgez,a,pn %g5, tsb_do_fault
56 stx %g0, [%g1] 56 TSB_STORE(%g1, %g0)
57 57
58 /* If it is larger than the base page size, don't 58 /* If it is larger than the base page size, don't
59 * bother putting it into the TSB. 59 * bother putting it into the TSB.
@@ -64,7 +64,7 @@ tsb_reload:
64 and %g2, %g4, %g2 64 and %g2, %g4, %g2
65 cmp %g2, %g7 65 cmp %g2, %g7
66 bne,a,pn %xcc, tsb_tlb_reload 66 bne,a,pn %xcc, tsb_tlb_reload
67 stx %g0, [%g1] 67 TSB_STORE(%g1, %g0)
68 68
69 TSB_WRITE(%g1, %g5, %g6) 69 TSB_WRITE(%g1, %g5, %g6)
70 70
@@ -131,13 +131,13 @@ winfix_trampoline:
131 131
132 /* Insert an entry into the TSB. 132 /* Insert an entry into the TSB.
133 * 133 *
134 * %o0: TSB entry pointer 134 * %o0: TSB entry pointer (virt or phys address)
135 * %o1: tag 135 * %o1: tag
136 * %o2: pte 136 * %o2: pte
137 */ 137 */
138 .align 32 138 .align 32
139 .globl tsb_insert 139 .globl __tsb_insert
140tsb_insert: 140__tsb_insert:
141 rdpr %pstate, %o5 141 rdpr %pstate, %o5
142 wrpr %o5, PSTATE_IE, %pstate 142 wrpr %o5, PSTATE_IE, %pstate
143 TSB_LOCK_TAG(%o0, %g2, %g3) 143 TSB_LOCK_TAG(%o0, %g2, %g3)
@@ -146,6 +146,31 @@ tsb_insert:
146 retl 146 retl
147 nop 147 nop
148 148
149 /* Flush the given TSB entry if it has the matching
150 * tag.
151 *
152 * %o0: TSB entry pointer (virt or phys address)
153 * %o1: tag
154 */
155 .align 32
156 .globl tsb_flush
157tsb_flush:
158 sethi %hi(TSB_TAG_LOCK_HIGH), %g2
1591: TSB_LOAD_TAG(%o0, %g1)
160 srlx %g1, 32, %o3
161 andcc %o3, %g2, %g0
162 bne,pn %icc, 1b
163 membar #LoadLoad
164 cmp %g1, %o1
165 bne,pt %xcc, 2f
166 clr %o3
167 TSB_CAS_TAG(%o0, %g1, %o3)
168 cmp %g1, %o3
169 bne,pn %xcc, 1b
170 nop
1712: retl
172 TSB_MEMBAR
173
149 /* Reload MMU related context switch state at 174 /* Reload MMU related context switch state at
150 * schedule() time. 175 * schedule() time.
151 * 176 *