author    David S. Miller <davem@davemloft.net>  2006-01-31 21:31:20 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:11:17 -0500
commit    98c5584cfc47932c4f3ccf5eee2e0bae1447b85e (patch)
tree      c067ac8bfc081bbe0b3073374cb15708458e04ab /arch/sparc64/kernel/tsb.S
parent    09f94287f7260e03bbeab497e743691fafcc22c3 (diff)
[SPARC64]: Add infrastructure for dynamic TSB sizing.
This also cleans up tsb_context_switch().  The assembler routine is now
__tsb_context_switch() and the former is an inline function that picks out
the bits from the mm_struct and passes them into the assembler code as
arguments.

setup_tsb_parms() computes the locked TLB entry to map the TSB.  Later, when
we support using the physical address quad load instructions of Cheetah+ and
later, we'll simply use the physical address for the TSB register value and
set the map virtual and PTE both to zero.

Signed-off-by: David S. Miller <davem@davemloft.net>
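The C-side wrapper described above is not part of this diff (the diffstat
below is limited to tsb.S), but its shape follows directly from the new
register contract: %o0 through %o3 map one-to-one onto four arguments.
A minimal sketch, assuming mm->context fields named tsb_reg_val,
tsb_map_vaddr, and tsb_map_pte (names assumed for illustration, not taken
from this diff):

/* Sketch of the inline wrapper around __tsb_context_switch().
 * Presumes the usual kernel headers (linux/mm_types.h for mm_struct,
 * asm/page.h for __pa).  The mm->context field names are assumptions;
 * this diff only shows the assembler side of the interface.
 */
extern void __tsb_context_switch(unsigned long pgd_pa,    /* %o0 */
                                 unsigned long tsb_reg,   /* %o1 */
                                 unsigned long tsb_vaddr, /* %o2 */
                                 unsigned long tsb_pte);  /* %o3 */

static inline void tsb_context_switch(struct mm_struct *mm)
{
        __tsb_context_switch(__pa(mm->pgd),
                             mm->context.tsb_reg_val,
                             mm->context.tsb_map_vaddr,
                             mm->context.tsb_map_pte);
}

Note how the zero-map case falls out in the hunks below: brz %o2, 9f skips
the locked D-TLB entry whenever the TSB map virtual address is zero, which is
exactly the Cheetah+ physical-address mode anticipated above.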
Diffstat (limited to 'arch/sparc64/kernel/tsb.S')
-rw-r--r--  arch/sparc64/kernel/tsb.S  55
1 file changed, 21 insertions(+), 34 deletions(-)
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 76f2c0b01f36..fe266bad0a28 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -130,48 +130,36 @@ winfix_trampoline:
  * schedule() time.
  *
  * %o0:	page table physical address
- * %o1:	TSB address
+ * %o1:	TSB register value
+ * %o2:	TSB virtual address
+ * %o3:	TSB mapping locked PTE
+ *
+ * We have to run this whole thing with interrupts
+ * disabled so that the current cpu doesn't change
+ * due to preemption.
  */
 	.align	32
-	.globl	tsb_context_switch
-tsb_context_switch:
+	.globl	__tsb_context_switch
+__tsb_context_switch:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
 
-	ldub	[%g6 + TI_CPU], %o3
-	sethi	%hi(trap_block), %o4
-	sllx	%o3, TRAP_BLOCK_SZ_SHIFT, %o3
-	or	%o4, %lo(trap_block), %o4
-	add	%o4, %o3, %o4
-	stx	%o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]
-
-	brgez	%o1, 9f
-	 nop
-
-	/* Lock TSB into D-TLB.  */
-	sethi	%hi(PAGE_SIZE), %o3
-	and	%o3, %o1, %o3
-	sethi	%hi(TSBMAP_BASE), %o2
-	add	%o2, %o3, %o2
+	ldub	[%g6 + TI_CPU], %g1
+	sethi	%hi(trap_block), %g2
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
+	or	%g2, %lo(trap_block), %g2
+	add	%g2, %g1, %g2
+	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	/* XXX handle PAGE_SIZE != 8K correctly...  */
 	mov	TSB_REG, %g1
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o1, [%g1] ASI_DMMU
 	membar	#Sync
 
-	stxa	%o2, [%g1] ASI_IMMU
+	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
 
-#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
-#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)
-	sethi		%uhi(KERN_HIGHBITS), %g2
-	or		%g2, %ulo(KERN_HIGHBITS), %g2
-	sllx		%g2, 32, %g2
-	or		%g2, KERN_LOWBITS, %g2
-#undef KERN_HIGHBITS
-#undef KERN_LOWBITS
-
-	xor	%o1, %g2, %o1
+	brz	%o2, 9f
+	 nop
 
 	/* We use entry 61 for this locked entry.  This is the spitfire
 	 * TLB entry number, and luckily cheetah masks the value with
@@ -184,11 +172,10 @@ tsb_context_switch:
 	stxa	%o2, [%g1] ASI_DMMU
 	membar	#Sync
 	mov	(61 << 3), %g1
-	stxa	%o1, [%g1] ASI_DTLB_DATA_ACCESS
+	stxa	%o3, [%g1] ASI_DTLB_DATA_ACCESS
 	membar	#Sync
-
 9:
 	wrpr	%o5, %pstate
 
 	retl
-	 mov	%o2, %o0
+	 nop