aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc64/kernel/winfixup.S
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2006-01-31 21:29:18 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:11:13 -0500
commit74bf4312fff083ab25c3f357cc653ada7995e5f6 (patch)
treec23dea461e32485f4cd7ca4b8c33c632655eb906 /arch/sparc64/kernel/winfixup.S
parent30d4d1ffed7098afe2641536d67eef150499da02 (diff)
[SPARC64]: Move away from virtual page tables, part 1.
We now use the TSB hardware assist features of the UltraSPARC MMUs. SMP is currently knowingly broken, we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-) Another known broken case is non-8KB base page size. Also noticed that flush_tlb_all() is not referenced anywhere, only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all(). The kernel gets its own 8KB TSB (swapper_tsb) and each address space gets its own private 8K TSB. Later we can add code to dynamically increase the size of per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses. We even accumulate OBP translations into the kernel TSB. Another area for refinement is large page size support. We could use a secondary address space TSB to handle those. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/winfixup.S')
-rw-r--r--arch/sparc64/kernel/winfixup.S8
1 files changed, 3 insertions, 5 deletions
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 39160926267..f5d93aa99cb 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -85,6 +85,7 @@ fill_fixup:
85 mov %o7, %g6 85 mov %o7, %g6
86 ldx [%g6 + TI_TASK], %g4 86 ldx [%g6 + TI_TASK], %g4
87#ifdef CONFIG_SMP 87#ifdef CONFIG_SMP
88#error IMMU TSB usage must be fixed
88 mov TSB_REG, %g1 89 mov TSB_REG, %g1
89 ldxa [%g1] ASI_IMMU, %g5 90 ldxa [%g1] ASI_IMMU, %g5
90#endif 91#endif
@@ -209,6 +210,7 @@ fill_fixup_mna:
209 mov %o7, %g6 ! Get current back. 210 mov %o7, %g6 ! Get current back.
210 ldx [%g6 + TI_TASK], %g4 ! Finish it. 211 ldx [%g6 + TI_TASK], %g4 ! Finish it.
211#ifdef CONFIG_SMP 212#ifdef CONFIG_SMP
213#error IMMU TSB usage must be fixed
212 mov TSB_REG, %g1 214 mov TSB_REG, %g1
213 ldxa [%g1] ASI_IMMU, %g5 215 ldxa [%g1] ASI_IMMU, %g5
214#endif 216#endif
@@ -278,11 +280,6 @@ window_mna_from_user_common:
278 ba,pt %xcc, rtrap 280 ba,pt %xcc, rtrap
279 clr %l6 281 clr %l6
280 282
281 /* These are only needed for 64-bit mode processes which
282 * put their stack pointer into the VPTE area and there
283 * happens to be a VPTE tlb entry mapped there during
284 * a spill/fill trap to that stack frame.
285 */
286 .globl winfix_dax, fill_fixup_dax, spill_fixup_dax 283 .globl winfix_dax, fill_fixup_dax, spill_fixup_dax
287winfix_dax: 284winfix_dax:
288 andn %g3, 0x7f, %g3 285 andn %g3, 0x7f, %g3
@@ -318,6 +315,7 @@ fill_fixup_dax:
318 mov %o7, %g6 ! Get current back. 315 mov %o7, %g6 ! Get current back.
319 ldx [%g6 + TI_TASK], %g4 ! Finish it. 316 ldx [%g6 + TI_TASK], %g4 ! Finish it.
320#ifdef CONFIG_SMP 317#ifdef CONFIG_SMP
318#error IMMU TSB usage must be fixed
321 mov TSB_REG, %g1 319 mov TSB_REG, %g1
322 ldxa [%g1] ASI_IMMU, %g5 320 ldxa [%g1] ASI_IMMU, %g5
323#endif 321#endif