path: root/include/asm-sparc64
author	David S. Miller <davem@sunset.davemloft.net>	2006-02-17 21:01:02 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:13:34 -0500
commit	8b234274418d6d79527c4ac3a72da446ca4cb35f (patch)
tree	ab4ab14fa7f1cab7889ecc2339f0261253a5d0e1 /include/asm-sparc64
parent	7adb37fe80d06cbd40de9b225b12a3a9ec40b6bb (diff)
[SPARC64]: More TLB/TSB handling fixes.
The SUN4V convention with non-shared TSBs is that the context
bit of the TAG is clear.  So we have to choose an "invalid"
bit and initialize new TSBs appropriately.  Otherwise a zero
TAG looks "valid".

Make sure, for the window fixup cases, that we use the right
global registers and that we don't potentially trample on the
live global registers in etrap/rtrap handling (%g2 and %g6)
and that we put the missing virtual address properly in %g5.

Signed-off-by: David S. Miller <davem@davemloft.net>
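The point of the "invalid" bit is that a per-mm TSB stores a zero CONTEXT field, so an all-zero tag would be indistinguishable from a real translation of virtual address zero.  The following plain C is purely illustrative (struct tsb_entry and tsb_init() are hypothetical names, not the kernel's code) and sketches the kind of initialization the new TSB_TAG_INVALID_BIT makes possible:

	#include <stdint.h>
	#include <stddef.h>

	#define TSB_TAG_INVALID_BIT	46	/* bit reserved by this patch */

	struct tsb_entry {			/* hypothetical layout: tag + PTE */
		uint64_t tag;
		uint64_t pte;
	};

	/* A zero tag would look "valid" (context 0, VA 0), so every entry of a
	 * freshly allocated TSB gets the invalid bit set before the TSB is
	 * handed to the MMU. */
	static void tsb_init(struct tsb_entry *tsb, size_t nentries)
	{
		size_t i;

		for (i = 0; i < nentries; i++) {
			tsb[i].tag = 1UL << TSB_TAG_INVALID_BIT;
			tsb[i].pte = 0;
		}
	}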
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--	include/asm-sparc64/tsb.h	8
-rw-r--r--	include/asm-sparc64/ttable.h	12
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 7f3abc32c4d..6e6768067e3 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -12,6 +12,8 @@
  *
  *	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
  *	ldxa		[%g0] ASI_{D,I}MMU, %g6
+ *	sllx		%g6, 22, %g6
+ *	srlx		%g6, 22, %g6
  *	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
  *	cmp		%g4, %g6
  *	bne,pn		%xcc, tsb_miss_{d,i}tlb
@@ -29,6 +31,9 @@
  * -------------------------------------------------
  *  63 61 60      48 47 42 41                     0
  *
+ * But actually, since we use per-mm TSB's, we zero out the CONTEXT
+ * field.
+ *
  * Like the powerpc hashtables we need to use locking in order to
  * synchronize while we update the entries.  PTE updates need locking
  * as well.
@@ -42,6 +47,9 @@
 #define TSB_TAG_LOCK_BIT	47
 #define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))
 
+#define TSB_TAG_INVALID_BIT	46
+#define TSB_TAG_INVALID_HIGH	(1 << (TSB_TAG_INVALID_BIT - 32))
+
 #define TSB_MEMBAR	membar	#StoreStore
 
 /* Some cpus support physical address quad loads.  We want to use
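The sllx/srlx pair added to the inline-lookup comment above clears the upper 22 bits of the MMU tag-target value, including the CONTEXT field, before the compare against the stored tag.  A rough C rendering of that check (illustrative only; tsb_tag_matches() is a made-up helper, the real code is the assembly sequence shown in the comment):

	#include <stdint.h>

	/* Bits 41:0 of the tag hold VA[63:22]; bits 63:42, which include the
	 * CONTEXT field, are cleared by the sllx/srlx pair, matching the zero
	 * CONTEXT stored in per-mm TSBs. */
	#define TSB_TAG_VA_MASK	((1UL << 42) - 1)

	static inline int tsb_tag_matches(uint64_t tag_target, uint64_t stored_tag)
	{
		return (tag_target & TSB_TAG_VA_MASK) == stored_tag;
	}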
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index 9e28b240f3a..2d5e3c464df 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -184,20 +184,20 @@
 	ldxa	[%g0] ASI_SCRATCHPAD, %g2;		\
 	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4;	\
 	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5;	\
-	srlx	%g4, 22, %g7;				\
-	sllx	%g5, 48, %g6;				\
+	srlx	%g4, 22, %g6;				\
 	ba,pt	%xcc, sun4v_itsb_miss;			\
-	 or	%g6, %g7, %g6;				\
+	 nop;						\
+	 nop;						\
 	 nop;
 
 #define SUN4V_DTSB_MISS					\
 	ldxa	[%g0] ASI_SCRATCHPAD, %g2;		\
 	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4;	\
 	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5;	\
-	srlx	%g4, 22, %g7;				\
-	sllx	%g5, 48, %g6;				\
+	srlx	%g4, 22, %g6;				\
 	ba,pt	%xcc, sun4v_dtsb_miss;			\
-	 or	%g6, %g7, %g6;				\
+	 nop;						\
+	 nop;						\
 	 nop;
 
 /* Before touching these macros, you owe it to yourself to go and
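The net effect of the ttable.h change on how the trap vectors form the TSB tag can be written as plain C (illustrative only; old_sun4v_tsb_tag() and new_sun4v_tsb_tag() are hypothetical names):

	#include <stdint.h>

	/* Before: the vector folded the context, shifted into bits 63:48, into
	 * the tag it handed to the miss handler. */
	static inline uint64_t old_sun4v_tsb_tag(uint64_t fault_vaddr, uint64_t ctx)
	{
		return (ctx << 48) | (fault_vaddr >> 22);
	}

	/* After: per-mm TSBs keep CONTEXT zero, so the vector only computes
	 * VA[63:22] (into %g6); the raw fault address and context remain in
	 * %g4/%g5 for the common sun4v_{i,d}tsb_miss code to consult. */
	static inline uint64_t new_sun4v_tsb_tag(uint64_t fault_vaddr)
	{
		return fault_vaddr >> 22;
	}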