diff options
author | David S. Miller <davem@sunset.davemloft.net> | 2006-02-17 21:01:02 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-03-20 04:13:34 -0500 |
commit | 8b234274418d6d79527c4ac3a72da446ca4cb35f (patch) | |
tree | ab4ab14fa7f1cab7889ecc2339f0261253a5d0e1 /include/asm-sparc64/tsb.h | |
parent | 7adb37fe80d06cbd40de9b225b12a3a9ec40b6bb (diff) |
[SPARC64]: More TLB/TSB handling fixes.
The SUN4V convention with non-shared TSBs is that the context
bit of the TAG is clear. So we have to choose an "invalid"
bit and initialize new TSBs appropriately. Otherwise a zero
TAG looks "valid".
Make sure, for the window fixup cases, that we use the right
global registers and that we don't potentially trample on
the live global registers in etrap/rtrap handling (%g2 and
%g6) and that we put the missing virtual address properly
in %g5.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-sparc64/tsb.h')
-rw-r--r-- | include/asm-sparc64/tsb.h | 8 |
1 file changed, 8 insertions, 0 deletions
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 7f3abc32c4dd..6e6768067e38 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -12,6 +12,8 @@
  *
  * 	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
  * 	ldxa		[%g0] ASI_{D,I}MMU, %g6
+ * 	sllx		%g6, 22, %g6
+ * 	srlx		%g6, 22, %g6
  * 	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
  * 	cmp		%g4, %g6
  * 	bne,pn		%xcc, tsb_miss_{d,i}tlb
@@ -29,6 +31,9 @@
  * 	-------------------------------------------------
  * 	 63 61 60      48 47 42 41                     0
  *
+ * But actually, since we use per-mm TSB's, we zero out the CONTEXT
+ * field.
+ *
  * Like the powerpc hashtables we need to use locking in order to
  * synchronize while we update the entries.  PTE updates need locking
  * as well.
@@ -42,6 +47,9 @@
 #define TSB_TAG_LOCK_BIT	47
 #define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))
 
+#define TSB_TAG_INVALID_BIT	46
+#define TSB_TAG_INVALID_HIGH	(1 << (TSB_TAG_INVALID_BIT - 32))
+
 #define TSB_MEMBAR	membar	#StoreStore
 
 /* Some cpus support physical address quad loads.  We want to use