author     David S. Miller <davem@sunset.davemloft.net>  2006-02-01 18:55:21 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:11:32 -0500
commit     517af33237ecfc3c8a93b335365fa61e741ceca4 (patch)
tree       58eff40eb4c517c4fd49fd347d38273ee1e1ee4b /include/asm-sparc64
parent     b0fd4e49aea8a460afab7bc67cd618e2d19291d4 (diff)
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB.  The trick is that
every TSB load/store is registered into a special instruction patch section.
The default uses virtual addresses, and the patch instructions use physical
address load/stores.

We can't do this on all chips because only cheetah+ and later have the
physical variant of the atomic quad load.

Signed-off-by: David S. Miller <davem@davemloft.net>
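The patch section described above is consumed once at boot on chips that have
the physical quad load.  A minimal C sketch of that consumer, assuming a
helper named tsb_phys_patch() (the name and call site are illustrative; the
entry layout matches the tsb_phys_patch_entry struct added in the diff below):

#include <linux/init.h>
#include <asm/system.h>
#include <asm/tsb.h>

/* Illustrative sketch (not part of this diff): walk the .tsb_phys_patch
 * section and overwrite each recorded virtual-ASI instruction with its
 * physical-ASI replacement, flushing so the new opcode is fetched.
 */
static void __init tsb_phys_patch(void)
{
	struct tsb_phys_patch_entry *p = &__tsb_phys_patch;

	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		/* Replace the default instruction recorded at addr ... */
		*(unsigned int *) addr = p->insn;
		wmb();
		/* ... and flush so the cpu fetches the new opcode. */
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));
		p++;
	}
}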
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/mmu.h  |  3
-rw-r--r--  include/asm-sparc64/tsb.h  | 94
2 files changed, 91 insertions, 6 deletions
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 18f98edfbcda..55e622711b96 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -97,7 +97,8 @@ struct tsb {
 	unsigned long pte;
 } __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
 
-extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte);
+extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+extern void tsb_flush(unsigned long ent, unsigned long tag);
 
 typedef struct {
 	unsigned long sparc64_ctx_val;
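The prototype change above (struct tsb * becoming unsigned long) exists because
the address handed to the assembly routine may already be physical.  A plausible
C wrapper, sketched on the assumption that cheetah_plus chips convert the entry
pointer with __pa() before calling the helper declared above (details illustrative):

void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	/* On cheetah+ the TSB is reached through physical-address ASIs,
	 * so pass the physical address of the entry. */
	if (tlb_type == cheetah_plus)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}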
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index f384565212fe..44709cde5617 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -44,7 +44,89 @@
 
 #define TSB_MEMBAR	membar	#StoreStore
 
+/* Some cpus support physical address quad loads.  We want to use
+ * those if possible so we don't need to hard-lock the TSB mapping
+ * into the TLB.  We encode some instruction patching in order to
+ * support this.
+ *
+ * The kernel TSB is locked into the TLB by virtue of being in the
+ * kernel image, so we don't play these games for swapper_tsb access.
+ */
+#ifndef __ASSEMBLY__
+struct tsb_phys_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+#endif
+#define TSB_LOAD_QUAD(TSB, REG)	\
+661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
+	.previous
+
+#define TSB_LOAD_TAG_HIGH(TSB, REG) \
+661:	lduwa		[TSB] ASI_N, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	lduwa		[TSB] ASI_PHYS_USE_EC, REG; \
+	.previous
+
+#define TSB_LOAD_TAG(TSB, REG) \
+661:	ldxa		[TSB] ASI_N, REG; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	ldxa		[TSB] ASI_PHYS_USE_EC, REG; \
+	.previous
+
+#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
+661:	casa		[TSB] ASI_N, REG1, REG2; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	casa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
+	.previous
+
+#define TSB_CAS_TAG(TSB, REG1, REG2) \
+661:	casxa		[TSB] ASI_N, REG1, REG2; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	casxa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
+	.previous
+
+#define TSB_STORE(ADDR, VAL) \
+661:	stxa		VAL, [ADDR] ASI_N; \
+	.section	.tsb_phys_patch, "ax"; \
+	.word		661b; \
+	stxa		VAL, [ADDR] ASI_PHYS_USE_EC; \
+	.previous
+
 #define TSB_LOCK_TAG(TSB, REG1, REG2)	\
+99:	TSB_LOAD_TAG_HIGH(TSB, REG1);	\
+	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
+	andcc	REG1, REG2, %g0;	\
+	bne,pn	%icc, 99b;		\
+	 nop;				\
+	TSB_CAS_TAG_HIGH(TSB, REG1, REG2);	\
+	cmp	REG1, REG2;		\
+	bne,pn	%icc, 99b;		\
+	 nop;				\
+	TSB_MEMBAR
+
+#define TSB_WRITE(TSB, TTE, TAG) \
+	add	TSB, 0x8, TSB; \
+	TSB_STORE(TSB, TTE); \
+	sub	TSB, 0x8, TSB; \
+	TSB_MEMBAR; \
+	TSB_STORE(TSB, TAG);
+
+#define KTSB_LOAD_QUAD(TSB, REG) \
+	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;
+
+#define KTSB_STORE(ADDR, VAL) \
+	stxa		VAL, [ADDR] ASI_N;
+
+#define KTSB_LOCK_TAG(TSB, REG1, REG2)	\
 99:	lduwa	[TSB] ASI_N, REG1;	\
 	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
 	andcc	REG1, REG2, %g0;	\
@@ -56,10 +138,12 @@
 	 nop;				\
 	TSB_MEMBAR
 
-#define TSB_WRITE(TSB, TTE, TAG) \
-	stx		TTE, [TSB + 0x08]; \
-	TSB_MEMBAR; \
-	stx		TAG, [TSB + 0x00];
+#define KTSB_WRITE(TSB, TTE, TAG) \
+	add	TSB, 0x8, TSB; \
+	stxa	TTE, [TSB] ASI_N; \
+	sub	TSB, 0x8, TSB; \
+	TSB_MEMBAR; \
+	stxa	TAG, [TSB] ASI_N;
 
 	/* Do a kernel page table walk.  Leaves physical PTE pointer in
 	 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
@@ -157,7 +241,7 @@
 	and	REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
 	sllx	REG2, 4, REG2; \
 	add	REG1, REG2, REG2; \
-	ldda	[REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \
+	KTSB_LOAD_QUAD(REG2, REG3); \
 	cmp	REG3, TAG; \
 	be,a,pt	%xcc, OK_LABEL; \
 	 mov	REG4, REG1;
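
For reference, the ordering that TSB_LOCK_TAG and TSB_WRITE above implement can
be read as the following C sketch.  It assumes struct tsb is { tag, pte } as in
mmu.h, uses cmpxchg/smp_wmb as stand-ins for the casa/membar instructions, and
is an illustration of the protocol rather than the real code:

/* Conceptual C rendering of TSB_LOCK_TAG + TSB_WRITE (illustration only).
 * The lock bit (TSB_TAG_LOCK_HIGH) lives in the upper 32 bits of the tag,
 * i.e. the first 32-bit word on this big-endian cpu.
 */
static void tsb_update_entry(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned int *tag_high = (unsigned int *) &ent->tag;
	unsigned int old;

	/* TSB_LOCK_TAG: spin while the entry is locked, then try to swap
	 * the lock bit in; retry if another cpu changed the tag meanwhile. */
	for (;;) {
		old = *tag_high;			/* TSB_LOAD_TAG_HIGH */
		if (old & TSB_TAG_LOCK_HIGH)
			continue;
		if (cmpxchg(tag_high, old, TSB_TAG_LOCK_HIGH) == old)
			break;				/* TSB_CAS_TAG_HIGH */
	}
	smp_wmb();					/* TSB_MEMBAR */

	/* TSB_WRITE: publish the PTE first, then the final tag; the tag
	 * store also clears the lock bit, making the entry visible. */
	ent->pte = pte;
	smp_wmb();					/* TSB_MEMBAR */
	ent->tag = tag;
}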