diff options
author | David S. Miller <davem@sunset.davemloft.net> | 2006-02-11 03:29:34 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-03-20 04:12:16 -0500 |
commit | 36a68e77c554f1ef1c206fd618e6daf82d3e38a3 (patch) | |
tree | 59a4591b76ef18e54b5b1b2687268e0dd6bb69ef /arch/sparc64 | |
parent | 12eaa328f9fb2d3fcb5afb682c762690d05a3cd8 (diff) |
[SPARC64]: Simplify sun4v TLB handling using macros.
There was also a bug in sun4v_itlb_miss: it loaded the
MMU Fault Status base into %g3 instead of %g2.
This pointed out a fast path for TSB miss processing,
since we have %g2 with the MMU Fault Status base, we
can use that to quickly load up the PGD phys address.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64')
-rw-r--r-- | arch/sparc64/kernel/sun4v_tlb_miss.S | 130 | ||||
-rw-r--r-- | arch/sparc64/kernel/tsb.S | 18 |
2 files changed, 61 insertions, 87 deletions
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index f6222623de38..f7129137f9a4 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S | |||
@@ -6,48 +6,55 @@ | |||
6 | .text | 6 | .text |
7 | .align 32 | 7 | .align 32 |
8 | 8 | ||
9 | sun4v_itlb_miss: | 9 | /* Load ITLB fault information into VADDR and CTX, using BASE. */ |
10 | /* Load MMU Miss base into %g2. */ | 10 | #define LOAD_ITLB_INFO(BASE, VADDR, CTX) \ |
11 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | 11 | ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \ |
12 | 12 | ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX; | |
13 | /* Load UTSB reg into %g1. */ | 13 | |
14 | mov SCRATCHPAD_UTSBREG1, %g1 | 14 | /* Load DTLB fault information into VADDR and CTX, using BASE. */ |
15 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | 15 | #define LOAD_DTLB_INFO(BASE, VADDR, CTX) \ |
16 | ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \ | ||
17 | ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX; | ||
16 | 18 | ||
17 | /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6. | 19 | /* DEST = (CTX << 48) | (VADDR >> 22) |
18 | * Branch if kernel TLB miss. The kernel TSB and user TSB miss | 20 | * |
19 | * code wants the missing virtual address in %g4, so that value | 21 | * Branch to ZERO_CTX_LABEL if context is zero. |
20 | * cannot be modified through the entirety of this handler. | ||
21 | */ | 22 | */ |
22 | ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 | 23 | #define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, TMP, ZERO_CTX_LABEL) \ |
23 | ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 | 24 | srlx VADDR, 22, TMP; \ |
24 | srlx %g4, 22, %g3 | 25 | sllx CTX, 48, DEST; \ |
25 | sllx %g5, 48, %g6 | 26 | brz,pn CTX, ZERO_CTX_LABEL; \ |
26 | or %g6, %g3, %g6 | 27 | or DEST, TMP, DEST; |
27 | brz,pn %g5, kvmap_itlb_4v | ||
28 | nop | ||
29 | 28 | ||
30 | /* Create TSB pointer. This is something like: | 29 | /* Create TSB pointer. This is something like: |
31 | * | 30 | * |
32 | * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; | 31 | * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; |
33 | * tsb_base = tsb_reg & ~0x7UL; | 32 | * tsb_base = tsb_reg & ~0x7UL; |
34 | */ | ||
35 | and %g1, 0x7, %g3 | ||
36 | andn %g1, 0x7, %g1 | ||
37 | mov 512, %g7 | ||
38 | sllx %g7, %g3, %g7 | ||
39 | sub %g7, 1, %g7 | ||
40 | |||
41 | /* TSB index mask is in %g7, tsb base is in %g1. Compute | ||
42 | * the TSB entry pointer into %g1: | ||
43 | * | ||
44 | * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); | 33 | * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); |
45 | * tsb_ptr = tsb_base + (tsb_index * 16); | 34 | * tsb_ptr = tsb_base + (tsb_index * 16); |
46 | */ | 35 | */ |
47 | srlx %g4, PAGE_SHIFT, %g3 | 36 | #define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \ |
48 | and %g3, %g7, %g3 | 37 | and TSB_PTR, 0x7, TMP1; \ |
49 | sllx %g3, 4, %g3 | 38 | mov 512, TMP2; \ |
50 | add %g1, %g3, %g1 | 39 | andn TSB_PTR, 0x7, TSB_PTR; \ |
40 | sllx TMP2, TMP1, TMP2; \ | ||
41 | srlx VADDR, PAGE_SHIFT, TMP1; \ | ||
42 | sub TMP2, 1, TMP2; \ | ||
43 | and TMP1, TMP2, TMP1; \ | ||
44 | sllx TMP1, 4, TMP1; \ | ||
45 | add TSB_PTR, TMP1, TSB_PTR; | ||
46 | |||
47 | sun4v_itlb_miss: | ||
48 | /* Load MMU Miss base into %g2. */ | ||
49 | ldxa [%g0] ASI_SCRATCHPAD, %g2 | ||
50 | |||
51 | /* Load UTSB reg into %g1. */ | ||
52 | mov SCRATCHPAD_UTSBREG1, %g1 | ||
53 | ldxa [%g1] ASI_SCRATCHPAD, %g1 | ||
54 | |||
55 | LOAD_ITLB_INFO(%g2, %g4, %g5) | ||
56 | COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_itlb_4v) | ||
57 | COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) | ||
51 | 58 | ||
52 | /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ | 59 | /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ |
53 | ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 | 60 | ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 |
@@ -91,40 +98,9 @@ sun4v_dtlb_miss: | |||
91 | mov SCRATCHPAD_UTSBREG1, %g1 | 98 | mov SCRATCHPAD_UTSBREG1, %g1 |
92 | ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1 | 99 | ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1 |
93 | 100 | ||
94 | /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6. | 101 | LOAD_DTLB_INFO(%g2, %g4, %g5) |
95 | * Branch if kernel TLB miss. The kernel TSB and user TSB miss | 102 | COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_dtlb_4v) |
96 | * code wants the missing virtual address in %g4, so that value | 103 | COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) |
97 | * cannot be modified through the entirety of this handler. | ||
98 | */ | ||
99 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 | ||
100 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 | ||
101 | srlx %g4, 22, %g3 | ||
102 | sllx %g5, 48, %g6 | ||
103 | or %g6, %g3, %g6 | ||
104 | brz,pn %g5, kvmap_dtlb_4v | ||
105 | nop | ||
106 | |||
107 | /* Create TSB pointer. This is something like: | ||
108 | * | ||
109 | * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; | ||
110 | * tsb_base = tsb_reg & ~0x7UL; | ||
111 | */ | ||
112 | and %g1, 0x7, %g3 | ||
113 | andn %g1, 0x7, %g1 | ||
114 | mov 512, %g7 | ||
115 | sllx %g7, %g3, %g7 | ||
116 | sub %g7, 1, %g7 | ||
117 | |||
118 | /* TSB index mask is in %g7, tsb base is in %g1. Compute | ||
119 | * the TSB entry pointer into %g1: | ||
120 | * | ||
121 | * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); | ||
122 | * tsb_ptr = tsb_base + (tsb_index * 16); | ||
123 | */ | ||
124 | srlx %g4, PAGE_SHIFT, %g3 | ||
125 | and %g3, %g7, %g3 | ||
126 | sllx %g3, 4, %g3 | ||
127 | add %g1, %g3, %g1 | ||
128 | 104 | ||
129 | /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ | 105 | /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ |
130 | ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 | 106 | ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 |
@@ -169,7 +145,8 @@ sun4v_dtlb_prot: | |||
169 | mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 | 145 | mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 |
170 | 146 | ||
171 | /* Called from trap table with TAG TARGET placed into | 147 | /* Called from trap table with TAG TARGET placed into |
172 | * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1. | 148 | * %g6, SCRATCHPAD_UTSBREG1 contents in %g1, and |
149 | * SCRATCHPAD_MMU_MISS contents in %g2. | ||
173 | */ | 150 | */ |
174 | sun4v_itsb_miss: | 151 | sun4v_itsb_miss: |
175 | ba,pt %xcc, sun4v_tsb_miss_common | 152 | ba,pt %xcc, sun4v_tsb_miss_common |
@@ -189,16 +166,15 @@ sun4v_dtsb_miss: | |||
189 | * tsb_ptr = tsb_base + (tsb_index * 16); | 166 | * tsb_ptr = tsb_base + (tsb_index * 16); |
190 | */ | 167 | */ |
191 | sun4v_tsb_miss_common: | 168 | sun4v_tsb_miss_common: |
192 | and %g1, 0x7, %g2 | 169 | COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7) |
193 | andn %g1, 0x7, %g1 | 170 | |
194 | mov 512, %g7 | 171 | /* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS |
195 | sllx %g7, %g2, %g7 | 172 | * still in %g2, so it's quite trivial to get at the PGD PHYS value |
196 | sub %g7, 1, %g7 | 173 | * so we can preload it into %g7. |
197 | srlx %g4, PAGE_SHIFT, %g2 | 174 | */ |
198 | and %g2, %g7, %g2 | 175 | sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2 |
199 | sllx %g2, 4, %g2 | 176 | ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath |
200 | ba,pt %xcc, tsb_miss_page_table_walk | 177 | ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 |
201 | add %g1, %g2, %g1 | ||
202 | 178 | ||
203 | /* Instruction Access Exception, tl0. */ | 179 | /* Instruction Access Exception, tl0. */ |
204 | sun4v_iacc: | 180 | sun4v_iacc: |
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 8a9351258af8..667dcb077be7 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S | |||
@@ -25,26 +25,24 @@ | |||
25 | */ | 25 | */ |
26 | tsb_miss_dtlb: | 26 | tsb_miss_dtlb: |
27 | mov TLB_TAG_ACCESS, %g4 | 27 | mov TLB_TAG_ACCESS, %g4 |
28 | ldxa [%g4] ASI_DMMU, %g4 | ||
29 | ba,pt %xcc, tsb_miss_page_table_walk | 28 | ba,pt %xcc, tsb_miss_page_table_walk |
30 | nop | 29 | ldxa [%g4] ASI_DMMU, %g4 |
31 | 30 | ||
32 | tsb_miss_itlb: | 31 | tsb_miss_itlb: |
33 | mov TLB_TAG_ACCESS, %g4 | 32 | mov TLB_TAG_ACCESS, %g4 |
34 | ldxa [%g4] ASI_IMMU, %g4 | ||
35 | ba,pt %xcc, tsb_miss_page_table_walk | 33 | ba,pt %xcc, tsb_miss_page_table_walk |
36 | nop | 34 | ldxa [%g4] ASI_IMMU, %g4 |
37 | 35 | ||
38 | /* The sun4v TLB miss handlers jump directly here instead | 36 | /* At this point we have: |
39 | * of tsb_miss_{d,i}tlb with registers setup as follows: | 37 | * %g4 -- missing virtual address |
40 | * | 38 | * %g1 -- TSB entry address |
41 | * %g4: missing virtual address | 39 | * %g6 -- TAG TARGET ((vaddr >> 22) | (ctx << 48)) |
42 | * %g1: TSB entry address loaded | ||
43 | * %g6: TAG TARGET ((vaddr >> 22) | (ctx << 48)) | ||
44 | */ | 40 | */ |
45 | tsb_miss_page_table_walk: | 41 | tsb_miss_page_table_walk: |
46 | TRAP_LOAD_PGD_PHYS(%g7, %g5) | 42 | TRAP_LOAD_PGD_PHYS(%g7, %g5) |
47 | 43 | ||
44 | /* And now we have the PGD base physical address in %g7. */ | ||
45 | tsb_miss_page_table_walk_sun4v_fastpath: | ||
48 | USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) | 46 | USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) |
49 | 47 | ||
50 | tsb_reload: | 48 | tsb_reload: |