Diffstat (limited to 'arch/sparc64/kernel/sun4v_tlb_miss.S')
-rw-r--r-- | arch/sparc64/kernel/sun4v_tlb_miss.S | 219
1 files changed, 219 insertions, 0 deletions
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
new file mode 100644
index 000000000000..58ea5dd8573c
--- /dev/null
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -0,0 +1,219 @@
/* sun4v_tlb_miss.S: Sun4v TLB miss handlers.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

	.text
	.align	32

sun4v_itlb_miss:
	/* Load CPU ID into %g3.  */
	mov	SCRATCHPAD_CPUID, %g1
	ldxa	[%g1] ASI_SCRATCHPAD, %g3

	/* Load UTSB reg into %g1.  */
	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1

	/* Load &trap_block[smp_processor_id()] into %g2.  */
	sethi	%hi(trap_block), %g2
	or	%g2, %lo(trap_block), %g2
	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
	add	%g2, %g3, %g2

	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
	 * Branch if kernel TLB miss.  The kernel TSB and user TSB miss
	 * code wants the missing virtual address in %g4, so that value
	 * cannot be modified through the entirety of this handler.
	 */
	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
	srlx	%g4, 22, %g3
	sllx	%g5, 48, %g6
	or	%g6, %g3, %g6
	brz,pn	%g5, kvmap_itlb_4v
	 nop

	/* Create TSB pointer.  This is something like:
	 *
	 *	index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
	 *	tsb_base = tsb_reg & ~0x7UL;
	 */
	and	%g1, 0x7, %g3
	andn	%g1, 0x7, %g1
	mov	512, %g7
	sllx	%g7, %g3, %g7
	sub	%g7, 1, %g7

	/* TSB index mask is in %g7, tsb base is in %g1.  Compute
	 * the TSB entry pointer into %g1:
	 *
	 *	tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
	 *	tsb_ptr = tsb_base + (tsb_index * 16);
	 */
	srlx	%g4, PAGE_SHIFT, %g3
	and	%g3, %g7, %g3
	sllx	%g3, 4, %g3
	add	%g1, %g3, %g1

	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
	cmp	%g2, %g6
	sethi	%hi(_PAGE_EXEC), %g7
	bne,a,pn %xcc, tsb_miss_page_table_walk
	 mov	FAULT_CODE_ITLB, %g3
	andcc	%g3, %g7, %g0
	be,a,pn	%xcc, tsb_do_fault
	 mov	FAULT_CODE_ITLB, %g3

	/* We have a valid entry, make hypervisor call to load
	 * I-TLB and return from trap.
	 *
	 * %g3: PTE
	 * %g4: vaddr
	 * %g6: TAG TARGET (only "CTX << 48" part matters)
	 */
sun4v_itlb_load:
	mov	%o0, %g1		! save %o0
	mov	%o1, %g2		! save %o1
	mov	%o2, %g5		! save %o2
	mov	%o3, %g7		! save %o3
	mov	%g4, %o0		! vaddr
	srlx	%g6, 48, %o1		! ctx
	mov	%g3, %o2		! PTE
	mov	HV_MMU_IMMU, %o3	! flags
	ta	HV_MMU_MAP_ADDR_TRAP
	mov	%g1, %o0		! restore %o0
	mov	%g2, %o1		! restore %o1
	mov	%g5, %o2		! restore %o2
	mov	%g7, %o3		! restore %o3

	retry

sun4v_dtlb_miss:
	/* Load CPU ID into %g3.  */
	mov	SCRATCHPAD_CPUID, %g1
	ldxa	[%g1] ASI_SCRATCHPAD, %g3

	/* Load UTSB reg into %g1.  */
	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1

	/* Load &trap_block[smp_processor_id()] into %g2.  */
	sethi	%hi(trap_block), %g2
	or	%g2, %lo(trap_block), %g2
	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
	add	%g2, %g3, %g2

	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
	 * Branch if kernel TLB miss.  The kernel TSB and user TSB miss
	 * code wants the missing virtual address in %g4, so that value
	 * cannot be modified through the entirety of this handler.
	 */
	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
	srlx	%g4, 22, %g3
	sllx	%g5, 48, %g6
	or	%g6, %g3, %g6
	brz,pn	%g5, kvmap_dtlb_4v
	 nop

	/* Create TSB pointer.  This is something like:
	 *
	 *	index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
	 *	tsb_base = tsb_reg & ~0x7UL;
	 */
	and	%g1, 0x7, %g3
	andn	%g1, 0x7, %g1
	mov	512, %g7
	sllx	%g7, %g3, %g7
	sub	%g7, 1, %g7

	/* TSB index mask is in %g7, tsb base is in %g1.  Compute
	 * the TSB entry pointer into %g1:
	 *
	 *	tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
	 *	tsb_ptr = tsb_base + (tsb_index * 16);
	 */
	srlx	%g4, PAGE_SHIFT, %g3
	and	%g3, %g7, %g3
	sllx	%g3, 4, %g3
	add	%g1, %g3, %g1

	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
	cmp	%g2, %g6
	bne,a,pn %xcc, tsb_miss_page_table_walk
	 mov	FAULT_CODE_DTLB, %g3

	/* We have a valid entry, make hypervisor call to load
	 * D-TLB and return from trap.
	 *
	 * %g3: PTE
	 * %g4: vaddr
	 * %g6: TAG TARGET (only "CTX << 48" part matters)
	 */
sun4v_dtlb_load:
	mov	%o0, %g1		! save %o0
	mov	%o1, %g2		! save %o1
	mov	%o2, %g5		! save %o2
	mov	%o3, %g7		! save %o3
	mov	%g4, %o0		! vaddr
	srlx	%g6, 48, %o1		! ctx
	mov	%g3, %o2		! PTE
	mov	HV_MMU_DMMU, %o3	! flags
	ta	HV_MMU_MAP_ADDR_TRAP
	mov	%g1, %o0		! restore %o0
	mov	%g2, %o1		! restore %o1
	mov	%g5, %o2		! restore %o2
	mov	%g7, %o3		! restore %o3

	retry

sun4v_dtlb_prot:
	/* Load CPU ID into %g3.  */
	mov	SCRATCHPAD_CPUID, %g1
	ldxa	[%g1] ASI_SCRATCHPAD, %g3

	/* Load &trap_block[smp_processor_id()] into %g2.  */
	sethi	%hi(trap_block), %g2
	or	%g2, %lo(trap_block), %g2
	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
	add	%g2, %g3, %g2

	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g5
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgu,pn	%xcc, winfix_trampoline
	 nop
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4

#define BRANCH_ALWAYS	0x10680000
#define NOP		0x01000000
#define SUN4V_DO_PATCH(OLD, NEW)	\
	sethi	%hi(NEW), %g1; \
	or	%g1, %lo(NEW), %g1; \
	sethi	%hi(OLD), %g2; \
	or	%g2, %lo(OLD), %g2; \
	sub	%g1, %g2, %g1; \
	sethi	%hi(BRANCH_ALWAYS), %g3; \
	srl	%g1, 2, %g1; \
	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
	or	%g3, %g1, %g3; \
	stw	%g3, [%g2]; \
	sethi	%hi(NOP), %g3; \
	or	%g3, %lo(NOP), %g3; \
	stw	%g3, [%g2 + 0x4]; \
	flush	%g2;

	.globl	sun4v_patch_tlb_handlers
	.type	sun4v_patch_tlb_handlers,#function
sun4v_patch_tlb_handlers:
	SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss)
	SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss)
	SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss)
	SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss)
	SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot)
	SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot)
	retl
	 nop
	.size	sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers
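
The two miss handlers above do the whole TSB lookup with shift-and-mask arithmetic: form the tag target as "(vaddr >> 22) | (ctx << 48)", decode the TSB register into a base address and an index mask, and index 16-byte entries. The following is a minimal C sketch of that arithmetic, based only on the formulas quoted in the handler comments; the names (tsb_entry, tag_target, tsb_entry_ptr, page_shift) are illustrative, not kernel API.

#include <stdint.h>

struct tsb_entry {			/* one 16-byte TSB entry: tag, then PTE */
	uint64_t tag;
	uint64_t pte;
};

/* TAG TARGET, as built in %g6 above. */
static inline uint64_t tag_target(uint64_t vaddr, uint64_t ctx)
{
	return (vaddr >> 22) | (ctx << 48);
}

/* TSB entry pointer, as built in %g1 above.  The low three bits of the
 * TSB register encode the size (512 entries << N); the rest is the base.
 */
static inline struct tsb_entry *tsb_entry_ptr(uint64_t tsb_reg,
					      uint64_t vaddr,
					      unsigned int page_shift)
{
	uint64_t index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	uint64_t tsb_base   = tsb_reg & ~0x7UL;
	uint64_t tsb_index  = (vaddr >> page_shift) & index_mask;

	return (struct tsb_entry *)(tsb_base + tsb_index * 16);
}

SUN4V_DO_PATCH overwrites the first two instructions of the generic trap-table entry with "branch always to the sun4v handler; nop", encoding the branch as BRANCH_ALWAYS OR'd with the word displacement. A rough C rendering of that encoding follows; the helper name is hypothetical, it assumes the displacement fits in the branch's displacement field, and it omits the "flush" the macro issues to synchronize the instruction stream after the stores.

#include <stdint.h>

#define BRANCH_ALWAYS	0x10680000u	/* "ba,pt %xcc" opcode template */
#define NOP		0x01000000u

static void sun4v_patch_one(uint32_t *old, const uint32_t *new_tgt)
{
	/* Byte offset to instruction-word offset, hence the shift by 2. */
	uint32_t disp = (uint32_t)(((uintptr_t)new_tgt - (uintptr_t)old) >> 2);

	old[0] = BRANCH_ALWAYS | disp;	/* branch always to the sun4v handler */
	old[1] = NOP;			/* fill the delay slot */
}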