Diffstat (limited to 'arch/sparc64/kernel/ktlb.S')
 -rw-r--r--  arch/sparc64/kernel/ktlb.S | 263
 1 file changed, 121 insertions(+), 142 deletions(-)
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index d9244d3c9f73..2b5e71b68882 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -4,191 +4,170 @@
  * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
  * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
  * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
 #include <linux/config.h>
 #include <asm/head.h>
 #include <asm/asi.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/tsb.h>
 
 	.text
 	.align	32
 
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP
- * range (note that this is only possible for instruction miss, data misses to
- * obp range do not use vpte). If so, go back directly to the faulting address.
- * This is because we want to read the tpc, otherwise we have no way of knowing
- * the 8k aligned faulting address if we are using >8k kernel pagesize. This
- * also ensures no vpte range addresses are dropped into tlb while obp is
- * executing (see inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
-	/* Note that kvmap below has verified that the address is
-	 * in the range MODULES_VADDR --> VMALLOC_END already. So
-	 * here we need only check if it is an OBP address or not.
-	 */
-	sethi	%hi(LOW_OBP_ADDRESS), %g5
-	cmp	%g4, %g5
-	blu,pn	%xcc, kern_vpte
-	 mov	0x1, %g5
-	sllx	%g5, 32, %g5
-	cmp	%g4, %g5
-	blu,pn	%xcc, vpte_insn_obp
-	 nop
+	.globl		kvmap_itlb
+kvmap_itlb:
+	/* g6: TAG TARGET */
+	mov		TLB_TAG_ACCESS, %g4
+	ldxa		[%g4] ASI_IMMU, %g4
+
+kvmap_itlb_nonlinear:
+	/* Catch kernel NULL pointer calls. */
+	sethi		%hi(PAGE_SIZE), %g5
+	cmp		%g4, %g5
+	bleu,pn		%xcc, kvmap_dtlb_longpath
+	 nop
+
+	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
+
+kvmap_itlb_tsb_miss:
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
+	 mov		0x1, %g5
+	sllx		%g5, 32, %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, kvmap_itlb_obp
+	 nop
 
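For orientation: KERN_TSB_LOOKUP_TL1 probes the kernel TSB (Translation Storage Buffer), a direct-mapped software cache of translations, and branches to kvmap_itlb_load on a hit. A minimal C sketch of such a probe, with the entry layout, table size, and index hash all stated as assumptions rather than the macro's exact definition:

    /* Direct-mapped TSB probe (illustrative; sizes and layout assumed). */
    #include <stdint.h>

    struct tsb_entry { uint64_t tag; uint64_t pte; };

    #define TSB_NENTRIES 512        /* assumed table size */
    #define PAGE_SHIFT   13         /* 8K base pages on sparc64 */

    static uint64_t tsb_probe(const struct tsb_entry *tsb,
                              uint64_t vaddr, uint64_t tag_target)
    {
            /* Index with the VA bits just above the page offset. */
            unsigned long idx = (vaddr >> PAGE_SHIFT) & (TSB_NENTRIES - 1);
            const struct tsb_entry *ent = &tsb[idx];

            /* On a tag match the cached PTE goes straight into the TLB;
             * otherwise control falls through to the classification code.
             */
            return ent->tag == tag_target ? ent->pte : 0;
    }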
-	/* These two instructions are patched by paging_init(). */
-kern_vpte:
-	sethi	%hi(swapper_pgd_zero), %g5
-	lduw	[%g5 + %lo(swapper_pgd_zero)], %g5
-
-	/* With kernel PGD in %g5, branch back into dtlb_backend. */
-	ba,pt	%xcc, sparc64_kpte_continue
-	 andn	%g1, 0x3, %g1	/* Finish PMD offset adjustment. */
-
-vpte_noent:
-	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
-	 * skip over the trap instruction so that the top level
-	 * TLB miss handler will think this %g5 value is just an
-	 * invalid PTE, thus branching to full fault processing.
-	 */
-	mov	TLB_SFSR, %g1
-	stxa	%g4, [%g1 + %g1] ASI_DMMU
-	done
+kvmap_itlb_vmalloc_addr:
+	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
+
+	TSB_LOCK_TAG(%g1, %g2, %g4)
+
+	/* Load and check PTE. */
+	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	brgez,a,pn	%g5, kvmap_itlb_longpath
+	 stx		%g0, [%g1]
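The `brgez,a,pn %g5, ...` sequence above works because _PAGE_VALID occupies bit 63 of a sparc64 PTE: a signed branch-if-greater-or-equal-zero on the loaded value is a one-instruction validity test. The same check in C:

    #include <stdint.h>
    #include <stdbool.h>

    static bool pte_valid(uint64_t pte)
    {
            /* _PAGE_VALID is the sign bit, so "negative" means valid. */
            return (int64_t)pte < 0;
    }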
-
-vpte_insn_obp:
-	/* Behave as if we are at TL0. */
-	wrpr	%g0, 1, %tl
-	rdpr	%tpc, %g4	/* Find original faulting iaddr */
-	srlx	%g4, 13, %g4	/* Throw out context bits */
-	sllx	%g4, 13, %g4	/* g4 has vpn + ctx0 now */
-
-	/* Restore previous TAG_ACCESS. */
-	mov	TLB_SFSR, %g1
-	stxa	%g4, [%g1 + %g1] ASI_IMMU
-
-	sethi	%hi(prom_trans), %g5
-	or	%g5, %lo(prom_trans), %g5
-
-1:	ldx	[%g5 + 0x00], %g6	! base
-	brz,a,pn %g6, longpath		! no more entries, fail
-	 mov	TLB_SFSR, %g1		! and restore %g1
-	ldx	[%g5 + 0x08], %g1	! len
-	add	%g6, %g1, %g1		! end
-	cmp	%g6, %g4
-	bgu,pt	%xcc, 2f
-	 cmp	%g4, %g1
-	bgeu,pt	%xcc, 2f
-	 ldx	[%g5 + 0x10], %g1	! PTE
-
-	/* TLB load, restore %g1, and return from trap. */
-	sub	%g4, %g6, %g6
-	add	%g1, %g6, %g5
-	mov	TLB_SFSR, %g1
-	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
-	retry
 
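Both the deleted loop above and the OBP_TRANS_LOOKUP macro that replaces it walk prom_trans[], the table of firmware mappings captured at boot. A C sketch of that walk; the three-word entry mirrors the 0x00/0x08/0x10 offsets in the assembly, and the zero-base terminator is an assumption:

    #include <stdint.h>

    struct linux_prom_translation {
            uint64_t virt;          /* base VA of the OBP mapping */
            uint64_t size;          /* length in bytes */
            uint64_t data;          /* PTE for the first page */
    };

    static uint64_t obp_trans_lookup(const struct linux_prom_translation *t,
                                     uint64_t vaddr)
    {
            for (; t->virt != 0; t++) {     /* zero base ends the table */
                    if (vaddr >= t->virt && vaddr - t->virt < t->size)
                            return t->data + (vaddr - t->virt);
            }
            return 0;                       /* miss: take the long path */
    }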
-2:	ba,pt	%xcc, 1b
-	 add	%g5, (3 * 8), %g5	! next entry
-
-kvmap_do_obp:
-	sethi	%hi(prom_trans), %g5
-	or	%g5, %lo(prom_trans), %g5
-	srlx	%g4, 13, %g4
-	sllx	%g4, 13, %g4
-
-1:	ldx	[%g5 + 0x00], %g6	! base
-	brz,a,pn %g6, longpath		! no more entries, fail
-	 mov	TLB_SFSR, %g1		! and restore %g1
-	ldx	[%g5 + 0x08], %g1	! len
-	add	%g6, %g1, %g1		! end
-	cmp	%g6, %g4
-	bgu,pt	%xcc, 2f
-	 cmp	%g4, %g1
-	bgeu,pt	%xcc, 2f
-	 ldx	[%g5 + 0x10], %g1	! PTE
-
-	/* TLB load, restore %g1, and return from trap. */
-	sub	%g4, %g6, %g6
-	add	%g1, %g6, %g5
-	mov	TLB_SFSR, %g1
-	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
+	TSB_WRITE(%g1, %g5, %g6)
+
+	/* fallthrough to TLB load */
+
+kvmap_itlb_load:
+	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! Reload TLB
 	retry
 
-2:	ba,pt	%xcc, 1b
-	 add	%g5, (3 * 8), %g5	! next entry
+kvmap_itlb_longpath:
+	rdpr	%pstate, %g5
+	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
+	rdpr	%tpc, %g5
+	ba,pt	%xcc, sparc64_realfault_common
+	 mov	FAULT_CODE_ITLB, %g4
+
+kvmap_itlb_obp:
+	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
+
+	TSB_LOCK_TAG(%g1, %g2, %g4)
+
+	TSB_WRITE(%g1, %g5, %g6)
+
+	ba,pt	%xcc, kvmap_itlb_load
+	 nop
+
+kvmap_dtlb_obp:
+	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
+
+	TSB_LOCK_TAG(%g1, %g2, %g4)
+
+	TSB_WRITE(%g1, %g5, %g6)
+
+	ba,pt	%xcc, kvmap_dtlb_load
+	 nop
 
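The TSB_LOCK_TAG / TSB_WRITE pairing used throughout keeps a concurrent TSB probe from seeing a half-written entry: the tag is first smashed to a lock pattern that can never match, the PTE is stored, and only then is the real tag published. A sketch of that ordering, with the lock encoding and barriers as assumptions rather than the macros' exact definitions:

    #include <stdint.h>

    struct tsb_entry { volatile uint64_t tag; volatile uint64_t pte; };

    #define TSB_TAG_LOCKED (1UL << 63)      /* assumed lock pattern */

    static void tsb_insert(struct tsb_entry *ent, uint64_t tag, uint64_t pte)
    {
            ent->tag = TSB_TAG_LOCKED;      /* probes now miss this entry */
            __sync_synchronize();
            ent->pte = pte;
            __sync_synchronize();           /* PTE visible before the tag */
            ent->tag = tag;                 /* publish the entry */
    }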
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by prom, as well as by kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use information saved during inherit_prom_mappings() using 8k
- * pagesize.
- */
 	.align	32
-kvmap:
-	brgez,pn %g4, kvmap_nonlinear
+	.globl		kvmap_dtlb
+kvmap_dtlb:
+	/* %g6: TAG TARGET */
+	mov		TLB_TAG_ACCESS, %g4
+	ldxa		[%g4] ASI_DMMU, %g4
+	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
+#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
+
+	sethi		%uhi(KERN_HIGHBITS), %g2
+	or		%g2, %ulo(KERN_HIGHBITS), %g2
+	sllx		%g2, 32, %g2
+	or		%g2, KERN_LOWBITS, %g2
+
+#undef KERN_HIGHBITS
+#undef KERN_LOWBITS
+
 	.globl	kvmap_linear_patch
 kvmap_linear_patch:
-#endif
-	ba,pt	%xcc, kvmap_load
+	ba,pt		%xcc, kvmap_dtlb_load
 	 xor	%g2, %g4, %g5
 
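The constant built into %g2 above makes the linear-mapping PTE a single xor: the linear region's VAs carry fixed top bits (the 0xfffff80000000000 folded into KERN_HIGHBITS), so xor-ing clears them down to the physical address while setting the valid/size/attribute bits in the same step. In C, with stand-in constants:

    #include <stdint.h>

    #define PAGE_OFFSET 0xfffff80000000000UL    /* assumed linear base */
    #define PTE_BITS    ((1UL << 63) | 0x1fUL)  /* stand-ins for VALID|SZ4MB|CP|CV|P|W */

    static uint64_t linear_pte(uint64_t vaddr)
    {
            /* Correct whenever vaddr's top bits equal PAGE_OFFSET and the
             * bits of vaddr that overlap PTE_BITS are zero (the handler
             * feeds in the page-aligned TAG_ACCESS VA, so they are).
             */
            return (PTE_BITS ^ PAGE_OFFSET) ^ vaddr;
    }

The kvmap_linear_patch label exists so the kernel can rewrite these two instructions at boot (e.g. to force the page-table walk instead, as the deleted CONFIG_DEBUG_PAGEALLOC block did).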
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	sethi		%hi(swapper_pg_dir), %g5
-	or		%g5, %lo(swapper_pg_dir), %g5
-	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
-	srlx		%g6, 64 - PAGE_SHIFT, %g6
-	andn		%g6, 0x3, %g6
-	lduw		[%g5 + %g6], %g5
-	brz,pn		%g5, longpath
-	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
-	srlx		%g6, 64 - PAGE_SHIFT, %g6
-	sllx		%g5, 11, %g5
-	andn		%g6, 0x3, %g6
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g4, 64 - PMD_SHIFT, %g6
-	srlx		%g6, 64 - PAGE_SHIFT, %g6
-	sllx		%g5, 11, %g5
-	andn		%g6, 0x7, %g6
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
+kvmap_dtlb_vmalloc_addr:
+	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
+
+	TSB_LOCK_TAG(%g1, %g2, %g4)
+
+	/* Load and check PTE. */
+	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	brgez,a,pn	%g5, kvmap_dtlb_longpath
+	 stx		%g0, [%g1]
+
+	TSB_WRITE(%g1, %g5, %g6)
+
+	/* fallthrough to TLB load */
+
+kvmap_dtlb_load:
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+	retry
+
+kvmap_dtlb_nonlinear:
+	/* Catch kernel NULL pointer derefs. */
+	sethi		%hi(PAGE_SIZE), %g5
+	cmp		%g4, %g5
+	bleu,pn		%xcc, kvmap_dtlb_longpath
 	 nop
-	ba,a,pt	%xcc, kvmap_load
-#endif
 
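The deleted CONFIG_DEBUG_PAGEALLOC block and the KERN_PGTABLE_WALK macro that replaces it perform the same three-level walk: index the pgd and pmd with VA bit-fields (the assembly's sllx/srlx pairs extract the same fields as the shift-and-mask expressions below), widen each 32-bit entry into a physical address by shifting left 11, and finish with a 64-bit PTE load. A C rendering; the shift constants follow the assembly and the era's pgtable.h, and the helpers stand in for the ASI_PHYS_USE_EC loads:

    #include <stdint.h>

    #define PAGE_SHIFT  13                          /* 8K pages */
    #define PMD_SHIFT   (PAGE_SHIFT + (PAGE_SHIFT - 3))
    #define PMD_BITS    (PAGE_SHIFT - 2)            /* assumed */
    #define PGDIR_SHIFT (PMD_SHIFT + PMD_BITS)
    #define PGDIR_BITS  (PAGE_SHIFT - 2)            /* assumed */

    static uint32_t swapper_pg_dir[1UL << PGDIR_BITS];  /* stand-in */

    /* Stand-ins for the lduwa/ldxa physical-address loads. */
    static uint32_t phys_read32(uint64_t pa) { return *(volatile uint32_t *)(uintptr_t)pa; }
    static uint64_t phys_read64(uint64_t pa) { return *(volatile uint64_t *)(uintptr_t)pa; }

    static uint64_t kern_pgtable_walk(uint64_t vaddr)
    {
            /* pgd: 32-bit entry, shifted left 11 to recover a paddr */
            uint64_t idx = (vaddr >> PGDIR_SHIFT) & ((1UL << PGDIR_BITS) - 1);
            uint32_t pgd = swapper_pg_dir[idx];
            if (!pgd)
                    return 0;                       /* -> longpath */

            /* pmd: same 32-bit encoding, read through the physical ASI */
            idx = (vaddr >> PMD_SHIFT) & ((1UL << PMD_BITS) - 1);
            uint32_t pmd = phys_read32(((uint64_t)pgd << 11) + idx * 4);
            if (!pmd)
                    return 0;

            /* pte: full 64-bit entry */
            idx = (vaddr >> PAGE_SHIFT) & ((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
            return phys_read64(((uint64_t)pmd << 11) + idx * 8);
    }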
-kvmap_nonlinear:
+	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+
+kvmap_dtlb_tsbmiss:
 	sethi	%hi(MODULES_VADDR), %g5
 	cmp	%g4, %g5
-	blu,pn	%xcc, longpath
+	blu,pn	%xcc, kvmap_dtlb_longpath
 	 mov	(VMALLOC_END >> 24), %g5
 	sllx	%g5, 24, %g5
 	cmp	%g4, %g5
-	bgeu,pn	%xcc, longpath
+	bgeu,pn	%xcc, kvmap_dtlb_longpath
 	 nop
 
 kvmap_check_obp:
 	sethi	%hi(LOW_OBP_ADDRESS), %g5
 	cmp	%g4, %g5
-	blu,pn	%xcc, kvmap_vmalloc_addr
+	blu,pn	%xcc, kvmap_dtlb_vmalloc_addr
 	 mov	0x1, %g5
 	sllx	%g5, 32, %g5
 	cmp	%g4, %g5
-	blu,pn	%xcc, kvmap_do_obp
+	blu,pn	%xcc, kvmap_dtlb_obp
 	 nop
-
+	ba,pt	%xcc, kvmap_dtlb_vmalloc_addr
-kvmap_vmalloc_addr:
-	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
-	ldxa	[%g3 + %g6] ASI_N, %g5
-	brgez,pn %g5, longpath
 	 nop
 
-kvmap_load:
-	/* PTE is valid, load into TLB and return from trap. */
-	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
-	retry
+kvmap_dtlb_longpath:
+	rdpr	%pstate, %g5
+	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
+	rdpr	%tl, %g4
+	cmp	%g4, 1
+	mov	TLB_TAG_ACCESS, %g4
+	ldxa	[%g4] ASI_DMMU, %g5
+	be,pt	%xcc, sparc64_realfault_common
+	 mov	FAULT_CODE_DTLB, %g4
+	ba,pt	%xcc, winfix_trampoline
+	 nop
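Taken together, the new kvmap_dtlb path classifies a faulting kernel address into four cases before anything slow happens: linear mapping (xor trick), vmalloc/modules (page-table walk), OBP window (prom_trans[] scan), or a genuine fault. The same decision tree in C, with the window constants as assumptions (only the 1 << 32 upper OBP bound is literally the value built by the mov/sllx pair above):

    #include <stdint.h>

    #define MODULES_VADDR   0x0000000001000000UL    /* assumed */
    #define VMALLOC_END     0x0000000200000000UL    /* assumed */
    #define LOW_OBP_ADDRESS 0x00000000f0000000UL    /* assumed */
    #define HI_OBP_ADDRESS  (1UL << 32)             /* mov 0x1; sllx 32 */

    enum kvmap_path { KVMAP_LONGPATH, KVMAP_LINEAR, KVMAP_VMALLOC, KVMAP_OBP };

    static enum kvmap_path kvmap_classify(uint64_t vaddr)
    {
            if ((int64_t)vaddr < 0)
                    return KVMAP_LINEAR;    /* top bit set: xor trick */
            if (vaddr < MODULES_VADDR || vaddr >= VMALLOC_END)
                    return KVMAP_LONGPATH;  /* includes the NULL-pointer window */
            if (vaddr >= LOW_OBP_ADDRESS && vaddr < HI_OBP_ADDRESS)
                    return KVMAP_OBP;       /* firmware mapping */
            return KVMAP_VMALLOC;           /* kernel page-table walk */
    }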