author		David S. Miller <davem@sunset.davemloft.net>	2005-09-21 21:50:51 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-09-21 21:50:51 -0400
commit		2a7e2990340a1c42321d79dc99755683ea5a1026 (patch)
tree		4423f305fd52c7a28a38f00cd826d01ad60f2e97 /arch
parent		efb0372bbaf5b829ff8c39db372779928af542a7 (diff)
[SPARC64]: Move kernel TLB miss handling into a separate file.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc64/kernel/dtlb_base.S |   8
-rw-r--r--	arch/sparc64/kernel/entry.S     | 153
-rw-r--r--	arch/sparc64/kernel/head.S      |   1
-rw-r--r--	arch/sparc64/kernel/ktlb.S      | 174
4 files changed, 179 insertions(+), 157 deletions(-)
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
index ded2fed23fcc..702d349c1e88 100644
--- a/arch/sparc64/kernel/dtlb_base.S
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -71,7 +71,7 @@
 from_tl1_trap:
 	rdpr		%tl, %g5			! For TL==3 test
 	CREATE_VPTE_OFFSET1(%g4, %g6)		! Create VPTE offset
-	be,pn		%xcc, 3f			! Yep, special processing
+	be,pn		%xcc, kvmap			! Yep, special processing
 	CREATE_VPTE_OFFSET2(%g4, %g6)		! Create VPTE offset
 	cmp		%g5, 4				! Last trap level?
 	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
@@ -83,9 +83,9 @@ from_tl1_trap:
 	nop					! Delay-slot
 9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
 	retry					! Trap return
-3:	brlz,pt		%g4, 9b				! Kernel virtual map?
-	 xor		%g2, %g4, %g5			! Finish bit twiddles
-	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc
+	nop
+	nop
+	nop
 
 /* DTLB ** ICACHE line 3: winfixups+real_faults */
 longpath:
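
For readers tracing the control flow: the deleted "3:" stub above handled the linear kernel mapping inline (a brlz test on the virtual address, with an xor against %g2 in the delay slot to form the PTE) before tail-branching to kvmap. After this patch, kvmap itself opens with the identical test, as the ktlb.S hunk below shows. A minimal C sketch of that fast path, where KERN_LINEAR_PTE_XOR and dtlb_load_and_retry are hypothetical names invented for this sketch, standing in for the value preloaded in %g2 and for the stxa ASI_DTLB_DATA_IN / retry sequence:

#include <stdint.h>

/* Hypothetical stand-in for the constant the trap entry code keeps in
 * %g2: xor-ing it with a linear-mapping virtual address is assumed to
 * yield a valid kernel PTE, per the asm's "xor %g2, %g4, %g5". */
extern const uint64_t KERN_LINEAR_PTE_XOR;

/* Models "stxa %g5, [%g0] ASI_DTLB_DATA_IN; retry". */
extern void dtlb_load_and_retry(uint64_t pte);

/* The fast path now sitting at the top of kvmap (kvmap_load). */
static int kvmap_linear_fast_path(uint64_t vaddr)
{
	if ((int64_t)vaddr < 0) {	/* brlz,pt %g4, kvmap_load */
		dtlb_load_and_retry(KERN_LINEAR_PTE_XOR ^ vaddr);
		return 1;		/* handled, trap retried */
	}
	return 0;			/* fall through to kvmap_nonlinear */
}
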
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index b48349527853..45cd3bbdb7e0 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,159 +30,6 @@
 	.text
 	.align		32
 
-	.globl		sparc64_vpte_patchme1
-	.globl		sparc64_vpte_patchme2
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP
- * range (note that this is only possible for instruction miss, data misses to
- * obp range do not use vpte). If so, go back directly to the faulting address.
- * This is because we want to read the tpc, otherwise we have no way of knowing
- * the 8k aligned faulting address if we are using >8k kernel pagesize. This
- * also ensures no vpte range addresses are dropped into tlb while obp is
- * executing (see inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
-	/* Note that kvmap below has verified that the address is
-	 * in the range MODULES_VADDR --> VMALLOC_END already. So
-	 * here we need only check if it is an OBP address or not.
-	 */
-	sethi		%hi(LOW_OBP_ADDRESS), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, sparc64_vpte_patchme1
-	 mov		0x1, %g5
-	sllx		%g5, 32, %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, obp_iaddr_patch
-	 nop
-
-	/* These two instructions are patched by paging_init(). */
-sparc64_vpte_patchme1:
-	sethi		%hi(0), %g5
-sparc64_vpte_patchme2:
-	or		%g5, %lo(0), %g5
-
-	/* With kernel PGD in %g5, branch back into dtlb_backend. */
-	ba,pt		%xcc, sparc64_kpte_continue
-	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment. */
-
-vpte_noent:
-	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
-	 * skip over the trap instruction so that the top level
-	 * TLB miss handler will think this %g5 value is just an
-	 * invalid PTE, thus branching to full fault processing.
-	 */
-	mov		TLB_SFSR, %g1
-	stxa		%g4, [%g1 + %g1] ASI_DMMU
-	done
-
-	.globl		obp_iaddr_patch
-obp_iaddr_patch:
-	/* These two instructions patched by inherit_prom_mappings(). */
-	sethi		%hi(0), %g5
-	or		%g5, %lo(0), %g5
-
-	/* Behave as if we are at TL0. */
-	wrpr		%g0, 1, %tl
-	rdpr		%tpc, %g4	/* Find original faulting iaddr */
-	srlx		%g4, 13, %g4	/* Throw out context bits */
-	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */
-
-	/* Restore previous TAG_ACCESS. */
-	mov		TLB_SFSR, %g1
-	stxa		%g4, [%g1 + %g1] ASI_IMMU
-
-	/* Get PMD offset. */
-	srlx		%g4, 23, %g6
-	and		%g6, 0x7ff, %g6
-	sllx		%g6, 2, %g6
-
-	/* Load PMD, is it valid? */
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g5, 11, %g5
-
-	/* Get PTE offset. */
-	srlx		%g4, 13, %g6
-	and		%g6, 0x3ff, %g6
-	sllx		%g6, 3, %g6
-
-	/* Load PTE. */
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* TLB load and return from trap. */
-	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
-	retry
-
-	.globl		obp_daddr_patch
-obp_daddr_patch:
-	/* These two instructions patched by inherit_prom_mappings(). */
-	sethi		%hi(0), %g5
-	or		%g5, %lo(0), %g5
-
-	/* Get PMD offset. */
-	srlx		%g4, 23, %g6
-	and		%g6, 0x7ff, %g6
-	sllx		%g6, 2, %g6
-
-	/* Load PMD, is it valid? */
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g5, 11, %g5
-
-	/* Get PTE offset. */
-	srlx		%g4, 13, %g6
-	and		%g6, 0x3ff, %g6
-	sllx		%g6, 3, %g6
-
-	/* Load PTE. */
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* TLB load and return from trap. */
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
-	retry
-
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by prom, as well as by kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use information saved during inherit_prom_mappings() using 8k
- * pagesize.
- */
-	.align		32
-kvmap:
-	sethi		%hi(MODULES_VADDR), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, longpath
-	 mov		(VMALLOC_END >> 24), %g5
-	sllx		%g5, 24, %g5
-	cmp		%g4, %g5
-	bgeu,pn		%xcc, longpath
-	 nop
-
-kvmap_check_obp:
-	sethi		%hi(LOW_OBP_ADDRESS), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, kvmap_vmalloc_addr
-	 mov		0x1, %g5
-	sllx		%g5, 32, %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, obp_daddr_patch
-	 nop
-
-kvmap_vmalloc_addr:
-	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
-	ldxa		[%g3 + %g6] ASI_N, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* PTE is valid, load into TLB and return from trap. */
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
-	retry
-
 	/* This is trivial with the new code... */
 	.globl		do_fpdis
 do_fpdis:
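
The obp_iaddr_patch and obp_daddr_patch blocks removed here reappear verbatim in ktlb.S below; their two-level software table walk is easier to follow in C. A minimal sketch of the same index arithmetic, assuming an 8K base page size, with phys_read32/phys_read64 as hypothetical stand-ins for the lduwa/ldxa ASI_PHYS_USE_EC physical loads:

#include <stdint.h>

/* Hypothetical stand-ins for the lduwa/ldxa ASI_PHYS_USE_EC loads,
 * which read physical memory while bypassing the MMU. */
extern uint32_t phys_read32(uint64_t paddr);
extern uint64_t phys_read64(uint64_t paddr);

/* Same arithmetic as obp_{i,d}addr_patch: bits 33:23 of the vaddr
 * index a table of 2048 32-bit PMD entries, bits 22:13 index a table
 * of 1024 64-bit PTEs.  Returns 0 where the asm would branch to
 * longpath (full fault processing). */
static uint64_t obp_table_walk(uint64_t pmd_table_paddr, uint64_t vaddr)
{
	uint64_t pmd_off = ((vaddr >> 23) & 0x7ff) << 2;	/* srlx/and/sllx */
	uint32_t pmd = phys_read32(pmd_table_paddr + pmd_off);
	if (pmd == 0)
		return 0;				/* brz,pn: no PMD */

	uint64_t pte_table = (uint64_t)pmd << 11;	/* PMD holds paddr >> 11 */
	uint64_t pte_off = ((vaddr >> 13) & 0x3ff) << 3;
	uint64_t pte = phys_read64(pte_table + pte_off);

	/* brgez,pn: valid PTEs have bit 63 set. */
	return ((int64_t)pte < 0) ? pte : 0;
}
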
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 1fa06c4e3bdb..dc3551f46b76 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -762,6 +762,7 @@ bootup_user_stack_end:
 swapper_pg_dir:
 	.word		0
 
+#include "ktlb.S"
 #include "etrap.S"
 #include "rtrap.S"
 #include "winfixup.S"
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
new file mode 100644
index 000000000000..b7176792c9a2
--- /dev/null
+++ b/arch/sparc64/kernel/ktlb.S
@@ -0,0 +1,174 @@
+/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
+ *
+ * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+	.text
+	.align		32
+
+	.globl		sparc64_vpte_patchme1
+	.globl		sparc64_vpte_patchme2
+/*
+ * On a second level vpte miss, check whether the original fault is to the OBP
+ * range (note that this is only possible for instruction miss, data misses to
+ * obp range do not use vpte). If so, go back directly to the faulting address.
+ * This is because we want to read the tpc, otherwise we have no way of knowing
+ * the 8k aligned faulting address if we are using >8k kernel pagesize. This
+ * also ensures no vpte range addresses are dropped into tlb while obp is
+ * executing (see inherit_locked_prom_mappings() rant).
+ */
+sparc64_vpte_nucleus:
+	/* Note that kvmap below has verified that the address is
+	 * in the range MODULES_VADDR --> VMALLOC_END already. So
+	 * here we need only check if it is an OBP address or not.
+	 */
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, sparc64_vpte_patchme1
+	 mov		0x1, %g5
+	sllx		%g5, 32, %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, obp_iaddr_patch
+	 nop
+
+	/* These two instructions are patched by paging_init(). */
+sparc64_vpte_patchme1:
+	sethi		%hi(0), %g5
+sparc64_vpte_patchme2:
+	or		%g5, %lo(0), %g5
+
+	/* With kernel PGD in %g5, branch back into dtlb_backend. */
+	ba,pt		%xcc, sparc64_kpte_continue
+	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment. */
+
+vpte_noent:
+	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
+	 * skip over the trap instruction so that the top level
+	 * TLB miss handler will think this %g5 value is just an
+	 * invalid PTE, thus branching to full fault processing.
+	 */
+	mov		TLB_SFSR, %g1
+	stxa		%g4, [%g1 + %g1] ASI_DMMU
+	done
+
+	.globl		obp_iaddr_patch
+obp_iaddr_patch:
+	/* These two instructions patched by inherit_prom_mappings(). */
+	sethi		%hi(0), %g5
+	or		%g5, %lo(0), %g5
+
+	/* Behave as if we are at TL0. */
+	wrpr		%g0, 1, %tl
+	rdpr		%tpc, %g4	/* Find original faulting iaddr */
+	srlx		%g4, 13, %g4	/* Throw out context bits */
+	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */
+
+	/* Restore previous TAG_ACCESS. */
+	mov		TLB_SFSR, %g1
+	stxa		%g4, [%g1 + %g1] ASI_IMMU
+
+	/* Get PMD offset. */
+	srlx		%g4, 23, %g6
+	and		%g6, 0x7ff, %g6
+	sllx		%g6, 2, %g6
+
+	/* Load PMD, is it valid? */
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g5, 11, %g5
+
+	/* Get PTE offset. */
+	srlx		%g4, 13, %g6
+	and		%g6, 0x3ff, %g6
+	sllx		%g6, 3, %g6
+
+	/* Load PTE. */
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* TLB load and return from trap. */
+	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
+	retry
+
+	.globl		obp_daddr_patch
+obp_daddr_patch:
+	/* These two instructions patched by inherit_prom_mappings(). */
+	sethi		%hi(0), %g5
+	or		%g5, %lo(0), %g5
+
+	/* Get PMD offset. */
+	srlx		%g4, 23, %g6
+	and		%g6, 0x7ff, %g6
+	sllx		%g6, 2, %g6
+
+	/* Load PMD, is it valid? */
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g5, 11, %g5
+
+	/* Get PTE offset. */
+	srlx		%g4, 13, %g6
+	and		%g6, 0x3ff, %g6
+	sllx		%g6, 3, %g6
+
+	/* Load PTE. */
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* TLB load and return from trap. */
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
+	retry
+
+/*
+ * On a first level data miss, check whether this is to the OBP range (note
+ * that such accesses can be made by prom, as well as by kernel using
+ * prom_getproperty on "address"), and if so, do not use vpte access ...
+ * rather, use information saved during inherit_prom_mappings() using 8k
+ * pagesize.
+ */
+	.align		32
+kvmap:
+	brlz,pt		%g4, kvmap_load
+	 xor		%g2, %g4, %g5
+
+kvmap_nonlinear:
+	sethi		%hi(MODULES_VADDR), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, longpath
+	 mov		(VMALLOC_END >> 24), %g5
+	sllx		%g5, 24, %g5
+	cmp		%g4, %g5
+	bgeu,pn		%xcc, longpath
+	 nop
+
+kvmap_check_obp:
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, kvmap_vmalloc_addr
+	 mov		0x1, %g5
+	sllx		%g5, 32, %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, obp_daddr_patch
+	 nop
+
+kvmap_vmalloc_addr:
+	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
+	ldxa		[%g3 + %g6] ASI_N, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+kvmap_load:
+	/* PTE is valid, load into TLB and return from trap. */
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+	retry
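
Taken together, the new kvmap dispatches a kernel TLB miss in this order: linear mapping first, then the modules/vmalloc window bounds, then the OBP hole, and finally a VPTE load for vmalloc addresses. A hedged C model of that dispatch follows; MODULES_VADDR, VMALLOC_END and LOW_OBP_ADDRESS are the real macros from asm/pgtable.h (modeled here as extern constants with their values deliberately left opaque), while the enum and function names are invented for this sketch:

#include <stdint.h>

enum kvmap_target {
	KV_LOAD_LINEAR,		/* kvmap_load via the brlz fast path */
	KV_LONGPATH,		/* full fault processing */
	KV_OBP,			/* obp_daddr_patch 8K locked mapping */
	KV_VMALLOC,		/* kvmap_vmalloc_addr: load kernel VPTE */
};

/* Range bounds mirror the asm; supplied by asm/pgtable.h in reality. */
extern const uint64_t MODULES_VADDR, VMALLOC_END, LOW_OBP_ADDRESS;

static enum kvmap_target kvmap_classify(uint64_t vaddr)
{
	if ((int64_t)vaddr < 0)			/* brlz,pt %g4, kvmap_load */
		return KV_LOAD_LINEAR;
	if (vaddr < MODULES_VADDR)		/* blu,pn %xcc, longpath */
		return KV_LONGPATH;
	if (vaddr >= VMALLOC_END)		/* bgeu,pn %xcc, longpath */
		return KV_LONGPATH;
	if (vaddr < LOW_OBP_ADDRESS)		/* blu,pn: below the OBP hole */
		return KV_VMALLOC;
	if (vaddr < (1ULL << 32))		/* blu,pn %xcc, obp_daddr_patch */
		return KV_OBP;
	return KV_VMALLOC;			/* fall through past the hole */
}
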