author	David S. Miller <davem@sunset.davemloft.net>	2005-09-25 19:46:57 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-09-25 19:46:57 -0400
commit	56425306517ef28a9b480161cdb96d182172bc1d (patch)
tree	204cfbef0e5d86954f87b6b40d79d57f8157e5ea /arch/sparc64/kernel/ktlb.S
parent	52f26deb7c67d5f34910660200b925c1a2b8df8c (diff)
[SPARC64]: Add CONFIG_DEBUG_PAGEALLOC support.
The trick is that we do the kernel linear mapping TLB miss starting with an instruction sequence like this:

	ba,pt	%xcc, kvmap_load
	 xor	%g2, %g4, %g5

succeeded by an instruction sequence which performs a full page table walk starting at swapper_pg_dir.

We first take over the trap table from the firmware. Then, using the constant PTE generation for the linear mapping area above, we build the kernel page tables for the linear mapping.

After this setup is complete, we patch that branch above into a "nop", which causes TLB misses to fall through to the full page table walk.

With this, the page unmapping for CONFIG_DEBUG_PAGEALLOC is trivial.

Signed-off-by: David S. Miller <davem@davemloft.net>
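The patching step described above amounts to overwriting a single instruction and flushing it to the instruction stream. A minimal C sketch, assuming it runs once the linear-mapping page tables are built (the call site, such as paging_init() in arch/sparc64/mm/init.c, and the helper name patch_kvmap_linear are assumptions; flushi() is the sparc64 I-cache flush macro):

#ifdef CONFIG_DEBUG_PAGEALLOC
/* Hypothetical helper: turn the "ba,pt %xcc, kvmap_load" at
 * kvmap_linear_patch into a nop, so that linear-mapping TLB misses
 * fall through to the full page table walk that follows it. */
static void __init patch_kvmap_linear(void)
{
	extern unsigned int kvmap_linear_patch[1];

	/* 0x01000000 encodes "sethi %hi(0), %g0", i.e. nop. */
	kvmap_linear_patch[0] = 0x01000000;

	/* Make the rewritten instruction visible to instruction fetch. */
	flushi(&kvmap_linear_patch[0]);
}
#endif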
Diffstat (limited to 'arch/sparc64/kernel/ktlb.S')
-rw-r--r--	arch/sparc64/kernel/ktlb.S	33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index a591bc0ebc7b..7796b37f478c 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -132,9 +132,40 @@ kvmap_do_obp:
 	 */
 	.align		32
 kvmap:
-	brlz,pt		%g4, kvmap_load
+	brgez,pn	%g4, kvmap_nonlinear
+	 nop
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	.globl		kvmap_linear_patch
+kvmap_linear_patch:
+#endif
+	ba,pt		%xcc, kvmap_load
 	 xor		%g2, %g4, %g5
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	sethi		%hi(swapper_pg_dir), %g5
+	or		%g5, %lo(swapper_pg_dir), %g5
+	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
+	srlx		%g6, 64 - PAGE_SHIFT, %g6
+	andn		%g6, 0x3, %g6
+	lduw		[%g5 + %g6], %g5
+	brz,pn		%g5, longpath
+	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
+	srlx		%g6, 64 - PAGE_SHIFT, %g6
+	sllx		%g5, 11, %g5
+	andn		%g6, 0x3, %g6
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g4, 64 - PMD_SHIFT, %g6
+	srlx		%g6, 64 - PAGE_SHIFT, %g6
+	sllx		%g5, 11, %g5
+	andn		%g6, 0x7, %g6
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 nop
+	ba,a,pt		%xcc, kvmap_load
+#endif
+
 kvmap_nonlinear:
 	sethi		%hi(MODULES_VADDR), %g5
 	cmp		%g4, %g5
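For reference, the DEBUG_PAGEALLOC block added above is a hand-scheduled three-level walk (pgd -> pmd -> pte) of swapper_pg_dir. The following C rendering of the same arithmetic is a sketch, not kernel code: the constants assume 8K pages and are back-derived from the shift amounts in the assembly (the authoritative definitions live in asm/pgtable.h), the function names level_offset/kvmap_walk are invented for illustration, and phys_read32()/phys_read64() are hypothetical stand-ins for the lduwa/ldxa loads through ASI_PHYS_USE_EC.

#include <stdint.h>

#define PAGE_SHIFT	13		/* assumes 8K pages */
#define PMD_SHIFT	23		/* PAGE_SHIFT + 10: 1024 8-byte ptes/page */
#define PMD_BITS	11		/* 2048 4-byte pmd entries/page */
#define PGDIR_SHIFT	34		/* PMD_SHIFT + PMD_BITS */
#define PGDIR_BITS	11		/* 2048 4-byte pgd entries/page */

/* Hypothetical stand-ins for the lduwa/ldxa loads through
 * ASI_PHYS_USE_EC (cacheable physical-address loads). */
extern uint32_t phys_read32(uint64_t paddr);
extern uint64_t phys_read64(uint64_t paddr);

extern uint32_t swapper_pg_dir[];	/* kernel top-level page table */

/* The sllx/srlx pair: isolate one level's index bits, leaving them
 * already scaled as a byte offset into a page-sized table; the
 * "andn" mask then clears the stray low bits so the offset is
 * entry-aligned. */
static uint64_t level_offset(uint64_t vaddr, unsigned int shift,
			     unsigned int bits, uint64_t align)
{
	uint64_t off = (vaddr << (64 - (shift + bits))) >> (64 - PAGE_SHIFT);

	return off & ~align;
}

/* Returns the PTE for vaddr, or 0 when a level is empty -- the
 * assembly branches to "longpath" in that case. */
static uint64_t kvmap_walk(uint64_t vaddr)
{
	uint32_t pgd, pmd;

	/* PGD level: an ordinary (virtual) lduw from swapper_pg_dir. */
	pgd = *(uint32_t *)((char *)swapper_pg_dir +
			    level_offset(vaddr, PGDIR_SHIFT, PGDIR_BITS, 0x3));
	if (!pgd)
		return 0;

	/* pgd/pmd entries are 32 bits wide and hold a physical
	 * address >> 11, which "sllx %g5, 11" reconstructs. */
	pmd = phys_read32(((uint64_t)pgd << 11) +
			  level_offset(vaddr, PMD_SHIFT, PMD_BITS, 0x3));
	if (!pmd)
		return 0;

	/* PTE level: 8-byte entries, hence the 0x7 alignment mask;
	 * the page-offset bits are discarded by the shift pair and
	 * the mask. */
	return phys_read64(((uint64_t)pmd << 11) +
			   level_offset(vaddr, PMD_SHIFT, 0, 0x7));
}

A clear entry at any level corresponds to one of the "brz,pn %g5, longpath" branches: a page unmapped for CONFIG_DEBUG_PAGEALLOC has a clear PTE, so the walk fails and the access traps instead of being silently translated.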