diff options
author | David S. Miller <davem@davemloft.net> | 2006-02-21 23:51:13 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-03-20 04:13:55 -0500 |
commit | 9cc3a1ac9a819cadff05ca37bb7f208013a22035 (patch) | |
tree | 601fa49272b540b3a3e6cc3728db27c525b73721 /arch/sparc64/kernel | |
parent | 30c91d576e9ea41c963e7f28643219bda73b0ddc (diff) |
[SPARC64]: Make use of Niagara 256MB PTEs for kernel mappings.
We use a bitmap, one bit for every 256MB of memory. If the
bit is set we can use a 256MB PTE for linear mappings, else
we have to use a 4MB PTE.
SUN4V support is there, and we can very easily add support
for Panther cpu 256MB PTEs in the future.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r-- | arch/sparc64/kernel/ktlb.S | 29 |
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index 883180be3d59..ae1dac17bc8d 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S | |||
@@ -133,8 +133,33 @@ kvmap_dtlb_4v: | |||
133 | brgez,pn %g4, kvmap_dtlb_nonlinear | 133 | brgez,pn %g4, kvmap_dtlb_nonlinear |
134 | nop | 134 | nop |
135 | 135 | ||
136 | sethi %hi(kern_linear_pte_xor), %g2 | 136 | sethi %hi(kpte_linear_bitmap), %g2 |
137 | ldx [%g2 + %lo(kern_linear_pte_xor)], %g2 | 137 | or %g2, %lo(kpte_linear_bitmap), %g2 |
138 | |||
139 | /* Clear the PAGE_OFFSET top virtual bits, then shift | ||
140 | * down to get a 256MB physical address index. | ||
141 | */ | ||
142 | sllx %g4, 21, %g5 | ||
143 | mov 1, %g7 | ||
144 | srlx %g5, 21 + 28, %g5 | ||
145 | |||
146 | /* Don't try this at home kids... this depends upon srlx | ||
147 | * only taking the low 6 bits of the shift count in %g5. | ||
148 | */ | ||
149 | sllx %g7, %g5, %g7 | ||
150 | |||
151 | /* Divide by 64 to get the offset into the bitmask. */ | ||
152 | srlx %g5, 6, %g5 | ||
153 | |||
154 | /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */ | ||
155 | ldx [%g2 + %g5], %g2 | ||
156 | andcc %g2, %g7, %g0 | ||
157 | sethi %hi(kern_linear_pte_xor), %g5 | ||
158 | or %g5, %lo(kern_linear_pte_xor), %g5 | ||
159 | bne,a,pt %xcc, 1f | ||
160 | add %g5, 8, %g5 | ||
161 | |||
162 | 1: ldx [%g5], %g2 | ||
138 | 163 | ||
139 | .globl kvmap_linear_patch | 164 | .globl kvmap_linear_patch |
140 | kvmap_linear_patch: | 165 | kvmap_linear_patch: |