path: root/arch/powerpc/mm
author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2012-09-09 22:52:53 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-09-17 02:31:50 -0400
commit		ac8dc2823a30a2d166fed6e919ab2e576f8fca84 (patch)
tree		5d8a89cf1254c119f1005a4ffda594a1a1df7612 /arch/powerpc/mm
parent		7aa0727f3302931e698b3a7979ae5b9a4600da4e (diff)
powerpc/mm: Use the required number of VSID bits in slbmte
ASM_VSID_SCRAMBLE can leave non-zero bits in the high 28 bits of the result for a 256MB segment (40 bits for a 1T segment). Properly mask them before using the values in slbmte.

Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
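For illustration, here is a minimal user-space sketch of the masking this patch performs, assuming SLB_VSID_SHIFT = 12 and VSID_BITS_256M = 36 (the value implied by the "high 28 bits" wording above); combine_vsid_and_flags() is a hypothetical helper that only mirrors what the fixed rldimi does in slb_finish_load, it is not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: SLB_VSID_SHIFT is 12 in the kernel;
 * VSID_BITS_256M is assumed to be 36 here. */
#define SLB_VSID_SHIFT	12
#define VSID_BITS_256M	36

/*
 * Keep only the low VSID_BITS_256M bits of the scrambled proto-VSID
 * before merging it above the flag bits, so any garbage that
 * ASM_VSID_SCRAMBLE leaves in the high bits never reaches the value
 * programmed into the SLB via slbmte.  Assumes the flags occupy only
 * the low SLB_VSID_SHIFT bits, as they do in slb_finish_load.
 */
static uint64_t combine_vsid_and_flags(uint64_t scrambled, uint64_t flags)
{
	uint64_t vsid_mask = (1ULL << VSID_BITS_256M) - 1;

	return ((scrambled & vsid_mask) << SLB_VSID_SHIFT) | flags;
}

int main(void)
{
	/* 0xfff above bit 36 stands in for scramble residue; 0x123456789 is the VSID proper */
	uint64_t scrambled = (0xfffULL << VSID_BITS_256M) | 0x123456789ULL;
	uint64_t flags = 0x400;	/* stand-in for the SLB_VSID_* flag bits */

	printf("vsid data: 0x%016llx\n",
	       (unsigned long long)combine_vsid_and_flags(scrambled, flags));
	return 0;
}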
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/slb_low.S	12
1 file changed, 10 insertions, 2 deletions
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index e132dc6ed1a9..3b75f19aaa22 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -223,7 +223,11 @@ _GLOBAL(slb_allocate_user)
  */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
-	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
+	/*
+	 * bits above VSID_BITS_256M need to be ignored from r10
+	 * also combine VSID and flags
+	 */
+	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
 
 	/* r3 = EA, r11 = VSID data */
 	/*
@@ -287,7 +291,11 @@ _GLOBAL(slb_compare_rr_to_size)
 slb_finish_load_1T:
 	srdi	r10,r10,40-28		/* get 1T ESID */
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
-	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+	/*
+	 * bits above VSID_BITS_1T need to be ignored from r10
+	 * also combine VSID and flags
+	 */
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
 	li	r10,MMU_SEGSIZE_1T
 	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
 
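The new fourth operand follows from the rldimi semantics: rldimi RA,RS,SH,MB rotates RS left by SH and inserts it into RA under a mask covering bits MB through 63-SH (IBM bit numbering, bit 0 being the most significant), i.e. a field of 64 - SH - MB bits taken from the low-order end of RS. Choosing MB = 64 - (SLB_VSID_SHIFT + VSID_BITS_256M) makes that field exactly (63 - SH) - MB + 1 = VSID_BITS_256M bits wide (likewise VSID_BITS_1T in the 1T hunk), so whatever ASM_VSID_SCRAMBLE leaves above the VSID width in r10 is dropped rather than inserted into the value handed to slbmte, while the flag bits below SLB_VSID_SHIFT in r11 are left untouched.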