author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2013-03-12 23:34:55 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-03-16 21:45:44 -0400
commit		af81d7878c641629f2693ae3fdaf74b4af14dfca (patch)
tree		0190cb054a3312df864a659e6e5b8bb2432b8cc4 /arch/powerpc
parent		c60ac5693c47df32a2b4b18af97fca5635def015 (diff)
powerpc: Rename USER_ESID_BITS* to ESID_BITS*
Now we use ESID_BITS of the kernel address to build the proto-VSID, so rename
USER_ESID_BITS to ESID_BITS.

Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: <stable@vger.kernel.org> [v3.8]
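As background for the hunks below: the proto-VSID is just the context number
concatenated with the segment's ESID, then scrambled. Here is a minimal
userspace sketch of the 256M case, mirroring get_vsid() from the mmu-hash64.h
hunk; SID_SHIFT = 28 is assumed from the "2^28 bytes per segment" comment, and
the scramble is modeled as a plain multiply-and-modulo, an approximation of
the kernel's macro:

	/*
	 * Illustrative userspace model, not kernel code. Constants mirror
	 * the mmu-hash64.h hunk below; SID_SHIFT is assumed to be 28
	 * (log2 of a 256MB segment).
	 */
	#include <stdio.h>

	#define CONTEXT_BITS		19
	#define ESID_BITS		18	/* was USER_ESID_BITS */
	#define SID_SHIFT		28	/* assumed */
	#define VSID_MULTIPLIER_256M	12538073UL	/* 24-bit prime */
	#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
	#define VSID_MODULUS_256M	((1UL << VSID_BITS_256M) - 1)

	/* Proto-VSID: context in the high bits, the ESID of the effective
	 * address in the low ESID_BITS, scrambled (modeled here as a
	 * multiply mod 2^37 - 1). */
	static unsigned long model_get_vsid_256m(unsigned long context,
						 unsigned long ea)
	{
		unsigned long proto_vsid = (context << ESID_BITS)
					   | (ea >> SID_SHIFT);

		return (proto_vsid * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M;
	}

	int main(void)
	{
		/* second 256MB segment of context 1 */
		printf("vsid = %#lx\n", model_get_vsid_256m(1, 1UL << SID_SHIFT));
		return 0;
	}

With ESID_BITS = 18, each context covers 2^18 segments of 2^28 bytes each,
i.e. 2^46 bytes (64TB), matching the comment in the first hunk.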
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/mmu-hash64.h	| 16
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	|  2
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_host.c	|  4
-rw-r--r--	arch/powerpc/mm/pgtable_64.c		|  2
-rw-r--r--	arch/powerpc/mm/slb_low.S		|  4
5 files changed, 14 insertions, 14 deletions
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index a32461f9d825..b59e06f507ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -378,12 +378,12 @@ extern void slb_set_size(u16 size);
  */
 
 #define CONTEXT_BITS		19
-#define USER_ESID_BITS		18
-#define USER_ESID_BITS_1T	6
+#define ESID_BITS		18
+#define ESID_BITS_1T		6
 
 /*
  * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
+ * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
  * available for user + kernel mapping. The top 4 contexts are used for
  * kernel mapping. Each segment contains 2^28 bytes. Each
  * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
@@ -396,15 +396,15 @@ extern void slb_set_size(u16 size);
  * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
  */
 #define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
-#define VSID_BITS_256M		(CONTEXT_BITS + USER_ESID_BITS)
+#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
 #define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)
 
 #define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
-#define VSID_BITS_1T		(CONTEXT_BITS + USER_ESID_BITS_1T)
+#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
 #define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)
 
 
-#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
@@ -540,9 +540,9 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 		return 0;
 
 	if (ssize == MMU_SEGSIZE_256M)
-		return vsid_scramble((context << USER_ESID_BITS)
+		return vsid_scramble((context << ESID_BITS)
 				     | (ea >> SID_SHIFT), 256M);
-	return vsid_scramble((context << USER_ESID_BITS_1T)
+	return vsid_scramble((context << ESID_BITS_1T)
 			     | (ea >> SID_SHIFT_1T), 1T);
 }
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index b112359ea7a8..200afa5bcfb7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1472,7 +1472,7 @@ _GLOBAL(do_stab_bolted)
 	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l
 
 	srdi	r10,r11,SID_SHIFT
-	rldimi	r10,r9,USER_ESID_BITS,0	/* proto vsid */
+	rldimi	r10,r9,ESID_BITS,0	/* proto vsid */
 	ASM_VSID_SCRAMBLE(r10, r9, 256M)
 	rldic	r9,r10,12,16	/* r9 = vsid << 12 */
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e317294..5d7d29a313eb 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	vcpu3s->context_id[0] = err;
 
 	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
-				  << USER_ESID_BITS) - 1;
-	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+				  << ESID_BITS) - 1;
+	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
 	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
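The arithmetic above reserves one contiguous block of 2^ESID_BITS proto-VSIDs
per context id. A quick illustrative check (the context id 5 is made up, not
from the commit):

	#include <stdio.h>

	#define ESID_BITS 18	/* from mmu-hash64.h above */

	int main(void)
	{
		unsigned long context_id = 5;	/* hypothetical value */
		unsigned long first = context_id << ESID_BITS;
		unsigned long max = ((context_id + 1) << ESID_BITS) - 1;

		/* prints first = 0x140000, max = 0x17ffff:
		 * 2^18 = 262144 proto-VSIDs for this context */
		printf("first = %#lx, max = %#lx\n", first, max);
		return 0;
	}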
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a271c7a4..654258f165ae 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
 #endif
 
 #ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 #endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 77aafaa1ab09..17aa6dfceb34 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -232,7 +232,7 @@ _GLOBAL(slb_allocate_user)
  * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
-	rldimi	r10,r9,USER_ESID_BITS,0
+	rldimi	r10,r9,ESID_BITS,0
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	/*
 	 * bits above VSID_BITS_256M need to be ignored from r10
@@ -301,7 +301,7 @@ _GLOBAL(slb_compare_rr_to_size)
  */
 slb_finish_load_1T:
 	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
-	rldimi	r10,r9,USER_ESID_BITS_1T,0
+	rldimi	r10,r9,ESID_BITS_1T,0
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
 	/*
 	 * bits above VSID_BITS_1T need to be ignored from r10
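Both slb_finish_load paths scramble the proto-VSID and then, as the comments
note, ignore bits above VSID_BITS_*. Below is a hedged C sketch of the
divide-free reduction that makes discarding those bits safe; the
fold-plus-carry detail is an assumption about ASM_VSID_SCRAMBLE's internals,
since the diff itself only shows that the high bits are dropped afterwards:

	#include <stdio.h>

	#define VSID_BITS_256M		37	/* CONTEXT_BITS 19 + ESID_BITS 18 */
	#define VSID_MULTIPLIER_256M	12538073UL
	#define VSID_MODULUS_256M	((1UL << VSID_BITS_256M) - 1)

	/* Reduce proto_vsid * multiplier mod (2^n - 1) without a divide:
	 * fold the high bits into the low bits, then propagate the single
	 * possible wrap-around carry. */
	static unsigned long scramble_256m(unsigned long proto_vsid)
	{
		unsigned long x = proto_vsid * VSID_MULTIPLIER_256M;

		x = (x & VSID_MODULUS_256M) + (x >> VSID_BITS_256M);
		x += (x + 1) >> VSID_BITS_256M;	/* wrap-around carry */

		/* anything left above bit VSID_BITS_256M is now junk, hence
		 * the "bits above VSID_BITS need to be ignored" comments */
		return x & VSID_MODULUS_256M;
	}

	int main(void)
	{
		unsigned long proto = 0x123456789UL & VSID_MODULUS_256M;

		/* sanity-check the fold against a plain modulo */
		printf("fold = %#lx, mod = %#lx\n", scramble_256m(proto),
		       (proto * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M);
		return 0;
	}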