author     Paul Mackerras <paulus@samba.org>   2007-10-11 06:37:10 -0400
committer  Paul Mackerras <paulus@samba.org>   2007-10-12 00:05:17 -0400
commit     1189be6508d45183013ddb82b18f4934193de274
tree       58924481b4de56699e4a884dce8dc601e71cf7d1
parent     287e5d6fcccfa38b953cebe307e1ddfd32363355
[POWERPC] Use 1TB segments
This makes the kernel use 1TB segments for all kernel mappings and for
user addresses of 1TB and above, on machines which support them
(currently POWER5+, POWER6 and PA6T).

We detect that the machine supports 1TB segments by looking at the
ibm,processor-segment-sizes property in the device tree.

We don't currently use 1TB segments for user addresses < 1T, since that
would effectively prevent 32-bit processes from using huge pages unless
we also had a way to revert to using 256MB segments.  That would be
possible but would involve extra complications (such as keeping track of
which segment size was used when HPTEs were inserted) and is not
addressed here.

Parts of this patch were originally written by Ben Herrenschmidt.

Signed-off-by: Paul Mackerras <paulus@samba.org>
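The device-tree detection itself lives in the hash MMU setup code under
arch/powerpc/mm, so it does not appear in the diffstat below.  As a rough
sketch only (the helper name, control flow and property encoding are
assumptions, and the usual kernel headers are assumed), scanning the
flattened device tree for 1TB segment support could look something like
this:

	/*
	 * Illustrative sketch -- not part of this patch's diff.  Assumed
	 * here: the property is an array of 32-bit segment-size shifts
	 * (0x1c = 28 for 256MB segments, 0x28 = 40 for 1TB segments).
	 */
	static int __init scan_seg_sizes(unsigned long node, const char *uname,
					 int depth, void *data)
	{
		const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
		const u32 *prop;
		unsigned long size = 0;

		/* Only "cpu" nodes carry ibm,processor-segment-sizes */
		if (type == NULL || strcmp(type, "cpu") != 0)
			return 0;

		prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					   &size);
		if (prop == NULL)
			return 0;

		for (; size >= 4; size -= 4, ++prop)
			if (*prop == 0x28)	/* 2^40 bytes => 1TB segments */
				cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
		return 0;
	}

A callback like this would typically be run early via of_scan_flat_dt();
the CPU_FTR_1T_SEGMENT bit is then what the feature sections in the
assembly below key off.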
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/entry_64.S  | 14
-rw-r--r--  arch/powerpc/kernel/head_64.S   |  2
-rw-r--r--  arch/powerpc/kernel/process.c   |  9
3 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index fbbd3f6f0064..0ec134034899 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -373,8 +373,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 	ld	r8,KSP(r4)	/* new stack pointer */
 BEGIN_FTR_SECTION
+	b	2f
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+BEGIN_FTR_SECTION
 	clrrdi	r6,r8,28	/* get its ESID */
 	clrrdi	r9,r1,28	/* get current sp ESID */
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+BEGIN_FTR_SECTION
+	clrrdi	r6,r8,40	/* get its 1T ESID */
+	clrrdi	r9,r1,40	/* get current sp 1T ESID */
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	clrldi.	r0,r6,2		/* is new ESID c00000000? */
 	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 	cror	eq,4*cr1+eq,eq
@@ -384,6 +392,11 @@ BEGIN_FTR_SECTION
 	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
 	oris	r0,r6,(SLB_ESID_V)@h
 	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+BEGIN_FTR_SECTION
+	li	r9,MMU_SEGSIZE_1T	/* insert B field */
+	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
+	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 
 	/* Update the last bolted SLB.  No write barriers are needed
 	 * here, provided we only update the current CPU's SLB shadow
@@ -401,7 +414,6 @@ BEGIN_FTR_SECTION
 	isync
 
 2:
-END_FTR_SECTION_IFSET(CPU_FTR_SLB)
 	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
 	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
 	   because we don't need to leave the 288-byte ABI gap at the
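The clrrdi shift amounts above are simply the segment-size shifts:
clearing the low 28 bits of an effective address yields its 256MB-segment
ESID, clearing the low 40 bits yields its 1TB-segment ESID.  A minimal C
sketch of the same masking (the helper names and the local constants are
illustrative, not kernel identifiers):

	#define SID_SHIFT_256M	28	/* 256MB segment = 2^28 bytes */
	#define SID_SHIFT_1T	40	/* 1TB segment   = 2^40 bytes */

	/* C equivalent of "clrrdi rX,rY,28": keep everything above bit 27 */
	static inline unsigned long esid_256M(unsigned long ea)
	{
		return ea & ~((1UL << SID_SHIFT_256M) - 1);
	}

	/* C equivalent of "clrrdi rX,rY,40": keep everything above bit 39 */
	static inline unsigned long esid_1T(unsigned long ea)
	{
		return ea & ~((1UL << SID_SHIFT_1T) - 1);
	}

Either way, the cmpd/cror logic that follows skips the SLB update when the
new stack sits in the same (now possibly 1TB-sized) segment as the current
one, or in the bolted kernel segment.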
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 22ac245bd59a..97c5857faf00 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -935,7 +935,7 @@ _GLOBAL(do_stab_bolted)
 
 	/* Calculate VSID */
 	/* This is a kernel address, so protovsid = ESID */
-	ASM_VSID_SCRAMBLE(r11, r9)
+	ASM_VSID_SCRAMBLE(r11, r9, 256M)
 	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
 
 	/* Search the primary group for a free entry */
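ASM_VSID_SCRAMBLE gains a segment-size argument because the proto-VSID to
VSID scrambling uses a different multiplier and modulus for 1TB segments
than for 256MB segments.  Conceptually (a sketch only; the structure and
field names are illustrative, and the real constants and the division-free
assembly trick live in the kernel's hash MMU headers):

	/* Sketch: vsid = (protovsid * multiplier) % modulus, with a separate
	 * (multiplier, modulus) pair per segment size. */
	struct vsid_scramble_params {
		unsigned long multiplier;
		unsigned long modulus;	/* of the form 2^VSID_BITS - 1 */
	};

	static unsigned long vsid_scramble(unsigned long protovsid,
					   const struct vsid_scramble_params *p)
	{
		return (protovsid * p->multiplier) % p->modulus;
	}

Passing 256M here keeps do_stab_bolted's behaviour unchanged, since the
segment-table path is only used on older CPUs that do not support 1TB
segments.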
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 15998b57767c..7949c203cb89 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -564,10 +564,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 
 #ifdef CONFIG_PPC64
 	if (cpu_has_feature(CPU_FTR_SLB)) {
-		unsigned long sp_vsid = get_kernel_vsid(sp);
+		unsigned long sp_vsid;
 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
 
-		sp_vsid <<= SLB_VSID_SHIFT;
+		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
+			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
+				<< SLB_VSID_SHIFT_1T;
+		else
+			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
+				<< SLB_VSID_SHIFT;
 		sp_vsid |= SLB_VSID_KERNEL | llp;
 		p->thread.ksp_vsid = sp_vsid;
 	}