author		Anton Blanchard <anton@samba.org>	2010-08-10 21:40:27 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-09-02 00:07:30 -0400
commit		f89451fbd2b9f28f5ff156154989599ec062354b (patch)
tree		1722b247079fb80f972449066c9f5c67a5564d4c /arch/powerpc
parent		8c77391475bc3284a380fc46aaf0bcf26bde3ae6 (diff)
powerpc: Feature nop out reservation clear when stcx checks address
The POWER architecture does not require stcx to check that it is operating
on the same address as the larx. This means it is possible for an
exception handler to execute a larx, get a reservation, decide not to do
the stcx and then return back with an active reservation. If the
interrupted code was in the middle of a larx/stcx sequence the stcx could
incorrectly succeed.

All recent POWER CPUs check the address before letting the stcx succeed,
so we can create a CPU feature and nop it out. As Ben suggested, we can
only do this in our syscall path because there is a remote possibility
some kernel code gets interrupted by an exception that ends up operating
on the same cacheline.

Thanks to Paul Mackerras and Derek Williams for the idea.

To test this I used a very simple null syscall (actually getppid)
testcase at http://ozlabs.org/~anton/junkcode/null_syscall.c

I tested against 2.6.35-git10 with the following changes against the
pseries_defconfig:

CONFIG_VIRT_CPU_ACCOUNTING=n
CONFIG_AUDIT=n
CONFIG_PPC_4K_PAGES=n
CONFIG_PPC_64K_PAGES=y
CONFIG_FORCE_MAX_ZONEORDER=9
CONFIG_PPC_SUBPAGE_PROT=n
CONFIG_FUNCTION_TRACER=n
CONFIG_FUNCTION_GRAPH_TRACER=n
CONFIG_IRQSOFF_TRACER=n
CONFIG_STACK_TRACER=n

to remove the overhead of virtual CPU accounting, syscall auditing and
the ftrace mcount tracers. 64kB pages were enabled to minimise TLB misses.

POWER6: +8.2%
POWER7: +7.0%

Another suggestion was to use a larx to something in the L1 instead of a
stcx. This was almost as fast as removing the larx on POWER6, but only
3.5% faster on POWER7. We can use this to speed up the reservation clear
in our exception exit code.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
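To make the failure mode concrete, here is a minimal sketch (illustrative
only, not code from this patch) of a textbook larx/stcx. atomic increment.
The window between the lwarx and the stwcx. is where a reservation leaked
by an exception handler could let the stwcx. succeed incorrectly on a CPU
that does not check the reservation address:

/* Hedged sketch: classic larx/stcx. atomic increment on powerpc. */
static inline int atomic_inc_return_sketch(int *p)
{
	int tmp;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"	/* load *p and take a reservation */
"	addic	%0,%0,1\n"	/* increment the loaded value */
"	stwcx.	%0,0,%2\n"	/* store only if reservation survives */
"	bne-	1b"		/* reservation lost: retry */
	: "=&r" (tmp), "+m" (*p)
	: "r" (p)
	: "cc");

	return tmp;
}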
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/cputable.h	14
-rw-r--r--	arch/powerpc/kernel/entry_64.S		22
2 files changed, 31 insertions, 5 deletions
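The testcase at the URL above is not reproduced here; the following is a
hedged stand-in in the same spirit, a minimal null-syscall timing loop
(the iteration count and timing method are assumptions):

/* Sketch of a null syscall microbenchmark using getppid(). */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define ITERATIONS 10000000UL

int main(void)
{
	struct timespec a, b;

	clock_gettime(CLOCK_MONOTONIC, &a);
	for (unsigned long i = 0; i < ITERATIONS; i++)
		getppid();	/* cheap syscall: measures entry/exit cost */
	clock_gettime(CLOCK_MONOTONIC, &b);

	double ns = (b.tv_sec - a.tv_sec) * 1e9 + (b.tv_nsec - a.tv_nsec);
	printf("%.1f ns per syscall\n", ns / ITERATIONS);
	return 0;
}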
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 3a40a992e594..f3a1fdd9cf08 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -198,6 +198,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_CP_USE_DCBTZ		LONG_ASM_CONST(0x0040000000000000)
 #define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0080000000000000)
 #define CPU_FTR_ASYM_SMT		LONG_ASM_CONST(0x0100000000000000)
+#define CPU_FTR_STCX_CHECKS_ADDRESS	LONG_ASM_CONST(0x0200000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -392,28 +393,31 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
 #define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
-	    CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ)
+	    CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
+	    CPU_FTR_STCX_CHECKS_ADDRESS)
 #define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
-	    CPU_FTR_CP_USE_DCBTZ)
+	    CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS)
 #define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
-	    CPU_FTR_PURR)
+	    CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD)
+	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
+	    CPU_FTR_STCX_CHECKS_ADDRESS)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	    CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT)
+	    CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
+	    CPU_FTR_STCX_CHECKS_ADDRESS)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
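The BEGIN_FTR_SECTION/END_FTR_SECTION markers used in the entry_64.S hunks
below are resolved at boot: sections whose feature condition does not hold
for the running CPU are overwritten with nops. The following is a
conceptual C sketch of that patching step only; the structure and function
names here are assumptions, not the kernel's actual fixup code (which
lives in arch/powerpc/lib/feature-fixups.c):

/* Simplified model of boot-time feature-section nop patching. */
#include <stdint.h>
#include <stddef.h>

#define PPC_NOP 0x60000000u	/* ori r0,r0,0 */

struct ftr_fixup_entry {
	uint64_t mask;		/* feature bits to test */
	uint64_t value;		/* required value of those bits */
	uint32_t *start;	/* first instruction of the section */
	uint32_t *end;		/* one past the last instruction */
};

static void apply_feature_fixups(uint64_t cpu_features,
				 struct ftr_fixup_entry *tab, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if ((cpu_features & tab[i].mask) == tab[i].value)
			continue;	/* condition holds: keep the code */
		for (uint32_t *p = tab[i].start; p < tab[i].end; p++)
			*p = PPC_NOP;	/* condition fails: nop it out */
	}
}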
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 42e9d908914a..4d5fa12ca6e8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -202,7 +202,9 @@ syscall_exit:
 	bge-	syscall_error
 syscall_error_cont:
 	ld	r7,_NIP(r1)
+BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1			/* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 	andi.	r6,r8,MSR_PR
 	ld	r4,_LINK(r1)
 	/*
@@ -419,6 +421,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	sync
 #endif /* CONFIG_SMP */
 
+	/*
+	 * If we optimise away the clear of the reservation in system
+	 * calls because we know the CPU tracks the address of the
+	 * reservation, then we need to clear it here to cover the
+	 * case that the kernel context switch path has no larx
+	 * instructions.
+	 */
+BEGIN_FTR_SECTION
+	ldarx	r6,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
+
 	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
 	std	r6,PACACURRENT(r13)	/* Set new 'current' */
 
@@ -576,7 +589,16 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 	andi.	r0,r3,MSR_RI
 	beq-	unrecov_restore
 
+	/*
+	 * Clear the reservation. If we know the CPU tracks the address of
+	 * the reservation then we can potentially save some cycles and use
+	 * a larx. On POWER6 and POWER7 this is significantly faster.
+	 */
+BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1		/* to clear the reservation */
+FTR_SECTION_ELSE
+	ldarx	r4,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
 	/*
 	 * Clear RI before restoring r13. If we are returning to
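For reference, the two reservation-clearing idioms the patch chooses
between can be sketched in ppc64 inline asm as below. This is an
illustrative userspace analogue under assumed names, not kernel code: a
stdcx. to a scratch address always drops the reservation, while on CPUs
with CPU_FTR_STCX_CHECKS_ADDRESS a ldarx simply takes a fresh reservation
on the scratch address, which the commit message measured as faster on
POWER6 and POWER7.

static inline void clear_reservation_stcx(unsigned long *scratch)
{
	unsigned long tmp = 0;

	/* stdcx. may or may not store; either way the reservation is gone */
	__asm__ __volatile__("stdcx.	%0,0,%1"
			     : /* no outputs */
			     : "r" (tmp), "r" (scratch)
			     : "cc", "memory");
}

static inline void clear_reservation_larx(unsigned long *scratch)
{
	unsigned long tmp;

	/* ldarx supersedes any previous reservation; a later stcx. to a
	 * different address then fails the address check */
	__asm__ __volatile__("ldarx	%0,0,%1"
			     : "=r" (tmp)
			     : "r" (scratch)
			     : "memory");
	(void)tmp;
}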