author     Michael Neuling <mikey@neuling.org>    2010-06-08 00:57:02 -0400
committer  Ingo Molnar <mingo@elte.hu>            2010-06-09 05:13:14 -0400
commit     76cbd8a8f8b0dddbff89a6708bd5bd13c0d21a00 (patch)
tree       a374c4cd3237835a54f883492dea9355eb3ccc16 /arch/powerpc/include
parent     532cb4c401e225b084c14d6bd6a2f8ee561de2f1 (diff)
powerpc: Enable asymmetric SMT scheduling on POWER7
The POWER7 core has dynamic SMT mode switching which is controlled by
the hypervisor.  There are 3 SMT modes:
	SMT1 uses thread  0
	SMT2 uses threads 0 & 1
	SMT4 uses threads 0, 1, 2 & 3
When in any particular SMT mode, all threads have the same performance
as each other (i.e. at any moment in time, all threads perform the same).

The SMT mode switching works such that when Linux has threads 2 & 3 idle
and 0 & 1 active, it will cede (H_CEDE hypercall) threads 2 and 3 in the
idle loop and the hypervisor will automatically switch to SMT2 for that
core (independent of other cores).  The opposite is not true: if threads
0 & 1 are idle and 2 & 3 are active, we will stay in SMT4 mode.
Similarly, if thread 0 is active and threads 1, 2 & 3 are idle, we'll go
into SMT1 mode.

If we can get the core into a lower SMT mode (SMT1 is best), the threads
will perform better since they share fewer core resources.  Hence when
we have idle threads, we want them to be the higher-numbered ones.

This adds a feature bit for asymmetric packing to powerpc and then
enables it on POWER7.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: linuxppc-dev@ozlabs.org
LKML-Reference: <20100608045702.31FB5CC8C7@localhost.localdomain>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
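The new bit is just a flag; it takes effect only once the scheduler is
told to use asymmetric packing on the SMT sibling domain.  As a minimal
sketch of that consuming side, assuming the weak
arch_sd_sibling_asym_packing() hook introduced by the parent commit
(532cb4c401e2), a powerpc override could look like the following; the
real hook lives outside arch/powerpc/include and so is not part of the
diff shown here:

	/*
	 * Sketch only: powerpc override of the scheduler's weak
	 * arch_sd_sibling_asym_packing() hook (from the parent commit).
	 * The actual override is outside the arch/powerpc/include path
	 * this page is limited to.
	 */
	#include <linux/sched.h>	/* SD_ASYM_PACKING */
	#include <asm/cputable.h>	/* cpu_has_feature(), CPU_FTR_ASYM_SMT */

	int arch_sd_sibling_asym_packing(void)
	{
		/* Only CPUs whose feature mask includes CPU_FTR_ASYM_SMT
		 * (POWER7, after this patch) request asymmetric packing. */
		if (cpu_has_feature(CPU_FTR_ASYM_SMT))
			return SD_ASYM_PACKING;

		return 0;
	}

With SD_ASYM_PACKING set on the sibling domain, the load balancer packs
runnable tasks onto the lowest-numbered threads, leaving the higher
threads to cede in the idle loop so the hypervisor can drop the core
into SMT2 or SMT1.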
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b0b21134f61a..4b611ca1a768 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -197,6 +197,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_SAO			LONG_ASM_CONST(0x0020000000000000)
 #define CPU_FTR_CP_USE_DCBTZ		LONG_ASM_CONST(0x0040000000000000)
 #define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0080000000000000)
+#define CPU_FTR_ASYM_SMT		LONG_ASM_CONST(0x0100000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -412,7 +413,7 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	    CPU_FTR_DSCR | CPU_FTR_SAO)
+	    CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \