author	will schmidt <will_schmidt@vnet.ibm.com>	2007-05-11 09:34:16 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-05-11 21:32:47 -0400
commit	e147ec8f18082efb700763bed4fe24f73ca50a2c (patch)
tree	58bfbafbffe4c9d2e993ed66b51472fcb82b2798 /arch/powerpc
parent	435e0b2b165bcac86eeddf675383070f60587cbb (diff)
[POWERPC] Simplify smp_space_timers
Greatly simplify the function smp_space_timers.  The stolen time calculation (per comment within the code) doesn't need the half-jiffy stagger any more.  There isn't an issue with bouncing off global locks, so we really shouldn't need any sort of staggering at all.

However, the last_jiffy value still needs to be set.  This removes the extra stagger logic, and just sets the values.

This change should benefit applications that rely on barrier synchronization, and will help cut down OS jitter.

Boot tested across the board (G5, power3, power4, power5, 970mp blade).

Signed-off-by: Will Schmidt <will_schmidt@vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/time.c	19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7cedef8f5f70..2c8564d54e4d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -711,30 +711,15 @@ void wakeup_decrementer(void)
 void __init smp_space_timers(unsigned int max_cpus)
 {
 	int i;
-	unsigned long half = tb_ticks_per_jiffy / 2;
-	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
 	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
 	previous_tb -= tb_ticks_per_jiffy;
-	/*
-	 * The stolen time calculation for POWER5 shared-processor LPAR
-	 * systems works better if the two threads' timebase interrupts
-	 * are staggered by half a jiffy with respect to each other.
-	 */
+
 	for_each_possible_cpu(i) {
 		if (i == boot_cpuid)
 			continue;
-		if (i == (boot_cpuid ^ 1))
-			per_cpu(last_jiffy, i) =
-				per_cpu(last_jiffy, boot_cpuid) - half;
-		else if (i & 1)
-			per_cpu(last_jiffy, i) =
-				per_cpu(last_jiffy, i ^ 1) + half;
-		else {
-			previous_tb += offset;
-			per_cpu(last_jiffy, i) = previous_tb;
-		}
+		per_cpu(last_jiffy, i) = previous_tb;
 	}
 }
 #endif
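
For reference, the simplified function after this patch reads roughly as follows.  This is a sketch reconstructed from the hunk above, not copied verbatim from the tree; tb_ticks_per_jiffy, last_jiffy and boot_cpuid are defined elsewhere in arch/powerpc/kernel/time.c and the powerpc headers.

/*
 * Sketch of smp_space_timers() as it stands after this patch,
 * reconstructed from the diff above.  Every secondary CPU simply gets
 * a last_jiffy value one jiffy behind the boot CPU's, so the timebase
 * is always ahead of per_cpu(last_jiffy, cpu) -- no more half-jiffy
 * staggering between sibling threads.
 */
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;

	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		per_cpu(last_jiffy, i) = previous_tb;
	}
}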