author     Linus Torvalds <torvalds@linux-foundation.org>  2010-08-25 11:40:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-25 11:40:56 -0400
commit     5e686019df425a4fd8003ce7f6eaccbe537331d8 (patch)
tree       cc78762bc0fb471562ca1b5f745834a983020623
parent     e09b4e9a8d15dce04bedf1b860abeec00de31aad (diff)
parent     cd7240c0b900eb6d690ccee088a6c9b46dae815a (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, tsc, sched: Recompute cyc2ns_offset's during resume from sleep states
  sched: Fix rq->clock synchronization when migrating tasks
 arch/x86/include/asm/tsc.h |  2 ++
 arch/x86/kernel/tsc.c      | 38 ++++++++++++++++++++++++++++++++++++++
 arch/x86/power/cpu.c       |  2 ++
 kernel/sched_fair.c        |  2 ++
 4 files changed, 44 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c0427295e8f5..1ca132fc0d03 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ce8e50239332..d632934cb638 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+	if (!sched_clock_stable)
+		return;
+
+	cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with invariant TSC, TSC gets reset in some the
+ * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to
+ * arbitrary value (still sync'd across cpu's) during resume from such sleep
+ * states. To cope up with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it was left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+	unsigned long long offset;
+	unsigned long flags;
+	int cpu;
+
+	if (!sched_clock_stable)
+		return;
+
+	local_irq_save(flags);
+
+	get_cpu_var(cyc2ns_offset) = 0;
+	offset = cyc2ns_suspend - sched_clock();
+
+	for_each_possible_cpu(cpu)
+		per_cpu(cyc2ns_offset, cpu) = offset;
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
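
The block comment above is the heart of the first fix: sched_clock() on x86 is essentially "TSC cycles scaled to nanoseconds, plus a per-cpu offset", so when the TSC comes back from a sleep state reset to an arbitrary value, the offsets can be recomputed so the clock continues from where suspend left it. Below is a minimal user-space sketch of that arithmetic, not kernel code; the names (fake_sched_clock, NCPUS, the 2 GHz frequency) and the single time source are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define NCPUS 4

static uint64_t cyc2ns_offset[NCPUS];     /* models per_cpu(cyc2ns_offset) */
static uint64_t tsc;                      /* models the raw cycle counter  */
static const uint64_t cpu_khz = 2000000;  /* assume a 2 GHz TSC            */

/* models sched_clock() on cpu 0: cycles scaled to ns, plus the offset */
static uint64_t fake_sched_clock(void)
{
	return tsc * 1000000ULL / cpu_khz + cyc2ns_offset[0];
}

int main(void)
{
	uint64_t suspend_ns, offset;
	int cpu;

	tsc = 10000000;                    /* time passes before suspend   */
	suspend_ns = fake_sched_clock();   /* save_sched_clock_state()     */

	tsc = 123;                         /* firmware reset the TSC       */

	/* restore_sched_clock_state(): zero the local offset, then pick a
	 * new offset so the clock resumes at suspend_ns instead of jumping
	 * backwards, and apply it to every cpu. */
	cyc2ns_offset[0] = 0;
	offset = suspend_ns - fake_sched_clock();
	for (cpu = 0; cpu < NCPUS; cpu++)
		cyc2ns_offset[cpu] = offset;

	printf("clock at suspend: %llu ns\n", (unsigned long long)suspend_ns);
	printf("clock at resume:  %llu ns\n",
	       (unsigned long long)fake_sched_clock());
	return 0;
}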
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index e7e8c5f54956..87bb35e34ef1 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -113,6 +113,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
+	save_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
@@ -229,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 void restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
+	restore_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 806d1b227a21..ab661ebc4895 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3752,6 +3752,8 @@ static void task_fork_fair(struct task_struct *p)
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
+	update_rq_clock(rq);
+
 	if (unlikely(task_cpu(p) != this_cpu))
 		__set_task_cpu(p, this_cpu);
 
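
The second fix is the two-line sched_fair.c hunk above: rq->clock only advances when update_rq_clock() is called, so task_fork_fair() refreshes it right after taking the runqueue lock, and the placement logic that follows then works against a current timestamp rather than a stale one when the child task is moved to this_cpu. A tiny user-space sketch of that "refresh the cached clock under the lock before reading it" pattern follows; struct rq, now_ns and this update_rq_clock() are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

struct rq {
	uint64_t clock;                 /* cached per-runqueue timestamp */
};

static uint64_t now_ns;                 /* stand-in for the real clocksource */

static void update_rq_clock(struct rq *rq)
{
	rq->clock = now_ns;             /* refresh the cached clock */
}

int main(void)
{
	struct rq rq = { .clock = 0 };

	now_ns = 1000;
	update_rq_clock(&rq);           /* clock last refreshed at 1000 ns */

	now_ns = 5000;                  /* time passes before the next lock */

	/* "take the lock" and consume the clock: without a refresh, any
	 * calculation here is based on a 4000 ns old timestamp. */
	printf("stale clock:     %llu ns\n", (unsigned long long)rq.clock);

	update_rq_clock(&rq);           /* the call the fix adds under the lock */
	printf("refreshed clock: %llu ns\n", (unsigned long long)rq.clock);
	return 0;
}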