| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 15:55:43 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 15:55:43 -0400 |
| commit | bc4016f48161454a9a8e5eb209b0693c6cde9f62 | |
| tree | f470f5d711e975b152eec90282f5dd30a1d5dba5 /arch/x86/kernel/tsc.c | |
| parent | 5d70f79b5ef6ea2de4f72a37b2d96e2601e40a22 | |
| parent | b7dadc38797584f6203386da1947ed5edf516646 | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (29 commits)
sched: Export account_system_vtime()
sched: Call tick_check_idle before __irq_enter
sched: Remove irq time from available CPU power
sched: Do not account irq time to current task
x86: Add IRQ_TIME_ACCOUNTING
sched: Add IRQ_TIME_ACCOUNTING, finer accounting of irq time
sched: Add a PF flag for ksoftirqd identification
sched: Consolidate account_system_vtime extern declaration
sched: Fix softirq time accounting
sched: Drop group_capacity to 1 only if local group has extra capacity
sched: Force balancing on newidle balance if local group has capacity
sched: Set group_imb only a task can be pulled from the busiest cpu
sched: Do not consider SCHED_IDLE tasks to be cache hot
sched: Drop all load weight manipulation for RT tasks
sched: Create special class for stop/migrate work
sched: Unindent labels
sched: Comment updates: fix default latency and granularity numbers
tracing/sched: Add sched_pi_setprio tracepoint
sched: Give CPU bound RT tasks preference
sched: Try not to migrate higher priority RT tasks
...
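Several of these commits implement IRQ time accounting: hardirq and softirq entry/exit are timestamped with a fine-grained clock (on x86, sched_clock() backed by the TSC, which is why tsc.c gains the enable/disable hooks in the diff below) and the elapsed delta is accumulated per CPU, so the scheduler can avoid charging interrupt time to whichever task happened to be interrupted. A minimal userspace sketch of that accumulate-on-exit pattern follows; the function and variable names (irq_time_enter, irq_time_exit, the per-CPU arrays) are illustrative stand-ins, not the kernel's actual API:

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NR_CPUS 4

/* Illustrative per-CPU accumulators; the kernel uses percpu variables. */
static uint64_t irq_time_ns[NR_CPUS];
static uint64_t irq_enter_ts[NR_CPUS];

/* Stand-in for sched_clock(): a monotonic nanosecond timestamp. */
static uint64_t sched_clock_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Record the entry timestamp for this CPU. */
static void irq_time_enter(int cpu)
{
	irq_enter_ts[cpu] = sched_clock_ns();
}

/* On exit, charge the elapsed time to the CPU's irq total,
 * not to the task that was running when the interrupt hit. */
static void irq_time_exit(int cpu)
{
	irq_time_ns[cpu] += sched_clock_ns() - irq_enter_ts[cpu];
}

int main(void)
{
	irq_time_enter(0);
	/* ... an interrupt handler body would run here ... */
	irq_time_exit(0);
	printf("cpu0 irq time: %llu ns\n",
	       (unsigned long long)irq_time_ns[0]);
	return 0;
}
```

This only works if the timestamp source is cheap and monotonic, which is why mark_tsc_unstable() in the diff below has to disable the accounting when the TSC can no longer be trusted.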
Diffstat (limited to 'arch/x86/kernel/tsc.c')
arch/x86/kernel/tsc.c | 8 ++++++++
1 file changed, 8 insertions(+), 0 deletions(-)
```diff
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 26a863a9c2a8..a1c2cd768538 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,10 +104,14 @@ int __init notsc_setup(char *str)
 
 __setup("notsc", notsc_setup);
 
+static int no_sched_irq_time;
+
 static int __init tsc_setup(char *str)
 {
 	if (!strcmp(str, "reliable"))
 		tsc_clocksource_reliable = 1;
+	if (!strncmp(str, "noirqtime", 9))
+		no_sched_irq_time = 1;
 	return 1;
 }
 
@@ -801,6 +805,7 @@ void mark_tsc_unstable(char *reason)
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
 		sched_clock_stable = 0;
+		disable_sched_clock_irqtime();
 		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
 		/* Change only the rating, when not registered */
 		if (clocksource_tsc.mult)
@@ -987,6 +992,9 @@ void __init tsc_init(void)
 	/* now allow native_sched_clock() to use rdtsc */
 	tsc_disabled = 0;
 
+	if (!no_sched_irq_time)
+		enable_sched_clock_irqtime();
+
 	lpj = ((u64)tsc_khz * 1000);
 	do_div(lpj, HZ);
 	lpj_fine = lpj;
```
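The new option works because a __setup() handler receives the whole option string and may match several keywords in sequence: booting with tsc=noirqtime sets no_sched_irq_time, so tsc_init() skips enable_sched_clock_irqtime(), while mark_tsc_unstable() can also revoke irq-time accounting at runtime if the TSC degrades. A standalone sketch of the same matching pattern, assuming an illustrative parse_tsc_option() wrapper (the flag names mirror the patch; the surrounding scaffolding is not kernel API):

```c
#include <stdio.h>
#include <string.h>

/* Illustrative flags mirroring the ones touched by the patch. */
static int tsc_clocksource_reliable;
static int no_sched_irq_time;

/* Userspace analogue of the tsc_setup() handler: test each
 * known token against the option string and set its flag. */
static int parse_tsc_option(const char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	return 1;
}

int main(void)
{
	parse_tsc_option("noirqtime");
	printf("reliable=%d noirqtime=%d\n",
	       tsc_clocksource_reliable, no_sched_irq_time);
	return 0;
}
```

Note the strncmp() with length 9: it accepts "noirqtime" even when further characters follow in the option string, whereas the "reliable" token is matched exactly with strcmp().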
