| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-20 13:32:09 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-20 13:32:09 -0400 |
| commit | 161f7a7161191ab9c2e97f787829ef8dd2b95771 (patch) | |
| tree | 9776d3f963c7f0d247b7fb324eab4811a1302f67 /arch/x86/kernel | |
| parent | 2ba68940c893c8f0bfc8573c041254251bb6aeab (diff) | |
| parent | a078c6d0e6288fad6d83fb6d5edd91ddb7b6ab33 (diff) | |
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer changes for v3.4 from Ingo Molnar
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
ntp: Fix integer overflow when setting time
math: Introduce div64_long
cs5535-clockevt: Allow the MFGPT IRQ to be shared
cs5535-clockevt: Don't ignore MFGPT on SMP-capable kernels
x86/time: Eliminate unused irq0_irqs counter
clocksource: scx200_hrt: Fix the build
x86/tsc: Reduce the TSC sync check time for core-siblings
timer: Fix bad idle check on irq entry
nohz: Remove ts->inidle checks before restarting the tick
nohz: Remove update_ts_time_stat from tick_nohz_start_idle
clockevents: Leave the broadcast device in shutdown mode when not needed
clocksource: Load the ACPI PM clocksource asynchronously
clocksource: scx200_hrt: Convert scx200 to use clocksource_register_hz
clocksource: Get rid of clocksource_calc_mult_shift()
clocksource: dbx500: convert to clocksource_register_hz()
clocksource: scx200_hrt: use pr_<level> instead of printk
time: Move common updates to a function
time: Reorder so the hot data is together
time: Remove most of xtime_lock usage in timekeeping.c
ntp: Add ntp_lock to replace xtime_locking
...
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/time.c | 3 |
-rw-r--r-- | arch/x86/kernel/tsc_sync.c | 29 |
2 files changed, 24 insertions, 8 deletions
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index dd5fbf4101fc..c6eba2b42673 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -57,9 +57,6 @@ EXPORT_SYMBOL(profile_pc);
  */
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-	/* Keep nmi watchdog up to date */
-	inc_irq_stat(irq0_irqs);
-
 	global_clock_event->event_handler(global_clock_event);
 
 	/* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9eba29b46cb7..fc25e60a5884 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -42,7 +42,7 @@ static __cpuinitdata int nr_warps;
 /*
  * TSC-warp measurement loop running on both CPUs:
  */
-static __cpuinit void check_tsc_warp(void)
+static __cpuinit void check_tsc_warp(unsigned int timeout)
 {
 	cycles_t start, now, prev, end;
 	int i;
@@ -51,9 +51,9 @@ static __cpuinit void check_tsc_warp(void)
 	start = get_cycles();
 	rdtsc_barrier();
 	/*
-	 * The measurement runs for 20 msecs:
+	 * The measurement runs for 'timeout' msecs:
 	 */
-	end = start + tsc_khz * 20ULL;
+	end = start + (cycles_t) tsc_khz * timeout;
 	now = start;
 
 	for (i = 0; ; i++) {
@@ -99,6 +99,25 @@ static __cpuinit void check_tsc_warp(void)
 }
 
 /*
+ * If the target CPU coming online doesn't have any of its core-siblings
+ * online, a timeout of 20msec will be used for the TSC-warp measurement
+ * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
+ * information about this socket already (and this information grows as we
+ * have more and more logical-siblings in that socket).
+ *
+ * Ideally we should be able to skip the TSC sync check on the other
+ * core-siblings, if the first logical CPU in a socket passed the sync test.
+ * But as the TSC is per-logical CPU and can potentially be modified wrongly
+ * by the bios, TSC sync test for smaller duration should be able
+ * to catch such errors. Also this will catch the condition where all the
+ * cores in the socket doesn't get reset at the same time.
+ */
+static inline unsigned int loop_timeout(int cpu)
+{
+	return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20;
+}
+
+/*
  * Source CPU calls into this - it waits for the freshly booted
  * target CPU to arrive and then starts the measurement:
  */
@@ -135,7 +154,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
 	 */
 	atomic_inc(&start_count);
 
-	check_tsc_warp();
+	check_tsc_warp(loop_timeout(cpu));
 
 	while (atomic_read(&stop_count) != cpus-1)
 		cpu_relax();
@@ -183,7 +202,7 @@ void __cpuinit check_tsc_sync_target(void)
 	while (atomic_read(&start_count) != cpus)
 		cpu_relax();
 
-	check_tsc_warp();
+	check_tsc_warp(loop_timeout(smp_processor_id()));
 
 	/*
 	 * Ok, we are done:
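A note on the arithmetic in the hunks above: check_tsc_warp() spins until tsc_khz * timeout TSC cycles have elapsed, so dropping the timeout from 20 msec to 2 msec once a core-sibling is already online shrinks the warp-measurement window by a factor of ten. The following is a minimal standalone sketch of that conversion, not kernel code; the 2.4 GHz TSC value and the loop_timeout_ms() helper are illustrative stand-ins for tsc_khz and loop_timeout().

#include <stdio.h>

typedef unsigned long long cycles_t;

/* Hypothetical stand-in for the kernel's tsc_khz: a 2.4 GHz TSC. */
static const unsigned int tsc_khz = 2400000;

/* Same heuristic as loop_timeout(): 2 msec once a core-sibling is
 * already online, 20 msec for the first CPU coming up in a socket. */
static unsigned int loop_timeout_ms(unsigned int online_siblings)
{
	return (online_siblings > 1) ? 2 : 20;
}

int main(void)
{
	for (unsigned int siblings = 1; siblings <= 2; siblings++) {
		unsigned int ms = loop_timeout_ms(siblings);
		/* end - start in check_tsc_warp(): kHz * msec = cycles */
		cycles_t end_delta = (cycles_t)tsc_khz * ms;
		printf("%u online sibling(s): %u ms -> %llu TSC cycles\n",
		       siblings, ms, end_delta);
	}
	return 0;
}

On the assumed 2.4 GHz part this prints 48,000,000 cycles for the first CPU of a socket and 4,800,000 cycles once any core-sibling has already passed the sync test.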