author    Torben Hohn <torbenh@gmx.de>  2011-01-27 09:59:56 -0500
committer Thomas Gleixner <tglx@linutronix.de>  2011-01-31 08:55:45 -0500
commit    1aabd67d2e97e6affdf5a7c65f442ac91ace3f85 (patch)
tree      ddebf04e61ec7c436b2b9bb6a55bf58b0c6e75e4
parent    daad8b581e7f5e21a2f79e49d57d4f6a73b26510 (diff)
ia64: Switch do_timer() to xtime_update()
local_cpu_data->itm_next = new_itm; does not need to be protected by
xtime_lock. xtime_update() takes the lock itself.

Signed-off-by: Torben Hohn <torbenh@gmx.de>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: johnstul@us.ibm.com
Cc: hch@infradead.org
Cc: yong.zhang0@gmail.com
LKML-Reference: <20110127145956.23248.49107.stgit@localhost>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
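[Editor's note: for context, the callers below can drop their explicit
locking because xtime_update() wraps do_timer() in the xtime_lock seqlock
itself. A minimal sketch of that helper as introduced earlier in this
series; its exact location in the timekeeping code is an assumption here:

	/*
	 * Sketch: xtime_update() owns the xtime_lock seqlock, so callers
	 * such as the ia64 timer interrupt no longer take it themselves.
	 * Expected to be called with interrupts disabled, as is the case
	 * in the interrupt paths touched by this patch.
	 */
	void xtime_update(unsigned long ticks)
	{
		write_seqlock(&xtime_lock);   /* serialize timekeeping updates */
		do_timer(ticks);              /* advance jiffies / wall time */
		write_sequnlock(&xtime_lock);
	}

With the locking folded into the helper, updating the per-CPU
local_cpu_data->itm_next can move outside the time_keeper_id check.]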
-rw-r--r--  arch/ia64/kernel/time.c  19
-rw-r--r--  arch/ia64/xen/time.c     13
2 files changed, 10 insertions(+), 22 deletions(-)
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 9702fa92489e..156ad803d5b7 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id)
 
 		new_itm += local_cpu_data->itm_delta;
 
-		if (smp_processor_id() == time_keeper_id) {
-			/*
-			 * Here we are in the timer irq handler. We have irqs locally
-			 * disabled, but we don't know if the timer_bh is running on
-			 * another CPU. We need to avoid to SMP race by acquiring the
-			 * xtime_lock.
-			 */
-			write_seqlock(&xtime_lock);
-			do_timer(1);
-			local_cpu_data->itm_next = new_itm;
-			write_sequnlock(&xtime_lock);
-		} else
-			local_cpu_data->itm_next = new_itm;
+		if (smp_processor_id() == time_keeper_id)
+			xtime_update(1);
+
+		local_cpu_data->itm_next = new_itm;
 
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
@@ -222,7 +213,7 @@ skip_process_time_accounting:
 		 * comfort, we increase the safety margin by
 		 * intentionally dropping the next tick(s). We do NOT
 		 * update itm.next because that would force us to call
-		 * do_timer() which in turn would let our clock run
+		 * xtime_update() which in turn would let our clock run
 		 * too fast (with the potentially devastating effect
 		 * of losing monotony of time).
 		 */
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index c1c544513e8d..1f8244a78bee 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm)
 		run_posix_cpu_timers(p);
 		delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
 
-		if (cpu == time_keeper_id) {
-			write_seqlock(&xtime_lock);
-			do_timer(stolen + blocked);
-			local_cpu_data->itm_next = delta_itm + new_itm;
-			write_sequnlock(&xtime_lock);
-		} else {
-			local_cpu_data->itm_next = delta_itm + new_itm;
-		}
+		if (cpu == time_keeper_id)
+			xtime_update(stolen + blocked);
+
+		local_cpu_data->itm_next = delta_itm + new_itm;
+
 		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
 		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}