author     hawkes@sgi.com <hawkes@sgi.com>            2006-02-14 13:40:17 -0500
committer  Tony Luck <tony.luck@intel.com>            2006-02-15 16:37:04 -0500
commit     defbb2c929cbe89dc92239b303cd33d3c85e9a83 (patch)
tree       85dbcfa407d4bfaecbce4f3556a73033b8f70caf /arch/ia64
parent     4c2cd96696ae0896ce4bcf725b9f0eaffafeb640 (diff)
[IA64] ia64: simplify and fix udelay()
The original ia64 udelay() was simple, but flawed for platforms without
synchronized ITCs: a preemption and migration to another CPU during the
while-loop likely resulted in too-early termination or very, very lengthy
looping.

The first fix (now in 2.6.15) broke the delay loop into smaller,
non-preemptible chunks, re-enabling preemption between the chunks.  This fix
is flawed in that the total udelay is computed to be the sum of just the
non-preemptible while-loop pieces, i.e., not counting the time spent in the
interim preemptible periods.  If an interrupt or a migration occurs during
one of these interim periods, then that time is invisible and only serves to
lengthen the effective udelay().

This new fix backs out the current flawed fix and returns to a simple
udelay(), fully preemptible and interruptible.  It implements two simple
alternative udelay() routines: one a default generic version that uses
ia64_get_itc(), and the other an sn-specific version that uses that
platform's RTC.

Signed-off-by: John Hawkes <hawkes@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
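The mechanism behind the fix is a single indirection: ia64_udelay is a function
pointer that defaults to the generic ITC-based routine, and a platform's timer
init can repoint it.  The standalone sketch below (plain C, compilable outside
the kernel) illustrates that dispatch pattern; read_timer(), TICKS_PER_USEC,
udelay_hook, platform_udelay() and platform_init() are illustrative
placeholders, not symbols from this patch.

#include <stdio.h>

/* Placeholder counter read and calibration -- stand-ins for ia64_get_itc()
 * and local_cpu_data->cyc_per_usec in the real code. */
static unsigned long fake_ticks;
static unsigned long read_timer(void) { return fake_ticks++; }
#define TICKS_PER_USEC 1UL

/* Generic delay: busy-wait on the default counter until the deadline. */
static void generic_udelay(unsigned long usecs)
{
        unsigned long end = read_timer() + usecs * TICKS_PER_USEC;

        while ((long)(read_timer() - end) < 0)
                ;               /* cpu_relax() in the kernel version */
}

/* The dispatch point: defaults to the generic routine, the way ia64_udelay
 * defaults to ia64_itc_udelay in the patch. */
static void (*udelay_hook)(unsigned long) = generic_udelay;

/* A platform-specific delay, playing the role of ia64_sn_udelay. */
static void platform_udelay(unsigned long usecs)
{
        printf("platform-specific delay of %lu usecs\n", usecs);
}

/* Platform init repoints the hook, as sn_timer_init() does in the patch. */
static void platform_init(void) { udelay_hook = platform_udelay; }

int main(void)
{
        udelay_hook(10);        /* generic, counter-based path */
        platform_init();
        udelay_hook(10);        /* platform-specific path */
        return 0;
}

The same calling convention -- udelay(usecs) forwarding through the pointer --
keeps every existing caller unchanged while letting SN2 substitute its
RTC-based loop at boot.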
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/time.c          39
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c  19
2 files changed, 36 insertions(+), 22 deletions(-)
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index a094ec49ccfa..307d01e15b2e 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -250,32 +250,27 @@ time_init (void)
         set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
 
-#define SMALLUSECS 100
-
-void
-udelay (unsigned long usecs)
+/*
+ * Generic udelay assumes that if preemption is allowed and the thread
+ * migrates to another CPU, that the ITC values are synchronized across
+ * all CPUs.
+ */
+static void
+ia64_itc_udelay (unsigned long usecs)
 {
-        unsigned long start;
-        unsigned long cycles;
-        unsigned long smallusecs;
+        unsigned long start = ia64_get_itc();
+        unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;
 
-        /*
-         * Execute the non-preemptible delay loop (because the ITC might
-         * not be synchronized between CPUS) in relatively short time
-         * chunks, allowing preemption between the chunks.
-         */
-        while (usecs > 0) {
-                smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
-                preempt_disable();
-                cycles = smallusecs*local_cpu_data->cyc_per_usec;
-                start = ia64_get_itc();
+        while (time_before(ia64_get_itc(), end))
+                cpu_relax();
+}
 
-                while (ia64_get_itc() - start < cycles)
-                        cpu_relax();
+void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
 
-                preempt_enable();
-                usecs -= smallusecs;
-        }
+void
+udelay (unsigned long usecs)
+{
+        (*ia64_udelay)(usecs);
 }
 EXPORT_SYMBOL(udelay);
 
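A detail worth calling out in the new ia64_itc_udelay(): the loop compares
against a precomputed end value with time_before() rather than a plain '<',
so the test stays correct even if the free-running counter wraps between
start and end.  The standalone snippet below shows why the signed-difference
form matters; counter_before() is a local stand-in written in the same general
shape as the kernel helper, not the kernel macro itself.

#include <limits.h>
#include <stdio.h>

/* Wraparound-safe "a is earlier than b" for a free-running unsigned counter,
 * expressed as a signed difference. */
#define counter_before(a, b)    ((long)((a) - (b)) < 0)

int main(void)
{
        unsigned long start = ULONG_MAX - 5;    /* counter about to wrap */
        unsigned long end   = start + 10;       /* wraps around to a small value */
        unsigned long now   = start + 3;        /* still before the deadline */

        printf("naive compare  (now < end): %d\n", now < end);                /* 0: wrong */
        printf("signed compare            : %d\n", counter_before(now, end)); /* 1: right */
        return 0;
}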
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index deb9baf4d473..56a88b6df4b4 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -14,6 +14,7 @@
 
 #include <asm/hw_irq.h>
 #include <asm/system.h>
+#include <asm/timex.h>
 
 #include <asm/sn/leds.h>
 #include <asm/sn/shub_mmr.h>
@@ -28,9 +29,27 @@ static struct time_interpolator sn2_interpolator = {
         .source = TIME_SOURCE_MMIO64
 };
 
+/*
+ * sn udelay uses the RTC instead of the ITC because the ITC is not
+ * synchronized across all CPUs, and the thread may migrate to another CPU
+ * if preemption is enabled.
+ */
+static void
+ia64_sn_udelay (unsigned long usecs)
+{
+        unsigned long start = rtc_time();
+        unsigned long end = start +
+                        usecs * sn_rtc_cycles_per_second / 1000000;
+
+        while (time_before((unsigned long)rtc_time(), end))
+                cpu_relax();
+}
+
 void __init sn_timer_init(void)
 {
         sn2_interpolator.frequency = sn_rtc_cycles_per_second;
         sn2_interpolator.addr = RTC_COUNTER_ADDR;
         register_time_interpolator(&sn2_interpolator);
+
+        ia64_udelay = &ia64_sn_udelay;
 }
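The SN routine converts the requested delay to RTC ticks as
usecs * sn_rtc_cycles_per_second / 1000000, multiplying before dividing so a
rate that is not a whole number of ticks per microsecond is not truncated.
The standalone sketch below illustrates the difference the ordering makes;
FAKE_RTC_HZ is a made-up rate chosen only for the example, not the actual
SN2 RTC frequency.

#include <stdio.h>

#define FAKE_RTC_HZ     37500000UL      /* hypothetical 37.5 MHz tick rate */

/* Multiply first, then divide: keeps the fractional ticks-per-usec. */
static unsigned long usecs_to_ticks(unsigned long usecs, unsigned long hz)
{
        return usecs * hz / 1000000UL;
}

/* Divide first: 37.5 ticks/usec truncates to 37 and the delay comes up short. */
static unsigned long usecs_to_ticks_lossy(unsigned long usecs, unsigned long hz)
{
        return usecs * (hz / 1000000UL);
}

int main(void)
{
        printf("multiply-then-divide: %lu ticks\n",
               usecs_to_ticks(100UL, FAKE_RTC_HZ));             /* 3750 */
        printf("divide-then-multiply: %lu ticks\n",
               usecs_to_ticks_lossy(100UL, FAKE_RTC_HZ));       /* 3700 */
        return 0;
}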