author     Bob Picco <bob.picco@hp.com>       2007-07-18 18:51:28 -0400
committer  Tony Luck <tony.luck@intel.com>    2007-07-20 14:23:02 -0400
commit     1f564ad6d4182859612cbae452122e5eb2d62a76
tree       ef98ce12ed64853437c35a81e29f0d1c09a4393b
parent     0aa366f351d044703e25c8425e508170e80d83b1
[IA64] remove time interpolator
Remove the time_interpolator code. (This is generic code, but
its only user was ia64. It has been superseded by the
CONFIG_GENERIC_TIME code.)
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Peter Keilty <peter.keilty@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--  Documentation/time_interpolators.txt |  41
-rw-r--r--  include/linux/timex.h                |  60
-rw-r--r--  kernel/time.c                        |  88
-rw-r--r--  kernel/time/ntp.c                    |  10
-rw-r--r--  kernel/time/timekeeping.c            |   4
-rw-r--r--  kernel/timer.c                       | 188
6 files changed, 0 insertions, 391 deletions
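For context on the commit message: under CONFIG_GENERIC_TIME an architecture describes its counter to the shared timekeeping core as a struct clocksource instead of a struct time_interpolator. The fragment below is only a sketch of that era's clocksource API and is not part of this commit; the name "itc", the rating, the shift, and the frequency value are illustrative assumptions.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/timex.h>		/* get_cycles() */

/* Read callback: hand the raw cycle counter to the timekeeping core. */
static cycle_t itc_get_cycles(void)
{
	return get_cycles();
}

static struct clocksource clocksource_itc = {
	.name	= "itc",			/* illustrative name */
	.rating	= 350,				/* assumed rating */
	.read	= itc_get_cycles,
	.mask	= CLOCKSOURCE_MASK(64),
	.shift	= 16,				/* assumed scaling shift */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Placeholder: the real code would use the measured counter frequency. */
static u32 my_counter_freq_hz = 400000000;	/* assumed 400 MHz */

static int __init itc_clocksource_init(void)
{
	/* mult converts counter cycles into (nanoseconds << shift) */
	clocksource_itc.mult = clocksource_hz2mult(my_counter_freq_hz,
						   clocksource_itc.shift);
	return clocksource_register(&clocksource_itc);
}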
diff --git a/Documentation/time_interpolators.txt b/Documentation/time_interpolators.txt
deleted file mode 100644
index e3b60854fbc2..000000000000
--- a/Documentation/time_interpolators.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Time Interpolators
-------------------
-
-Time interpolators are a base of time calculation between timer ticks and
-allow an accurate determination of time down to the accuracy of the time
-source in nanoseconds.
-
-The architecture specific code typically provides gettimeofday and
-settimeofday under Linux. The time interpolator provides both if an arch
-defines CONFIG_TIME_INTERPOLATION. The arch still must set up timer tick
-operations and call the necessary functions to advance the clock.
-
-With the time interpolator a standardized interface exists for time
-interpolation between ticks. The provided logic is highly scalable
-and has been tested in SMP situations of up to 512 CPUs.
-
-If CONFIG_TIME_INTERPOLATION is defined then the architecture specific code
-(or the device drivers - like HPET) may register time interpolators.
-These are typically defined in the following way:
-
-static struct time_interpolator my_interpolator {
-	.frequency = MY_FREQUENCY,
-	.source = TIME_SOURCE_MMIO32,
-	.shift = 8,		/* scaling for higher accuracy */
-	.drift = -1,		/* Unknown drift */
-	.jitter = 0		/* time source is stable */
-};
-
-void time_init(void)
-{
-	....
-	/* Initialization of the timer *.
-	my_interpolator.address = &my_timer;
-	register_time_interpolator(&my_interpolator);
-	....
-}
-
-For more details see include/linux/timex.h and kernel/timer.c.
-
-Christoph Lameter <christoph@lameter.com>, October 31, 2004
-
diff --git a/include/linux/timex.h b/include/linux/timex.h
index da929dbbea2a..37ac3ff90faf 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -224,66 +224,6 @@ static inline int ntp_synced(void)
 	__x < 0 ? -(-__x >> __s) : __x >> __s;	\
 })
 
-
-#ifdef CONFIG_TIME_INTERPOLATION
-
-#define TIME_SOURCE_CPU 0
-#define TIME_SOURCE_MMIO64 1
-#define TIME_SOURCE_MMIO32 2
-#define TIME_SOURCE_FUNCTION 3
-
-/* For proper operations time_interpolator clocks must run slightly slower
- * than the standard clock since the interpolator may only correct by having
- * time jump forward during a tick. A slower clock is usually a side effect
- * of the integer divide of the nanoseconds in a second by the frequency.
- * The accuracy of the division can be increased by specifying a shift.
- * However, this may cause the clock not to be slow enough.
- * The interpolator will self-tune the clock by slowing down if no
- * resets occur or speeding up if the time jumps per analysis cycle
- * become too high.
- *
- * Setting jitter compensates for a fluctuating timesource by comparing
- * to the last value read from the timesource to insure that an earlier value
- * is not returned by a later call. The price to pay
- * for the compensation is that the timer routines are not as scalable anymore.
- */
-
-struct time_interpolator {
-	u16 source;			/* time source flags */
-	u8 shift;			/* increases accuracy of multiply by shifting. */
-				/* Note that bits may be lost if shift is set too high */
-	u8 jitter;			/* if set compensate for fluctuations */
-	u32 nsec_per_cyc;		/* set by register_time_interpolator() */
-	void *addr;			/* address of counter or function */
-	cycles_t mask;			/* mask the valid bits of the counter */
-	unsigned long offset;		/* nsec offset at last update of interpolator */
-	u64 last_counter;		/* counter value in units of the counter at last update */
-	cycles_t last_cycle;		/* Last timer value if TIME_SOURCE_JITTER is set */
-	u64 frequency;			/* frequency in counts/second */
-	long drift;			/* drift in parts-per-million (or -1) */
-	unsigned long skips;		/* skips forward */
-	unsigned long ns_skipped;	/* nanoseconds skipped */
-	struct time_interpolator *next;
-};
-
-extern void register_time_interpolator(struct time_interpolator *);
-extern void unregister_time_interpolator(struct time_interpolator *);
-extern void time_interpolator_reset(void);
-extern unsigned long time_interpolator_get_offset(void);
-extern void time_interpolator_update(long delta_nsec);
-
-#else /* !CONFIG_TIME_INTERPOLATION */
-
-static inline void time_interpolator_reset(void)
-{
-}
-
-static inline void time_interpolator_update(long delta_nsec)
-{
-}
-
-#endif /* !CONFIG_TIME_INTERPOLATION */
-
 #define TICK_LENGTH_SHIFT	32
 
 #ifdef CONFIG_NO_HZ
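The comment above about the integer divide and the shift can be made concrete with a small worked example. The numbers below (a 400 MHz counter and shift = 8) are illustrative and not taken from this commit; the arithmetic mirrors the nsec_per_cyc computation in register_time_interpolator() and the GET_TI_NSECS() macro removed from kernel/timer.c further down.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t freq  = 400000000ULL;	/* illustrative: 400 MHz counter */
	unsigned shift = 8;		/* illustrative: plays the role of ti->shift */

	/* Without a shift the divide truncates: 1e9 / 4e8 = 2 ns/cycle,
	 * i.e. the interpolated clock runs 20% slow (the true value is 2.5). */
	uint64_t nsec_per_cyc_noshift = NSEC_PER_SEC / freq;

	/* With the shift, as in register_time_interpolator():
	 * (1e9 << 8) / 4e8 = 640, i.e. 640/256 = 2.5 ns/cycle exactly. */
	uint64_t nsec_per_cyc = (NSEC_PER_SEC << shift) / freq;

	/* GET_TI_NSECS(): elapsed cycles -> nanoseconds since last update */
	uint64_t delta_cycles = 1000;
	uint64_t offset_ns = (delta_cycles * nsec_per_cyc) >> shift;

	printf("no shift: %llu ns/cycle\n", (unsigned long long)nsec_per_cyc_noshift);
	printf("shifted:  %llu (=> %.2f ns/cycle)\n",
	       (unsigned long long)nsec_per_cyc, nsec_per_cyc / 256.0);
	printf("1000 cycles -> %llu ns\n", (unsigned long long)offset_ns);
	return 0;
}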
diff --git a/kernel/time.c b/kernel/time.c
index ffe19149d770..e325597f5bf5 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -136,7 +136,6 @@ static inline void warp_clock(void)
 	write_seqlock_irq(&xtime_lock);
 	wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
 	xtime.tv_sec += sys_tz.tz_minuteswest * 60;
-	time_interpolator_reset();
 	write_sequnlock_irq(&xtime_lock);
 	clock_was_set();
 }
@@ -309,92 +308,6 @@ struct timespec timespec_trunc(struct timespec t, unsigned gran)
 }
 EXPORT_SYMBOL(timespec_trunc);
 
-#ifdef CONFIG_TIME_INTERPOLATION
-void getnstimeofday (struct timespec *tv)
-{
-	unsigned long seq,sec,nsec;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		sec = xtime.tv_sec;
-		nsec = xtime.tv_nsec+time_interpolator_get_offset();
-	} while (unlikely(read_seqretry(&xtime_lock, seq)));
-
-	while (unlikely(nsec >= NSEC_PER_SEC)) {
-		nsec -= NSEC_PER_SEC;
-		++sec;
-	}
-	tv->tv_sec = sec;
-	tv->tv_nsec = nsec;
-}
-EXPORT_SYMBOL_GPL(getnstimeofday);
-
-int do_settimeofday (struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	{
-		wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-		set_normalized_timespec(&xtime, sec, nsec);
-		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-		time_adjust = 0;		/* stop active adjtime() */
-		time_status |= STA_UNSYNC;
-		time_maxerror = NTP_PHASE_LIMIT;
-		time_esterror = NTP_PHASE_LIMIT;
-		time_interpolator_reset();
-	}
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-	return 0;
-}
-EXPORT_SYMBOL(do_settimeofday);
-
-void do_gettimeofday (struct timeval *tv)
-{
-	unsigned long seq, nsec, usec, sec, offset;
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		offset = time_interpolator_get_offset();
-		sec = xtime.tv_sec;
-		nsec = xtime.tv_nsec;
-	} while (unlikely(read_seqretry(&xtime_lock, seq)));
-
-	usec = (nsec + offset) / 1000;
-
-	while (unlikely(usec >= USEC_PER_SEC)) {
-		usec -= USEC_PER_SEC;
-		++sec;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-
-	/*
-	 * Make sure xtime.tv_sec [returned by sys_time()] always
-	 * follows the gettimeofday() result precisely. This
-	 * condition is extremely unlikely, it can hit at most
-	 * once per second:
-	 */
-	if (unlikely(xtime.tv_sec != tv->tv_sec)) {
-		unsigned long flags;
-
-		write_seqlock_irqsave(&xtime_lock, flags);
-		update_wall_time();
-		write_sequnlock_irqrestore(&xtime_lock, flags);
-	}
-}
-EXPORT_SYMBOL(do_gettimeofday);
-
-#else /* CONFIG_TIME_INTERPOLATION */
-
 #ifndef CONFIG_GENERIC_TIME
 /*
  * Simulate gettimeofday using do_gettimeofday which only allows a timeval
@@ -410,7 +323,6 @@ void getnstimeofday(struct timespec *tv)
 }
 EXPORT_SYMBOL_GPL(getnstimeofday);
 #endif
-#endif /* CONFIG_TIME_INTERPOLATION */
 
 /* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
  * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 438c6b723ee2..b5e352597cbb 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -116,11 +116,6 @@ void second_overflow(void)
 		if (xtime.tv_sec % 86400 == 0) {
 			xtime.tv_sec--;
 			wall_to_monotonic.tv_sec++;
-			/*
-			 * The timer interpolator will make time change
-			 * gradually instead of an immediate jump by one second
-			 */
-			time_interpolator_update(-NSEC_PER_SEC);
 			time_state = TIME_OOP;
 			printk(KERN_NOTICE "Clock: inserting leap second "
 					"23:59:60 UTC\n");
@@ -130,11 +125,6 @@ void second_overflow(void)
 		if ((xtime.tv_sec + 1) % 86400 == 0) {
 			xtime.tv_sec++;
 			wall_to_monotonic.tv_sec--;
-			/*
-			 * Use of time interpolator for a gradual change of
-			 * time
-			 */
-			time_interpolator_update(NSEC_PER_SEC);
 			time_state = TIME_WAIT;
 			printk(KERN_NOTICE "Clock: deleting leap second "
 					"23:59:59 UTC\n");
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 728cedfd3cbd..027d46c906e0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -466,10 +466,6 @@ void update_wall_time(void)
 			second_overflow();
 		}
 
-		/* interpolator bits */
-		time_interpolator_update(clock->xtime_interval
-						>> clock->shift);
-
 		/* accumulate error between NTP and clock interval */
 		clock->error += current_tick_length();
 		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
diff --git a/kernel/timer.c b/kernel/timer.c
index b7792fb03387..dbc03ab14eed 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1349,194 +1349,6 @@ void __init init_timers(void)
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
 }
 
-#ifdef CONFIG_TIME_INTERPOLATION
-
-struct time_interpolator *time_interpolator __read_mostly;
-static struct time_interpolator *time_interpolator_list __read_mostly;
-static DEFINE_SPINLOCK(time_interpolator_lock);
-
-static inline cycles_t time_interpolator_get_cycles(unsigned int src)
-{
-	unsigned long (*x)(void);
-
-	switch (src)
-	{
-		case TIME_SOURCE_FUNCTION:
-			x = time_interpolator->addr;
-			return x();
-
-		case TIME_SOURCE_MMIO64 :
-			return readq_relaxed((void __iomem *)time_interpolator->addr);
-
-		case TIME_SOURCE_MMIO32 :
-			return readl_relaxed((void __iomem *)time_interpolator->addr);
-
-		default: return get_cycles();
-	}
-}
-
-static inline u64 time_interpolator_get_counter(int writelock)
-{
-	unsigned int src = time_interpolator->source;
-
-	if (time_interpolator->jitter)
-	{
-		cycles_t lcycle;
-		cycles_t now;
-
-		do {
-			lcycle = time_interpolator->last_cycle;
-			now = time_interpolator_get_cycles(src);
-			if (lcycle && time_after(lcycle, now))
-				return lcycle;
-
-			/* When holding the xtime write lock, there's no need
-			 * to add the overhead of the cmpxchg. Readers are
-			 * force to retry until the write lock is released.
-			 */
-			if (writelock) {
-				time_interpolator->last_cycle = now;
-				return now;
-			}
-			/* Keep track of the last timer value returned. The use of cmpxchg here
-			 * will cause contention in an SMP environment.
-			 */
-		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
-		return now;
-	}
-	else
-		return time_interpolator_get_cycles(src);
-}
-
-void time_interpolator_reset(void)
-{
-	time_interpolator->offset = 0;
-	time_interpolator->last_counter = time_interpolator_get_counter(1);
-}
-
-#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
-
-unsigned long time_interpolator_get_offset(void)
-{
-	/* If we do not have a time interpolator set up then just return zero */
-	if (!time_interpolator)
-		return 0;
-
-	return time_interpolator->offset +
-		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
-}
-
-#define INTERPOLATOR_ADJUST 65536
-#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
-
-void time_interpolator_update(long delta_nsec)
-{
-	u64 counter;
-	unsigned long offset;
-
-	/* If there is no time interpolator set up then do nothing */
-	if (!time_interpolator)
-		return;
-
-	/*
-	 * The interpolator compensates for late ticks by accumulating the late
-	 * time in time_interpolator->offset. A tick earlier than expected will
-	 * lead to a reset of the offset and a corresponding jump of the clock
-	 * forward. Again this only works if the interpolator clock is running
-	 * slightly slower than the regular clock and the tuning logic insures
-	 * that.
-	 */
-
-	counter = time_interpolator_get_counter(1);
-	offset = time_interpolator->offset +
-			GET_TI_NSECS(counter, time_interpolator);
-
-	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
-		time_interpolator->offset = offset - delta_nsec;
-	else {
-		time_interpolator->skips++;
-		time_interpolator->ns_skipped += delta_nsec - offset;
-		time_interpolator->offset = 0;
-	}
-	time_interpolator->last_counter = counter;
-
-	/* Tuning logic for time interpolator invoked every minute or so.
-	 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
-	 * Increase interpolator clock speed if we skip too much time.
-	 */
-	if (jiffies % INTERPOLATOR_ADJUST == 0)
-	{
-		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
-			time_interpolator->nsec_per_cyc--;
-		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
-			time_interpolator->nsec_per_cyc++;
-		time_interpolator->skips = 0;
-		time_interpolator->ns_skipped = 0;
-	}
-}
-
-static inline int
-is_better_time_interpolator(struct time_interpolator *new)
-{
-	if (!time_interpolator)
-		return 1;
-	return new->frequency > 2*time_interpolator->frequency ||
-	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
-}
-
-void
-register_time_interpolator(struct time_interpolator *ti)
-{
-	unsigned long flags;
-
-	/* Sanity check */
-	BUG_ON(ti->frequency == 0 || ti->mask == 0);
-
-	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
-	spin_lock(&time_interpolator_lock);
-	write_seqlock_irqsave(&xtime_lock, flags);
-	if (is_better_time_interpolator(ti)) {
-		time_interpolator = ti;
-		time_interpolator_reset();
-	}
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-
-	ti->next = time_interpolator_list;
-	time_interpolator_list = ti;
-	spin_unlock(&time_interpolator_lock);
-}
-
-void
-unregister_time_interpolator(struct time_interpolator *ti)
-{
-	struct time_interpolator *curr, **prev;
-	unsigned long flags;
-
-	spin_lock(&time_interpolator_lock);
-	prev = &time_interpolator_list;
-	for (curr = *prev; curr; curr = curr->next) {
-		if (curr == ti) {
-			*prev = curr->next;
-			break;
-		}
-		prev = &curr->next;
-	}
-
-	write_seqlock_irqsave(&xtime_lock, flags);
-	if (ti == time_interpolator) {
-		/* we lost the best time-interpolator: */
-		time_interpolator = NULL;
-		/* find the next-best interpolator */
-		for (curr = time_interpolator_list; curr; curr = curr->next)
-			if (is_better_time_interpolator(curr))
-				time_interpolator = curr;
-		time_interpolator_reset();
-	}
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-	spin_unlock(&time_interpolator_lock);
-}
-#endif /* CONFIG_TIME_INTERPOLATION */
-
 /**
  * msleep - sleep safely even with waitqueue interruptions
  * @msecs: Time in milliseconds to sleep for
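The jitter-handling loop removed above (in time_interpolator_get_counter()) uses cmpxchg to guarantee that a flaky counter never appears to run backwards. The sketch below is an illustrative userspace analogue of that technique using C11 atomics; it is not the removed kernel code, and the jittery counter stand-in is invented for the example.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Stand-in for a cycle counter that can occasionally step backwards. */
static uint64_t read_raw_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec
	       - (uint64_t)(rand() % 50);	/* inject small backward jitter */
}

/* Last value handed out to any caller (cf. time_interpolator->last_cycle). */
static _Atomic uint64_t last_cycle;

/* Monotonically non-decreasing view of the jittery counter. */
static uint64_t read_monotonic_counter(void)
{
	uint64_t prev, now;

	do {
		prev = atomic_load(&last_cycle);
		now  = read_raw_counter();

		/* A reading older than one already returned is discarded,
		 * mirroring the time_after(lcycle, now) check above. */
		if (prev && now < prev)
			return prev;

		/* Publish the newer value; retry if another thread won the
		 * race, just like the cmpxchg loop in the removed code. */
	} while (!atomic_compare_exchange_weak(&last_cycle, &prev, now));

	return now;
}

int main(void)
{
	uint64_t a = read_monotonic_counter();
	uint64_t b = read_monotonic_counter();

	printf("%llu -> %llu (never decreasing)\n",
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}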