author     Tony Luck <tony.luck@intel.com>  2007-07-20 14:26:47 -0400
committer  Tony Luck <tony.luck@intel.com>  2007-07-20 14:26:47 -0400
commit     c36c282b88963d0957368a443168588e62301fda (patch)
tree       6343887ae42a65635a61b4ad99fd7f3e8dd24758 /kernel/timer.c
parent     f4fbfb0dda5577075a049eec7fb7ad38abca1912 (diff)
parent     1f564ad6d4182859612cbae452122e5eb2d62a76 (diff)
Pull ia64-clocksource into release branch
Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c | 188
1 file changed, 0 insertions, 188 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index d1e8b975c7ae..6ce1952eea7d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1349,194 +1349,6 @@ void __init init_timers(void)
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
 }
 
-#ifdef CONFIG_TIME_INTERPOLATION
-
-struct time_interpolator *time_interpolator __read_mostly;
-static struct time_interpolator *time_interpolator_list __read_mostly;
-static DEFINE_SPINLOCK(time_interpolator_lock);
-
-static inline cycles_t time_interpolator_get_cycles(unsigned int src)
-{
-	unsigned long (*x)(void);
-
-	switch (src)
-	{
-		case TIME_SOURCE_FUNCTION:
-			x = time_interpolator->addr;
-			return x();
-
-		case TIME_SOURCE_MMIO64 :
-			return readq_relaxed((void __iomem *)time_interpolator->addr);
-
-		case TIME_SOURCE_MMIO32 :
-			return readl_relaxed((void __iomem *)time_interpolator->addr);
-
-		default: return get_cycles();
-	}
-}
-
-static inline u64 time_interpolator_get_counter(int writelock)
-{
-	unsigned int src = time_interpolator->source;
-
-	if (time_interpolator->jitter)
-	{
-		cycles_t lcycle;
-		cycles_t now;
-
-		do {
-			lcycle = time_interpolator->last_cycle;
-			now = time_interpolator_get_cycles(src);
-			if (lcycle && time_after(lcycle, now))
-				return lcycle;
-
-			/* When holding the xtime write lock, there's no need
-			 * to add the overhead of the cmpxchg. Readers are
-			 * forced to retry until the write lock is released.
-			 */
-			if (writelock) {
-				time_interpolator->last_cycle = now;
-				return now;
-			}
-			/* Keep track of the last timer value returned. The use of cmpxchg here
-			 * will cause contention in an SMP environment.
-			 */
-		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
-		return now;
-	}
-	else
-		return time_interpolator_get_cycles(src);
-}
-
-void time_interpolator_reset(void)
-{
-	time_interpolator->offset = 0;
-	time_interpolator->last_counter = time_interpolator_get_counter(1);
-}
-
-#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
-
-unsigned long time_interpolator_get_offset(void)
-{
-	/* If we do not have a time interpolator set up then just return zero */
-	if (!time_interpolator)
-		return 0;
-
-	return time_interpolator->offset +
-		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
-}
-
-#define INTERPOLATOR_ADJUST 65536
-#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
-
-void time_interpolator_update(long delta_nsec)
-{
-	u64 counter;
-	unsigned long offset;
-
-	/* If there is no time interpolator set up then do nothing */
-	if (!time_interpolator)
-		return;
-
-	/*
-	 * The interpolator compensates for late ticks by accumulating the late
-	 * time in time_interpolator->offset. A tick earlier than expected will
-	 * lead to a reset of the offset and a corresponding jump of the clock
-	 * forward. Again this only works if the interpolator clock is running
-	 * slightly slower than the regular clock and the tuning logic ensures
-	 * that.
-	 */
-
-	counter = time_interpolator_get_counter(1);
-	offset = time_interpolator->offset +
-			GET_TI_NSECS(counter, time_interpolator);
-
-	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
-		time_interpolator->offset = offset - delta_nsec;
-	else {
-		time_interpolator->skips++;
-		time_interpolator->ns_skipped += delta_nsec - offset;
-		time_interpolator->offset = 0;
-	}
-	time_interpolator->last_counter = counter;
-
-	/* Tuning logic for time interpolator invoked every minute or so.
-	 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
-	 * Increase interpolator clock speed if we skip too much time.
-	 */
-	if (jiffies % INTERPOLATOR_ADJUST == 0)
-	{
-		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
-			time_interpolator->nsec_per_cyc--;
-		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
-			time_interpolator->nsec_per_cyc++;
-		time_interpolator->skips = 0;
-		time_interpolator->ns_skipped = 0;
-	}
-}
-
-static inline int
-is_better_time_interpolator(struct time_interpolator *new)
-{
-	if (!time_interpolator)
-		return 1;
-	return new->frequency > 2*time_interpolator->frequency ||
-	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
-}
-
-void
-register_time_interpolator(struct time_interpolator *ti)
-{
-	unsigned long flags;
-
-	/* Sanity check */
-	BUG_ON(ti->frequency == 0 || ti->mask == 0);
-
-	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
-	spin_lock(&time_interpolator_lock);
-	write_seqlock_irqsave(&xtime_lock, flags);
-	if (is_better_time_interpolator(ti)) {
-		time_interpolator = ti;
-		time_interpolator_reset();
-	}
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-
-	ti->next = time_interpolator_list;
-	time_interpolator_list = ti;
-	spin_unlock(&time_interpolator_lock);
-}
-
-void
-unregister_time_interpolator(struct time_interpolator *ti)
-{
-	struct time_interpolator *curr, **prev;
-	unsigned long flags;
-
-	spin_lock(&time_interpolator_lock);
-	prev = &time_interpolator_list;
-	for (curr = *prev; curr; curr = curr->next) {
-		if (curr == ti) {
-			*prev = curr->next;
-			break;
-		}
-		prev = &curr->next;
-	}
-
-	write_seqlock_irqsave(&xtime_lock, flags);
-	if (ti == time_interpolator) {
-		/* we lost the best time-interpolator: */
-		time_interpolator = NULL;
-		/* find the next-best interpolator */
-		for (curr = time_interpolator_list; curr; curr = curr->next)
-			if (is_better_time_interpolator(curr))
-				time_interpolator = curr;
-		time_interpolator_reset();
-	}
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-	spin_unlock(&time_interpolator_lock);
-}
-#endif /* CONFIG_TIME_INTERPOLATION */
-
 /**
  * msleep - sleep safely even with waitqueue interruptions
  * @msecs: Time in milliseconds to sleep for
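
Note on the arithmetic being removed: register_time_interpolator() precomputes a fixed-point scale factor, nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency, and GET_TI_NSECS() then converts a masked counter delta into nanoseconds with a single multiply and shift, so no division happens on the clock-read path. Below is a minimal userspace sketch of that conversion; the 10 MHz frequency, 16-bit shift, 32-bit mask, and sample counter values are illustrative assumptions, not values from this patch.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Illustrative interpolator parameters (assumed, not from the
	 * patch): a 10 MHz free-running counter that is 32 bits wide. */
	uint64_t frequency = 10000000;          /* counter ticks per second */
	unsigned int shift = 16;                /* fixed-point fraction bits */
	uint64_t mask      = 0xffffffffULL;     /* counter width */

	/* What register_time_interpolator() precomputes once: ns per tick,
	 * scaled up by 2^shift so the fraction survives integer math. */
	uint64_t nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency;

	/* What GET_TI_NSECS() does per reading: masked delta, then scale.
	 * Sample values straddle a 32-bit wrap to show why the mask matters. */
	uint64_t last_counter = 0xfffffff0ULL;  /* reading at the last tick */
	uint64_t count        = 0x00000050ULL;  /* current reading, post-wrap */

	uint64_t delta_cyc = (count - last_counter) & mask;     /* 96 ticks */
	uint64_t nsecs     = (delta_cyc * nsec_per_cyc) >> shift;

	printf("nsec_per_cyc = %llu (ns/tick << %u)\n",
	       (unsigned long long)nsec_per_cyc, shift);
	printf("elapsed      = %llu ns\n", (unsigned long long)nsecs);
	/* Prints 9600 ns: 96 ticks at 100 ns each. */
	return 0;
}

The & mask step is what keeps the delta correct across counter wrap-around, as in the sample values above, where the 32-bit counter wraps between readings. The precomputed multiply-and-shift is the same basic trick as the generic clocksource mult/shift pair, which is what this merge moves ia64 onto in place of the interpolator.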