diff options
Diffstat (limited to 'kernel/time/hrtimer.c')
-rw-r--r-- | kernel/time/hrtimer.c | 69 |
1 file changed, 67 insertions(+), 2 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 65605530ee34..ce20111d3fe2 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -42,6 +42,10 @@ | |||
42 | #include <linux/freezer.h> | 42 | #include <linux/freezer.h> |
43 | #include <linux/compat.h> | 43 | #include <linux/compat.h> |
44 | 44 | ||
45 | #include <litmus/litmus.h> | ||
46 | #include <litmus/debug_trace.h> | ||
47 | #include <litmus/trace.h> | ||
48 | |||
45 | #include <linux/uaccess.h> | 49 | #include <linux/uaccess.h> |
46 | 50 | ||
47 | #include <trace/events/timer.h> | 51 | #include <trace/events/timer.h> |
@@ -1092,6 +1096,10 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |||
1092 | 1096 | ||
1093 | tim = hrtimer_update_lowres(timer, tim, mode); | 1097 | tim = hrtimer_update_lowres(timer, tim, mode); |
1094 | 1098 | ||
1099 | #ifdef CONFIG_REPORT_TIMER_LATENCY | ||
1100 | timer->when_added = base->get_time(); | ||
1101 | #endif | ||
1102 | |||
1095 | hrtimer_set_expires_range_ns(timer, tim, delta_ns); | 1103 | hrtimer_set_expires_range_ns(timer, tim, delta_ns); |
1096 | 1104 | ||
1097 | /* Switch the timer base, if necessary: */ | 1105 | /* Switch the timer base, if necessary: */ |
@@ -1546,6 +1554,9 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, | |||
1546 | { | 1554 | { |
1547 | struct hrtimer_clock_base *base; | 1555 | struct hrtimer_clock_base *base; |
1548 | unsigned int active = cpu_base->active_bases & active_mask; | 1556 | unsigned int active = cpu_base->active_bases & active_mask; |
1557 | #ifdef CONFIG_REPORT_TIMER_LATENCY | ||
1558 | ktime_t was_exp_nxt = cpu_base->expires_next; | ||
1559 | #endif | ||
1549 | 1560 | ||
1550 | for_each_active_base(base, cpu_base, active) { | 1561 | for_each_active_base(base, cpu_base, active) { |
1551 | struct timerqueue_node *node; | 1562 | struct timerqueue_node *node; |
@@ -1573,6 +1584,26 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, | |||
1573 | if (basenow < hrtimer_get_softexpires_tv64(timer)) | 1584 | if (basenow < hrtimer_get_softexpires_tv64(timer)) |
1574 | break; | 1585 | break; |
1575 | 1586 | ||
1587 | #ifdef CONFIG_REPORT_TIMER_LATENCY | ||
1588 | if (cpu_base->hres_active && (basenow.tv64 >= | ||
1589 | hrtimer_get_expires_tv64(timer) + | ||
1590 | ((s64) CONFIG_REPORT_TIMER_LATENCY_THRESHOLD))) { | ||
1591 | printk_ratelimited(KERN_WARNING "WARNING: " | ||
1592 | "P%d timer latency: %lld now: %lld " | ||
1593 | "basenow:%lld exp:%lld " | ||
1594 | "sexp:%lld nxt:%lld added:%lld " | ||
1595 | "timer:%p fn:%p\n", | ||
1596 | smp_processor_id(), | ||
1597 | basenow.tv64 - hrtimer_get_expires_tv64(timer), | ||
1598 | now.tv64, basenow.tv64, | ||
1599 | hrtimer_get_expires_tv64(timer), | ||
1600 | hrtimer_get_softexpires_tv64(timer), | ||
1601 | was_exp_nxt.tv64, | ||
1602 | timer->when_added.tv64, | ||
1603 | timer, timer->function); | ||
1604 | } | ||
1605 | #endif | ||
1606 | |||
1576 | __run_hrtimer(cpu_base, base, timer, &basenow, flags); | 1607 | __run_hrtimer(cpu_base, base, timer, &basenow, flags); |
1577 | if (active_mask == HRTIMER_ACTIVE_SOFT) | 1608 | if (active_mask == HRTIMER_ACTIVE_SOFT) |
1578 | hrtimer_sync_wait_running(cpu_base, flags); | 1609 | hrtimer_sync_wait_running(cpu_base, flags); |
@@ -1679,9 +1710,14 @@ retry: | |||
1679 | */ | 1710 | */ |
1680 | cpu_base->nr_hangs++; | 1711 | cpu_base->nr_hangs++; |
1681 | cpu_base->hang_detected = 1; | 1712 | cpu_base->hang_detected = 1; |
1713 | |||
1714 | TRACE("hrtimer hang detected on P%d: #%u\n", cpu_base->cpu, | ||
1715 | cpu_base->nr_hangs); | ||
1716 | |||
1682 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | 1717 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); |
1683 | 1718 | ||
1684 | delta = ktime_sub(now, entry_time); | 1719 | delta = ktime_sub(now, entry_time); |
1720 | TRACE("hrtimer hang delta.tv64:%u\n", (unsigned int) delta.tv64); | ||
1685 | if ((unsigned int)delta > cpu_base->max_hang_time) | 1721 | if ((unsigned int)delta > cpu_base->max_hang_time) |
1686 | cpu_base->max_hang_time = (unsigned int) delta; | 1722 | cpu_base->max_hang_time = (unsigned int) delta; |
1687 | /* | 1723 | /* |
@@ -1692,6 +1728,9 @@ retry: | |||
1692 | expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); | 1728 | expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); |
1693 | else | 1729 | else |
1694 | expires_next = ktime_add(now, delta); | 1730 | expires_next = ktime_add(now, delta); |
1731 | |||
1732 | TRACE("hrtimer expires_next:%lld\n", (long long) expires_next.tv64); | ||
1733 | |||
1695 | tick_program_event(expires_next, 1); | 1734 | tick_program_event(expires_next, 1); |
1696 | pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta)); | 1735 | pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta)); |
1697 | } | 1736 | } |
@@ -1762,8 +1801,21 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) | |||
1762 | struct task_struct *task = t->task; | 1801 | struct task_struct *task = t->task; |
1763 | 1802 | ||
1764 | t->task = NULL; | 1803 | t->task = NULL; |
1765 | if (task) | 1804 | if (task) { |
1805 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE | ||
1806 | if (is_realtime(task)) { | ||
1807 | ktime_t expires = hrtimer_get_expires(timer); | ||
1808 | /* Fix up timers that were added past their due date, | ||
1809 | * because that's not really release latency. */ | ||
1810 | lt_t intended_release = max(expires.tv64, | ||
1811 | timer->when_added.tv64); | ||
1812 | TS_RELEASE_LATENCY(intended_release); | ||
1813 | } | ||
1814 | #endif | ||
1815 | TS_RELEASE_START; | ||
1766 | wake_up_process(task); | 1816 | wake_up_process(task); |
1817 | TS_RELEASE_END; | ||
1818 | } | ||
1767 | 1819 | ||
1768 | return HRTIMER_NORESTART; | 1820 | return HRTIMER_NORESTART; |
1769 | } | 1821 | } |
@@ -1916,9 +1968,19 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp, | |||
1916 | u64 slack; | 1968 | u64 slack; |
1917 | 1969 | ||
1918 | slack = current->timer_slack_ns; | 1970 | slack = current->timer_slack_ns; |
1919 | if (dl_task(current) || rt_task(current)) | 1971 | if (dl_task(current) || rt_task(current) || is_realtime(current)) |
1920 | slack = 0; | 1972 | slack = 0; |
1921 | 1973 | ||
1974 | if (is_realtime(current) && (clockid == CLOCK_MONOTONIC) && | ||
1975 | (mode == HRTIMER_MODE_ABS)) { | ||
1976 | /* Special handling: to handle periodic activations correctly | ||
1977 | * despite timer jitter and overheads, the plugin might need to | ||
1978 | * know the time at which the task intends to wake up. */ | ||
1979 | tsk_rt(current)->doing_abs_nanosleep = 1; | ||
1980 | tsk_rt(current)->nanosleep_wakeup = ktime_to_ns( | ||
1981 | timespec_to_ktime(*rqtp)); | ||
1982 | } | ||
1983 | |||
1922 | hrtimer_init_sleeper_on_stack(&t, clockid, mode); | 1984 | hrtimer_init_sleeper_on_stack(&t, clockid, mode); |
1923 | hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); | 1985 | hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); |
1924 | ret = do_nanosleep(&t, mode); | 1986 | ret = do_nanosleep(&t, mode); |
@@ -1937,6 +1999,9 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp, | |||
1937 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); | 1999 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); |
1938 | out: | 2000 | out: |
1939 | destroy_hrtimer_on_stack(&t.timer); | 2001 | destroy_hrtimer_on_stack(&t.timer); |
2002 | |||
2003 | tsk_rt(current)->doing_abs_nanosleep = 0; | ||
2004 | |||
1940 | return ret; | 2005 | return ret; |
1941 | } | 2006 | } |
1942 | 2007 | ||