diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-23 12:46:15 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-23 12:46:15 -0400 |
| commit | 31bbb9b58d1e8ebcf2b28c95c2250a9f8e31e397 (patch) | |
| tree | 6bb0c0490d66d32eca43e73abb28d8b3ab0e7b91 | |
| parent | ff830b8e5f999d1ccbd0282a666520f0b557daa4 (diff) | |
| parent | 3f0a525ebf4b8ef041a332bbe4a73aee94bb064b (diff) | |
Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
itimers: Add tracepoints for itimer
hrtimer: Add tracepoint for hrtimers
timers: Add tracepoints for timer_list timers
cputime: Optimize jiffies_to_cputime(1)
itimers: Simplify arm_timer() code a bit
itimers: Fix periodic tics precision
itimers: Merge ITIMER_VIRT and ITIMER_PROF
Trivial header file include conflicts in kernel/fork.c
| -rw-r--r-- | arch/ia64/include/asm/cputime.h | 1 | ||||
| -rw-r--r-- | arch/powerpc/include/asm/cputime.h | 13 | ||||
| -rw-r--r-- | arch/powerpc/kernel/time.c | 4 | ||||
| -rw-r--r-- | arch/s390/include/asm/cputime.h | 1 | ||||
| -rw-r--r-- | include/asm-generic/cputime.h | 1 | ||||
| -rw-r--r-- | include/linux/sched.h | 16 | ||||
| -rw-r--r-- | include/trace/events/timer.h | 342 | ||||
| -rw-r--r-- | kernel/fork.c | 9 | ||||
| -rw-r--r-- | kernel/hrtimer.c | 40 | ||||
| -rw-r--r-- | kernel/itimer.c | 169 | ||||
| -rw-r--r-- | kernel/posix-cpu-timers.c | 155 | ||||
| -rw-r--r-- | kernel/sched.c | 9 | ||||
| -rw-r--r-- | kernel/timer.c | 32 |
13 files changed, 620 insertions, 172 deletions
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index d20b998cb91d..7fa8a8594660 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h | |||
| @@ -30,6 +30,7 @@ typedef u64 cputime_t; | |||
| 30 | typedef u64 cputime64_t; | 30 | typedef u64 cputime64_t; |
| 31 | 31 | ||
| 32 | #define cputime_zero ((cputime_t)0) | 32 | #define cputime_zero ((cputime_t)0) |
| 33 | #define cputime_one_jiffy jiffies_to_cputime(1) | ||
| 33 | #define cputime_max ((~((cputime_t)0) >> 1) - 1) | 34 | #define cputime_max ((~((cputime_t)0) >> 1) - 1) |
| 34 | #define cputime_add(__a, __b) ((__a) + (__b)) | 35 | #define cputime_add(__a, __b) ((__a) + (__b)) |
| 35 | #define cputime_sub(__a, __b) ((__a) - (__b)) | 36 | #define cputime_sub(__a, __b) ((__a) - (__b)) |
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index f42e623030ee..fa19f3fe05ff 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h | |||
| @@ -18,6 +18,9 @@ | |||
| 18 | 18 | ||
| 19 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 19 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
| 20 | #include <asm-generic/cputime.h> | 20 | #include <asm-generic/cputime.h> |
| 21 | #ifdef __KERNEL__ | ||
| 22 | static inline void setup_cputime_one_jiffy(void) { } | ||
| 23 | #endif | ||
| 21 | #else | 24 | #else |
| 22 | 25 | ||
| 23 | #include <linux/types.h> | 26 | #include <linux/types.h> |
| @@ -49,6 +52,11 @@ typedef u64 cputime64_t; | |||
| 49 | #ifdef __KERNEL__ | 52 | #ifdef __KERNEL__ |
| 50 | 53 | ||
| 51 | /* | 54 | /* |
| 55 | * One jiffy in timebase units computed during initialization | ||
| 56 | */ | ||
| 57 | extern cputime_t cputime_one_jiffy; | ||
| 58 | |||
| 59 | /* | ||
| 52 | * Convert cputime <-> jiffies | 60 | * Convert cputime <-> jiffies |
| 53 | */ | 61 | */ |
| 54 | extern u64 __cputime_jiffies_factor; | 62 | extern u64 __cputime_jiffies_factor; |
| @@ -89,6 +97,11 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif) | |||
| 89 | return ct; | 97 | return ct; |
| 90 | } | 98 | } |
| 91 | 99 | ||
| 100 | static inline void setup_cputime_one_jiffy(void) | ||
| 101 | { | ||
| 102 | cputime_one_jiffy = jiffies_to_cputime(1); | ||
| 103 | } | ||
| 104 | |||
| 92 | static inline cputime64_t jiffies64_to_cputime64(const u64 jif) | 105 | static inline cputime64_t jiffies64_to_cputime64(const u64 jif) |
| 93 | { | 106 | { |
| 94 | cputime_t ct; | 107 | cputime_t ct; |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index df45a7449a66..92dc844299b6 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
| @@ -193,6 +193,8 @@ EXPORT_SYMBOL(__cputime_clockt_factor); | |||
| 193 | DEFINE_PER_CPU(unsigned long, cputime_last_delta); | 193 | DEFINE_PER_CPU(unsigned long, cputime_last_delta); |
| 194 | DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); | 194 | DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); |
| 195 | 195 | ||
| 196 | cputime_t cputime_one_jiffy; | ||
| 197 | |||
| 196 | static void calc_cputime_factors(void) | 198 | static void calc_cputime_factors(void) |
| 197 | { | 199 | { |
| 198 | struct div_result res; | 200 | struct div_result res; |
| @@ -501,6 +503,7 @@ static int __init iSeries_tb_recal(void) | |||
| 501 | tb_to_xs = divres.result_low; | 503 | tb_to_xs = divres.result_low; |
| 502 | vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; | 504 | vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; |
| 503 | vdso_data->tb_to_xs = tb_to_xs; | 505 | vdso_data->tb_to_xs = tb_to_xs; |
| 506 | setup_cputime_one_jiffy(); | ||
| 504 | } | 507 | } |
| 505 | else { | 508 | else { |
| 506 | printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" | 509 | printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" |
| @@ -960,6 +963,7 @@ void __init time_init(void) | |||
| 960 | tb_ticks_per_usec = ppc_tb_freq / 1000000; | 963 | tb_ticks_per_usec = ppc_tb_freq / 1000000; |
| 961 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); | 964 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); |
| 962 | calc_cputime_factors(); | 965 | calc_cputime_factors(); |
| 966 | setup_cputime_one_jiffy(); | ||
| 963 | 967 | ||
| 964 | /* | 968 | /* |
| 965 | * Calculate the length of each tick in ns. It will not be | 969 | * Calculate the length of each tick in ns. It will not be |
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 7a3817a656df..24b1244aadb9 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h | |||
| @@ -42,6 +42,7 @@ __div(unsigned long long n, unsigned int base) | |||
| 42 | #endif /* __s390x__ */ | 42 | #endif /* __s390x__ */ |
| 43 | 43 | ||
| 44 | #define cputime_zero (0ULL) | 44 | #define cputime_zero (0ULL) |
| 45 | #define cputime_one_jiffy jiffies_to_cputime(1) | ||
| 45 | #define cputime_max ((~0UL >> 1) - 1) | 46 | #define cputime_max ((~0UL >> 1) - 1) |
| 46 | #define cputime_add(__a, __b) ((__a) + (__b)) | 47 | #define cputime_add(__a, __b) ((__a) + (__b)) |
| 47 | #define cputime_sub(__a, __b) ((__a) - (__b)) | 48 | #define cputime_sub(__a, __b) ((__a) - (__b)) |
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 1c1fa422d18a..ca0f239f0e13 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | typedef unsigned long cputime_t; | 7 | typedef unsigned long cputime_t; |
| 8 | 8 | ||
| 9 | #define cputime_zero (0UL) | 9 | #define cputime_zero (0UL) |
| 10 | #define cputime_one_jiffy jiffies_to_cputime(1) | ||
| 10 | #define cputime_max ((~0UL >> 1) - 1) | 11 | #define cputime_max ((~0UL >> 1) - 1) |
| 11 | #define cputime_add(__a, __b) ((__a) + (__b)) | 12 | #define cputime_add(__a, __b) ((__a) + (__b)) |
| 12 | #define cputime_sub(__a, __b) ((__a) - (__b)) | 13 | #define cputime_sub(__a, __b) ((__a) - (__b)) |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 3cbc6c0be666..cbf2a3b46280 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -493,6 +493,13 @@ struct pacct_struct { | |||
| 493 | unsigned long ac_minflt, ac_majflt; | 493 | unsigned long ac_minflt, ac_majflt; |
| 494 | }; | 494 | }; |
| 495 | 495 | ||
| 496 | struct cpu_itimer { | ||
| 497 | cputime_t expires; | ||
| 498 | cputime_t incr; | ||
| 499 | u32 error; | ||
| 500 | u32 incr_error; | ||
| 501 | }; | ||
| 502 | |||
| 496 | /** | 503 | /** |
| 497 | * struct task_cputime - collected CPU time counts | 504 | * struct task_cputime - collected CPU time counts |
| 498 | * @utime: time spent in user mode, in &cputime_t units | 505 | * @utime: time spent in user mode, in &cputime_t units |
| @@ -587,9 +594,12 @@ struct signal_struct { | |||
| 587 | struct pid *leader_pid; | 594 | struct pid *leader_pid; |
| 588 | ktime_t it_real_incr; | 595 | ktime_t it_real_incr; |
| 589 | 596 | ||
| 590 | /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ | 597 | /* |
| 591 | cputime_t it_prof_expires, it_virt_expires; | 598 | * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use |
| 592 | cputime_t it_prof_incr, it_virt_incr; | 599 | * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these |
| 600 | * values are defined to 0 and 1 respectively | ||
| 601 | */ | ||
| 602 | struct cpu_itimer it[2]; | ||
| 593 | 603 | ||
| 594 | /* | 604 | /* |
| 595 | * Thread group totals for process CPU timers. | 605 | * Thread group totals for process CPU timers. |
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h new file mode 100644 index 000000000000..1844c48d640e --- /dev/null +++ b/include/trace/events/timer.h | |||
| @@ -0,0 +1,342 @@ | |||
| 1 | #undef TRACE_SYSTEM | ||
| 2 | #define TRACE_SYSTEM timer | ||
| 3 | |||
| 4 | #if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ) | ||
| 5 | #define _TRACE_TIMER_H | ||
| 6 | |||
| 7 | #include <linux/tracepoint.h> | ||
| 8 | #include <linux/hrtimer.h> | ||
| 9 | #include <linux/timer.h> | ||
| 10 | |||
| 11 | /** | ||
| 12 | * timer_init - called when the timer is initialized | ||
| 13 | * @timer: pointer to struct timer_list | ||
| 14 | */ | ||
| 15 | TRACE_EVENT(timer_init, | ||
| 16 | |||
| 17 | TP_PROTO(struct timer_list *timer), | ||
| 18 | |||
| 19 | TP_ARGS(timer), | ||
| 20 | |||
| 21 | TP_STRUCT__entry( | ||
| 22 | __field( void *, timer ) | ||
| 23 | ), | ||
| 24 | |||
| 25 | TP_fast_assign( | ||
| 26 | __entry->timer = timer; | ||
| 27 | ), | ||
| 28 | |||
| 29 | TP_printk("timer %p", __entry->timer) | ||
| 30 | ); | ||
| 31 | |||
| 32 | /** | ||
| 33 | * timer_start - called when the timer is started | ||
| 34 | * @timer: pointer to struct timer_list | ||
| 35 | * @expires: the timers expiry time | ||
| 36 | */ | ||
| 37 | TRACE_EVENT(timer_start, | ||
| 38 | |||
| 39 | TP_PROTO(struct timer_list *timer, unsigned long expires), | ||
| 40 | |||
| 41 | TP_ARGS(timer, expires), | ||
| 42 | |||
| 43 | TP_STRUCT__entry( | ||
| 44 | __field( void *, timer ) | ||
| 45 | __field( void *, function ) | ||
| 46 | __field( unsigned long, expires ) | ||
| 47 | __field( unsigned long, now ) | ||
| 48 | ), | ||
| 49 | |||
| 50 | TP_fast_assign( | ||
| 51 | __entry->timer = timer; | ||
| 52 | __entry->function = timer->function; | ||
| 53 | __entry->expires = expires; | ||
| 54 | __entry->now = jiffies; | ||
| 55 | ), | ||
| 56 | |||
| 57 | TP_printk("timer %p: func %pf, expires %lu, timeout %ld", | ||
| 58 | __entry->timer, __entry->function, __entry->expires, | ||
| 59 | (long)__entry->expires - __entry->now) | ||
| 60 | ); | ||
| 61 | |||
| 62 | /** | ||
| 63 | * timer_expire_entry - called immediately before the timer callback | ||
| 64 | * @timer: pointer to struct timer_list | ||
| 65 | * | ||
| 66 | * Allows to determine the timer latency. | ||
| 67 | */ | ||
| 68 | TRACE_EVENT(timer_expire_entry, | ||
| 69 | |||
| 70 | TP_PROTO(struct timer_list *timer), | ||
| 71 | |||
| 72 | TP_ARGS(timer), | ||
| 73 | |||
| 74 | TP_STRUCT__entry( | ||
| 75 | __field( void *, timer ) | ||
| 76 | __field( unsigned long, now ) | ||
| 77 | ), | ||
| 78 | |||
| 79 | TP_fast_assign( | ||
| 80 | __entry->timer = timer; | ||
| 81 | __entry->now = jiffies; | ||
| 82 | ), | ||
| 83 | |||
| 84 | TP_printk("timer %p: now %lu", __entry->timer, __entry->now) | ||
| 85 | ); | ||
| 86 | |||
| 87 | /** | ||
| 88 | * timer_expire_exit - called immediately after the timer callback returns | ||
| 89 | * @timer: pointer to struct timer_list | ||
| 90 | * | ||
| 91 | * When used in combination with the timer_expire_entry tracepoint we can | ||
| 92 | * determine the runtime of the timer callback function. | ||
| 93 | * | ||
| 94 | * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might | ||
| 95 | * be invalid. We solely track the pointer. | ||
| 96 | */ | ||
| 97 | TRACE_EVENT(timer_expire_exit, | ||
| 98 | |||
| 99 | TP_PROTO(struct timer_list *timer), | ||
| 100 | |||
| 101 | TP_ARGS(timer), | ||
| 102 | |||
| 103 | TP_STRUCT__entry( | ||
| 104 | __field(void *, timer ) | ||
| 105 | ), | ||
| 106 | |||
| 107 | TP_fast_assign( | ||
| 108 | __entry->timer = timer; | ||
| 109 | ), | ||
| 110 | |||
| 111 | TP_printk("timer %p", __entry->timer) | ||
| 112 | ); | ||
| 113 | |||
| 114 | /** | ||
| 115 | * timer_cancel - called when the timer is canceled | ||
| 116 | * @timer: pointer to struct timer_list | ||
| 117 | */ | ||
| 118 | TRACE_EVENT(timer_cancel, | ||
| 119 | |||
| 120 | TP_PROTO(struct timer_list *timer), | ||
| 121 | |||
| 122 | TP_ARGS(timer), | ||
| 123 | |||
| 124 | TP_STRUCT__entry( | ||
| 125 | __field( void *, timer ) | ||
| 126 | ), | ||
| 127 | |||
| 128 | TP_fast_assign( | ||
| 129 | __entry->timer = timer; | ||
| 130 | ), | ||
| 131 | |||
| 132 | TP_printk("timer %p", __entry->timer) | ||
| 133 | ); | ||
| 134 | |||
| 135 | /** | ||
| 136 | * hrtimer_init - called when the hrtimer is initialized | ||
| 137 | * @timer: pointer to struct hrtimer | ||
| 138 | * @clockid: the hrtimers clock | ||
| 139 | * @mode: the hrtimers mode | ||
| 140 | */ | ||
| 141 | TRACE_EVENT(hrtimer_init, | ||
| 142 | |||
| 143 | TP_PROTO(struct hrtimer *timer, clockid_t clockid, | ||
| 144 | enum hrtimer_mode mode), | ||
| 145 | |||
| 146 | TP_ARGS(timer, clockid, mode), | ||
| 147 | |||
| 148 | TP_STRUCT__entry( | ||
| 149 | __field( void *, timer ) | ||
| 150 | __field( clockid_t, clockid ) | ||
| 151 | __field( enum hrtimer_mode, mode ) | ||
| 152 | ), | ||
| 153 | |||
| 154 | TP_fast_assign( | ||
| 155 | __entry->timer = timer; | ||
| 156 | __entry->clockid = clockid; | ||
| 157 | __entry->mode = mode; | ||
| 158 | ), | ||
| 159 | |||
| 160 | TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer, | ||
| 161 | __entry->clockid == CLOCK_REALTIME ? | ||
| 162 | "CLOCK_REALTIME" : "CLOCK_MONOTONIC", | ||
| 163 | __entry->mode == HRTIMER_MODE_ABS ? | ||
| 164 | "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL") | ||
| 165 | ); | ||
| 166 | |||
| 167 | /** | ||
| 168 | * hrtimer_start - called when the hrtimer is started | ||
| 169 | * @timer: pointer to struct hrtimer | ||
| 170 | */ | ||
| 171 | TRACE_EVENT(hrtimer_start, | ||
| 172 | |||
| 173 | TP_PROTO(struct hrtimer *timer), | ||
| 174 | |||
| 175 | TP_ARGS(timer), | ||
| 176 | |||
| 177 | TP_STRUCT__entry( | ||
| 178 | __field( void *, timer ) | ||
| 179 | __field( void *, function ) | ||
| 180 | __field( s64, expires ) | ||
| 181 | __field( s64, softexpires ) | ||
| 182 | ), | ||
| 183 | |||
| 184 | TP_fast_assign( | ||
| 185 | __entry->timer = timer; | ||
| 186 | __entry->function = timer->function; | ||
| 187 | __entry->expires = hrtimer_get_expires(timer).tv64; | ||
| 188 | __entry->softexpires = hrtimer_get_softexpires(timer).tv64; | ||
| 189 | ), | ||
| 190 | |||
| 191 | TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu", | ||
| 192 | __entry->timer, __entry->function, | ||
| 193 | (unsigned long long)ktime_to_ns((ktime_t) { | ||
| 194 | .tv64 = __entry->expires }), | ||
| 195 | (unsigned long long)ktime_to_ns((ktime_t) { | ||
| 196 | .tv64 = __entry->softexpires })) | ||
| 197 | ); | ||
| 198 | |||
| 199 | /** | ||
| 200 | * htimmer_expire_entry - called immediately before the hrtimer callback | ||
| 201 | * @timer: pointer to struct hrtimer | ||
| 202 | * @now: pointer to variable which contains current time of the | ||
| 203 | * timers base. | ||
| 204 | * | ||
| 205 | * Allows to determine the timer latency. | ||
| 206 | */ | ||
| 207 | TRACE_EVENT(hrtimer_expire_entry, | ||
| 208 | |||
| 209 | TP_PROTO(struct hrtimer *timer, ktime_t *now), | ||
| 210 | |||
| 211 | TP_ARGS(timer, now), | ||
| 212 | |||
| 213 | TP_STRUCT__entry( | ||
| 214 | __field( void *, timer ) | ||
| 215 | __field( s64, now ) | ||
| 216 | ), | ||
| 217 | |||
| 218 | TP_fast_assign( | ||
| 219 | __entry->timer = timer; | ||
| 220 | __entry->now = now->tv64; | ||
| 221 | ), | ||
| 222 | |||
| 223 | TP_printk("hrtimer %p, now %llu", __entry->timer, | ||
| 224 | (unsigned long long)ktime_to_ns((ktime_t) { | ||
| 225 | .tv64 = __entry->now })) | ||
| 226 | ); | ||
| 227 | |||
| 228 | /** | ||
| 229 | * hrtimer_expire_exit - called immediately after the hrtimer callback returns | ||
| 230 | * @timer: pointer to struct hrtimer | ||
| 231 | * | ||
| 232 | * When used in combination with the hrtimer_expire_entry tracepoint we can | ||
| 233 | * determine the runtime of the callback function. | ||
| 234 | */ | ||
| 235 | TRACE_EVENT(hrtimer_expire_exit, | ||
| 236 | |||
| 237 | TP_PROTO(struct hrtimer *timer), | ||
| 238 | |||
| 239 | TP_ARGS(timer), | ||
| 240 | |||
| 241 | TP_STRUCT__entry( | ||
| 242 | __field( void *, timer ) | ||
| 243 | ), | ||
| 244 | |||
| 245 | TP_fast_assign( | ||
| 246 | __entry->timer = timer; | ||
| 247 | ), | ||
| 248 | |||
| 249 | TP_printk("hrtimer %p", __entry->timer) | ||
| 250 | ); | ||
| 251 | |||
| 252 | /** | ||
| 253 | * hrtimer_cancel - called when the hrtimer is canceled | ||
| 254 | * @timer: pointer to struct hrtimer | ||
| 255 | */ | ||
| 256 | TRACE_EVENT(hrtimer_cancel, | ||
| 257 | |||
| 258 | TP_PROTO(struct hrtimer *timer), | ||
| 259 | |||
| 260 | TP_ARGS(timer), | ||
| 261 | |||
| 262 | TP_STRUCT__entry( | ||
| 263 | __field( void *, timer ) | ||
| 264 | ), | ||
| 265 | |||
| 266 | TP_fast_assign( | ||
| 267 | __entry->timer = timer; | ||
| 268 | ), | ||
| 269 | |||
| 270 | TP_printk("hrtimer %p", __entry->timer) | ||
| 271 | ); | ||
| 272 | |||
| 273 | /** | ||
| 274 | * itimer_state - called when itimer is started or canceled | ||
| 275 | * @which: name of the interval timer | ||
| 276 | * @value: the itimers value, itimer is canceled if value->it_value is | ||
| 277 | * zero, otherwise it is started | ||
| 278 | * @expires: the itimers expiry time | ||
| 279 | */ | ||
| 280 | TRACE_EVENT(itimer_state, | ||
| 281 | |||
| 282 | TP_PROTO(int which, const struct itimerval *const value, | ||
| 283 | cputime_t expires), | ||
| 284 | |||
| 285 | TP_ARGS(which, value, expires), | ||
| 286 | |||
| 287 | TP_STRUCT__entry( | ||
| 288 | __field( int, which ) | ||
| 289 | __field( cputime_t, expires ) | ||
| 290 | __field( long, value_sec ) | ||
| 291 | __field( long, value_usec ) | ||
| 292 | __field( long, interval_sec ) | ||
| 293 | __field( long, interval_usec ) | ||
| 294 | ), | ||
| 295 | |||
| 296 | TP_fast_assign( | ||
| 297 | __entry->which = which; | ||
| 298 | __entry->expires = expires; | ||
| 299 | __entry->value_sec = value->it_value.tv_sec; | ||
| 300 | __entry->value_usec = value->it_value.tv_usec; | ||
| 301 | __entry->interval_sec = value->it_interval.tv_sec; | ||
| 302 | __entry->interval_usec = value->it_interval.tv_usec; | ||
| 303 | ), | ||
| 304 | |||
| 305 | TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu", | ||
| 306 | __entry->which, __entry->expires, | ||
| 307 | __entry->value_sec, __entry->value_usec, | ||
| 308 | __entry->interval_sec, __entry->interval_usec) | ||
| 309 | ); | ||
| 310 | |||
| 311 | /** | ||
| 312 | * itimer_expire - called when itimer expires | ||
| 313 | * @which: type of the interval timer | ||
| 314 | * @pid: pid of the process which owns the timer | ||
| 315 | * @now: current time, used to calculate the latency of itimer | ||
| 316 | */ | ||
| 317 | TRACE_EVENT(itimer_expire, | ||
| 318 | |||
| 319 | TP_PROTO(int which, struct pid *pid, cputime_t now), | ||
| 320 | |||
| 321 | TP_ARGS(which, pid, now), | ||
| 322 | |||
| 323 | TP_STRUCT__entry( | ||
| 324 | __field( int , which ) | ||
| 325 | __field( pid_t, pid ) | ||
| 326 | __field( cputime_t, now ) | ||
| 327 | ), | ||
| 328 | |||
| 329 | TP_fast_assign( | ||
| 330 | __entry->which = which; | ||
| 331 | __entry->now = now; | ||
| 332 | __entry->pid = pid_nr(pid); | ||
| 333 | ), | ||
| 334 | |||
| 335 | TP_printk("which %d, pid %d, now %lu", __entry->which, | ||
| 336 | (int) __entry->pid, __entry->now) | ||
| 337 | ); | ||
| 338 | |||
| 339 | #endif /* _TRACE_TIMER_H */ | ||
| 340 | |||
| 341 | /* This part must be outside protection */ | ||
| 342 | #include <trace/define_trace.h> | ||
diff --git a/kernel/fork.c b/kernel/fork.c index 8f45b0ebdda7..51ad0b0b7266 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -63,6 +63,7 @@ | |||
| 63 | #include <linux/fs_struct.h> | 63 | #include <linux/fs_struct.h> |
| 64 | #include <linux/magic.h> | 64 | #include <linux/magic.h> |
| 65 | #include <linux/perf_event.h> | 65 | #include <linux/perf_event.h> |
| 66 | #include <linux/posix-timers.h> | ||
| 66 | 67 | ||
| 67 | #include <asm/pgtable.h> | 68 | #include <asm/pgtable.h> |
| 68 | #include <asm/pgalloc.h> | 69 | #include <asm/pgalloc.h> |
| @@ -805,10 +806,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) | |||
| 805 | thread_group_cputime_init(sig); | 806 | thread_group_cputime_init(sig); |
| 806 | 807 | ||
| 807 | /* Expiration times and increments. */ | 808 | /* Expiration times and increments. */ |
| 808 | sig->it_virt_expires = cputime_zero; | 809 | sig->it[CPUCLOCK_PROF].expires = cputime_zero; |
| 809 | sig->it_virt_incr = cputime_zero; | 810 | sig->it[CPUCLOCK_PROF].incr = cputime_zero; |
| 810 | sig->it_prof_expires = cputime_zero; | 811 | sig->it[CPUCLOCK_VIRT].expires = cputime_zero; |
| 811 | sig->it_prof_incr = cputime_zero; | 812 | sig->it[CPUCLOCK_VIRT].incr = cputime_zero; |
| 812 | 813 | ||
| 813 | /* Cached expiration times. */ | 814 | /* Cached expiration times. */ |
| 814 | sig->cputime_expires.prof_exp = cputime_zero; | 815 | sig->cputime_expires.prof_exp = cputime_zero; |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index c03f221fee44..e5d98ce50f89 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -48,6 +48,8 @@ | |||
| 48 | 48 | ||
| 49 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
| 50 | 50 | ||
| 51 | #include <trace/events/timer.h> | ||
| 52 | |||
| 51 | /* | 53 | /* |
| 52 | * The timer bases: | 54 | * The timer bases: |
| 53 | * | 55 | * |
| @@ -442,6 +444,26 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { } | |||
| 442 | static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } | 444 | static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } |
| 443 | #endif | 445 | #endif |
| 444 | 446 | ||
| 447 | static inline void | ||
| 448 | debug_init(struct hrtimer *timer, clockid_t clockid, | ||
| 449 | enum hrtimer_mode mode) | ||
| 450 | { | ||
| 451 | debug_hrtimer_init(timer); | ||
| 452 | trace_hrtimer_init(timer, clockid, mode); | ||
| 453 | } | ||
| 454 | |||
| 455 | static inline void debug_activate(struct hrtimer *timer) | ||
| 456 | { | ||
| 457 | debug_hrtimer_activate(timer); | ||
| 458 | trace_hrtimer_start(timer); | ||
| 459 | } | ||
| 460 | |||
| 461 | static inline void debug_deactivate(struct hrtimer *timer) | ||
| 462 | { | ||
| 463 | debug_hrtimer_deactivate(timer); | ||
| 464 | trace_hrtimer_cancel(timer); | ||
| 465 | } | ||
| 466 | |||
| 445 | /* High resolution timer related functions */ | 467 | /* High resolution timer related functions */ |
| 446 | #ifdef CONFIG_HIGH_RES_TIMERS | 468 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 447 | 469 | ||
| @@ -798,7 +820,7 @@ static int enqueue_hrtimer(struct hrtimer *timer, | |||
| 798 | struct hrtimer *entry; | 820 | struct hrtimer *entry; |
| 799 | int leftmost = 1; | 821 | int leftmost = 1; |
| 800 | 822 | ||
| 801 | debug_hrtimer_activate(timer); | 823 | debug_activate(timer); |
| 802 | 824 | ||
| 803 | /* | 825 | /* |
| 804 | * Find the right place in the rbtree: | 826 | * Find the right place in the rbtree: |
| @@ -884,7 +906,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | |||
| 884 | * reprogramming happens in the interrupt handler. This is a | 906 | * reprogramming happens in the interrupt handler. This is a |
| 885 | * rare case and less expensive than a smp call. | 907 | * rare case and less expensive than a smp call. |
| 886 | */ | 908 | */ |
| 887 | debug_hrtimer_deactivate(timer); | 909 | debug_deactivate(timer); |
| 888 | timer_stats_hrtimer_clear_start_info(timer); | 910 | timer_stats_hrtimer_clear_start_info(timer); |
| 889 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); | 911 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); |
| 890 | __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, | 912 | __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, |
| @@ -1117,7 +1139,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |||
| 1117 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | 1139 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
| 1118 | enum hrtimer_mode mode) | 1140 | enum hrtimer_mode mode) |
| 1119 | { | 1141 | { |
| 1120 | debug_hrtimer_init(timer); | 1142 | debug_init(timer, clock_id, mode); |
| 1121 | __hrtimer_init(timer, clock_id, mode); | 1143 | __hrtimer_init(timer, clock_id, mode); |
| 1122 | } | 1144 | } |
| 1123 | EXPORT_SYMBOL_GPL(hrtimer_init); | 1145 | EXPORT_SYMBOL_GPL(hrtimer_init); |
| @@ -1141,7 +1163,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) | |||
| 1141 | } | 1163 | } |
| 1142 | EXPORT_SYMBOL_GPL(hrtimer_get_res); | 1164 | EXPORT_SYMBOL_GPL(hrtimer_get_res); |
| 1143 | 1165 | ||
| 1144 | static void __run_hrtimer(struct hrtimer *timer) | 1166 | static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) |
| 1145 | { | 1167 | { |
| 1146 | struct hrtimer_clock_base *base = timer->base; | 1168 | struct hrtimer_clock_base *base = timer->base; |
| 1147 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; | 1169 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; |
| @@ -1150,7 +1172,7 @@ static void __run_hrtimer(struct hrtimer *timer) | |||
| 1150 | 1172 | ||
| 1151 | WARN_ON(!irqs_disabled()); | 1173 | WARN_ON(!irqs_disabled()); |
| 1152 | 1174 | ||
| 1153 | debug_hrtimer_deactivate(timer); | 1175 | debug_deactivate(timer); |
| 1154 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); | 1176 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); |
| 1155 | timer_stats_account_hrtimer(timer); | 1177 | timer_stats_account_hrtimer(timer); |
| 1156 | fn = timer->function; | 1178 | fn = timer->function; |
| @@ -1161,7 +1183,9 @@ static void __run_hrtimer(struct hrtimer *timer) | |||
| 1161 | * the timer base. | 1183 | * the timer base. |
| 1162 | */ | 1184 | */ |
| 1163 | spin_unlock(&cpu_base->lock); | 1185 | spin_unlock(&cpu_base->lock); |
| 1186 | trace_hrtimer_expire_entry(timer, now); | ||
| 1164 | restart = fn(timer); | 1187 | restart = fn(timer); |
| 1188 | trace_hrtimer_expire_exit(timer); | ||
| 1165 | spin_lock(&cpu_base->lock); | 1189 | spin_lock(&cpu_base->lock); |
| 1166 | 1190 | ||
| 1167 | /* | 1191 | /* |
| @@ -1272,7 +1296,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
| 1272 | break; | 1296 | break; |
| 1273 | } | 1297 | } |
| 1274 | 1298 | ||
| 1275 | __run_hrtimer(timer); | 1299 | __run_hrtimer(timer, &basenow); |
| 1276 | } | 1300 | } |
| 1277 | base++; | 1301 | base++; |
| 1278 | } | 1302 | } |
| @@ -1394,7 +1418,7 @@ void hrtimer_run_queues(void) | |||
| 1394 | hrtimer_get_expires_tv64(timer)) | 1418 | hrtimer_get_expires_tv64(timer)) |
| 1395 | break; | 1419 | break; |
| 1396 | 1420 | ||
| 1397 | __run_hrtimer(timer); | 1421 | __run_hrtimer(timer, &base->softirq_time); |
| 1398 | } | 1422 | } |
| 1399 | spin_unlock(&cpu_base->lock); | 1423 | spin_unlock(&cpu_base->lock); |
| 1400 | } | 1424 | } |
| @@ -1571,7 +1595,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | |||
| 1571 | while ((node = rb_first(&old_base->active))) { | 1595 | while ((node = rb_first(&old_base->active))) { |
| 1572 | timer = rb_entry(node, struct hrtimer, node); | 1596 | timer = rb_entry(node, struct hrtimer, node); |
| 1573 | BUG_ON(hrtimer_callback_running(timer)); | 1597 | BUG_ON(hrtimer_callback_running(timer)); |
| 1574 | debug_hrtimer_deactivate(timer); | 1598 | debug_deactivate(timer); |
| 1575 | 1599 | ||
| 1576 | /* | 1600 | /* |
| 1577 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the | 1601 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the |
diff --git a/kernel/itimer.c b/kernel/itimer.c index 58762f7077ec..b03451ede528 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/time.h> | 12 | #include <linux/time.h> |
| 13 | #include <linux/posix-timers.h> | 13 | #include <linux/posix-timers.h> |
| 14 | #include <linux/hrtimer.h> | 14 | #include <linux/hrtimer.h> |
| 15 | #include <trace/events/timer.h> | ||
| 15 | 16 | ||
| 16 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
| 17 | 18 | ||
| @@ -41,10 +42,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer) | |||
| 41 | return ktime_to_timeval(rem); | 42 | return ktime_to_timeval(rem); |
| 42 | } | 43 | } |
| 43 | 44 | ||
| 45 | static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, | ||
| 46 | struct itimerval *const value) | ||
| 47 | { | ||
| 48 | cputime_t cval, cinterval; | ||
| 49 | struct cpu_itimer *it = &tsk->signal->it[clock_id]; | ||
| 50 | |||
| 51 | spin_lock_irq(&tsk->sighand->siglock); | ||
| 52 | |||
| 53 | cval = it->expires; | ||
| 54 | cinterval = it->incr; | ||
| 55 | if (!cputime_eq(cval, cputime_zero)) { | ||
| 56 | struct task_cputime cputime; | ||
| 57 | cputime_t t; | ||
| 58 | |||
| 59 | thread_group_cputimer(tsk, &cputime); | ||
| 60 | if (clock_id == CPUCLOCK_PROF) | ||
| 61 | t = cputime_add(cputime.utime, cputime.stime); | ||
| 62 | else | ||
| 63 | /* CPUCLOCK_VIRT */ | ||
| 64 | t = cputime.utime; | ||
| 65 | |||
| 66 | if (cputime_le(cval, t)) | ||
| 67 | /* about to fire */ | ||
| 68 | cval = cputime_one_jiffy; | ||
| 69 | else | ||
| 70 | cval = cputime_sub(cval, t); | ||
| 71 | } | ||
| 72 | |||
| 73 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 74 | |||
| 75 | cputime_to_timeval(cval, &value->it_value); | ||
| 76 | cputime_to_timeval(cinterval, &value->it_interval); | ||
| 77 | } | ||
| 78 | |||
| 44 | int do_getitimer(int which, struct itimerval *value) | 79 | int do_getitimer(int which, struct itimerval *value) |
| 45 | { | 80 | { |
| 46 | struct task_struct *tsk = current; | 81 | struct task_struct *tsk = current; |
| 47 | cputime_t cinterval, cval; | ||
| 48 | 82 | ||
| 49 | switch (which) { | 83 | switch (which) { |
| 50 | case ITIMER_REAL: | 84 | case ITIMER_REAL: |
| @@ -55,44 +89,10 @@ int do_getitimer(int which, struct itimerval *value) | |||
| 55 | spin_unlock_irq(&tsk->sighand->siglock); | 89 | spin_unlock_irq(&tsk->sighand->siglock); |
| 56 | break; | 90 | break; |
| 57 | case ITIMER_VIRTUAL: | 91 | case ITIMER_VIRTUAL: |
| 58 | spin_lock_irq(&tsk->sighand->siglock); | 92 | get_cpu_itimer(tsk, CPUCLOCK_VIRT, value); |
| 59 | cval = tsk->signal->it_virt_expires; | ||
| 60 | cinterval = tsk->signal->it_virt_incr; | ||
| 61 | if (!cputime_eq(cval, cputime_zero)) { | ||
| 62 | struct task_cputime cputime; | ||
| 63 | cputime_t utime; | ||
| 64 | |||
| 65 | thread_group_cputimer(tsk, &cputime); | ||
| 66 | utime = cputime.utime; | ||
| 67 | if (cputime_le(cval, utime)) { /* about to fire */ | ||
| 68 | cval = jiffies_to_cputime(1); | ||
| 69 | } else { | ||
| 70 | cval = cputime_sub(cval, utime); | ||
| 71 | } | ||
| 72 | } | ||
| 73 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 74 | cputime_to_timeval(cval, &value->it_value); | ||
| 75 | cputime_to_timeval(cinterval, &value->it_interval); | ||
| 76 | break; | 93 | break; |
| 77 | case ITIMER_PROF: | 94 | case ITIMER_PROF: |
| 78 | spin_lock_irq(&tsk->sighand->siglock); | 95 | get_cpu_itimer(tsk, CPUCLOCK_PROF, value); |
| 79 | cval = tsk->signal->it_prof_expires; | ||
| 80 | cinterval = tsk->signal->it_prof_incr; | ||
| 81 | if (!cputime_eq(cval, cputime_zero)) { | ||
| 82 | struct task_cputime times; | ||
| 83 | cputime_t ptime; | ||
| 84 | |||
| 85 | thread_group_cputimer(tsk, &times); | ||
| 86 | ptime = cputime_add(times.utime, times.stime); | ||
| 87 | if (cputime_le(cval, ptime)) { /* about to fire */ | ||
| 88 | cval = jiffies_to_cputime(1); | ||
| 89 | } else { | ||
| 90 | cval = cputime_sub(cval, ptime); | ||
| 91 | } | ||
| 92 | } | ||
| 93 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 94 | cputime_to_timeval(cval, &value->it_value); | ||
| 95 | cputime_to_timeval(cinterval, &value->it_interval); | ||
| 96 | break; | 96 | break; |
| 97 | default: | 97 | default: |
| 98 | return(-EINVAL); | 98 | return(-EINVAL); |
| @@ -123,11 +123,62 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer) | |||
| 123 | struct signal_struct *sig = | 123 | struct signal_struct *sig = |
| 124 | container_of(timer, struct signal_struct, real_timer); | 124 | container_of(timer, struct signal_struct, real_timer); |
| 125 | 125 | ||
| 126 | trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0); | ||
| 126 | kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid); | 127 | kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid); |
| 127 | 128 | ||
| 128 | return HRTIMER_NORESTART; | 129 | return HRTIMER_NORESTART; |
| 129 | } | 130 | } |
| 130 | 131 | ||
| 132 | static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns) | ||
| 133 | { | ||
| 134 | struct timespec ts; | ||
| 135 | s64 cpu_ns; | ||
| 136 | |||
| 137 | cputime_to_timespec(ct, &ts); | ||
| 138 | cpu_ns = timespec_to_ns(&ts); | ||
| 139 | |||
| 140 | return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns; | ||
| 141 | } | ||
| 142 | |||
| 143 | static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, | ||
| 144 | const struct itimerval *const value, | ||
| 145 | struct itimerval *const ovalue) | ||
| 146 | { | ||
| 147 | cputime_t cval, nval, cinterval, ninterval; | ||
| 148 | s64 ns_ninterval, ns_nval; | ||
| 149 | struct cpu_itimer *it = &tsk->signal->it[clock_id]; | ||
| 150 | |||
| 151 | nval = timeval_to_cputime(&value->it_value); | ||
| 152 | ns_nval = timeval_to_ns(&value->it_value); | ||
| 153 | ninterval = timeval_to_cputime(&value->it_interval); | ||
| 154 | ns_ninterval = timeval_to_ns(&value->it_interval); | ||
| 155 | |||
| 156 | it->incr_error = cputime_sub_ns(ninterval, ns_ninterval); | ||
| 157 | it->error = cputime_sub_ns(nval, ns_nval); | ||
| 158 | |||
| 159 | spin_lock_irq(&tsk->sighand->siglock); | ||
| 160 | |||
| 161 | cval = it->expires; | ||
| 162 | cinterval = it->incr; | ||
| 163 | if (!cputime_eq(cval, cputime_zero) || | ||
| 164 | !cputime_eq(nval, cputime_zero)) { | ||
| 165 | if (cputime_gt(nval, cputime_zero)) | ||
| 166 | nval = cputime_add(nval, cputime_one_jiffy); | ||
| 167 | set_process_cpu_timer(tsk, clock_id, &nval, &cval); | ||
| 168 | } | ||
| 169 | it->expires = nval; | ||
| 170 | it->incr = ninterval; | ||
| 171 | trace_itimer_state(clock_id == CPUCLOCK_VIRT ? | ||
| 172 | ITIMER_VIRTUAL : ITIMER_PROF, value, nval); | ||
| 173 | |||
| 174 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 175 | |||
| 176 | if (ovalue) { | ||
| 177 | cputime_to_timeval(cval, &ovalue->it_value); | ||
| 178 | cputime_to_timeval(cinterval, &ovalue->it_interval); | ||
| 179 | } | ||
| 180 | } | ||
| 181 | |||
| 131 | /* | 182 | /* |
| 132 | * Returns true if the timeval is in canonical form | 183 | * Returns true if the timeval is in canonical form |
| 133 | */ | 184 | */ |
| @@ -139,7 +190,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) | |||
| 139 | struct task_struct *tsk = current; | 190 | struct task_struct *tsk = current; |
| 140 | struct hrtimer *timer; | 191 | struct hrtimer *timer; |
| 141 | ktime_t expires; | 192 | ktime_t expires; |
| 142 | cputime_t cval, cinterval, nval, ninterval; | ||
| 143 | 193 | ||
| 144 | /* | 194 | /* |
| 145 | * Validate the timevals in value. | 195 | * Validate the timevals in value. |
| @@ -171,51 +221,14 @@ again: | |||
| 171 | } else | 221 | } else |
| 172 | tsk->signal->it_real_incr.tv64 = 0; | 222 | tsk->signal->it_real_incr.tv64 = 0; |
| 173 | 223 | ||
| 224 | trace_itimer_state(ITIMER_REAL, value, 0); | ||
| 174 | spin_unlock_irq(&tsk->sighand->siglock); | 225 | spin_unlock_irq(&tsk->sighand->siglock); |
| 175 | break; | 226 | break; |
| 176 | case ITIMER_VIRTUAL: | 227 | case ITIMER_VIRTUAL: |
| 177 | nval = timeval_to_cputime(&value->it_value); | 228 | set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue); |
| 178 | ninterval = timeval_to_cputime(&value->it_interval); | ||
| 179 | spin_lock_irq(&tsk->sighand->siglock); | ||
| 180 | cval = tsk->signal->it_virt_expires; | ||
| 181 | cinterval = tsk->signal->it_virt_incr; | ||
| 182 | if (!cputime_eq(cval, cputime_zero) || | ||
| 183 | !cputime_eq(nval, cputime_zero)) { | ||
| 184 | if (cputime_gt(nval, cputime_zero)) | ||
| 185 | nval = cputime_add(nval, | ||
| 186 | jiffies_to_cputime(1)); | ||
| 187 | set_process_cpu_timer(tsk, CPUCLOCK_VIRT, | ||
| 188 | &nval, &cval); | ||
| 189 | } | ||
| 190 | tsk->signal->it_virt_expires = nval; | ||
| 191 | tsk->signal->it_virt_incr = ninterval; | ||
| 192 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 193 | if (ovalue) { | ||
| 194 | cputime_to_timeval(cval, &ovalue->it_value); | ||
| 195 | cputime_to_timeval(cinterval, &ovalue->it_interval); | ||
| 196 | } | ||
| 197 | break; | 229 | break; |
| 198 | case ITIMER_PROF: | 230 | case ITIMER_PROF: |
| 199 | nval = timeval_to_cputime(&value->it_value); | 231 | set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue); |
| 200 | ninterval = timeval_to_cputime(&value->it_interval); | ||
| 201 | spin_lock_irq(&tsk->sighand->siglock); | ||
| 202 | cval = tsk->signal->it_prof_expires; | ||
| 203 | cinterval = tsk->signal->it_prof_incr; | ||
| 204 | if (!cputime_eq(cval, cputime_zero) || | ||
| 205 | !cputime_eq(nval, cputime_zero)) { | ||
| 206 | if (cputime_gt(nval, cputime_zero)) | ||
| 207 | nval = cputime_add(nval, | ||
| 208 | jiffies_to_cputime(1)); | ||
| 209 | set_process_cpu_timer(tsk, CPUCLOCK_PROF, | ||
| 210 | &nval, &cval); | ||
| 211 | } | ||
| 212 | tsk->signal->it_prof_expires = nval; | ||
| 213 | tsk->signal->it_prof_incr = ninterval; | ||
| 214 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 215 | if (ovalue) { | ||
| 216 | cputime_to_timeval(cval, &ovalue->it_value); | ||
| 217 | cputime_to_timeval(cinterval, &ovalue->it_interval); | ||
| 218 | } | ||
| 219 | break; | 232 | break; |
| 220 | default: | 233 | default: |
| 221 | return -EINVAL; | 234 | return -EINVAL; |
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index e33a21cb9407..5c9dc228747b 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -8,17 +8,18 @@ | |||
| 8 | #include <linux/math64.h> | 8 | #include <linux/math64.h> |
| 9 | #include <asm/uaccess.h> | 9 | #include <asm/uaccess.h> |
| 10 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
| 11 | #include <trace/events/timer.h> | ||
| 11 | 12 | ||
| 12 | /* | 13 | /* |
| 13 | * Called after updating RLIMIT_CPU to set timer expiration if necessary. | 14 | * Called after updating RLIMIT_CPU to set timer expiration if necessary. |
| 14 | */ | 15 | */ |
| 15 | void update_rlimit_cpu(unsigned long rlim_new) | 16 | void update_rlimit_cpu(unsigned long rlim_new) |
| 16 | { | 17 | { |
| 17 | cputime_t cputime; | 18 | cputime_t cputime = secs_to_cputime(rlim_new); |
| 19 | struct signal_struct *const sig = current->signal; | ||
| 18 | 20 | ||
| 19 | cputime = secs_to_cputime(rlim_new); | 21 | if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) || |
| 20 | if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || | 22 | cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) { |
| 21 | cputime_gt(current->signal->it_prof_expires, cputime)) { | ||
| 22 | spin_lock_irq(&current->sighand->siglock); | 23 | spin_lock_irq(&current->sighand->siglock); |
| 23 | set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); | 24 | set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); |
| 24 | spin_unlock_irq(&current->sighand->siglock); | 25 | spin_unlock_irq(&current->sighand->siglock); |
| @@ -542,6 +543,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) | |||
| 542 | now); | 543 | now); |
| 543 | } | 544 | } |
| 544 | 545 | ||
| 546 | static inline int expires_gt(cputime_t expires, cputime_t new_exp) | ||
| 547 | { | ||
| 548 | return cputime_eq(expires, cputime_zero) || | ||
| 549 | cputime_gt(expires, new_exp); | ||
| 550 | } | ||
| 551 | |||
| 552 | static inline int expires_le(cputime_t expires, cputime_t new_exp) | ||
| 553 | { | ||
| 554 | return !cputime_eq(expires, cputime_zero) && | ||
| 555 | cputime_le(expires, new_exp); | ||
| 556 | } | ||
| 545 | /* | 557 | /* |
| 546 | * Insert the timer on the appropriate list before any timers that | 558 | * Insert the timer on the appropriate list before any timers that |
| 547 | * expire later. This must be called with the tasklist_lock held | 559 | * expire later. This must be called with the tasklist_lock held |
| @@ -586,34 +598,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) | |||
| 586 | */ | 598 | */ |
| 587 | 599 | ||
| 588 | if (CPUCLOCK_PERTHREAD(timer->it_clock)) { | 600 | if (CPUCLOCK_PERTHREAD(timer->it_clock)) { |
| 601 | union cpu_time_count *exp = &nt->expires; | ||
| 602 | |||
| 589 | switch (CPUCLOCK_WHICH(timer->it_clock)) { | 603 | switch (CPUCLOCK_WHICH(timer->it_clock)) { |
| 590 | default: | 604 | default: |
| 591 | BUG(); | 605 | BUG(); |
| 592 | case CPUCLOCK_PROF: | 606 | case CPUCLOCK_PROF: |
| 593 | if (cputime_eq(p->cputime_expires.prof_exp, | 607 | if (expires_gt(p->cputime_expires.prof_exp, |
| 594 | cputime_zero) || | 608 | exp->cpu)) |
| 595 | cputime_gt(p->cputime_expires.prof_exp, | 609 | p->cputime_expires.prof_exp = exp->cpu; |
| 596 | nt->expires.cpu)) | ||
| 597 | p->cputime_expires.prof_exp = | ||
| 598 | nt->expires.cpu; | ||
| 599 | break; | 610 | break; |
| 600 | case CPUCLOCK_VIRT: | 611 | case CPUCLOCK_VIRT: |
| 601 | if (cputime_eq(p->cputime_expires.virt_exp, | 612 | if (expires_gt(p->cputime_expires.virt_exp, |
| 602 | cputime_zero) || | 613 | exp->cpu)) |
| 603 | cputime_gt(p->cputime_expires.virt_exp, | 614 | p->cputime_expires.virt_exp = exp->cpu; |
| 604 | nt->expires.cpu)) | ||
| 605 | p->cputime_expires.virt_exp = | ||
| 606 | nt->expires.cpu; | ||
| 607 | break; | 615 | break; |
| 608 | case CPUCLOCK_SCHED: | 616 | case CPUCLOCK_SCHED: |
| 609 | if (p->cputime_expires.sched_exp == 0 || | 617 | if (p->cputime_expires.sched_exp == 0 || |
| 610 | p->cputime_expires.sched_exp > | 618 | p->cputime_expires.sched_exp > exp->sched) |
| 611 | nt->expires.sched) | ||
| 612 | p->cputime_expires.sched_exp = | 619 | p->cputime_expires.sched_exp = |
| 613 | nt->expires.sched; | 620 | exp->sched; |
| 614 | break; | 621 | break; |
| 615 | } | 622 | } |
| 616 | } else { | 623 | } else { |
| 624 | struct signal_struct *const sig = p->signal; | ||
| 625 | union cpu_time_count *exp = &timer->it.cpu.expires; | ||
| 626 | |||
| 617 | /* | 627 | /* |
| 618 | * For a process timer, set the cached expiration time. | 628 | * For a process timer, set the cached expiration time. |
| 619 | */ | 629 | */ |
| @@ -621,30 +631,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) | |||
| 621 | default: | 631 | default: |
| 622 | BUG(); | 632 | BUG(); |
| 623 | case CPUCLOCK_VIRT: | 633 | case CPUCLOCK_VIRT: |
| 624 | if (!cputime_eq(p->signal->it_virt_expires, | 634 | if (expires_le(sig->it[CPUCLOCK_VIRT].expires, |
| 625 | cputime_zero) && | 635 | exp->cpu)) |
| 626 | cputime_lt(p->signal->it_virt_expires, | ||
| 627 | timer->it.cpu.expires.cpu)) | ||
| 628 | break; | 636 | break; |
| 629 | p->signal->cputime_expires.virt_exp = | 637 | sig->cputime_expires.virt_exp = exp->cpu; |
| 630 | timer->it.cpu.expires.cpu; | ||
| 631 | break; | 638 | break; |
| 632 | case CPUCLOCK_PROF: | 639 | case CPUCLOCK_PROF: |
| 633 | if (!cputime_eq(p->signal->it_prof_expires, | 640 | if (expires_le(sig->it[CPUCLOCK_PROF].expires, |
| 634 | cputime_zero) && | 641 | exp->cpu)) |
| 635 | cputime_lt(p->signal->it_prof_expires, | ||
| 636 | timer->it.cpu.expires.cpu)) | ||
| 637 | break; | 642 | break; |
| 638 | i = p->signal->rlim[RLIMIT_CPU].rlim_cur; | 643 | i = sig->rlim[RLIMIT_CPU].rlim_cur; |
| 639 | if (i != RLIM_INFINITY && | 644 | if (i != RLIM_INFINITY && |
| 640 | i <= cputime_to_secs(timer->it.cpu.expires.cpu)) | 645 | i <= cputime_to_secs(exp->cpu)) |
| 641 | break; | 646 | break; |
| 642 | p->signal->cputime_expires.prof_exp = | 647 | sig->cputime_expires.prof_exp = exp->cpu; |
| 643 | timer->it.cpu.expires.cpu; | ||
| 644 | break; | 648 | break; |
| 645 | case CPUCLOCK_SCHED: | 649 | case CPUCLOCK_SCHED: |
| 646 | p->signal->cputime_expires.sched_exp = | 650 | sig->cputime_expires.sched_exp = exp->sched; |
| 647 | timer->it.cpu.expires.sched; | ||
| 648 | break; | 651 | break; |
| 649 | } | 652 | } |
| 650 | } | 653 | } |
| @@ -1071,6 +1074,40 @@ static void stop_process_timers(struct task_struct *tsk) | |||
| 1071 | spin_unlock_irqrestore(&cputimer->lock, flags); | 1074 | spin_unlock_irqrestore(&cputimer->lock, flags); |
| 1072 | } | 1075 | } |
| 1073 | 1076 | ||
| 1077 | static u32 onecputick; | ||
| 1078 | |||
| 1079 | static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, | ||
| 1080 | cputime_t *expires, cputime_t cur_time, int signo) | ||
| 1081 | { | ||
| 1082 | if (cputime_eq(it->expires, cputime_zero)) | ||
| 1083 | return; | ||
| 1084 | |||
| 1085 | if (cputime_ge(cur_time, it->expires)) { | ||
| 1086 | if (!cputime_eq(it->incr, cputime_zero)) { | ||
| 1087 | it->expires = cputime_add(it->expires, it->incr); | ||
| 1088 | it->error += it->incr_error; | ||
| 1089 | if (it->error >= onecputick) { | ||
| 1090 | it->expires = cputime_sub(it->expires, | ||
| 1091 | cputime_one_jiffy); | ||
| 1092 | it->error -= onecputick; | ||
| 1093 | } | ||
| 1094 | } else { | ||
| 1095 | it->expires = cputime_zero; | ||
| 1096 | } | ||
| 1097 | |||
| 1098 | trace_itimer_expire(signo == SIGPROF ? | ||
| 1099 | ITIMER_PROF : ITIMER_VIRTUAL, | ||
| 1100 | tsk->signal->leader_pid, cur_time); | ||
| 1101 | __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | if (!cputime_eq(it->expires, cputime_zero) && | ||
| 1105 | (cputime_eq(*expires, cputime_zero) || | ||
| 1106 | cputime_lt(it->expires, *expires))) { | ||
| 1107 | *expires = it->expires; | ||
| 1108 | } | ||
| 1109 | } | ||
| 1110 | |||
| 1074 | /* | 1111 | /* |
| 1075 | * Check for any per-thread CPU timers that have fired and move them | 1112 | * Check for any per-thread CPU timers that have fired and move them |
| 1076 | * off the tsk->*_timers list onto the firing list. Per-thread timers | 1113 | * off the tsk->*_timers list onto the firing list. Per-thread timers |
| @@ -1090,10 +1127,10 @@ static void check_process_timers(struct task_struct *tsk, | |||
| 1090 | * Don't sample the current process CPU clocks if there are no timers. | 1127 | * Don't sample the current process CPU clocks if there are no timers. |
| 1091 | */ | 1128 | */ |
| 1092 | if (list_empty(&timers[CPUCLOCK_PROF]) && | 1129 | if (list_empty(&timers[CPUCLOCK_PROF]) && |
| 1093 | cputime_eq(sig->it_prof_expires, cputime_zero) && | 1130 | cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) && |
| 1094 | sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY && | 1131 | sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY && |
| 1095 | list_empty(&timers[CPUCLOCK_VIRT]) && | 1132 | list_empty(&timers[CPUCLOCK_VIRT]) && |
| 1096 | cputime_eq(sig->it_virt_expires, cputime_zero) && | 1133 | cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) && |
| 1097 | list_empty(&timers[CPUCLOCK_SCHED])) { | 1134 | list_empty(&timers[CPUCLOCK_SCHED])) { |
| 1098 | stop_process_timers(tsk); | 1135 | stop_process_timers(tsk); |
| 1099 | return; | 1136 | return; |
| @@ -1153,38 +1190,11 @@ static void check_process_timers(struct task_struct *tsk, | |||
| 1153 | /* | 1190 | /* |
| 1154 | * Check for the special case process timers. | 1191 | * Check for the special case process timers. |
| 1155 | */ | 1192 | */ |
| 1156 | if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { | 1193 | check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime, |
| 1157 | if (cputime_ge(ptime, sig->it_prof_expires)) { | 1194 | SIGPROF); |
| 1158 | /* ITIMER_PROF fires and reloads. */ | 1195 | check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime, |
| 1159 | sig->it_prof_expires = sig->it_prof_incr; | 1196 | SIGVTALRM); |
| 1160 | if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { | 1197 | |
| 1161 | sig->it_prof_expires = cputime_add( | ||
| 1162 | sig->it_prof_expires, ptime); | ||
| 1163 | } | ||
| 1164 | __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk); | ||
| 1165 | } | ||
| 1166 | if (!cputime_eq(sig->it_prof_expires, cputime_zero) && | ||
| 1167 | (cputime_eq(prof_expires, cputime_zero) || | ||
| 1168 | cputime_lt(sig->it_prof_expires, prof_expires))) { | ||
| 1169 | prof_expires = sig->it_prof_expires; | ||
| 1170 | } | ||
| 1171 | } | ||
| 1172 | if (!cputime_eq(sig->it_virt_expires, cputime_zero)) { | ||
| 1173 | if (cputime_ge(utime, sig->it_virt_expires)) { | ||
| 1174 | /* ITIMER_VIRTUAL fires and reloads. */ | ||
| 1175 | sig->it_virt_expires = sig->it_virt_incr; | ||
| 1176 | if (!cputime_eq(sig->it_virt_expires, cputime_zero)) { | ||
| 1177 | sig->it_virt_expires = cputime_add( | ||
| 1178 | sig->it_virt_expires, utime); | ||
| 1179 | } | ||
| 1180 | __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk); | ||
| 1181 | } | ||
| 1182 | if (!cputime_eq(sig->it_virt_expires, cputime_zero) && | ||
| 1183 | (cputime_eq(virt_expires, cputime_zero) || | ||
| 1184 | cputime_lt(sig->it_virt_expires, virt_expires))) { | ||
| 1185 | virt_expires = sig->it_virt_expires; | ||
| 1186 | } | ||
| 1187 | } | ||
| 1188 | if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { | 1198 | if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { |
| 1189 | unsigned long psecs = cputime_to_secs(ptime); | 1199 | unsigned long psecs = cputime_to_secs(ptime); |
| 1190 | cputime_t x; | 1200 | cputime_t x; |
| @@ -1457,7 +1467,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, | |||
| 1457 | if (!cputime_eq(*oldval, cputime_zero)) { | 1467 | if (!cputime_eq(*oldval, cputime_zero)) { |
| 1458 | if (cputime_le(*oldval, now.cpu)) { | 1468 | if (cputime_le(*oldval, now.cpu)) { |
| 1459 | /* Just about to fire. */ | 1469 | /* Just about to fire. */ |
| 1460 | *oldval = jiffies_to_cputime(1); | 1470 | *oldval = cputime_one_jiffy; |
| 1461 | } else { | 1471 | } else { |
| 1462 | *oldval = cputime_sub(*oldval, now.cpu); | 1472 | *oldval = cputime_sub(*oldval, now.cpu); |
| 1463 | } | 1473 | } |
| @@ -1703,10 +1713,15 @@ static __init int init_posix_cpu_timers(void) | |||
| 1703 | .nsleep = thread_cpu_nsleep, | 1713 | .nsleep = thread_cpu_nsleep, |
| 1704 | .nsleep_restart = thread_cpu_nsleep_restart, | 1714 | .nsleep_restart = thread_cpu_nsleep_restart, |
| 1705 | }; | 1715 | }; |
| 1716 | struct timespec ts; | ||
| 1706 | 1717 | ||
| 1707 | register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); | 1718 | register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); |
| 1708 | register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); | 1719 | register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); |
| 1709 | 1720 | ||
| 1721 | cputime_to_timespec(cputime_one_jiffy, &ts); | ||
| 1722 | onecputick = ts.tv_nsec; | ||
| 1723 | WARN_ON(ts.tv_sec != 0); | ||
| 1724 | |||
| 1710 | return 0; | 1725 | return 0; |
| 1711 | } | 1726 | } |
| 1712 | __initcall(init_posix_cpu_timers); | 1727 | __initcall(init_posix_cpu_timers); |
diff --git a/kernel/sched.c b/kernel/sched.c index 0ac9053c21d6..2f76e06bea58 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -5092,17 +5092,16 @@ void account_idle_time(cputime_t cputime) | |||
| 5092 | */ | 5092 | */ |
| 5093 | void account_process_tick(struct task_struct *p, int user_tick) | 5093 | void account_process_tick(struct task_struct *p, int user_tick) |
| 5094 | { | 5094 | { |
| 5095 | cputime_t one_jiffy = jiffies_to_cputime(1); | 5095 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); |
| 5096 | cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); | ||
| 5097 | struct rq *rq = this_rq(); | 5096 | struct rq *rq = this_rq(); |
| 5098 | 5097 | ||
| 5099 | if (user_tick) | 5098 | if (user_tick) |
| 5100 | account_user_time(p, one_jiffy, one_jiffy_scaled); | 5099 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); |
| 5101 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) | 5100 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) |
| 5102 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | 5101 | account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, |
| 5103 | one_jiffy_scaled); | 5102 | one_jiffy_scaled); |
| 5104 | else | 5103 | else |
| 5105 | account_idle_time(one_jiffy); | 5104 | account_idle_time(cputime_one_jiffy); |
| 5106 | } | 5105 | } |
| 5107 | 5106 | ||
| 5108 | /* | 5107 | /* |
diff --git a/kernel/timer.c b/kernel/timer.c index 811e5c391456..5db5a8d26811 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -46,6 +46,9 @@ | |||
| 46 | #include <asm/timex.h> | 46 | #include <asm/timex.h> |
| 47 | #include <asm/io.h> | 47 | #include <asm/io.h> |
| 48 | 48 | ||
| 49 | #define CREATE_TRACE_POINTS | ||
| 50 | #include <trace/events/timer.h> | ||
| 51 | |||
| 49 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; | 52 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; |
| 50 | 53 | ||
| 51 | EXPORT_SYMBOL(jiffies_64); | 54 | EXPORT_SYMBOL(jiffies_64); |
| @@ -521,6 +524,25 @@ static inline void debug_timer_activate(struct timer_list *timer) { } | |||
| 521 | static inline void debug_timer_deactivate(struct timer_list *timer) { } | 524 | static inline void debug_timer_deactivate(struct timer_list *timer) { } |
| 522 | #endif | 525 | #endif |
| 523 | 526 | ||
| 527 | static inline void debug_init(struct timer_list *timer) | ||
| 528 | { | ||
| 529 | debug_timer_init(timer); | ||
| 530 | trace_timer_init(timer); | ||
| 531 | } | ||
| 532 | |||
| 533 | static inline void | ||
| 534 | debug_activate(struct timer_list *timer, unsigned long expires) | ||
| 535 | { | ||
| 536 | debug_timer_activate(timer); | ||
| 537 | trace_timer_start(timer, expires); | ||
| 538 | } | ||
| 539 | |||
| 540 | static inline void debug_deactivate(struct timer_list *timer) | ||
| 541 | { | ||
| 542 | debug_timer_deactivate(timer); | ||
| 543 | trace_timer_cancel(timer); | ||
| 544 | } | ||
| 545 | |||
| 524 | static void __init_timer(struct timer_list *timer, | 546 | static void __init_timer(struct timer_list *timer, |
| 525 | const char *name, | 547 | const char *name, |
| 526 | struct lock_class_key *key) | 548 | struct lock_class_key *key) |
| @@ -549,7 +571,7 @@ void init_timer_key(struct timer_list *timer, | |||
| 549 | const char *name, | 571 | const char *name, |
| 550 | struct lock_class_key *key) | 572 | struct lock_class_key *key) |
| 551 | { | 573 | { |
| 552 | debug_timer_init(timer); | 574 | debug_init(timer); |
| 553 | __init_timer(timer, name, key); | 575 | __init_timer(timer, name, key); |
| 554 | } | 576 | } |
| 555 | EXPORT_SYMBOL(init_timer_key); | 577 | EXPORT_SYMBOL(init_timer_key); |
| @@ -568,7 +590,7 @@ static inline void detach_timer(struct timer_list *timer, | |||
| 568 | { | 590 | { |
| 569 | struct list_head *entry = &timer->entry; | 591 | struct list_head *entry = &timer->entry; |
| 570 | 592 | ||
| 571 | debug_timer_deactivate(timer); | 593 | debug_deactivate(timer); |
| 572 | 594 | ||
| 573 | __list_del(entry->prev, entry->next); | 595 | __list_del(entry->prev, entry->next); |
| 574 | if (clear_pending) | 596 | if (clear_pending) |
| @@ -632,7 +654,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, | |||
| 632 | goto out_unlock; | 654 | goto out_unlock; |
| 633 | } | 655 | } |
| 634 | 656 | ||
| 635 | debug_timer_activate(timer); | 657 | debug_activate(timer, expires); |
| 636 | 658 | ||
| 637 | new_base = __get_cpu_var(tvec_bases); | 659 | new_base = __get_cpu_var(tvec_bases); |
| 638 | 660 | ||
| @@ -787,7 +809,7 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
| 787 | BUG_ON(timer_pending(timer) || !timer->function); | 809 | BUG_ON(timer_pending(timer) || !timer->function); |
| 788 | spin_lock_irqsave(&base->lock, flags); | 810 | spin_lock_irqsave(&base->lock, flags); |
| 789 | timer_set_base(timer, base); | 811 | timer_set_base(timer, base); |
| 790 | debug_timer_activate(timer); | 812 | debug_activate(timer, timer->expires); |
| 791 | if (time_before(timer->expires, base->next_timer) && | 813 | if (time_before(timer->expires, base->next_timer) && |
| 792 | !tbase_get_deferrable(timer->base)) | 814 | !tbase_get_deferrable(timer->base)) |
| 793 | base->next_timer = timer->expires; | 815 | base->next_timer = timer->expires; |
| @@ -1000,7 +1022,9 @@ static inline void __run_timers(struct tvec_base *base) | |||
| 1000 | */ | 1022 | */ |
| 1001 | lock_map_acquire(&lockdep_map); | 1023 | lock_map_acquire(&lockdep_map); |
| 1002 | 1024 | ||
| 1025 | trace_timer_expire_entry(timer); | ||
| 1003 | fn(data); | 1026 | fn(data); |
| 1027 | trace_timer_expire_exit(timer); | ||
| 1004 | 1028 | ||
| 1005 | lock_map_release(&lockdep_map); | 1029 | lock_map_release(&lockdep_map); |
| 1006 | 1030 | ||
