Diffstat (limited to 'kernel/time')
-rw-r--r-- | kernel/time/clockevents.c | 46
-rw-r--r-- | kernel/time/clocksource.c | 123
-rw-r--r-- | kernel/time/tick-broadcast.c | 42
-rw-r--r-- | kernel/time/tick-common.c | 20
-rw-r--r-- | kernel/time/tick-internal.h | 1
-rw-r--r-- | kernel/time/tick-oneshot.c | 4
-rw-r--r-- | kernel/time/tick-sched.c | 141
-rw-r--r-- | kernel/time/timecompare.c | 8
-rw-r--r-- | kernel/time/timekeeping.c | 104
-rw-r--r-- | kernel/time/timer_list.c | 25
-rw-r--r-- | kernel/time/timer_stats.c | 18
11 files changed, 372 insertions, 160 deletions
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 620b58abdc32..d7395fdfb9f3 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/sysdev.h> | 20 | #include <linux/sysdev.h> |
21 | #include <linux/tick.h> | 21 | #include <linux/tick.h> |
22 | 22 | ||
23 | #include "tick-internal.h" | ||
24 | |||
23 | /* The registered clock event devices */ | 25 | /* The registered clock event devices */ |
24 | static LIST_HEAD(clockevent_devices); | 26 | static LIST_HEAD(clockevent_devices); |
25 | static LIST_HEAD(clockevents_released); | 27 | static LIST_HEAD(clockevents_released); |
@@ -28,7 +30,7 @@ static LIST_HEAD(clockevents_released); | |||
28 | static RAW_NOTIFIER_HEAD(clockevents_chain); | 30 | static RAW_NOTIFIER_HEAD(clockevents_chain); |
29 | 31 | ||
30 | /* Protection for the above */ | 32 | /* Protection for the above */ |
31 | static DEFINE_SPINLOCK(clockevents_lock); | 33 | static DEFINE_RAW_SPINLOCK(clockevents_lock); |
32 | 34 | ||
33 | /** | 35 | /** |
34 | * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds | 36 | * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds |
@@ -37,10 +39,9 @@ static DEFINE_SPINLOCK(clockevents_lock); | |||
37 | * | 39 | * |
38 | * Math helper, returns latch value converted to nanoseconds (bound checked) | 40 | * Math helper, returns latch value converted to nanoseconds (bound checked) |
39 | */ | 41 | */ |
40 | unsigned long clockevent_delta2ns(unsigned long latch, | 42 | u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) |
41 | struct clock_event_device *evt) | ||
42 | { | 43 | { |
43 | u64 clc = ((u64) latch << evt->shift); | 44 | u64 clc = (u64) latch << evt->shift; |
44 | 45 | ||
45 | if (unlikely(!evt->mult)) { | 46 | if (unlikely(!evt->mult)) { |
46 | evt->mult = 1; | 47 | evt->mult = 1; |
@@ -50,10 +51,10 @@ unsigned long clockevent_delta2ns(unsigned long latch, | |||
50 | do_div(clc, evt->mult); | 51 | do_div(clc, evt->mult); |
51 | if (clc < 1000) | 52 | if (clc < 1000) |
52 | clc = 1000; | 53 | clc = 1000; |
53 | if (clc > LONG_MAX) | 54 | if (clc > KTIME_MAX) |
54 | clc = LONG_MAX; | 55 | clc = KTIME_MAX; |
55 | 56 | ||
56 | return (unsigned long) clc; | 57 | return clc; |
57 | } | 58 | } |
58 | EXPORT_SYMBOL_GPL(clockevent_delta2ns); | 59 | EXPORT_SYMBOL_GPL(clockevent_delta2ns); |
59 | 60 | ||
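For reference, the conversion this hunk widens to u64 can be modelled in user space. The sketch below mirrors the math of clockevent_delta2ns() above (ticks scaled up by shift, divided by mult, clamped to [1000, KTIME_MAX]); the 10 MHz device and its mult/shift values are invented for illustration, not taken from the patch.

        /* user-space model of the delta2ns math; hypothetical device values */
        #include <stdint.h>
        #include <stdio.h>

        #define KTIME_MAX INT64_MAX             /* upper bound of ktime_t */

        static uint64_t delta2ns(uint64_t latch, uint32_t mult, uint32_t shift)
        {
                uint64_t clc = latch << shift;  /* device ticks, scaled up */

                if (mult == 0)                  /* same guard as the kernel code */
                        mult = 1;
                clc /= mult;                    /* do_div(clc, mult) in the kernel */
                if (clc < 1000)                 /* never program absurdly small deltas */
                        clc = 1000;
                if (clc > (uint64_t)KTIME_MAX)  /* clamp to ktime_t, not LONG_MAX */
                        clc = KTIME_MAX;
                return clc;
        }

        int main(void)
        {
                /* fictional 10 MHz event device: ticks = ns * mult >> shift,
                 * so mult / 2^shift ~= 0.01 */
                uint32_t mult = 42949673, shift = 32;

                printf("%llu ns\n",
                       (unsigned long long)delta2ns(0xffffffffULL, mult, shift));
                return 0;
        }

For a full 32-bit latch this prints roughly 429 seconds, a value the previous unsigned long return type (and LONG_MAX clamp) could not represent on 32-bit machines, which is what the switch to u64/KTIME_MAX addresses.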
@@ -140,9 +141,9 @@ int clockevents_register_notifier(struct notifier_block *nb) | |||
140 | unsigned long flags; | 141 | unsigned long flags; |
141 | int ret; | 142 | int ret; |
142 | 143 | ||
143 | spin_lock_irqsave(&clockevents_lock, flags); | 144 | raw_spin_lock_irqsave(&clockevents_lock, flags); |
144 | ret = raw_notifier_chain_register(&clockevents_chain, nb); | 145 | ret = raw_notifier_chain_register(&clockevents_chain, nb); |
145 | spin_unlock_irqrestore(&clockevents_lock, flags); | 146 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
146 | 147 | ||
147 | return ret; | 148 | return ret; |
148 | } | 149 | } |
@@ -184,13 +185,13 @@ void clockevents_register_device(struct clock_event_device *dev) | |||
184 | BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); | 185 | BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); |
185 | BUG_ON(!dev->cpumask); | 186 | BUG_ON(!dev->cpumask); |
186 | 187 | ||
187 | spin_lock_irqsave(&clockevents_lock, flags); | 188 | raw_spin_lock_irqsave(&clockevents_lock, flags); |
188 | 189 | ||
189 | list_add(&dev->list, &clockevent_devices); | 190 | list_add(&dev->list, &clockevent_devices); |
190 | clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); | 191 | clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); |
191 | clockevents_notify_released(); | 192 | clockevents_notify_released(); |
192 | 193 | ||
193 | spin_unlock_irqrestore(&clockevents_lock, flags); | 194 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
194 | } | 195 | } |
195 | EXPORT_SYMBOL_GPL(clockevents_register_device); | 196 | EXPORT_SYMBOL_GPL(clockevents_register_device); |
196 | 197 | ||
@@ -237,10 +238,11 @@ void clockevents_exchange_device(struct clock_event_device *old, | |||
237 | */ | 238 | */ |
238 | void clockevents_notify(unsigned long reason, void *arg) | 239 | void clockevents_notify(unsigned long reason, void *arg) |
239 | { | 240 | { |
240 | struct list_head *node, *tmp; | 241 | struct clock_event_device *dev, *tmp; |
241 | unsigned long flags; | 242 | unsigned long flags; |
243 | int cpu; | ||
242 | 244 | ||
243 | spin_lock_irqsave(&clockevents_lock, flags); | 245 | raw_spin_lock_irqsave(&clockevents_lock, flags); |
244 | clockevents_do_notify(reason, arg); | 246 | clockevents_do_notify(reason, arg); |
245 | 247 | ||
246 | switch (reason) { | 248 | switch (reason) { |
@@ -249,13 +251,25 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
249 | * Unregister the clock event devices which were | 251 | * Unregister the clock event devices which were |
250 | * released from the users in the notify chain. | 252 | * released from the users in the notify chain. |
251 | */ | 253 | */ |
252 | list_for_each_safe(node, tmp, &clockevents_released) | 254 | list_for_each_entry_safe(dev, tmp, &clockevents_released, list) |
253 | list_del(node); | 255 | list_del(&dev->list); |
256 | /* | ||
257 | * Now check whether the CPU has left unused per cpu devices | ||
258 | */ | ||
259 | cpu = *((int *)arg); | ||
260 | list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { | ||
261 | if (cpumask_test_cpu(cpu, dev->cpumask) && | ||
262 | cpumask_weight(dev->cpumask) == 1 && | ||
263 | !tick_is_broadcast_device(dev)) { | ||
264 | BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); | ||
265 | list_del(&dev->list); | ||
266 | } | ||
267 | } | ||
254 | break; | 268 | break; |
255 | default: | 269 | default: |
256 | break; | 270 | break; |
257 | } | 271 | } |
258 | spin_unlock_irqrestore(&clockevents_lock, flags); | 272 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
259 | } | 273 | } |
260 | EXPORT_SYMBOL_GPL(clockevents_notify); | 274 | EXPORT_SYMBOL_GPL(clockevents_notify); |
261 | #endif | 275 | #endif |
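The CPU_DEAD branch added above prunes per-cpu event devices that can never fire again: the dead cpu must be in the device's cpumask, the mask must cover exactly that one cpu, and the device must not be the broadcast device. With cpumask shrunk to a single 64-bit word for illustration (helper name hypothetical), the predicate is:

        #include <stdbool.h>
        #include <stdint.h>

        /* cpumask_test_cpu(cpu, mask) && cpumask_weight(mask) == 1,
         * modelled on a plain 64-bit mask */
        static bool owned_only_by(uint64_t cpumask, int cpu)
        {
                return cpumask == (UINT64_C(1) << cpu);
        }

        int main(void)
        {
                return !owned_only_by(UINT64_C(1) << 3, 3); /* cpu 3 only: true */
        }

A device for which this holds is simply unlinked; the BUG_ON documents that such a device must already be in CLOCK_EVT_MODE_UNUSED when its cpu goes away.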
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 5e18c6ab2c6a..13700833c181 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -39,7 +39,7 @@ void timecounter_init(struct timecounter *tc, | |||
39 | tc->cycle_last = cc->read(cc); | 39 | tc->cycle_last = cc->read(cc); |
40 | tc->nsec = start_tstamp; | 40 | tc->nsec = start_tstamp; |
41 | } | 41 | } |
42 | EXPORT_SYMBOL(timecounter_init); | 42 | EXPORT_SYMBOL_GPL(timecounter_init); |
43 | 43 | ||
44 | /** | 44 | /** |
45 | * timecounter_read_delta - get nanoseconds since last call of this function | 45 | * timecounter_read_delta - get nanoseconds since last call of this function |
@@ -83,7 +83,7 @@ u64 timecounter_read(struct timecounter *tc) | |||
83 | 83 | ||
84 | return nsec; | 84 | return nsec; |
85 | } | 85 | } |
86 | EXPORT_SYMBOL(timecounter_read); | 86 | EXPORT_SYMBOL_GPL(timecounter_read); |
87 | 87 | ||
88 | u64 timecounter_cyc2time(struct timecounter *tc, | 88 | u64 timecounter_cyc2time(struct timecounter *tc, |
89 | cycle_t cycle_tstamp) | 89 | cycle_t cycle_tstamp) |
@@ -105,7 +105,60 @@ u64 timecounter_cyc2time(struct timecounter *tc, | |||
105 | 105 | ||
106 | return nsec; | 106 | return nsec; |
107 | } | 107 | } |
108 | EXPORT_SYMBOL(timecounter_cyc2time); | 108 | EXPORT_SYMBOL_GPL(timecounter_cyc2time); |
109 | |||
110 | /** | ||
111 | * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks | ||
112 | * @mult: pointer to mult variable | ||
113 | * @shift: pointer to shift variable | ||
114 | * @from: frequency to convert from | ||
115 | * @to: frequency to convert to | ||
116 | * @minsec: guaranteed runtime conversion range in seconds | ||
117 | * | ||
118 | * The function evaluates the shift/mult pair for the scaled math | ||
119 | * operations of clocksources and clockevents. | ||
120 | * | ||
121 | * @to and @from are frequency values in HZ. For clock sources @to is | ||
122 | * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock | ||
123 | * events @to is the counter frequency and @from is NSEC_PER_SEC. | ||
124 | * | ||
125 | * The @minsec conversion range argument controls the time frame in | ||
126 | * seconds which must be covered by the runtime conversion with the | ||
127 | * calculated mult and shift factors. This guarantees that no 64bit | ||
128 | * overflow happens when the input value of the conversion is | ||
129 | * multiplied with the calculated mult factor. Larger ranges may | ||
130 | * reduce the conversion accuracy by choosing smaller mult and shift | ||
131 | * factors. | ||
132 | */ | ||
133 | void | ||
134 | clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec) | ||
135 | { | ||
136 | u64 tmp; | ||
137 | u32 sft, sftacc = 32; | ||
138 | |||
139 | /* | ||
140 | * Calculate the shift factor which is limiting the conversion | ||
141 | * range: | ||
142 | */ | ||
143 | tmp = ((u64)minsec * from) >> 32; | ||
144 | while (tmp) { | ||
145 | tmp >>= 1; | ||
146 | sftacc--; | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * Find the conversion shift/mult pair which has the best | ||
151 | * accuracy and fits the minsec conversion range: | ||
152 | */ | ||
153 | for (sft = 32; sft > 0; sft--) { | ||
154 | tmp = (u64) to << sft; | ||
155 | do_div(tmp, from); | ||
156 | if ((tmp >> sftacc) == 0) | ||
157 | break; | ||
158 | } | ||
159 | *mult = tmp; | ||
160 | *shift = sft; | ||
161 | } | ||
109 | 162 | ||
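To see what clocks_calc_mult_shift() produces, here is a stand-alone user-space rendering of the same loop plus a worked example; the 24 MHz counter and 600 s range are invented numbers, not values from the patch:

        #include <stdint.h>
        #include <stdio.h>

        static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
                                    uint32_t from, uint32_t to, uint32_t minsec)
        {
                uint64_t tmp;
                uint32_t sft, sftacc = 32;

                /* shrink the accuracy budget until minsec seconds worth of
                 * input cycles are guaranteed not to overflow 64 bits */
                tmp = ((uint64_t)minsec * from) >> 32;
                while (tmp) {
                        tmp >>= 1;
                        sftacc--;
                }
                /* pick the largest shift whose mult still fits that budget */
                for (sft = 32; sft > 0; sft--) {
                        tmp = ((uint64_t)to << sft) / from;     /* do_div() */
                        if ((tmp >> sftacc) == 0)
                                break;
                }
                *mult = tmp;
                *shift = sft;
        }

        int main(void)
        {
                uint32_t mult, shift;

                /* clocksource direction: 24 MHz counter -> nanoseconds */
                calc_mult_shift(&mult, &shift, 24000000, 1000000000, 600);
                /* one second worth of cycles should come back as ~1e9 ns */
                printf("mult=%u shift=%u, 1 s -> %llu ns\n", mult, shift,
                       (unsigned long long)(((uint64_t)24000000 * mult) >> shift));
                return 0;
        }

This prints mult=699050666 shift=24, and converting one second of cycles back yields 999999999 ns: a single nanosecond of rounding error per second, while the factors stay safe for at least 600 seconds of accumulated cycles.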
110 | /*[Clocksource internal variables]--------- | 163 | /*[Clocksource internal variables]--------- |
111 | * curr_clocksource: | 164 | * curr_clocksource: |
@@ -290,7 +343,19 @@ static void clocksource_resume_watchdog(void) | |||
290 | { | 343 | { |
291 | unsigned long flags; | 344 | unsigned long flags; |
292 | 345 | ||
293 | spin_lock_irqsave(&watchdog_lock, flags); | 346 | /* |
347 | * We use trylock here to avoid a potential deadlock when | ||
348 | * kgdb calls this code after the kernel has been stopped with | ||
349 | * watchdog_lock held. When watchdog_lock is held we just | ||
350 | * return and accept that the watchdog might trigger and mark | ||
351 | * the monitored clock source (usually TSC) unstable. | ||
352 | * | ||
353 | * This does not affect the other caller clocksource_resume() | ||
354 | * because at this point the kernel is UP, interrupts are | ||
355 | * disabled and nothing can hold watchdog_lock. | ||
356 | */ | ||
357 | if (!spin_trylock_irqsave(&watchdog_lock, flags)) | ||
358 | return; | ||
294 | clocksource_reset_watchdog(); | 359 | clocksource_reset_watchdog(); |
295 | spin_unlock_irqrestore(&watchdog_lock, flags); | 360 | spin_unlock_irqrestore(&watchdog_lock, flags); |
296 | } | 361 | } |
@@ -405,14 +470,55 @@ void clocksource_resume(void) | |||
405 | * clocksource_touch_watchdog - Update watchdog | 470 | * clocksource_touch_watchdog - Update watchdog |
406 | * | 471 | * |
407 | * Update the watchdog after exception contexts such as kgdb so as not | 472 | * Update the watchdog after exception contexts such as kgdb so as not |
408 | * to incorrectly trip the watchdog. | 473 | * to incorrectly trip the watchdog. This might fail when the kernel |
409 | * | 474 | * was stopped in code which holds watchdog_lock. |
410 | */ | 475 | */ |
411 | void clocksource_touch_watchdog(void) | 476 | void clocksource_touch_watchdog(void) |
412 | { | 477 | { |
413 | clocksource_resume_watchdog(); | 478 | clocksource_resume_watchdog(); |
414 | } | 479 | } |
415 | 480 | ||
481 | /** | ||
482 | * clocksource_max_deferment - Returns max time the clocksource can be deferred | ||
483 | * @cs: Pointer to clocksource | ||
484 | * | ||
485 | */ | ||
486 | static u64 clocksource_max_deferment(struct clocksource *cs) | ||
487 | { | ||
488 | u64 max_nsecs, max_cycles; | ||
489 | |||
490 | /* | ||
491 | * Calculate the maximum number of cycles that we can pass to the | ||
492 | * cyc2ns function without overflowing a 64-bit signed result. The | ||
494 | * maximum number of cycles is equal to (2^63)/cs->mult, which | ||
494 | * is equivalent to the below. | ||
495 | * max_cycles < (2^63)/cs->mult | ||
496 | * max_cycles < 2^(log2((2^63)/cs->mult)) | ||
497 | * max_cycles < 2^(log2(2^63) - log2(cs->mult)) | ||
498 | * max_cycles < 2^(63 - log2(cs->mult)) | ||
499 | * max_cycles < 1 << (63 - log2(cs->mult)) | ||
500 | * Please note that we add 1 to the result of the log2 to account for | ||
501 | * any rounding errors, ensure the above inequality is satisfied and | ||
502 | * no overflow will occur. | ||
503 | */ | ||
504 | max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1)); | ||
505 | |||
506 | /* | ||
507 | * The actual maximum number of cycles we can defer the clocksource is | ||
508 | * determined by the minimum of max_cycles and cs->mask. | ||
509 | */ | ||
510 | max_cycles = min_t(u64, max_cycles, (u64) cs->mask); | ||
511 | max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift); | ||
512 | |||
513 | /* | ||
514 | * To ensure that the clocksource does not wrap whilst we are idle, | ||
515 | * limit the time the clocksource can be deferred. A margin of 1/32 | ||
516 | * (~3.1%) is used here because it can be computed with a cheap | ||
517 | * shift, versus say 10% which would require a division. | ||
518 | */ | ||
519 | return max_nsecs - (max_nsecs >> 5); | ||
520 | } | ||
521 | |||
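The overflow bound above can be checked in user space. The sketch below re-derives max_idle_ns for a made-up clocksource (a 32-bit counter with mult=4194304 and shift=22, i.e. roughly a 1 GHz clock); all names and values are illustrative:

        #include <stdint.h>
        #include <stdio.h>

        static int ilog2_u32(uint32_t v)        /* position of the top set bit */
        {
                int l = -1;
                while (v) { v >>= 1; l++; }
                return l;
        }

        static uint64_t max_deferment_ns(uint32_t mult, uint32_t shift, uint64_t mask)
        {
                /* largest cycle count whose (cycles * mult) stays below 2^63 */
                uint64_t max_cycles = UINT64_C(1) << (63 - (ilog2_u32(mult) + 1));
                uint64_t max_nsecs;

                if (max_cycles > mask)          /* also bounded by counter width */
                        max_cycles = mask;
                max_nsecs = (max_cycles * mult) >> shift; /* clocksource_cyc2ns() */
                return max_nsecs - (max_nsecs >> 5);      /* keep the 1/32 margin */
        }

        int main(void)
        {
                /* hypothetical 1 GHz, 32-bit wide clocksource */
                printf("max idle: %llu ns\n",
                       (unsigned long long)max_deferment_ns(4194304, 22, 0xffffffff));
                return 0;
        }

For this device the counter wraps after 2^32 ns (~4.29 s), and the result comes out just under 4.16 s: the wrap limit minus the safety margin. For such narrow counters it is cs->mask, not the multiplication bound, that dominates.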
416 | #ifdef CONFIG_GENERIC_TIME | 522 | #ifdef CONFIG_GENERIC_TIME |
417 | 523 | ||
418 | /** | 524 | /** |
@@ -511,6 +617,9 @@ static void clocksource_enqueue(struct clocksource *cs) | |||
511 | */ | 617 | */ |
512 | int clocksource_register(struct clocksource *cs) | 618 | int clocksource_register(struct clocksource *cs) |
513 | { | 619 | { |
620 | /* calculate max idle time permitted for this clocksource */ | ||
621 | cs->max_idle_ns = clocksource_max_deferment(cs); | ||
622 | |||
514 | mutex_lock(&clocksource_mutex); | 623 | mutex_lock(&clocksource_mutex); |
515 | clocksource_enqueue(cs); | 624 | clocksource_enqueue(cs); |
516 | clocksource_select(); | 625 | clocksource_select(); |
@@ -580,7 +689,7 @@ sysfs_show_current_clocksources(struct sys_device *dev, | |||
580 | * @count: length of buffer | 689 | * @count: length of buffer |
581 | * | 690 | * |
582 | * Takes input from sysfs interface for manually overriding the default | 691 | * Takes input from sysfs interface for manually overriding the default |
583 | * clocksource selction. | 692 | * clocksource selection. |
584 | */ | 693 | */ |
585 | static ssize_t sysfs_override_clocksource(struct sys_device *dev, | 694 | static ssize_t sysfs_override_clocksource(struct sys_device *dev, |
586 | struct sysdev_attribute *attr, | 695 | struct sysdev_attribute *attr, |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index c2ec25087a35..b3bafd5fc66d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device; | |||
31 | /* FIXME: Use cpumask_var_t. */ | 31 | /* FIXME: Use cpumask_var_t. */ |
32 | static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); | 32 | static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); |
33 | static DECLARE_BITMAP(tmpmask, NR_CPUS); | 33 | static DECLARE_BITMAP(tmpmask, NR_CPUS); |
34 | static DEFINE_SPINLOCK(tick_broadcast_lock); | 34 | static DEFINE_RAW_SPINLOCK(tick_broadcast_lock); |
35 | static int tick_broadcast_force; | 35 | static int tick_broadcast_force; |
36 | 36 | ||
37 | #ifdef CONFIG_TICK_ONESHOT | 37 | #ifdef CONFIG_TICK_ONESHOT |
@@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | |||
96 | unsigned long flags; | 96 | unsigned long flags; |
97 | int ret = 0; | 97 | int ret = 0; |
98 | 98 | ||
99 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 99 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * Devices might be registered with both periodic and oneshot | 102 | * Devices might be registered with both periodic and oneshot |
@@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | |||
122 | tick_broadcast_clear_oneshot(cpu); | 122 | tick_broadcast_clear_oneshot(cpu); |
123 | } | 123 | } |
124 | } | 124 | } |
125 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 125 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
126 | return ret; | 126 | return ret; |
127 | } | 127 | } |
128 | 128 | ||
@@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask) | |||
161 | */ | 161 | */ |
162 | static void tick_do_periodic_broadcast(void) | 162 | static void tick_do_periodic_broadcast(void) |
163 | { | 163 | { |
164 | spin_lock(&tick_broadcast_lock); | 164 | raw_spin_lock(&tick_broadcast_lock); |
165 | 165 | ||
166 | cpumask_and(to_cpumask(tmpmask), | 166 | cpumask_and(to_cpumask(tmpmask), |
167 | cpu_online_mask, tick_get_broadcast_mask()); | 167 | cpu_online_mask, tick_get_broadcast_mask()); |
168 | tick_do_broadcast(to_cpumask(tmpmask)); | 168 | tick_do_broadcast(to_cpumask(tmpmask)); |
169 | 169 | ||
170 | spin_unlock(&tick_broadcast_lock); | 170 | raw_spin_unlock(&tick_broadcast_lock); |
171 | } | 171 | } |
172 | 172 | ||
173 | /* | 173 | /* |
@@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason) | |||
212 | unsigned long flags; | 212 | unsigned long flags; |
213 | int cpu, bc_stopped; | 213 | int cpu, bc_stopped; |
214 | 214 | ||
215 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 215 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
216 | 216 | ||
217 | cpu = smp_processor_id(); | 217 | cpu = smp_processor_id(); |
218 | td = &per_cpu(tick_cpu_device, cpu); | 218 | td = &per_cpu(tick_cpu_device, cpu); |
@@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason) | |||
263 | tick_broadcast_setup_oneshot(bc); | 263 | tick_broadcast_setup_oneshot(bc); |
264 | } | 264 | } |
265 | out: | 265 | out: |
266 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 266 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
267 | } | 267 | } |
268 | 268 | ||
269 | /* | 269 | /* |
@@ -299,7 +299,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
299 | unsigned long flags; | 299 | unsigned long flags; |
300 | unsigned int cpu = *cpup; | 300 | unsigned int cpu = *cpup; |
301 | 301 | ||
302 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 302 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
303 | 303 | ||
304 | bc = tick_broadcast_device.evtdev; | 304 | bc = tick_broadcast_device.evtdev; |
305 | cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); | 305 | cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); |
@@ -309,7 +309,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
309 | clockevents_shutdown(bc); | 309 | clockevents_shutdown(bc); |
310 | } | 310 | } |
311 | 311 | ||
312 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 312 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
313 | } | 313 | } |
314 | 314 | ||
315 | void tick_suspend_broadcast(void) | 315 | void tick_suspend_broadcast(void) |
@@ -317,13 +317,13 @@ void tick_suspend_broadcast(void) | |||
317 | struct clock_event_device *bc; | 317 | struct clock_event_device *bc; |
318 | unsigned long flags; | 318 | unsigned long flags; |
319 | 319 | ||
320 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 320 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
321 | 321 | ||
322 | bc = tick_broadcast_device.evtdev; | 322 | bc = tick_broadcast_device.evtdev; |
323 | if (bc) | 323 | if (bc) |
324 | clockevents_shutdown(bc); | 324 | clockevents_shutdown(bc); |
325 | 325 | ||
326 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 326 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
327 | } | 327 | } |
328 | 328 | ||
329 | int tick_resume_broadcast(void) | 329 | int tick_resume_broadcast(void) |
@@ -332,7 +332,7 @@ int tick_resume_broadcast(void) | |||
332 | unsigned long flags; | 332 | unsigned long flags; |
333 | int broadcast = 0; | 333 | int broadcast = 0; |
334 | 334 | ||
335 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 335 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
336 | 336 | ||
337 | bc = tick_broadcast_device.evtdev; | 337 | bc = tick_broadcast_device.evtdev; |
338 | 338 | ||
@@ -351,7 +351,7 @@ int tick_resume_broadcast(void) | |||
351 | break; | 351 | break; |
352 | } | 352 | } |
353 | } | 353 | } |
354 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 354 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
355 | 355 | ||
356 | return broadcast; | 356 | return broadcast; |
357 | } | 357 | } |
@@ -405,7 +405,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) | |||
405 | ktime_t now, next_event; | 405 | ktime_t now, next_event; |
406 | int cpu; | 406 | int cpu; |
407 | 407 | ||
408 | spin_lock(&tick_broadcast_lock); | 408 | raw_spin_lock(&tick_broadcast_lock); |
409 | again: | 409 | again: |
410 | dev->next_event.tv64 = KTIME_MAX; | 410 | dev->next_event.tv64 = KTIME_MAX; |
411 | next_event.tv64 = KTIME_MAX; | 411 | next_event.tv64 = KTIME_MAX; |
@@ -443,7 +443,7 @@ again: | |||
443 | if (tick_broadcast_set_event(next_event, 0)) | 443 | if (tick_broadcast_set_event(next_event, 0)) |
444 | goto again; | 444 | goto again; |
445 | } | 445 | } |
446 | spin_unlock(&tick_broadcast_lock); | 446 | raw_spin_unlock(&tick_broadcast_lock); |
447 | } | 447 | } |
448 | 448 | ||
449 | /* | 449 | /* |
@@ -457,7 +457,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
457 | unsigned long flags; | 457 | unsigned long flags; |
458 | int cpu; | 458 | int cpu; |
459 | 459 | ||
460 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 460 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
461 | 461 | ||
462 | /* | 462 | /* |
463 | * Periodic mode does not care about the enter/exit of power | 463 | * Periodic mode does not care about the enter/exit of power |
@@ -492,7 +492,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
492 | } | 492 | } |
493 | 493 | ||
494 | out: | 494 | out: |
495 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 495 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
496 | } | 496 | } |
497 | 497 | ||
498 | /* | 498 | /* |
@@ -563,13 +563,13 @@ void tick_broadcast_switch_to_oneshot(void) | |||
563 | struct clock_event_device *bc; | 563 | struct clock_event_device *bc; |
564 | unsigned long flags; | 564 | unsigned long flags; |
565 | 565 | ||
566 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 566 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
567 | 567 | ||
568 | tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; | 568 | tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; |
569 | bc = tick_broadcast_device.evtdev; | 569 | bc = tick_broadcast_device.evtdev; |
570 | if (bc) | 570 | if (bc) |
571 | tick_broadcast_setup_oneshot(bc); | 571 | tick_broadcast_setup_oneshot(bc); |
572 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 572 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
573 | } | 573 | } |
574 | 574 | ||
575 | 575 | ||
@@ -581,7 +581,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
581 | unsigned long flags; | 581 | unsigned long flags; |
582 | unsigned int cpu = *cpup; | 582 | unsigned int cpu = *cpup; |
583 | 583 | ||
584 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 584 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
585 | 585 | ||
586 | /* | 586 | /* |
587 | * Clear the broadcast mask flag for the dead cpu, but do not | 587 | * Clear the broadcast mask flag for the dead cpu, but do not |
@@ -589,7 +589,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
589 | */ | 589 | */ |
590 | cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); | 590 | cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); |
591 | 591 | ||
592 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 592 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
593 | } | 593 | } |
594 | 594 | ||
595 | /* | 595 | /* |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 83c4417b6a3c..b6b898d2eeef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); | |||
34 | ktime_t tick_next_period; | 34 | ktime_t tick_next_period; |
35 | ktime_t tick_period; | 35 | ktime_t tick_period; |
36 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; | 36 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; |
37 | DEFINE_SPINLOCK(tick_device_lock); | 37 | static DEFINE_RAW_SPINLOCK(tick_device_lock); |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * Debugging: see timer_list.c | 40 | * Debugging: see timer_list.c |
@@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
209 | int cpu, ret = NOTIFY_OK; | 209 | int cpu, ret = NOTIFY_OK; |
210 | unsigned long flags; | 210 | unsigned long flags; |
211 | 211 | ||
212 | spin_lock_irqsave(&tick_device_lock, flags); | 212 | raw_spin_lock_irqsave(&tick_device_lock, flags); |
213 | 213 | ||
214 | cpu = smp_processor_id(); | 214 | cpu = smp_processor_id(); |
215 | if (!cpumask_test_cpu(cpu, newdev->cpumask)) | 215 | if (!cpumask_test_cpu(cpu, newdev->cpumask)) |
@@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
268 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) | 268 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) |
269 | tick_oneshot_notify(); | 269 | tick_oneshot_notify(); |
270 | 270 | ||
271 | spin_unlock_irqrestore(&tick_device_lock, flags); | 271 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); |
272 | return NOTIFY_STOP; | 272 | return NOTIFY_STOP; |
273 | 273 | ||
274 | out_bc: | 274 | out_bc: |
@@ -278,7 +278,7 @@ out_bc: | |||
278 | if (tick_check_broadcast_device(newdev)) | 278 | if (tick_check_broadcast_device(newdev)) |
279 | ret = NOTIFY_STOP; | 279 | ret = NOTIFY_STOP; |
280 | 280 | ||
281 | spin_unlock_irqrestore(&tick_device_lock, flags); | 281 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); |
282 | 282 | ||
283 | return ret; | 283 | return ret; |
284 | } | 284 | } |
@@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup) | |||
311 | struct clock_event_device *dev = td->evtdev; | 311 | struct clock_event_device *dev = td->evtdev; |
312 | unsigned long flags; | 312 | unsigned long flags; |
313 | 313 | ||
314 | spin_lock_irqsave(&tick_device_lock, flags); | 314 | raw_spin_lock_irqsave(&tick_device_lock, flags); |
315 | td->mode = TICKDEV_MODE_PERIODIC; | 315 | td->mode = TICKDEV_MODE_PERIODIC; |
316 | if (dev) { | 316 | if (dev) { |
317 | /* | 317 | /* |
@@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup) | |||
322 | clockevents_exchange_device(dev, NULL); | 322 | clockevents_exchange_device(dev, NULL); |
323 | td->evtdev = NULL; | 323 | td->evtdev = NULL; |
324 | } | 324 | } |
325 | spin_unlock_irqrestore(&tick_device_lock, flags); | 325 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void tick_suspend(void) | 328 | static void tick_suspend(void) |
@@ -330,9 +330,9 @@ static void tick_suspend(void) | |||
330 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 330 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); |
331 | unsigned long flags; | 331 | unsigned long flags; |
332 | 332 | ||
333 | spin_lock_irqsave(&tick_device_lock, flags); | 333 | raw_spin_lock_irqsave(&tick_device_lock, flags); |
334 | clockevents_shutdown(td->evtdev); | 334 | clockevents_shutdown(td->evtdev); |
335 | spin_unlock_irqrestore(&tick_device_lock, flags); | 335 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); |
336 | } | 336 | } |
337 | 337 | ||
338 | static void tick_resume(void) | 338 | static void tick_resume(void) |
@@ -341,7 +341,7 @@ static void tick_resume(void) | |||
341 | unsigned long flags; | 341 | unsigned long flags; |
342 | int broadcast = tick_resume_broadcast(); | 342 | int broadcast = tick_resume_broadcast(); |
343 | 343 | ||
344 | spin_lock_irqsave(&tick_device_lock, flags); | 344 | raw_spin_lock_irqsave(&tick_device_lock, flags); |
345 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); | 345 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); |
346 | 346 | ||
347 | if (!broadcast) { | 347 | if (!broadcast) { |
@@ -350,7 +350,7 @@ static void tick_resume(void) | |||
350 | else | 350 | else |
351 | tick_resume_oneshot(); | 351 | tick_resume_oneshot(); |
352 | } | 352 | } |
353 | spin_unlock_irqrestore(&tick_device_lock, flags); | 353 | raw_spin_unlock_irqrestore(&tick_device_lock, flags); |
354 | } | 354 | } |
355 | 355 | ||
356 | /* | 356 | /* |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index b1c05bf75ee0..290eefbc1f60 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -6,7 +6,6 @@ | |||
6 | #define TICK_DO_TIMER_BOOT -2 | 6 | #define TICK_DO_TIMER_BOOT -2 |
7 | 7 | ||
8 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); | 8 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); |
9 | extern spinlock_t tick_device_lock; | ||
10 | extern ktime_t tick_next_period; | 9 | extern ktime_t tick_next_period; |
11 | extern ktime_t tick_period; | 10 | extern ktime_t tick_period; |
12 | extern int tick_do_timer_cpu __read_mostly; | 11 | extern int tick_do_timer_cpu __read_mostly; |
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index a96c0e2b89cf..0a8a213016f0 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -50,9 +50,9 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires, | |||
50 | dev->min_delta_ns += dev->min_delta_ns >> 1; | 50 | dev->min_delta_ns += dev->min_delta_ns >> 1; |
51 | 51 | ||
52 | printk(KERN_WARNING | 52 | printk(KERN_WARNING |
53 | "CE: %s increasing min_delta_ns to %lu nsec\n", | 53 | "CE: %s increasing min_delta_ns to %llu nsec\n", |
54 | dev->name ? dev->name : "?", | 54 | dev->name ? dev->name : "?", |
55 | dev->min_delta_ns << 1); | 55 | (unsigned long long) dev->min_delta_ns << 1); |
56 | 56 | ||
57 | i = 0; | 57 | i = 0; |
58 | } | 58 | } |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 89aed5933ed4..f992762d7f51 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,18 +134,13 @@ __setup("nohz=", setup_tick_nohz); | |||
134 | * value. We do this unconditionally on any cpu, as we don't know whether the | 134 | * value. We do this unconditionally on any cpu, as we don't know whether the |
135 | * cpu, which has the update task assigned is in a long sleep. | 135 | * cpu, which has the update task assigned is in a long sleep. |
136 | */ | 136 | */ |
137 | static void tick_nohz_update_jiffies(void) | 137 | static void tick_nohz_update_jiffies(ktime_t now) |
138 | { | 138 | { |
139 | int cpu = smp_processor_id(); | 139 | int cpu = smp_processor_id(); |
140 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 140 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
141 | unsigned long flags; | 141 | unsigned long flags; |
142 | ktime_t now; | ||
143 | |||
144 | if (!ts->tick_stopped) | ||
145 | return; | ||
146 | 142 | ||
147 | cpumask_clear_cpu(cpu, nohz_cpu_mask); | 143 | cpumask_clear_cpu(cpu, nohz_cpu_mask); |
148 | now = ktime_get(); | ||
149 | ts->idle_waketime = now; | 144 | ts->idle_waketime = now; |
150 | 145 | ||
151 | local_irq_save(flags); | 146 | local_irq_save(flags); |
@@ -155,20 +150,17 @@ static void tick_nohz_update_jiffies(void) | |||
155 | touch_softlockup_watchdog(); | 150 | touch_softlockup_watchdog(); |
156 | } | 151 | } |
157 | 152 | ||
158 | static void tick_nohz_stop_idle(int cpu) | 153 | static void tick_nohz_stop_idle(int cpu, ktime_t now) |
159 | { | 154 | { |
160 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 155 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
156 | ktime_t delta; | ||
161 | 157 | ||
162 | if (ts->idle_active) { | 158 | delta = ktime_sub(now, ts->idle_entrytime); |
163 | ktime_t now, delta; | 159 | ts->idle_lastupdate = now; |
164 | now = ktime_get(); | 160 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); |
165 | delta = ktime_sub(now, ts->idle_entrytime); | 161 | ts->idle_active = 0; |
166 | ts->idle_lastupdate = now; | ||
167 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); | ||
168 | ts->idle_active = 0; | ||
169 | 162 | ||
170 | sched_clock_idle_wakeup_event(0); | 163 | sched_clock_idle_wakeup_event(0); |
171 | } | ||
172 | } | 164 | } |
173 | 165 | ||
174 | static ktime_t tick_nohz_start_idle(struct tick_sched *ts) | 166 | static ktime_t tick_nohz_start_idle(struct tick_sched *ts) |
@@ -216,6 +208,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
216 | struct tick_sched *ts; | 208 | struct tick_sched *ts; |
217 | ktime_t last_update, expires, now; | 209 | ktime_t last_update, expires, now; |
218 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | 210 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; |
211 | u64 time_delta; | ||
219 | int cpu; | 212 | int cpu; |
220 | 213 | ||
221 | local_irq_save(flags); | 214 | local_irq_save(flags); |
@@ -263,7 +256,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
263 | 256 | ||
264 | if (ratelimit < 10) { | 257 | if (ratelimit < 10) { |
265 | printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", | 258 | printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", |
266 | local_softirq_pending()); | 259 | (unsigned int) local_softirq_pending()); |
267 | ratelimit++; | 260 | ratelimit++; |
268 | } | 261 | } |
269 | goto end; | 262 | goto end; |
@@ -275,14 +268,18 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
275 | seq = read_seqbegin(&xtime_lock); | 268 | seq = read_seqbegin(&xtime_lock); |
276 | last_update = last_jiffies_update; | 269 | last_update = last_jiffies_update; |
277 | last_jiffies = jiffies; | 270 | last_jiffies = jiffies; |
271 | time_delta = timekeeping_max_deferment(); | ||
278 | } while (read_seqretry(&xtime_lock, seq)); | 272 | } while (read_seqretry(&xtime_lock, seq)); |
279 | 273 | ||
280 | /* Get the next timer wheel timer */ | 274 | if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || |
281 | next_jiffies = get_next_timer_interrupt(last_jiffies); | 275 | arch_needs_cpu(cpu)) { |
282 | delta_jiffies = next_jiffies - last_jiffies; | 276 | next_jiffies = last_jiffies + 1; |
283 | |||
284 | if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu)) | ||
285 | delta_jiffies = 1; | 277 | delta_jiffies = 1; |
278 | } else { | ||
279 | /* Get the next timer wheel timer */ | ||
280 | next_jiffies = get_next_timer_interrupt(last_jiffies); | ||
281 | delta_jiffies = next_jiffies - last_jiffies; | ||
282 | } | ||
286 | /* | 283 | /* |
287 | * Do not stop the tick, if we are only one off | 284 | * Do not stop the tick, if we are only one off |
288 | * or if the cpu is required for rcu | 285 | * or if the cpu is required for rcu |
@@ -294,22 +291,51 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
294 | if ((long)delta_jiffies >= 1) { | 291 | if ((long)delta_jiffies >= 1) { |
295 | 292 | ||
296 | /* | 293 | /* |
297 | * calculate the expiry time for the next timer wheel | ||
298 | * timer | ||
299 | */ | ||
300 | expires = ktime_add_ns(last_update, tick_period.tv64 * | ||
301 | delta_jiffies); | ||
302 | |||
303 | /* | ||
304 | * If this cpu is the one which updates jiffies, then | 294 | * If this cpu is the one which updates jiffies, then |
305 | * give up the assignment and let it be taken by the | 295 | * give up the assignment and let it be taken by the |
306 | * cpu which runs the tick timer next, which might be | 296 | * cpu which runs the tick timer next, which might be |
307 | * this cpu as well. If we don't drop this here the | 297 | * this cpu as well. If we don't drop this here the |
308 | * jiffies might be stale and do_timer() never | 298 | * jiffies might be stale and do_timer() never |
309 | * invoked. | 299 | * invoked. Keep track of the fact that it was the one |
300 | * which had the do_timer() duty last. If this cpu is | ||
301 | * the one which had the do_timer() duty last, we | ||
302 | * limit the sleep time to the timekeeping | ||
303 | * max_deferment value which we retrieved | ||
304 | * above. Otherwise we can sleep as long as we want. | ||
310 | */ | 305 | */ |
311 | if (cpu == tick_do_timer_cpu) | 306 | if (cpu == tick_do_timer_cpu) { |
312 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; | 307 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
308 | ts->do_timer_last = 1; | ||
309 | } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | ||
310 | time_delta = KTIME_MAX; | ||
311 | ts->do_timer_last = 0; | ||
312 | } else if (!ts->do_timer_last) { | ||
313 | time_delta = KTIME_MAX; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * calculate the expiry time for the next timer wheel | ||
318 | * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals | ||
319 | * that there is no timer pending or at least extremely | ||
320 | * far into the future (12 days for HZ=1000). In this | ||
321 | * case we set the expiry to the end of time. | ||
322 | */ | ||
323 | if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) { | ||
324 | /* | ||
325 | * Calculate the time delta for the next timer event. | ||
326 | * If the time delta exceeds the maximum time delta | ||
327 | * permitted by the current clocksource then adjust | ||
328 | * the time delta accordingly to ensure the | ||
329 | * clocksource does not wrap. | ||
330 | */ | ||
331 | time_delta = min_t(u64, time_delta, | ||
332 | tick_period.tv64 * delta_jiffies); | ||
333 | } | ||
334 | |||
335 | if (time_delta < KTIME_MAX) | ||
336 | expires = ktime_add_ns(last_update, time_delta); | ||
337 | else | ||
338 | expires.tv64 = KTIME_MAX; | ||
313 | 339 | ||
314 | if (delta_jiffies > 1) | 340 | if (delta_jiffies > 1) |
315 | cpumask_set_cpu(cpu, nohz_cpu_mask); | 341 | cpumask_set_cpu(cpu, nohz_cpu_mask); |
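In compressed form, the duty-tracking logic above answers one question: may this cpu sleep past the point where jiffies would go stale? A user-space model follows; field and constant names mirror the patch, everything else is simplified and hypothetical:

        #include <stdint.h>
        #include <stdio.h>

        #define KTIME_MAX               INT64_MAX
        #define TICK_DO_TIMER_NONE      -1

        static int tick_do_timer_cpu;   /* which cpu currently updates jiffies */

        static int64_t sleep_bound(int cpu, int *do_timer_last, int64_t max_deferment)
        {
                int64_t time_delta = max_deferment; /* timekeeping_max_deferment() */

                if (cpu == tick_do_timer_cpu) {
                        /* give up the duty, but remember we held it last */
                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
                        *do_timer_last = 1;
                } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
                        /* another cpu still updates jiffies: no bound needed */
                        time_delta = KTIME_MAX;
                        *do_timer_last = 0;
                } else if (!*do_timer_last) {
                        /* duty is vacant, but we were not the last holder */
                        time_delta = KTIME_MAX;
                }
                return time_delta;
        }

        int main(void)
        {
                int last0 = 0, last1 = 0;

                tick_do_timer_cpu = 0;
                printf("cpu0: %lld\n", (long long)sleep_bound(0, &last0, 4160749568LL));
                printf("cpu1: %lld\n", (long long)sleep_bound(1, &last1, 4160749568LL));
                return 0;
        }

Only the cpu that holds (or last held) the do_timer() duty is bounded by the clocksource's max deferment; the actual expiry is then the minimum of this bound and the next pending timer-wheel event, exactly as the hunk computes.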
@@ -342,22 +368,19 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
342 | 368 | ||
343 | ts->idle_sleeps++; | 369 | ts->idle_sleeps++; |
344 | 370 | ||
371 | /* Mark expires */ | ||
372 | ts->idle_expires = expires; | ||
373 | |||
345 | /* | 374 | /* |
346 | * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that | 375 | * If the expiration time == KTIME_MAX, then |
347 | * there is no timer pending or at least extremly far | 376 | * in this case we simply stop the tick timer. |
348 | * into the future (12 days for HZ=1000). In this case | ||
349 | * we simply stop the tick timer: | ||
350 | */ | 377 | */ |
351 | if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) { | 378 | if (unlikely(expires.tv64 == KTIME_MAX)) { |
352 | ts->idle_expires.tv64 = KTIME_MAX; | ||
353 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) | 379 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) |
354 | hrtimer_cancel(&ts->sched_timer); | 380 | hrtimer_cancel(&ts->sched_timer); |
355 | goto out; | 381 | goto out; |
356 | } | 382 | } |
357 | 383 | ||
358 | /* Mark expiries */ | ||
359 | ts->idle_expires = expires; | ||
360 | |||
361 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { | 384 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { |
362 | hrtimer_start(&ts->sched_timer, expires, | 385 | hrtimer_start(&ts->sched_timer, expires, |
363 | HRTIMER_MODE_ABS_PINNED); | 386 | HRTIMER_MODE_ABS_PINNED); |
@@ -436,7 +459,11 @@ void tick_nohz_restart_sched_tick(void) | |||
436 | ktime_t now; | 459 | ktime_t now; |
437 | 460 | ||
438 | local_irq_disable(); | 461 | local_irq_disable(); |
439 | tick_nohz_stop_idle(cpu); | 462 | if (ts->idle_active || (ts->inidle && ts->tick_stopped)) |
463 | now = ktime_get(); | ||
464 | |||
465 | if (ts->idle_active) | ||
466 | tick_nohz_stop_idle(cpu, now); | ||
440 | 467 | ||
441 | if (!ts->inidle || !ts->tick_stopped) { | 468 | if (!ts->inidle || !ts->tick_stopped) { |
442 | ts->inidle = 0; | 469 | ts->inidle = 0; |
@@ -450,7 +477,6 @@ void tick_nohz_restart_sched_tick(void) | |||
450 | 477 | ||
451 | /* Update jiffies first */ | 478 | /* Update jiffies first */ |
452 | select_nohz_load_balancer(0); | 479 | select_nohz_load_balancer(0); |
453 | now = ktime_get(); | ||
454 | tick_do_update_jiffies64(now); | 480 | tick_do_update_jiffies64(now); |
455 | cpumask_clear_cpu(cpu, nohz_cpu_mask); | 481 | cpumask_clear_cpu(cpu, nohz_cpu_mask); |
456 | 482 | ||
@@ -584,22 +610,18 @@ static void tick_nohz_switch_to_nohz(void) | |||
584 | * timer and do not touch the other magic bits which need to be done | 610 | * timer and do not touch the other magic bits which need to be done |
585 | * when idle is left. | 611 | * when idle is left. |
586 | */ | 612 | */ |
587 | static void tick_nohz_kick_tick(int cpu) | 613 | static void tick_nohz_kick_tick(int cpu, ktime_t now) |
588 | { | 614 | { |
589 | #if 0 | 615 | #if 0 |
590 | /* Switch back to 2.6.27 behaviour */ | 616 | /* Switch back to 2.6.27 behaviour */ |
591 | 617 | ||
592 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 618 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
593 | ktime_t delta, now; | 619 | ktime_t delta; |
594 | |||
595 | if (!ts->tick_stopped) | ||
596 | return; | ||
597 | 620 | ||
598 | /* | 621 | /* |
599 | * Do not touch the tick device, when the next expiry is either | 622 | * Do not touch the tick device, when the next expiry is either |
600 | * already reached or less/equal than the tick period. | 623 | * already reached or less/equal than the tick period. |
601 | */ | 624 | */ |
602 | now = ktime_get(); | ||
603 | delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now); | 625 | delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now); |
604 | if (delta.tv64 <= tick_period.tv64) | 626 | if (delta.tv64 <= tick_period.tv64) |
605 | return; | 627 | return; |
@@ -608,9 +630,26 @@ static void tick_nohz_kick_tick(int cpu) | |||
608 | #endif | 630 | #endif |
609 | } | 631 | } |
610 | 632 | ||
633 | static inline void tick_check_nohz(int cpu) | ||
634 | { | ||
635 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | ||
636 | ktime_t now; | ||
637 | |||
638 | if (!ts->idle_active && !ts->tick_stopped) | ||
639 | return; | ||
640 | now = ktime_get(); | ||
641 | if (ts->idle_active) | ||
642 | tick_nohz_stop_idle(cpu, now); | ||
643 | if (ts->tick_stopped) { | ||
644 | tick_nohz_update_jiffies(now); | ||
645 | tick_nohz_kick_tick(cpu, now); | ||
646 | } | ||
647 | } | ||
648 | |||
611 | #else | 649 | #else |
612 | 650 | ||
613 | static inline void tick_nohz_switch_to_nohz(void) { } | 651 | static inline void tick_nohz_switch_to_nohz(void) { } |
652 | static inline void tick_check_nohz(int cpu) { } | ||
614 | 653 | ||
615 | #endif /* NO_HZ */ | 654 | #endif /* NO_HZ */ |
616 | 655 | ||
@@ -620,11 +659,7 @@ static inline void tick_nohz_switch_to_nohz(void) { } | |||
620 | void tick_check_idle(int cpu) | 659 | void tick_check_idle(int cpu) |
621 | { | 660 | { |
622 | tick_check_oneshot_broadcast(cpu); | 661 | tick_check_oneshot_broadcast(cpu); |
623 | #ifdef CONFIG_NO_HZ | 662 | tick_check_nohz(cpu); |
624 | tick_nohz_stop_idle(cpu); | ||
625 | tick_nohz_update_jiffies(); | ||
626 | tick_nohz_kick_tick(cpu); | ||
627 | #endif | ||
628 | } | 663 | } |
629 | 664 | ||
630 | /* | 665 | /* |
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 71e7f1a19156..12f5c55090be 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -40,7 +40,7 @@ ktime_t timecompare_transform(struct timecompare *sync, | |||
40 | 40 | ||
41 | return ns_to_ktime(nsec); | 41 | return ns_to_ktime(nsec); |
42 | } | 42 | } |
43 | EXPORT_SYMBOL(timecompare_transform); | 43 | EXPORT_SYMBOL_GPL(timecompare_transform); |
44 | 44 | ||
45 | int timecompare_offset(struct timecompare *sync, | 45 | int timecompare_offset(struct timecompare *sync, |
46 | s64 *offset, | 46 | s64 *offset, |
@@ -89,7 +89,7 @@ int timecompare_offset(struct timecompare *sync, | |||
89 | * source time | 89 | * source time |
90 | */ | 90 | */ |
91 | sample.offset = | 91 | sample.offset = |
92 | ktime_to_ns(ktime_add(end, start)) / 2 - | 92 | (ktime_to_ns(end) + ktime_to_ns(start)) / 2 - |
93 | ts; | 93 | ts; |
94 | 94 | ||
95 | /* simple insertion sort based on duration */ | 95 | /* simple insertion sort based on duration */ |
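The reason for summing in s64 nanoseconds above: on 32-bit builds ktime_t is a {sec, nsec} pair with a 32-bit seconds field, so ktime_add() of two absolute timestamps can overflow even though their average is perfectly representable. A minimal demonstration with a mocked-up 32-bit ktime (the struct is illustrative, not the kernel's definition):

        #include <stdint.h>
        #include <stdio.h>

        struct ktime32 { int32_t sec; int32_t nsec; };  /* 32-bit ktime_t shape */

        int main(void)
        {
                /* two wall-clock stamps from late 2009: ~1.26e9 seconds each */
                struct ktime32 start = { 1260000000, 0 }, end = { 1260000002, 0 };

                /* adding the seconds fields wraps: 2.52e9 > INT32_MAX */
                int32_t sum_sec = (int32_t)((uint32_t)start.sec + (uint32_t)end.sec);

                /* doing the arithmetic in 64-bit nanoseconds stays exact */
                int64_t mid_ns = ((int64_t)start.sec * 1000000000 + start.nsec +
                                  (int64_t)end.sec   * 1000000000 + end.nsec) / 2;

                printf("wrapped 32-bit sum of seconds: %d\n", sum_sec);
                printf("midpoint via s64 ns: %lld\n", (long long)mid_ns);
                return 0;
        }

The fixed expression in the hunk performs exactly this second computation.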
@@ -131,7 +131,7 @@ int timecompare_offset(struct timecompare *sync, | |||
131 | 131 | ||
132 | return used; | 132 | return used; |
133 | } | 133 | } |
134 | EXPORT_SYMBOL(timecompare_offset); | 134 | EXPORT_SYMBOL_GPL(timecompare_offset); |
135 | 135 | ||
136 | void __timecompare_update(struct timecompare *sync, | 136 | void __timecompare_update(struct timecompare *sync, |
137 | u64 source_tstamp) | 137 | u64 source_tstamp) |
@@ -188,4 +188,4 @@ void __timecompare_update(struct timecompare *sync, | |||
188 | } | 188 | } |
189 | } | 189 | } |
190 | } | 190 | } |
191 | EXPORT_SYMBOL(__timecompare_update); | 191 | EXPORT_SYMBOL_GPL(__timecompare_update); |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index c3a4e2907eaa..e2ab064c6d41 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -177,7 +177,7 @@ void timekeeping_leap_insert(int leapsecond) | |||
177 | { | 177 | { |
178 | xtime.tv_sec += leapsecond; | 178 | xtime.tv_sec += leapsecond; |
179 | wall_to_monotonic.tv_sec -= leapsecond; | 179 | wall_to_monotonic.tv_sec -= leapsecond; |
180 | update_vsyscall(&xtime, timekeeper.clock); | 180 | update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); |
181 | } | 181 | } |
182 | 182 | ||
183 | #ifdef CONFIG_GENERIC_TIME | 183 | #ifdef CONFIG_GENERIC_TIME |
@@ -337,7 +337,7 @@ int do_settimeofday(struct timespec *tv) | |||
337 | timekeeper.ntp_error = 0; | 337 | timekeeper.ntp_error = 0; |
338 | ntp_clear(); | 338 | ntp_clear(); |
339 | 339 | ||
340 | update_vsyscall(&xtime, timekeeper.clock); | 340 | update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); |
341 | 341 | ||
342 | write_sequnlock_irqrestore(&xtime_lock, flags); | 342 | write_sequnlock_irqrestore(&xtime_lock, flags); |
343 | 343 | ||
@@ -488,6 +488,17 @@ int timekeeping_valid_for_hres(void) | |||
488 | } | 488 | } |
489 | 489 | ||
490 | /** | 490 | /** |
491 | * timekeeping_max_deferment - Returns max time the clocksource can be deferred | ||
492 | * | ||
493 | * Caller must observe xtime_lock via read_seqbegin/read_seqretry to | ||
494 | * ensure that the clocksource does not change! | ||
495 | */ | ||
496 | u64 timekeeping_max_deferment(void) | ||
497 | { | ||
498 | return timekeeper.clock->max_idle_ns; | ||
499 | } | ||
500 | |||
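The locking requirement in the comment above is the reader side of a seqlock; the NO_HZ hunk in this same series samples the value exactly that way:

        do {
                seq = read_seqbegin(&xtime_lock);
                last_update = last_jiffies_update;
                last_jiffies = jiffies;
                time_delta = timekeeping_max_deferment();
        } while (read_seqretry(&xtime_lock, seq));

If a writer (for example a clocksource switch under xtime_lock) races with the read, read_seqretry() reports a torn snapshot and the loop retries, so the deferment value always belongs to the same clocksource the rest of the snapshot was taken from.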
501 | /** | ||
491 | * read_persistent_clock - Return time from the persistent clock. | 502 | * read_persistent_clock - Return time from the persistent clock. |
492 | * | 503 | * |
493 | * Weak dummy function for arches that do not yet support it. | 504 | * Weak dummy function for arches that do not yet support it. |
@@ -722,6 +733,51 @@ static void timekeeping_adjust(s64 offset) | |||
722 | timekeeper.ntp_error_shift; | 733 | timekeeper.ntp_error_shift; |
723 | } | 734 | } |
724 | 735 | ||
736 | |||
737 | /** | ||
738 | * logarithmic_accumulation - shifted accumulation of cycles | ||
739 | * | ||
740 | * This function accumulates a shifted interval of cycles into a | ||
741 | * shifted interval of nanoseconds, allowing for an O(log) | ||
742 | * accumulation loop. | ||
743 | * | ||
744 | * Returns the unconsumed cycles. | ||
745 | */ | ||
746 | static cycle_t logarithmic_accumulation(cycle_t offset, int shift) | ||
747 | { | ||
748 | u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; | ||
749 | |||
750 | /* If the offset is smaller then a shifted interval, do nothing */ | ||
751 | if (offset < timekeeper.cycle_interval<<shift) | ||
752 | return offset; | ||
753 | |||
754 | /* Accumulate one shifted interval */ | ||
755 | offset -= timekeeper.cycle_interval << shift; | ||
756 | timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift; | ||
757 | |||
758 | timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; | ||
759 | while (timekeeper.xtime_nsec >= nsecps) { | ||
760 | timekeeper.xtime_nsec -= nsecps; | ||
761 | xtime.tv_sec++; | ||
762 | second_overflow(); | ||
763 | } | ||
764 | |||
765 | /* Accumulate into raw time */ | ||
766 | raw_time.tv_nsec += timekeeper.raw_interval << shift; | ||
767 | while (raw_time.tv_nsec >= NSEC_PER_SEC) { | ||
768 | raw_time.tv_nsec -= NSEC_PER_SEC; | ||
769 | raw_time.tv_sec++; | ||
770 | } | ||
771 | |||
772 | /* Accumulate error between NTP and clock interval */ | ||
773 | timekeeper.ntp_error += tick_length << shift; | ||
774 | timekeeper.ntp_error -= timekeeper.xtime_interval << | ||
775 | (timekeeper.ntp_error_shift + shift); | ||
776 | |||
777 | return offset; | ||
778 | } | ||
779 | |||
780 | |||
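A stripped-down user-space version of the accumulation step makes the 2^shift chunking visible; the state is reduced to the fields the loop actually touches, with no NTP error or raw-time bookkeeping:

        #include <stdint.h>

        struct tk {
                uint64_t cycle_interval;   /* clocksource cycles per tick */
                uint64_t xtime_interval;   /* shifted nanoseconds per tick */
                uint64_t xtime_nsec;       /* shifted-ns accumulator */
                uint64_t sec;              /* whole seconds of xtime */
                int shift;                 /* clocksource shift */
        };

        static uint64_t log_accumulate(struct tk *tk, uint64_t offset, int shift)
        {
                uint64_t nsecps = UINT64_C(1000000000) << tk->shift;

                /* decline a chunk that does not fit into the backlog */
                if (offset < tk->cycle_interval << shift)
                        return offset;

                /* consume 2^shift ticks at once */
                offset -= tk->cycle_interval << shift;
                tk->xtime_nsec += tk->xtime_interval << shift;
                while (tk->xtime_nsec >= nsecps) {      /* roll seconds over */
                        tk->xtime_nsec -= nsecps;
                        tk->sec++;
                }
                return offset;
        }

        int main(void)
        {
                /* 1e6-cycle tick, 1 ns per cycle, clocksource shift of 8 */
                struct tk tk = { 1000000, UINT64_C(1000000) << 8, 0, 0, 8 };
                uint64_t left = log_accumulate(&tk, 6500000, 2); /* 4 ticks */

                return (int)(left != 2500000);          /* sanity check */
        }

The point of the shift parameter is that a cpu waking from a long NO_HZ sleep may owe thousands of ticks; consuming them in halving power-of-two chunks needs only O(log n) iterations instead of one loop pass per tick.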
725 | /** | 781 | /** |
726 | * update_wall_time - Uses the current clocksource to increment the wall time | 782 | * update_wall_time - Uses the current clocksource to increment the wall time |
727 | * | 783 | * |
@@ -732,6 +788,7 @@ void update_wall_time(void) | |||
732 | struct clocksource *clock; | 788 | struct clocksource *clock; |
733 | cycle_t offset; | 789 | cycle_t offset; |
734 | u64 nsecs; | 790 | u64 nsecs; |
791 | int shift = 0, maxshift; | ||
735 | 792 | ||
736 | /* Make sure we're fully resumed: */ | 793 | /* Make sure we're fully resumed: */ |
737 | if (unlikely(timekeeping_suspended)) | 794 | if (unlikely(timekeeping_suspended)) |
@@ -745,33 +802,22 @@ void update_wall_time(void) | |||
745 | #endif | 802 | #endif |
746 | timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift; | 803 | timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift; |
747 | 804 | ||
748 | /* normally this loop will run just once, however in the | 805 | /* |
749 | * case of lost or late ticks, it will accumulate correctly. | 806 | * With NO_HZ we may have to accumulate many cycle_intervals |
807 | * (think "ticks") worth of time at once. To do this efficiently, | ||
808 | * we calculate the largest doubling multiple of cycle_intervals | ||
809 | * that is smaller than the offset. We then accumulate that | ||
810 | * chunk in one go, and then try to consume the next smaller | ||
811 | * doubled multiple. | ||
750 | */ | 812 | */ |
813 | shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); | ||
814 | shift = max(0, shift); | ||
815 | /* Bound shift to one less than what overflows tick_length */ | ||
816 | maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1; | ||
817 | shift = min(shift, maxshift); | ||
751 | while (offset >= timekeeper.cycle_interval) { | 818 | while (offset >= timekeeper.cycle_interval) { |
752 | u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; | 819 | offset = logarithmic_accumulation(offset, shift); |
753 | 820 | shift--; | |
754 | /* accumulate one interval */ | ||
755 | offset -= timekeeper.cycle_interval; | ||
756 | clock->cycle_last += timekeeper.cycle_interval; | ||
757 | |||
758 | timekeeper.xtime_nsec += timekeeper.xtime_interval; | ||
759 | if (timekeeper.xtime_nsec >= nsecps) { | ||
760 | timekeeper.xtime_nsec -= nsecps; | ||
761 | xtime.tv_sec++; | ||
762 | second_overflow(); | ||
763 | } | ||
764 | |||
765 | raw_time.tv_nsec += timekeeper.raw_interval; | ||
766 | if (raw_time.tv_nsec >= NSEC_PER_SEC) { | ||
767 | raw_time.tv_nsec -= NSEC_PER_SEC; | ||
768 | raw_time.tv_sec++; | ||
769 | } | ||
770 | |||
771 | /* accumulate error between NTP and clock interval */ | ||
772 | timekeeper.ntp_error += tick_length; | ||
773 | timekeeper.ntp_error -= timekeeper.xtime_interval << | ||
774 | timekeeper.ntp_error_shift; | ||
775 | } | 821 | } |
776 | 822 | ||
777 | /* correct the clock when NTP error is too big */ | 823 | /* correct the clock when NTP error is too big */ |
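Worked numbers for the chunk-size selection above, driven the same way update_wall_time() drives logarithmic_accumulation(); the 1,000,000-cycle tick and 6.5-tick backlog are invented:

        #include <stdint.h>
        #include <stdio.h>

        static int ilog2_u64(uint64_t v)        /* position of the top set bit */
        {
                int l = -1;
                while (v) { v >>= 1; l++; }
                return l;
        }

        int main(void)
        {
                uint64_t cycle_interval = 1000000;   /* cycles per tick */
                uint64_t offset = 6500000;           /* 6.5 ticks of backlog */
                int shift = ilog2_u64(offset) - ilog2_u64(cycle_interval);

                if (shift < 0)
                        shift = 0;
                while (offset >= cycle_interval) {
                        uint64_t chunk = cycle_interval << shift;

                        /* an oversized first chunk is simply declined, just as
                         * the guard inside logarithmic_accumulation() does */
                        if (offset >= chunk) {
                                offset -= chunk;
                                printf("consumed %d tick(s), %llu cycles left\n",
                                       1 << shift, (unsigned long long)offset);
                        }
                        if (shift > 0)
                                shift--;
                }
                return 0;
        }

Here shift starts at 3 (an 8-tick chunk, declined), then 4 ticks and 2 ticks are consumed, leaving half a tick of remainder for the next call. The maxshift clamp in the hunk plays a different role: it keeps tick_length << shift from overflowing 64 bits.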
@@ -811,7 +857,7 @@ void update_wall_time(void) | |||
811 | update_xtime_cache(nsecs); | 857 | update_xtime_cache(nsecs); |
812 | 858 | ||
813 | /* check to see if there is a new clocksource to use */ | 859 | /* check to see if there is a new clocksource to use */ |
814 | update_vsyscall(&xtime, timekeeper.clock); | 860 | update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); |
815 | } | 861 | } |
816 | 862 | ||
817 | /** | 863 | /** |
@@ -834,6 +880,7 @@ void getboottime(struct timespec *ts) | |||
834 | 880 | ||
835 | set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); | 881 | set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); |
836 | } | 882 | } |
883 | EXPORT_SYMBOL_GPL(getboottime); | ||
837 | 884 | ||
838 | /** | 885 | /** |
839 | * monotonic_to_bootbased - Convert the monotonic time to boot based. | 886 | * monotonic_to_bootbased - Convert the monotonic time to boot based. |
@@ -843,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts) | |||
843 | { | 890 | { |
844 | *ts = timespec_add_safe(*ts, total_sleep_time); | 891 | *ts = timespec_add_safe(*ts, total_sleep_time); |
845 | } | 892 | } |
893 | EXPORT_SYMBOL_GPL(monotonic_to_bootbased); | ||
846 | 894 | ||
847 | unsigned long get_seconds(void) | 895 | unsigned long get_seconds(void) |
848 | { | 896 | { |
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 1b5b7aa2fdfd..bdfb8dd1050c 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base, | |||
84 | 84 | ||
85 | next_one: | 85 | next_one: |
86 | i = 0; | 86 | i = 0; |
87 | spin_lock_irqsave(&base->cpu_base->lock, flags); | 87 | raw_spin_lock_irqsave(&base->cpu_base->lock, flags); |
88 | 88 | ||
89 | curr = base->first; | 89 | curr = base->first; |
90 | /* | 90 | /* |
@@ -100,13 +100,13 @@ next_one: | |||
100 | 100 | ||
101 | timer = rb_entry(curr, struct hrtimer, node); | 101 | timer = rb_entry(curr, struct hrtimer, node); |
102 | tmp = *timer; | 102 | tmp = *timer; |
103 | spin_unlock_irqrestore(&base->cpu_base->lock, flags); | 103 | raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); |
104 | 104 | ||
105 | print_timer(m, timer, &tmp, i, now); | 105 | print_timer(m, timer, &tmp, i, now); |
106 | next++; | 106 | next++; |
107 | goto next_one; | 107 | goto next_one; |
108 | } | 108 | } |
109 | spin_unlock_irqrestore(&base->cpu_base->lock, flags); | 109 | raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); |
110 | } | 110 | } |
111 | 111 | ||
112 | static void | 112 | static void |
@@ -150,6 +150,9 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now) | |||
150 | P_ns(expires_next); | 150 | P_ns(expires_next); |
151 | P(hres_active); | 151 | P(hres_active); |
152 | P(nr_events); | 152 | P(nr_events); |
153 | P(nr_retries); | ||
154 | P(nr_hangs); | ||
155 | P_ns(max_hang_time); | ||
153 | #endif | 156 | #endif |
154 | #undef P | 157 | #undef P |
155 | #undef P_ns | 158 | #undef P_ns |
@@ -204,10 +207,12 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) | |||
204 | return; | 207 | return; |
205 | } | 208 | } |
206 | SEQ_printf(m, "%s\n", dev->name); | 209 | SEQ_printf(m, "%s\n", dev->name); |
207 | SEQ_printf(m, " max_delta_ns: %lu\n", dev->max_delta_ns); | 210 | SEQ_printf(m, " max_delta_ns: %llu\n", |
208 | SEQ_printf(m, " min_delta_ns: %lu\n", dev->min_delta_ns); | 211 | (unsigned long long) dev->max_delta_ns); |
209 | SEQ_printf(m, " mult: %lu\n", dev->mult); | 212 | SEQ_printf(m, " min_delta_ns: %llu\n", |
210 | SEQ_printf(m, " shift: %d\n", dev->shift); | 213 | (unsigned long long) dev->min_delta_ns); |
214 | SEQ_printf(m, " mult: %u\n", dev->mult); | ||
215 | SEQ_printf(m, " shift: %u\n", dev->shift); | ||
211 | SEQ_printf(m, " mode: %d\n", dev->mode); | 216 | SEQ_printf(m, " mode: %d\n", dev->mode); |
212 | SEQ_printf(m, " next_event: %Ld nsecs\n", | 217 | SEQ_printf(m, " next_event: %Ld nsecs\n", |
213 | (unsigned long long) ktime_to_ns(dev->next_event)); | 218 | (unsigned long long) ktime_to_ns(dev->next_event)); |
@@ -232,10 +237,10 @@ static void timer_list_show_tickdevices(struct seq_file *m) | |||
232 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 237 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
233 | print_tickdevice(m, tick_get_broadcast_device(), -1); | 238 | print_tickdevice(m, tick_get_broadcast_device(), -1); |
234 | SEQ_printf(m, "tick_broadcast_mask: %08lx\n", | 239 | SEQ_printf(m, "tick_broadcast_mask: %08lx\n", |
235 | tick_get_broadcast_mask()->bits[0]); | 240 | cpumask_bits(tick_get_broadcast_mask())[0]); |
236 | #ifdef CONFIG_TICK_ONESHOT | 241 | #ifdef CONFIG_TICK_ONESHOT |
237 | SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n", | 242 | SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n", |
238 | tick_get_broadcast_oneshot_mask()->bits[0]); | 243 | cpumask_bits(tick_get_broadcast_oneshot_mask())[0]); |
239 | #endif | 244 | #endif |
240 | SEQ_printf(m, "\n"); | 245 | SEQ_printf(m, "\n"); |
241 | #endif | 246 | #endif |
@@ -252,7 +257,7 @@ static int timer_list_show(struct seq_file *m, void *v) | |||
252 | u64 now = ktime_to_ns(ktime_get()); | 257 | u64 now = ktime_to_ns(ktime_get()); |
253 | int cpu; | 258 | int cpu; |
254 | 259 | ||
255 | SEQ_printf(m, "Timer List Version: v0.4\n"); | 260 | SEQ_printf(m, "Timer List Version: v0.5\n"); |
256 | SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); | 261 | SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); |
257 | SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); | 262 | SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); |
258 | 263 | ||
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index ee5681f8d7ec..2f3b585b8d7d 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock); | |||
86 | /* | 86 | /* |
87 | * Per-CPU lookup locks for fast hash lookup: | 87 | * Per-CPU lookup locks for fast hash lookup: |
88 | */ | 88 | */ |
89 | static DEFINE_PER_CPU(spinlock_t, lookup_lock); | 89 | static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock); |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * Mutex to serialize state changes with show-stats activities: | 92 | * Mutex to serialize state changes with show-stats activities: |
@@ -238,14 +238,14 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | |||
238 | /* | 238 | /* |
239 | * It doesnt matter which lock we take: | 239 | * It doesnt matter which lock we take: |
240 | */ | 240 | */ |
241 | spinlock_t *lock; | 241 | raw_spinlock_t *lock; |
242 | struct entry *entry, input; | 242 | struct entry *entry, input; |
243 | unsigned long flags; | 243 | unsigned long flags; |
244 | 244 | ||
245 | if (likely(!timer_stats_active)) | 245 | if (likely(!timer_stats_active)) |
246 | return; | 246 | return; |
247 | 247 | ||
248 | lock = &per_cpu(lookup_lock, raw_smp_processor_id()); | 248 | lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id()); |
249 | 249 | ||
250 | input.timer = timer; | 250 | input.timer = timer; |
251 | input.start_func = startf; | 251 | input.start_func = startf; |
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | |||
253 | input.pid = pid; | 253 | input.pid = pid; |
254 | input.timer_flag = timer_flag; | 254 | input.timer_flag = timer_flag; |
255 | 255 | ||
256 | spin_lock_irqsave(lock, flags); | 256 | raw_spin_lock_irqsave(lock, flags); |
257 | if (!timer_stats_active) | 257 | if (!timer_stats_active) |
258 | goto out_unlock; | 258 | goto out_unlock; |
259 | 259 | ||
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | |||
264 | atomic_inc(&overflow_count); | 264 | atomic_inc(&overflow_count); |
265 | 265 | ||
266 | out_unlock: | 266 | out_unlock: |
267 | spin_unlock_irqrestore(lock, flags); | 267 | raw_spin_unlock_irqrestore(lock, flags); |
268 | } | 268 | } |
269 | 269 | ||
270 | static void print_name_offset(struct seq_file *m, unsigned long addr) | 270 | static void print_name_offset(struct seq_file *m, unsigned long addr) |
@@ -348,9 +348,11 @@ static void sync_access(void) | |||
348 | int cpu; | 348 | int cpu; |
349 | 349 | ||
350 | for_each_online_cpu(cpu) { | 350 | for_each_online_cpu(cpu) { |
351 | spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags); | 351 | raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); |
352 | |||
353 | raw_spin_lock_irqsave(lock, flags); | ||
352 | /* nothing */ | 354 | /* nothing */ |
353 | spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags); | 355 | raw_spin_unlock_irqrestore(lock, flags); |
354 | } | 356 | } |
355 | } | 357 | } |
356 | 358 | ||
@@ -408,7 +410,7 @@ void __init init_timer_stats(void) | |||
408 | int cpu; | 410 | int cpu; |
409 | 411 | ||
410 | for_each_possible_cpu(cpu) | 412 | for_each_possible_cpu(cpu) |
411 | spin_lock_init(&per_cpu(lookup_lock, cpu)); | 413 | raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); |
412 | } | 414 | } |
413 | 415 | ||
414 | static int __init init_tstats_procfs(void) | 416 | static int __init init_tstats_procfs(void) |